"""Tests for functions in preprocess.py."""
import puget.preprocess as pp
import puget
import os
import os.path as op
import pandas as pd
import pandas.util.testing as pdt
import numpy as np
import tempfile
import json
from numpy.testing import assert_equal
import pytest
def test_std_path_setup():
filename = 'test'
data_dir = 'data'
# test with one year
paths = ['test_2012']
file_spec = pp.std_path_setup(filename, data_dir, paths)
test_dict = {'test_2012': op.join(data_dir, 'test_2012', filename)}
assert_equal(file_spec, test_dict)
# test with limited years
paths = ['test_2012', 'test_2013']
file_spec = pp.std_path_setup(filename, data_dir, paths)
test_dict = {'test_2012': op.join(data_dir, 'test_2012', filename),
'test_2013': op.join(data_dir, 'test_2013', filename)}
assert_equal(file_spec, test_dict)
# test with all years
paths = ['test_2011', 'test_2012', 'test_2013', 'test_2014']
file_spec = pp.std_path_setup(filename, data_dir, paths)
test_dict = {'test_2011': op.join(data_dir, 'test_2011', filename),
'test_2012': op.join(data_dir, 'test_2012', filename),
'test_2013': op.join(data_dir, 'test_2013', filename),
'test_2014': op.join(data_dir, 'test_2014', filename)}
assert_equal(file_spec, test_dict)
def test_read_table():
"""Test read_table function."""
# create temporary csv file
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
df = pd.DataFrame({'id': [1, 1, 2, 2],
'time1': ['2001-01-13', '2004-05-21', '2003-06-10',
'2003-06-10'], 'drop1': [2, 3, 4, 5],
'ig_dedup1': [5, 6, 7, 8], 'categ1': [0, 8, 0, 0]})
df.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
file_spec = {'2011': temp_csv_file.name}
df = pp.read_table(file_spec, data_dir=None, paths=None,
columns_to_drop=['drop1'], categorical_var=['categ1'],
time_var=['time1'],
duplicate_check_columns=['id', 'time1', 'categ1'])
df_test = pd.DataFrame({'id': [1, 1, 2],
'time1':
pd.to_datetime(['2001-01-13', '2004-05-21',
'2003-06-10'], errors='coerce'),
'ig_dedup1': [5, 6, 8], 'categ1': [0, np.nan, 0]})
# Have to change the index to match the one we de-duplicated
df_test.index = pd.Int64Index([0, 1, 3])
pdt.assert_frame_equal(df, df_test)
# test passing a string filename with data_dir and path
path, fname = op.split(temp_csv_file.name)
path0, path1 = op.split(path)
df = pp.read_table(fname, data_dir=path0, paths=[path1],
columns_to_drop=['drop1'], categorical_var=['categ1'],
time_var=['time1'],
duplicate_check_columns=['id', 'time1', 'categ1'])
temp_csv_file.close()
# test error checking
with pytest.raises(ValueError):
pp.read_table(file_spec,
data_dir=op.join(pp.DATA_PATH, 'king'))
# test error checking
with pytest.raises(ValueError):
pp.read_table('test', data_dir=None, paths=None)
def test_read_entry_exit():
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
df_init = pd.DataFrame({'id': [11, 11, 12],
'stage': [0, 1, 0], 'value': [0, 1, 0]})
df_init.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['id', 'stage', 'value'],
'categorical_var': ['value'],
'collection_stage_column': 'stage', 'entry_stage_val': 0,
'exit_stage_val': 1, 'update_stage_val': 2,
'annual_assessment_stage_val': 5, 'post_exit_stage_val': 6,
'person_enrollment_ID': 'id'}
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
df = pp.read_entry_exit_table(file_spec=file_spec, data_dir=None,
paths=None,
metadata=temp_meta_file.name)
# make sure values are floats
df_test = pd.DataFrame({'id': [11, 12], 'value_entry': [0, 0],
'value_exit': [1, np.NaN]})
# sort because column order is not assured when starting from dicts
df = df.sort_index(axis=1)
df_test = df_test.sort_index(axis=1)
pdt.assert_frame_equal(df, df_test)
# test error checking
temp_meta_file2 = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['id', 'stage', 'value'],
'categorical_var': ['value']}
metadata_json = json.dumps(metadata)
temp_meta_file2.file.write(metadata_json)
temp_meta_file2.seek(0)
with pytest.raises(ValueError):
pp.read_entry_exit_table(file_spec=file_spec,
metadata=temp_meta_file2.name)
temp_csv_file.close()
temp_meta_file.close()
temp_meta_file2.close()
def test_get_enrollment():
"""Test get_enrollment function."""
# create temporary csv file & metadata file to read in
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
df = pd.DataFrame({'id': [1, 1, 2, 2],
'time1': ['2001-01-13', '2004-05-21', '2003-06-10',
'2003-06-10'], 'drop1': [2, 3, 4, 5],
'ig_dedup1': [5, 6, 7, 8], 'categ1': [0, 8, 0, 0]})
df.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
metadata = ({'name': 'test',
'person_enrollment_ID': 'id',
'person_ID': 'id',
'program_ID': 'id',
'duplicate_check_columns': ['id', 'time1', 'categ1'],
'columns_to_drop': ['drop1'],
'categorical_var': ['categ1'], 'time_var': ['time1'],
'groupID_column': 'id',
'entry_date': 'time1'
})
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
# first try with groups=True (default)
df = pp.get_enrollment(file_spec=file_spec, data_dir=None, paths=None,
metadata_file=temp_meta_file.name)
df_test = pd.DataFrame({'id': [1, 1], 'time1':
pd.to_datetime(['2001-01-13', '2004-05-21'],
errors='coerce'), 'ig_dedup1': [5, 6],
'categ1': [0, np.nan]})
pdt.assert_frame_equal(df, df_test)
# try again with groups=False
df = pp.get_enrollment(groups=False, file_spec=file_spec, data_dir=None,
paths=None, metadata_file=temp_meta_file.name)
df_test = pd.DataFrame({'id': [1, 1, 2],
'time1':
pd.to_datetime(['2001-01-13', '2004-05-21',
'2003-06-10'], errors='coerce'),
'ig_dedup1': [5, 6, 8],
'categ1': [0, np.nan, 0]})
# Have to change the index to match the one we de-duplicated
df_test.index = pd.Int64Index([0, 1, 3])
pdt.assert_frame_equal(df, df_test)
temp_csv_file.close()
temp_meta_file.close()
def test_get_exit():
"""test get_exit function."""
# create temporary csv file & metadata file to read in
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
dest_rand_ints = np.random.random_integers(1, 30, 3)
df_init = pd.DataFrame({'id': [11, 12, 13], 'dest': dest_rand_ints})
df_init.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
metadata = ({'name': 'test', 'duplicate_check_columns': ['id'],
"destination_column": 'dest', 'person_enrollment_ID': ['id']})
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
df = pp.get_exit(file_spec=file_spec, data_dir=None, paths=None,
metadata_file=temp_meta_file.name)
mapping_table = pd.read_csv(op.join(puget.data.DATA_PATH, 'metadata',
'destination_mappings.csv'))
map_table_test_ints = [2, 25, 26]
map_table_test = pd.DataFrame({'Standard': np.array(['New Standards']*3),
'DestinationNumeric': np.array(
map_table_test_ints).astype(float),
'DestinationDescription': ['Transitional housing for homeless persons (including homeless youth)',
'Long-term care facility or nursing home',
'Moved from one HOPWA funded project to HOPWA PH'],
'DestinationGroup': ['Temporary',
'Permanent',
'Permanent'],
'DestinationSuccess': ['Other Exit',
'Successful Exit',
'Successful Exit'],
'Subsidy': ['No', 'No', 'Yes']})
map_table_subset = mapping_table[mapping_table['DestinationNumeric'] ==
map_table_test_ints[0]]
map_table_subset = map_table_subset.append(mapping_table[
mapping_table['DestinationNumeric'] == map_table_test_ints[1]])
map_table_subset = map_table_subset.append(mapping_table[
mapping_table['DestinationNumeric'] == map_table_test_ints[2]])
# Have to change the index to match the one we made up
map_table_subset.index = pd.Int64Index([0, 1, 2])
# sort because column order is not assured when starting from dicts
map_table_test = map_table_test.sort_index(axis=1)
map_table_subset = map_table_subset.sort_index(axis=1)
pdt.assert_frame_equal(map_table_subset, map_table_test)
mapping_table = mapping_table[mapping_table.Standard == 'New Standards']
mapping_table['Subsidy'] = mapping_table['Subsidy'].map({'Yes': True,
'No': False})
mapping_table = mapping_table.drop(['Standard'], axis=1)
df_test = pd.DataFrame({'id': [11, 12, 13],
'dest': dest_rand_ints})
df_test = pd.merge(left=df_test, right=mapping_table, how='left',
left_on='dest', right_on='DestinationNumeric')
df_test = df_test.drop('dest', axis=1)
pdt.assert_frame_equal(df, df_test)
temp_csv_file.close()
temp_meta_file.close()
def test_get_client():
# create temporary csv files & metadata file to read in
temp_csv_file1 = tempfile.NamedTemporaryFile(mode='w')
temp_csv_file2 = tempfile.NamedTemporaryFile(mode='w')
df_init = pd.DataFrame({'id': [11, 12, 13, 15, 16, 17],
'first_name':['AAA', 'BBB', 'CCC',
'EEE', 'FFF', 'noname'],
'dob_col': ['1990-01-13', '2012-05-21',
'1850-06-14', '1965-11-22',
'1948-09-03', '2012-03-18'],
'time_col': ['1996-01-13', '2014-05-21',
'1950-06-14', '1985-11-22',
'1978-09-03', '2014-03-18'],
'bool_col': [1, 99, 1, 8, 0, 1],
'numeric': [99, 3, 6, 0, 8, np.NaN]})
df2_init = pd.DataFrame({'id': [11, 12, 13, 14, 15, 16, 17, 18],
'first_name':['AAA', 'BBB', 'CCC', 'DDD',
'EEE', 'FFF', 'noname', 'HHH'],
'dob_col': ['1990-01-15', '2012-05-21',
'1850-06-14', '1975-12-08',
'1967-11-22', pd.NaT, '2010-03-18',
'2014-04-30'],
'time_col': ['1996-01-15', '2014-05-21',
'1950-06-14', '1995-12-08',
'1987-11-22', pd.NaT, '2012-03-18',
'2015-04-30'],
'bool_col': [0, 0, 1, 0, 8, 0, np.NaN, 1],
'numeric': [5, 3, 7, 1, 0, 8, 6, 0]})
df_init.to_csv(temp_csv_file1, index=False)
temp_csv_file1.seek(0)
df2_init.to_csv(temp_csv_file2, index=False)
temp_csv_file2.seek(0)
file_spec = {'2011': temp_csv_file1.name, '2012': temp_csv_file2.name}
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
metadata = ({'name': 'test', 'person_ID': 'id',
'duplicate_check_columns': ['id', 'dob_col', 'first_name'],
'columns_to_drop': [],
'categorical_var': ['bool_col', 'numeric'],
'time_var': ['dob_col', 'time_col'],
'boolean': ['bool_col'], 'numeric_code': ['numeric'],
'dob_column': 'dob_col',
'name_columns': ["first_name"]})
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
for name_exclusion in [False, True]:
# get path & filenames
df = pp.get_client(file_spec=file_spec, data_dir=None, paths=None,
metadata_file=temp_meta_file.name,
name_exclusion=name_exclusion)
df_test = pd.DataFrame({'id': [11, 11, 12, 13, 14, 15, 15, 16, 16, 17, 17,
18],
'first_name':['AAA', 'AAA', 'BBB', 'CCC', 'DDD',
'EEE', 'EEE', 'FFF', 'FFF', 'noname', 'noname',
'HHH'],
'dob_col': pd.to_datetime(['1990-01-13',
'1990-01-15',
'2012-05-21',
'1850-06-14',
'1975-12-08',
'1965-11-22',
'1967-11-22',
'1948-09-03', pd.NaT,
'2012-03-18',
'2010-03-18',
'2014-04-30']),
'time_col': pd.to_datetime(['1996-01-14',
'1996-01-14',
'2014-05-21',
'1950-06-14',
'1995-12-08', pd.NaT,
pd.NaT, '1978-09-03',
'1978-09-03', pd.NaT,
pd.NaT, '2015-04-30']),
'bool_col': [1, 1, 0, 1, 0, np.NaN, np.NaN, 0, 0,
1, 1, 1],
'numeric': [5, 5, 3, np.NaN, 1, 0, 0, np.NaN,
np.NaN, 6, 6, 0]})
if name_exclusion:
df_test = df_test[~(df_test['first_name'] == 'noname')]
# Have to sort & change the indexes to match
df = df.sort_values(by=['id', 'dob_col'])
df = df.reset_index(drop=True)
df_test = df_test.sort_values(by=['id', 'dob_col'])
df_test = df_test.reset_index(drop=True)
print(df.dtypes)
print(df_test.dtypes)
pdt.assert_frame_equal(df, df_test)
# test error checking
temp_meta_file2 = tempfile.NamedTemporaryFile(mode='w')
metadata = ({'name': 'test',
'duplicate_check_columns': ['id', 'dob_col'],
'categorical_var': ['bool_col', 'numeric'],
'time_var': ['time_col'],
'boolean': ['bool_col'], 'numeric_code': ['numeric'],
'dob_column': 'dob_col'})
metadata_json = json.dumps(metadata)
temp_meta_file2.file.write(metadata_json)
temp_meta_file2.seek(0)
with pytest.raises(ValueError):
pp.get_client(file_spec=file_spec,
data_dir=None,
paths=None,
metadata_file=temp_meta_file2.name)
temp_csv_file1.close()
temp_csv_file2.close()
temp_meta_file.close()
def test_get_disabilities():
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
df_init = pd.DataFrame({'pid': [11, 11, 11, 11, 12, 12, 12, 12],
'stage': [10, 10, 20, 20, 10, 10, 20, 20],
'type': [5, 6, 5, 6, 5, 6, 5, 6],
'response': [0, 1, 0, 1, 99, 0, 0, 1]})
df_init.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['pid', 'stage', 'type'],
'columns_to_drop': [],
'categorical_var': ['response'],
'collection_stage_column': 'stage', 'entry_stage_val': 10,
'exit_stage_val': 20, 'update_stage_val': 30,
'annual_assessment_stage_val': 5, 'post_exit_stage_val': 6,
'type_column': 'type', 'response_column': 'response',
'person_enrollment_ID': 'pid'}
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
df = pp.get_disabilities(file_spec=file_spec, data_dir=None, paths=None,
metadata_file=temp_meta_file.name)
type_dict = {5: 'Physical', 6: 'Developmental', 7: 'ChronicHealth',
8: 'HIVAIDS', 9: 'MentalHealth', 10: 'SubstanceAbuse'}
# make sure values are floats
df_test = pd.DataFrame({'pid': [11, 12], 'Physical_entry': [0, np.NaN],
'Physical_exit': [0.0, 0.0],
'Developmental_entry': [1.0, 0.0],
'Developmental_exit': [1.0, 1.0]})
# sort because column order is not assured when starting from dicts
df = df.sort_index(axis=1)
df_test = df_test.sort_index(axis=1)
pdt.assert_frame_equal(df, df_test)
# test error checking
temp_meta_file2 = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['pid', 'stage', 'type'],
'categorical_var': ['response']}
metadata_json = json.dumps(metadata)
temp_meta_file2.file.write(metadata_json)
temp_meta_file2.seek(0)
with pytest.raises(ValueError):
pp.get_disabilities(file_spec=file_spec,
data_dir=None, paths=None,
metadata_file=temp_meta_file2.name)
temp_csv_file.close()
temp_meta_file.close()
temp_meta_file2.close()
def test_get_employment_education():
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
df_init = pd.DataFrame({'id': [11, 11, 12],
'stage': [0, 1, 0], 'value': [0, 1, 0]})
df_init.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['id', 'stage', 'value'],
'columns_to_drop': [],
'categorical_var': ['value'],
'collection_stage_column': 'stage', 'entry_stage_val': 0,
'exit_stage_val': 1, 'update_stage_val': 2,
'annual_assessment_stage_val': 5, 'post_exit_stage_val': 6,
'person_enrollment_ID': 'id'}
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
df = pp.get_employment_education(file_spec=file_spec, data_dir=None,
paths=None,
metadata_file=temp_meta_file.name)
# make sure values are floats
df_test = pd.DataFrame({'id': [11, 12], 'value_entry': [0, 0],
'value_exit': [1, np.NaN]})
# sort because column order is not assured when starting from dicts
df = df.sort_index(axis=1)
df_test = df_test.sort_index(axis=1)
pdt.assert_frame_equal(df, df_test)
temp_csv_file.close()
temp_meta_file.close()
def test_get_health_dv():
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
df_init = pd.DataFrame({'id': [11, 11, 12],
'stage': [0, 1, 0], 'value': [0, 1, 0]})
df_init.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['id', 'stage', 'value'],
'columns_to_drop': [],
'categorical_var': ['value'],
'collection_stage_column': 'stage', 'entry_stage_val': 0,
'exit_stage_val': 1, 'update_stage_val': 2,
'annual_assessment_stage_val': 5, 'post_exit_stage_val': 6,
'person_enrollment_ID': 'id'}
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
df = pp.get_health_dv(file_spec=file_spec, data_dir=None, paths=None,
metadata_file=temp_meta_file.name)
# make sure values are floats
df_test = pd.DataFrame({'id': [11, 12], 'value_entry': [0, 0],
'value_exit': [1, np.NaN]})
# sort because column order is not assured when starting from dicts
df = df.sort_index(axis=1)
df_test = df_test.sort_index(axis=1)
pdt.assert_frame_equal(df, df_test)
temp_csv_file.close()
temp_meta_file.close()
def test_get_income():
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
df_init = pd.DataFrame({'pid': [11, 11, 11, 12, 12, 12, 12],
'stage': [0, 0, 1, 0, 0, 1, 1],
'income': [1, 1, 1, 0, 1, np.NaN, 1],
'incomeAmount': [5, 8, 12, 0, 6, 0, 3]})
df_init.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['pid', 'stage', 'income',
'incomeAmount'],
'columns_to_drop': [],
'categorical_var': ['income'],
'collection_stage_column': 'stage', 'entry_stage_val': 0,
'exit_stage_val': 1, 'update_stage_val': 2,
'annual_assessment_stage_val': 5, 'post_exit_stage_val': 6,
'person_enrollment_ID': 'pid',
'columns_to_take_max': ['income', 'incomeAmount']}
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
df = pp.get_income(file_spec=file_spec, data_dir=None, paths=None,
metadata_file=temp_meta_file.name)
df_test = pd.DataFrame({'pid': [11, 12],
'income_entry': [1.0, 1.0],
'income_exit': [1.0, 1.0],
'incomeAmount_entry': [8, 6],
'incomeAmount_exit': [12, 3]})
# Have to change the index to match the one we de-duplicated
df_test.index = pd.Int64Index([0, 2])
# sort because column order is not assured when starting from dicts
df = df.sort_index(axis=1)
df_test = df_test.sort_index(axis=1)
pdt.assert_frame_equal(df, df_test)
# test error checking
temp_meta_file2 = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test',
'duplicate_check_columns': ['pid', 'stage', 'type'],
'categorical_var': ['response']}
metadata_json = json.dumps(metadata)
temp_meta_file2.file.write(metadata_json)
temp_meta_file2.seek(0)
with pytest.raises(ValueError):
pp.get_income(file_spec=file_spec,
data_dir=None, paths=None,
metadata_file=temp_meta_file2.name)
temp_csv_file.close()
temp_meta_file.close()
temp_meta_file2.close()
def test_get_project():
temp_csv_file = tempfile.NamedTemporaryFile(mode='w')
df_init = pd.DataFrame({'pid': [3, 4], 'name': ['shelter1', 'rrh2'],
'ProjectType': [1, 13]})
df_init.to_csv(temp_csv_file, index=False)
temp_csv_file.seek(0)
temp_meta_file = tempfile.NamedTemporaryFile(mode='w')
metadata = {'name': 'test', 'program_ID': 'pid',
'duplicate_check_columns': ['pid', 'name', 'ProjectType'],
'columns_to_drop': [],
'project_type_column': 'ProjectType'}
metadata_json = json.dumps(metadata)
temp_meta_file.file.write(metadata_json)
temp_meta_file.seek(0)
file_spec = {2011: temp_csv_file.name}
df = pp.get_project(file_spec=file_spec, data_dir=None, paths=None,
metadata_file=temp_meta_file.name)
df_test = pd.DataFrame({'pid': [3, 4], 'name': ['shelter1', 'rrh2'],
'ProjectNumeric': [1, 13],
'ProjectType': ['Emergency Shelter',
'PH - Rapid Re-Housing']})
# sort because column order is not assured when starting from dicts
df = df.sort_index(axis=1)
df_test = df_test.sort_index(axis=1)
pdt.assert_frame_equal(df, df_test)
import numpy as np
import pandas as pd
from scripts.utils import remove_outliers, linear_regression
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.preprocessing import normalize
#from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import torch
lan = "all"
if lan == "es":
language = "Spanish"
elif lan == "fr":
language = "French"
elif lan == "all":
language = "French & Spanish"
# Load time taken to translate and calculate sentence length
if lan == "all":
es = pd.read_csv("data/un-timed-sentences/en-es.processed", sep='\t')
fr = pd.read_csv("data/un-timed-sentences/en-fr.processed", sep='\t')
wpd = pd.concat([es,fr], axis=0, ignore_index=True).drop_duplicates()
else:
wpd = pd.read_csv("data/un-timed-sentences/en-"+lan+".processed", sep='\t').drop_duplicates()
words=[]
for i in wpd.index:
words.append(len(wpd['Segment'][i].split()))
wpd["words"] = words
# Filter empty sentences (with only serial number)
time = wpd.loc[~wpd['Segment'].str.contains(r"^\s*\S*[0-9]\S*\s*$"), ['Time-to-edit', 'words']].reset_index(drop=True)
time.columns= ["time (ms)", "words"]
""" TER - words per day"""
# Load TER scores
if lan == "all":
es = pd.read_csv("data/un-timed-sentences/en-es-gs.score", header=None, sep='\t')
fr = pd.read_csv("data/un-timed-sentences/en-fr-gs.score", header=None, sep='\t')
ter = pd.concat([es,fr], axis=0, ignore_index=True)
else:
ter = pd.read_csv("data/un-timed-sentences/en-"+lan+"-gs.score", header=None, sep='\t')
ter.columns = ["score"]
# Join important columns to single dataframe
df = pd.concat([ter, time], axis=1)
# Calculate translation rate (and normalise)
#df['perms'] = df['words'] / df['time (ms)'] # words per ms
df['spw'] = (df['time (ms)'])/1000 / df['words'] # seconds per word
#df['rate'] = (df['perms'] - df['perms'].min()) / (df['perms'].max() - df['perms'].min())
# Remove perfect translations
dft = df.loc[df['score'] != 0]
# Remove outliers
dfr = remove_outliers(df, 'spw', lq=0.05, uq=0.95)
# Correlation
print(dfr.corr().round(3)['score'])
# Quantiles
def quantiles(df):
""" Output distribution of each quantile in the data set. """
q1 = df.loc[df['perms'] <= df['perms'].quantile(0.25)]
q2 = df.loc[(df['perms'] >= df['perms'].quantile(0.25)) & (df['perms'] <= df['perms'].quantile(0.50))]
q3 = df.loc[(df['perms'] >= df['perms'].quantile(0.50)) & (df['perms'] <= df['perms'].quantile(0.75))]
q4 = df.loc[df['perms'] >= df['perms'].quantile(0.75)]
q_corr={}
q_df={1:q1, 2:q2, 3:q3, 4:q4}
for q in range(1,5):
q_corr[q] = q_df[q].corr()['score']
qcor_df = pd.DataFrame.from_dict(q_corr)
qcor_df.columns=['q1', 'q2', 'q3', 'q4']
print(qcor_df.round(3))
#dfr = dfr.loc[dfr['spw'] < 8] # filter out extreme cases
dfr = dfr.loc[dfr['score'] <= 1.0]
dfr = dfr.loc[dfr['words'] <= 90]
# scatter plots
plt.scatter(dfr['spw'], dfr['score'])
plt.xlabel("seconds per word")
plt.ylabel("TER")
#plt.xlim([min(df['spw'])-0.0001, max(df['spw'])+0.0001])
#plt.scatter(q3['perms'], q3['score'])
c, m = np.polynomial.polynomial.polyfit(dfr['spw'], dfr['score'], 1)
y_pred = m*dfr['spw'] + c
residuals = dfr['score'] - y_pred
median_error = abs(residuals).median()
MAE = mean_absolute_error(dfr['score'], y_pred) # mean absolute error
plt.plot(np.unique(dfr['spw']), np.poly1d(np.polyfit(dfr['spw'], dfr['score'], 1))(np.unique(dfr['spw'])), 'k--')
x1 = np.linspace(min(dfr['spw']), max(dfr['spw']))
y1 = m*x1 + c
plt.plot(x1, y1+MAE, 'r--') # confidence intervals (bestfit +/- MAE)
plt.plot(x1, y1-MAE, 'r--')
plt.show()
plt.figure()
plt.scatter(dfr['words'], dfr['score'])
#plt.plot(np.unique(dfr['words']), np.poly1d(np.polyfit(dfr['words'], dfr['score'], 1))(np.unique(dfr['words'])), 'r--')
plt.xlabel("Sentence length (words)")
plt.ylabel("TER")
plt.title("Timed Sentences - %s" % language)
plt.show()
plt.figure()
plt.scatter(dfr['words'], dfr['time (ms)'])
plt.plot(np.unique(dfr['words']), np.poly1d(np.polyfit(dfr['words'], dfr['time (ms)'], 1))(np.unique(dfr['words'])), 'k--')
plt.xlabel("Sentence length (words)")
plt.ylabel("Time taken to translate (ms)")
plt.title("Timed Sentences - %s" % language)
#plt.show()
# Line of best fit and distance from each point to the line
c, m = np.polynomial.polynomial.polyfit(dfr['words'], dfr['time (ms)'], 1)
y_pred = m*dfr['words'] + c
residuals = dfr['time (ms)'] - y_pred
median_error = abs(residuals).median()
MAE = mean_absolute_error(dfr['time (ms)'], y_pred) # mean absolute error
x1 = np.linspace(min(dfr['words']), max(dfr['words']))
y1 = m*x1 + c
plt.plot(x1, y1+MAE, 'r--') # confidence intervals (bestfit +/- MAE)
plt.plot(x1, y1-MAE, 'r--')
plt.show()
pos_res = residuals.loc[residuals > MAE] # points above the line
neg_res = residuals.loc[residuals < -MAE] # points below the line
# Load biber dimension and select useful dimensions
if lan == "all":
es = pd.read_csv("data/un-timed-sentences/en-es-biber.en", sep='\t')
fr = pd.read_csv("data/un-timed-sentences/en-fr-biber.en", sep='\t')
biber = pd.concat([es,fr], axis=0, ignore_index=True)
else:
biber = pd.read_csv("data/un-timed-sentences/en-"+lan+"-biber.en", sep='\t')
drop_cols = biber.columns[(biber == 0).sum() > 0.75*biber.shape[0]]
biber.drop(drop_cols, axis=1, inplace=True)
pos_res_df = biber.loc[pos_res.index]
neg_res_df = biber.loc[neg_res.index]
# biber plots
fig, axs = plt.subplots(1, 3, figsize=(15,15))
fig.suptitle('Timed Sentences - %s' % language, fontsize=16)
axs[0].scatter(pos_res_df['f26.NOUN'], pos_res_df['f27.wordLength'])
axs[0].scatter(neg_res_df['f26.NOUN'], neg_res_df['f27.wordLength'])
axs[0].set_xlabel('f26.NOUN')
axs[0].set_ylabel('f27.wordLength')
axs[0].legend(['below-lobf', 'above-lobf'])
axs[1].scatter(pos_res_df['f28.ADP'], pos_res_df['f29.typeTokenRatio'])
axs[1].scatter(neg_res_df['f28.ADP'], neg_res_df['f29.typeTokenRatio'])
axs[1].set_xlabel('f28.ADP')
axs[1].set_ylabel('f29.typeTokenRatio')
axs[1].legend(['below-lobf', 'above-lobf'])
if lan=='fr':
axs[2].scatter(pos_res_df['f30.attributiveAdjectives'], pos_res_df['f57.conjuncts'])
axs[2].scatter(neg_res_df['f30.attributiveAdjectives'], neg_res_df['f57.conjuncts'])
axs[2].set_xlabel('f30.attributiveAdjectives')
axs[2].set_ylabel('f57.conjuncts')
axs[2].legend(['below-lobf', 'above-lobf'])
elif lan=='es':
axs[2].scatter(pos_res_df['f30.attributiveAdjectives'], pos_res_df['f47.nominalizations'])
axs[2].scatter(neg_res_df['f30.attributiveAdjectives'], neg_res_df['f47.nominalizations'])
axs[2].set_xlabel('f30.attributiveAdjectives')
axs[2].set_ylabel('f47.nominalizations')
axs[2].legend(['below-lobf', 'above-lobf'])
plt.show()
""" XLM / BERT - words per day / TER """
def embedding_regression(var_name='spw', model='xlm'):
# Load sentence embeddings
if model == 'xlm' or model == 'XLM':
features = pd.DataFrame(torch.load("data/un-timed-sentences/en-"+lan+"-gs-xlm-embeddings.pt").data.numpy())
elif model == 'bert' or model == 'BERT':
features = pd.read_csv("data/un-timed-sentences/bert-embeddings-timed-sentences-"+lan+"-multi.csv", header=None)
reg_df = dfr.merge(features, left_index=True, right_index=True)
print("Predicting %s using %s..." % (var_name, model))
ols, scaler, X_test, y_test = linear_regression(reg_df.drop(columns=["score", "time (ms)", "words", "spw"]), reg_df[var_name])
def embedding_classification(var_name='spw', model='xlm'):
# Load sentence embeddings
if model == 'xlm' or model == 'XLM':
features = pd.DataFrame(torch.load("data/un-timed-sentences/en-"+lan+"-gs-xlm-embeddings.pt").data.numpy())
elif model == 'bert' or model == 'BERT':
features = pd.read_csv("data/un-timed-sentences/bert-embeddings-timed-sentences-"+lan+"-multi.csv", header=None)
import pandas as pd
import numpy as np
import glob, math
# Merge monthly revenue data
val = pd.read_excel(r"./Data/營收/上市電子業_營收.xlsx")
val['年份'] = val['年月'].apply(lambda x: x[:4])
val['月份'] = val['年月'].apply(lambda x: int(x[5:]))
val['季'] = val['年月'].apply(lambda x: math.ceil(int(x[5:]) / 3))
val_season = val.groupby(['代號', '年份', '季'])['單月營收(千元)'].count().to_frame('紀錄').reset_index()
val_season = val_season[val_season['紀錄']==3]
save_list = list(zip(val_season['代號'], val_season['年份'], val_season['季']))
new_val = pd.DataFrame()
for x, y, z in save_list:
a = val['代號']==x
b = val['年份']==y
c = val['季']==z
tmp = val[(a)&(b)&(c)]
new_val = pd.concat([new_val, tmp], 0)
season = new_val.groupby(['代號', '年份', '季'])['單月營收(千元)'].sum().to_frame('單季營收(千元)').reset_index()
new_val = pd.merge(new_val, season, on=['代號', '年份', '季'], how='left')
import pandas as pd
from fairlens.metrics.correlation import distance_cn_correlation, distance_nn_correlation
from fairlens.sensitive.correlation import find_column_correlation, find_sensitive_correlations
pair_race = "race", "Ethnicity"
pair_age = "age", "Age"
pair_marital = "marital", "Family Status"
pair_gender = "gender", "Gender"
pair_nationality = "nationality", "Nationality"
def test_correlation():
col_names = ["gender", "random", "score"]
data = [
["male", 10, 60],
["female", 10, 80],
["male", 10, 60],
["female", 10, 80],
["male", 9, 59],
["female", 11, 80],
["male", 12, 61],
["female", 10, 83],
]
df = pd.DataFrame(data, columns=col_names)
res = {"score": [pair_gender]}
assert find_sensitive_correlations(df) == res
def test_double_correlation():
col_names = ["gender", "nationality", "random", "corr1", "corr2"]
data = [
["woman", "spanish", 715, 10, 20],
["man", "spanish", 1008, 20, 20],
["man", "french", 932, 20, 10],
["woman", "french", 1300, 10, 10],
]
df = pd.DataFrame(data, columns=col_names)
res = {"corr1": [pair_gender], "corr2": [pair_nationality]}
assert find_sensitive_correlations(df) == res
def test_multiple_correlation():
col_names = ["race", "age", "score", "entries", "marital", "credit", "corr1"]
data = [
["arabian", 21, 10, 2000, "married", 10, 60],
["carribean", 20, 10, 3000, "single", 10, 90],
["indo-european", 41, 10, 1900, "widowed", 10, 120],
["carribean", 40, 10, 2000, "single", 10, 90],
["indo-european", 42, 10, 2500, "widowed", 10, 120],
["arabian", 19, 10, 2200, "married", 10, 60],
]
df = pd.DataFrame(data, columns=col_names)
res = {"corr1": [pair_race, pair_marital]}
assert find_sensitive_correlations(df, corr_cutoff=0.9) == res
def test_common_correlation():
col_names = ["race", "age", "score", "entries", "marital", "credit", "corr1", "corr2"]
data = [
["arabian", 21, 10, 2000, "married", 10, 60, 120],
["carribean", 20, 10, 3000, "single", 10, 90, 130],
["indo-european", 41, 10, 1900, "widowed", 10, 120, 210],
["carribean", 40, 10, 2000, "single", 10, 90, 220],
["indo-european", 42, 10, 2500, "widowed", 10, 120, 200],
["arabian", 19, 10, 2200, "married", 10, 60, 115],
]
df = pd.DataFrame(data, columns=col_names)
res = {
"corr1": [pair_race, pair_age, pair_marital],
"corr2": [pair_age],
}
assert find_sensitive_correlations(df) == res
def test_column_correlation():
col_names = ["gender", "nationality", "random", "corr1", "corr2"]
data = [
["woman", "spanish", 715, 10, 20],
["man", "spanish", 1008, 20, 20],
["man", "french", 932, 20, 10],
["woman", "french", 1300, 10, 10],
]
df = pd.DataFrame(data, columns=col_names)
res1 = [pair_gender]
res2 = [pair_nationality]
assert find_column_correlation("corr1", df) == res1
assert find_column_correlation("corr2", df) == res2
def test_series_correlation():
col_names = ["race", "age", "score", "entries", "marital", "credit"]
data = [
["arabian", 21, 10, 2000, "married", 10],
["carribean", 20, 10, 3000, "single", 10],
["indo-european", 41, 10, 1900, "widowed", 10],
["carribean", 40, 10, 2000, "single", 10],
["indo-european", 42, 10, 2500, "widowed", 10],
["arabian", 19, 10, 2200, "married", 10],
]
df = pd.DataFrame(data, columns=col_names)
import pytest
from olliepy.InteractiveDashboard import InteractiveDashboard
import pandas as pd
from .utils import delete_directory
import olliepy
valid_output_directory = './tests/output'
@pytest.mark.parametrize("title", [
None,
20,
False
])
def test_invalid_title(title):
with pytest.raises(TypeError):
InteractiveDashboard(title=title,
output_directory=valid_output_directory,
dataframes=[pd.DataFrame()],
dataframes_names=['TestDF'],
numerical_columns=[],
categorical_columns=[],
date_columns=None,
dashboard_folder_name=None,
encryption_secret=None,
generate_encryption_secret=False)
@pytest.mark.parametrize("output_directory", [
'fake_output',
'./fake_output'
])
def test_invalid_output_directory(output_directory):
with pytest.raises(NotADirectoryError):
InteractiveDashboard(title='Test dashboard title',
output_directory=output_directory,
dataframes=[pd.DataFrame()],
dataframes_names=['TestDF'],
numerical_columns=[],
categorical_columns=[],
date_columns=None,
dashboard_folder_name=None,
encryption_secret=None,
generate_encryption_secret=False)
@pytest.mark.parametrize("dataframes", [
[{'A': ['1', '2', '3']}],
['a', 'b'],
False,
3
])
def test_invalid_dataframes(dataframes):
with pytest.raises(TypeError):
InteractiveDashboard(title='Test dashboard title',
output_directory=valid_output_directory,
dataframes=dataframes,
dataframes_names=['TestDF'],
numerical_columns=[],
categorical_columns=[],
date_columns=None,
dashboard_folder_name=None,
encryption_secret=None,
generate_encryption_secret=False)
@pytest.mark.parametrize("dataframes_names", [
[{'A': ['1', '2', '3']}],
[pd.DataFrame()],
False,
3,
'test'
])
def test_invalid_dataframes_names(dataframes_names):
with pytest.raises(TypeError):
InteractiveDashboard(title='Test dashboard title',
output_directory=valid_output_directory,
dataframes=[pd.DataFrame()],
dataframes_names=dataframes_names,
numerical_columns=[],
categorical_columns=[],
date_columns=None,
dashboard_folder_name=None,
encryption_secret=None,
generate_encryption_secret=False)
@pytest.mark.parametrize("numerical_columns", [
[1, 2, 3],
[1, 'test', 3],
'test',
3
])
def test_invalid_numerical_columns_type(numerical_columns):
with pytest.raises(TypeError):
InteractiveDashboard(title='Test dashboard title',
output_directory=valid_output_directory,
dataframes=[pd.DataFrame()],
dataframes_names=['test'],
numerical_columns=numerical_columns,
categorical_columns=[],
date_columns=None,
dashboard_folder_name=None,
encryption_secret=None,
generate_encryption_secret=False)
@pytest.mark.parametrize("categorical_columns", [
[1, 2, 3],
[1, 'test', 3],
'test',
3
])
def test_invalid_categorical_columns_type(categorical_columns):
with pytest.raises(TypeError):
InteractiveDashboard(title='Test dashboard title',
output_directory=valid_output_directory,
dataframes=[pd.DataFrame()],
dataframes_names=['test'],
numerical_columns=[],
categorical_columns=categorical_columns,
date_columns=None,
dashboard_folder_name=None,
encryption_secret=None,
generate_encryption_secret=False)
@pytest.mark.parametrize("date_columns", [
[1, 2, 3],
[1, 'test', 3],
'test',
3
])
def test_invalid_date_columns_type(date_columns):
with pytest.raises(TypeError):
InteractiveDashboard(title='Test dashboard title',
output_directory=valid_output_directory,
dataframes=[pd.DataFrame()],
dataframes_names=['test'],
numerical_columns=[],
categorical_columns=[],
date_columns=date_columns,
dashboard_folder_name=None,
encryption_secret=None,
generate_encryption_secret=False)
@pytest.mark.parametrize("dataframes, dataframes_names", [
([pd.DataFrame()
"""shell
pip install autokeras
pip install git+https://github.com/keras-team/[email protected]
"""
"""
## A Simple Example
The first step is to prepare your data. Here we use the [Titanic
dataset](https://www.kaggle.com/c/titanic) as an example.
"""
import tensorflow as tf
import autokeras as ak
TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv"
train_file_path = tf.keras.utils.get_file("train.csv", TRAIN_DATA_URL)
test_file_path = tf.keras.utils.get_file("eval.csv", TEST_DATA_URL)
"""
The second step is to run the
[StructuredDataClassifier](/structured_data_classifier).
As a quick demo, we set epochs to 10.
You can also leave the epochs unspecified for an adaptive number of epochs.
"""
# Initialize the structured data classifier.
clf = ak.StructuredDataClassifier(
overwrite=True,
max_trials=3) # It tries 3 different models.
# Feed the structured data classifier with training data.
clf.fit(
# The path to the train.csv file.
train_file_path,
# The name of the label column.
'survived',
epochs=10)
# Predict with the best model.
predicted_y = clf.predict(test_file_path)
# Evaluate the best model with testing data.
print(clf.evaluate(test_file_path, 'survived'))
"""
## Data Format
The AutoKeras StructuredDataClassifier is quite flexible for the data format.
The example above shows how to use the CSV files directly. Besides CSV files, it also
supports numpy.ndarray, pandas.DataFrame or [tf.data.Dataset](
https://www.tensorflow.org/api_docs/python/tf/data/Dataset?version=stable). The data should be
two-dimensional with numerical or categorical values.
For the classification labels,
AutoKeras accepts both plain labels, i.e. strings or integers, and one-hot encoded
labels, i.e. vectors of 0s and 1s.
The labels can be numpy.ndarray, pandas.DataFrame, or pandas.Series.
The following examples show how the data can be prepared with numpy.ndarray,
pandas.DataFrame, and tensorflow.data.Dataset.
"""
import pandas as pd
import numpy as np
# x_train as pandas.DataFrame, y_train as pandas.Series
x_train = pd.read_csv(train_file_path)
print(type(x_train)) # pandas.DataFrame
y_train = x_train.pop('survived')
print(type(y_train)) # pandas.Series
# You can also use pandas.DataFrame for y_train.
y_train = pd.DataFrame(y_train)
print(type(y_train)) # pandas.DataFrame
# You can also use numpy.ndarray for x_train and y_train.
x_train = x_train.to_numpy()
y_train = y_train.to_numpy()
print(type(x_train)) # numpy.ndarray
print(type(y_train)) # numpy.ndarray
# Preparing testing data.
x_test = pd.read_csv(test_file_path)
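"""
As noted above, a tf.data.Dataset can also be used. The lines below are a minimal sketch of
that path (an assumption, not quoted from the original tutorial); the string cast and the
batch size of 32 are illustrative choices.
"""
# Wrap the numpy arrays in a batched tf.data.Dataset and fit the classifier on it.
train_set = tf.data.Dataset.from_tensor_slices((x_train.astype(str), y_train)).batch(32)
clf.fit(train_set, epochs=10)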
import os
import audiofile
import audiofile as af
import numpy as np
import pandas as pd
import pytest
import audinterface
import audformat
def signal_duration(signal, sampling_rate):
return signal.shape[1] / sampling_rate
def signal_max(signal, sampling_rate):
return np.max(signal)
SEGMENT = audinterface.Segment(
process_func=lambda x, sr:
audinterface.utils.signal_index(
pd.to_timedelta(0),
pd.to_timedelta(x.shape[1] / sr, unit='s') / 2,
)
)
def signal_modification(signal, sampling_rate, subtract=False):
if subtract:
signal -= 0.1 * signal
else:
signal += 0.1 * signal
return signal
@pytest.mark.parametrize(
'process_func, segment, signal, sampling_rate, start, end, keep_nat, '
'channels, mixdown, expected_output',
[
(
signal_max,
None,
np.ones((1, 3)),
8000,
None,
None,
False,
None,
False,
1,
),
(
signal_max,
SEGMENT,
np.ones((1, 8000)),
8000,
None,
None,
False,
None,
False,
1,
),
(
signal_max,
None,
np.ones(3),
8000,
None,
None,
False,
0,
False,
1,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
0,
False,
0,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
0,
False,
0,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
1,
False,
1,
),
(
signal_max,
None,
np.array([[0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
None,
True,
0.5,
),
(
signal_max,
None,
np.array([[-1., -1., -1.], [0., 0., 0.], [1., 1., 1.]]),
8000,
None,
None,
False,
[1, 2],
True,
0.5,
),
# invalid channel selection
pytest.param(
signal_max,
None,
np.ones((1, 3)),
8000,
None,
None,
False,
1,
False,
1,
marks=pytest.mark.xfail(raises=ValueError),
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
None,
None,
False,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.NaT,
pd.NaT,
False,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.NaT,
pd.NaT,
True,
None,
False,
3.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.to_timedelta('1s'),
None,
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.to_timedelta('1s'),
pd.NaT,
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
None,
pd.to_timedelta('2s'),
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.NaT,
pd.to_timedelta('2s'),
False,
None,
False,
2.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
pd.to_timedelta('1s'),
pd.to_timedelta('2s'),
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
'1s',
'2s',
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
'1000ms',
'2000ms',
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
1,
2,
False,
None,
False,
1.0,
),
(
signal_duration,
None,
np.zeros((1, 24000)),
8000,
1.0,
2.0,
False,
None,
False,
1.0,
),
],
)
def test_process_file(
tmpdir,
process_func,
segment,
signal,
sampling_rate,
start,
end,
keep_nat,
channels,
mixdown,
expected_output,
):
process = audinterface.Process(
process_func=process_func,
sampling_rate=sampling_rate,
resample=False,
channels=channels,
mixdown=mixdown,
segment=segment,
keep_nat=keep_nat,
verbose=False,
)
# create test file
root = str(tmpdir.mkdir('wav'))
file = 'file.wav'
path = os.path.join(root, file)
af.write(path, signal, sampling_rate)
# test absolute path
y = process.process_file(
path,
start=start,
end=end,
)
np.testing.assert_almost_equal(
y.values, expected_output, decimal=4,
)
# test relative path
y = process.process_file(
file,
start=start,
end=end,
root=root,
)
np.testing.assert_almost_equal(
y.values, expected_output, decimal=4,
)
@pytest.mark.parametrize(
'process_func, num_files, signal, sampling_rate, starts, ends, '
'expected_output',
[
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
None,
None,
[3.0] * 2,
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
'1s',
'2s',
[1.0] * 2,
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
1,
2,
[1.0] * 2,
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
[None, 1],
[None, 2],
[3.0, 1.0],
),
(
signal_duration,
2,
np.zeros((1, 24000)),
8000,
[None, '1s'],
[None, '2s'],
[3.0, 1.0],
),
(
signal_duration,
3,
np.zeros((1, 24000)),
8000,
[None, '1s'],
[None, '2s', None],
[3.0, 1.0],
),
(
signal_duration,
1,
np.zeros((1, 24000)),
8000,
[None],
[None, '2s'],
[3.0],
),
],
)
def test_process_files(
tmpdir,
process_func,
num_files,
signal,
sampling_rate,
starts,
ends,
expected_output,
):
process = audinterface.Process(
process_func=process_func,
sampling_rate=sampling_rate,
resample=False,
verbose=False,
)
# create files
files = []
paths = []
root = tmpdir
for idx in range(num_files):
file = f'file{idx}.wav'
path = os.path.join(root, file)
af.write(path, signal, sampling_rate)
files.append(file)
paths.append(path)
# test absolute paths
output = process.process_files(
paths,
starts=starts,
ends=ends,
)
np.testing.assert_almost_equal(
output.values,
expected_output,
decimal=4,
)
# test relative paths
output = process.process_files(
files,
starts=starts,
ends=ends,
root=root,
)
np.testing.assert_almost_equal(
output.values,
expected_output,
decimal=4,
)
@pytest.mark.parametrize(
'num_files, segment, num_workers, multiprocessing',
[
(3, None, 1, False, ),
(3, None, 2, False, ),
(3, None, None, False, ),
(3, SEGMENT, 1, False, ),
]
)
def test_process_folder(
tmpdir,
num_files,
segment,
num_workers,
multiprocessing,
):
process = audinterface.Process(
process_func=None,
sampling_rate=None,
resample=False,
segment=segment,
num_workers=num_workers,
multiprocessing=multiprocessing,
verbose=False,
)
sampling_rate = 8000
path = str(tmpdir.mkdir('wav'))
files = [
os.path.join(path, f'file{n}.wav') for n in range(num_files)
]
for file in files:
signal = np.random.uniform(-1.0, 1.0, (1, sampling_rate))
af.write(file, signal, sampling_rate)
y = process.process_folder(path)
pd.testing.assert_series_equal(
y,
process.process_files(files),
)
def test_process_func_args():
def process_func(s, sr, arg1, arg2):
assert arg1 == 'foo'
assert arg2 == 'bar'
audinterface.Process(
process_func=process_func,
process_func_args={
'arg1': 'foo',
'arg2': 'bar',
}
)
with pytest.warns(UserWarning):
audinterface.Process(
feature_names=('o1', 'o2', 'o3'),
process_func=process_func,
arg1='foo',
arg2='bar',
)
@pytest.mark.parametrize(
'num_workers, multiprocessing',
[
(1, False, ),
(2, False, ),
(None, False, ),
]
)
def test_process_index(tmpdir, num_workers, multiprocessing):
process = audinterface.Process(
process_func=None,
sampling_rate=None,
resample=False,
num_workers=num_workers,
multiprocessing=multiprocessing,
verbose=False,
)
sampling_rate = 8000
signal = np.random.uniform(-1.0, 1.0, (1, 3 * sampling_rate))
# create file
root = str(tmpdir.mkdir('wav'))
file = 'file.wav'
path = os.path.join(root, file)
af.write(path, signal, sampling_rate)
# empty index
index = audformat.segmented_index()
y = process.process_index(index)
assert y.empty
# segmented index with absolute paths
index = audformat.segmented_index(
[path] * 3,
pd.timedelta_range('0s', '2s', 3),
pd.timedelta_range('1s', '3s', 3),
)
y = process.process_index(index)
for (path, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
path, start=start, end=end
)
np.testing.assert_equal(signal, value)
# filewise index with absolute paths
index = audformat.filewise_index(path)
y = process.process_index(index)
for (path, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
path, start=start, end=end
)
np.testing.assert_equal(signal, value)
# segmented index with relative paths
index = audformat.segmented_index(
[file] * 3,
pd.timedelta_range('0s', '2s', 3),
pd.timedelta_range('1s', '3s', 3),
)
y = process.process_index(index, root=root)
for (file, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
file, start=start, end=end, root=root
)
np.testing.assert_equal(signal, value)
# filewise index with relative paths
index = audformat.filewise_index(path)
y = process.process_index(index, root=root)
for (file, start, end), value in y.items():
signal, sampling_rate = audinterface.utils.read_audio(
file, start=start, end=end, root=root
)
np.testing.assert_equal(signal, value)
@pytest.mark.parametrize(
'process_func, process_func_args, segment, signal, '
'sampling_rate, file, start, end, keep_nat, expected_signal',
[
(
None,
{},
None,
np.array([1., 2., 3.]),
44100,
None,
None,
None,
False,
np.array([1., 2., 3.]),
),
(
None,
{},
None,
np.array([1., 2., 3.]),
44100,
'file',
None,
None,
False,
np.array([1., 2., 3.]),
),
(
None,
{},
SEGMENT,
np.array([1., 2., 3., 4.]),
44100,
None,
None,
None,
False,
np.array([1., 2.]),
),
(
None,
{},
SEGMENT,
np.array([1., 2., 3., 4.]),
44100,
'file',
None,
None,
False,
np.array([1., 2.]),
),
(
signal_max,
{},
None,
np.array([1., 2., 3.]),
44100,
None,
None,
None,
False,
3.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
3,
None,
None,
None,
False,
1.0,
),
(
signal_duration,
{},
None,
np.array([1., 2., 3.]),
1,
None,
pd.to_timedelta('2s'),
import collections
import datetime
import logging
import typing
from abc import ABC, abstractmethod
from typing import Callable, List, Iterable, Optional
import numpy as np
import pandas as pd
from . import distance_metric, util, data_transformation
from .distance_metric import DistanceMetric
from .featuregen import FeatureGeneratorFromNamedTuples
from .util.string import objectRepr
from .util.typing import PandasNamedTuple
from .vector_model import VectorClassificationModel, VectorRegressionModel
log = logging.getLogger(__name__)
class Neighbor:
def __init__(self, value: PandasNamedTuple, distance: float):
self.distance = distance
self.value = value
self.identifier = value.Index
class NeighborProvider(ABC):
def __init__(self, dfIndexedById: pd.DataFrame):
self.df = dfIndexedById
self.index = self.df.index
if any(self.index.duplicated()):
raise Exception("Dataframe index should not contain duplicates")
self.indexPositionDict = {idx: pos for pos, idx in enumerate(self.index)}
@abstractmethod
def iterPotentialNeighbors(self, value: PandasNamedTuple) -> Iterable[PandasNamedTuple]:
pass
@abstractmethod
def __str__(self):
return super().__str__()
class AllNeighborsProvider(NeighborProvider):
def __init__(self, dfIndexedById: pd.DataFrame):
super().__init__(dfIndexedById)
self.namedTuples = None
def __getstate__(self):
d = self.__dict__.copy()
d["namedTuples"] = None
return d
def iterPotentialNeighbors(self, value):
identifier = value.Index
if self.namedTuples is None:
self.namedTuples = list(self.df.itertuples())
for nt in self.namedTuples:
if nt.Index != identifier:
yield nt
def __str__(self):
return str(self.__class__.__name__)
class TimerangeNeighborsProvider(NeighborProvider):
def __init__(self, dfIndexedById: pd.DataFrame, timestampsColumn="timestamps",
pastTimeRangeDays=120, futureTimeRangeDays=120):
super().__init__(dfIndexedById)
if not pd.core.dtypes.common.is_datetime64_any_dtype(self.df[timestampsColumn]):
raise Exception(f"Column {timestampsColumn} does not have a compatible datatype")
self.timestampsColumn = timestampsColumn
self.pastTimeRangeDays = pastTimeRangeDays
self.futureTimeRangeDays = futureTimeRangeDays
self.pastTimeDelta = datetime.timedelta(days=pastTimeRangeDays)
self.futureTimeDelta = datetime.timedelta(days=futureTimeRangeDays)
def iterPotentialNeighbors(self, value: PandasNamedTuple):
identifier = value.Index
inputTime = getattr(value, self.timestampsColumn)
maxTime, minTime = inputTime + self.futureTimeDelta, inputTime - self.pastTimeDelta
neighborsDf = self.df[
self.df[self.timestampsColumn].apply(lambda time: minTime < time < inputTime)
]
if identifier in neighborsDf.index:
neighborsDf.drop(identifier, inplace=True)
return neighborsDf.itertuples()
def __str__(self):
return objectRepr(self, ["pastTimeRangeDays", "futureTimeRangeDays"])
class AbstractKnnFinder(ABC):
@abstractmethod
def findNeighbors(self, namedTuple: PandasNamedTuple, n_neighbors=20) -> List[Neighbor]:
pass
@abstractmethod
def __str__(self):
super().__str__()
class CachingKNearestNeighboursFinder(AbstractKnnFinder):
"""
A nearest neighbor finder which uses a cache for distance metrics in order to speed up repeated computations
of the neighbors of the same data point by keeping a pandas.Series of distances to all provided
data points cached. If the distance metric is of the composite type LinearCombinationDistanceMetric,
its component distance metrics are cached, such that weights in the linear combination can be varied
without necessitating recomputations.
"""
log = log.getChild(__qualname__)
def __init__(self, cache: 'CachingKNearestNeighboursFinder.DistanceMetricCache', distanceMetric: DistanceMetric,
neighborProvider: NeighborProvider):
self.neighborProvider = neighborProvider
# This field is purely for logging purposes
self.distanceMetric = distanceMetric
if isinstance(distanceMetric, distance_metric.LinearCombinationDistanceMetric):
self.weightedDistanceMetrics = [(cache.getCachedMetric(dm), w) for (w, dm) in distanceMetric.metrics]
else:
self.weightedDistanceMetrics = [(cache.getCachedMetric(distanceMetric), 1)]
def __str__(self):
return objectRepr(self, ["neighborProvider", "distanceMetric"])
class DistanceMetricCache:
"""
A cache for distance metrics which identifies equivalent distance metrics by their string representations.
The cache can be passed (consecutively) to multiple KNN models in order to speed up computations for the
same test data points. If the cache is reused, it is assumed that the neighbor provider remains the same.
"""
log = log.getChild(__qualname__)
def __init__(self):
self._cachedMetricsByName = {}
def getCachedMetric(self, distanceMetric):
key = str(distanceMetric)
cachedMetric = self._cachedMetricsByName.get(key)
if cachedMetric is None:
self.log.info(f"Creating new cached metric for key '{key}'")
cachedMetric = CachingKNearestNeighboursFinder.CachedSeriesDistanceMetric(distanceMetric)
self._cachedMetricsByName[key] = cachedMetric
else:
self.log.info(f"Reusing cached metric for key '{key}'")
return cachedMetric
class CachedSeriesDistanceMetric:
"""
Provides caching for a wrapped distance metric: the series of all distances to provided potential neighbors
are retained in a cache
"""
def __init__(self, distanceMetric):
self.distanceMetric = distanceMetric
self.cache = {}
def getDistanceSeries(self, namedTuple: PandasNamedTuple, potentialNeighborValues):
identifier = namedTuple.Index
series = self.cache.get(identifier)
if series is None:
distances = []
for neighborTuple in potentialNeighborValues:
distances.append(self.distanceMetric.distance(namedTuple, neighborTuple))
series = pd.Series(distances)
"""Tradier options view"""
__docformat__ = "numpy"
import argparse
from typing import List
from bisect import bisect_left
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import mplfinance as mpf
from colorama import Fore, Style
from tabulate import tabulate
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
check_non_negative,
export_data,
plot_autoscale,
patch_pandas_text_adjustment,
)
from gamestonk_terminal.options import op_helpers, tradier_model
from gamestonk_terminal import config_plot as cfp
from gamestonk_terminal import feature_flags as gtff
column_map = {"mid_iv": "iv", "open_interest": "oi", "volume": "vol"}
def red_highlight(val):
"""Red highlight
Parameters
----------
val
dataframe values to color
Returns
----------
str
colored dataframe values
"""
return f"{Fore.RED}{val}{Style.RESET_ALL}"
def green_highlight(val):
"""Green highlight
Parameters
----------
val : str
dataframe values to color
Returns
----------
str
colored dataframe values
"""
return f"{Fore.GREEN}{val}{Style.RESET_ALL}"
def check_valid_option_chains_headers(headers: str) -> List[str]:
"""Check valid option chains headers
Parameters
----------
headers : str
Option chains headers
Returns
----------
List[str]
List of column strings
"""
columns = [str(item) for item in headers.split(",")]
for header in columns:
if header not in tradier_model.df_columns:
raise argparse.ArgumentTypeError("Invalid option chains header selected!")
return columns
def display_chains(ticker: str, expiry: str, other_args: List[str]):
"""Display option chain
Parameters
----------
ticker: str
Stock ticker
expiry: str
Expiration date of option
other_args: List[str]
Argparse arguments
"""
parser = argparse.ArgumentParser(
prog="chains",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Display option chains",
)
parser.add_argument(
"--calls",
action="store_true",
default=False,
dest="calls_only",
help="Flag to show calls only",
)
parser.add_argument(
"--puts",
action="store_true",
default=False,
dest="puts_only",
help="Flag to show puts only",
)
parser.add_argument(
"-m",
"--min",
dest="min_sp",
type=check_non_negative,
default=-1,
help="minimum strike price to consider.",
)
parser.add_argument(
"-M",
"--max",
dest="max_sp",
type=check_non_negative,
default=-1,
help="maximum strike price to consider.",
)
parser.add_argument(
"-d",
"--display",
dest="to_display",
default=tradier_model.default_columns,
type=check_valid_option_chains_headers,
help="columns to look at. Columns can be: {bid, ask, strike, bidsize, asksize, volume, open_interest, delta, "
"gamma, theta, vega, ask_iv, bid_iv, mid_iv} ",
)
parser.add_argument(
"--export",
choices=["csv", "json", "xlsx"],
default="",
dest="export",
help="Export dataframe data to csv,json,xlsx file",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
chains_df = tradier_model.get_option_chains(ticker, expiry)
columns = ns_parser.to_display + ["strike", "option_type"]
chains_df = chains_df[columns].rename(columns=column_map)
if ns_parser.min_sp == -1:
min_strike = np.percentile(chains_df["strike"], 25)
else:
min_strike = ns_parser.min_sp
if ns_parser.max_sp == -1:
max_strike = np.percentile(chains_df["strike"], 75)
else:
max_strike = ns_parser.max_sp
print(f"The strike prices are displayed between {min_strike} and {max_strike}")
chains_df = chains_df[chains_df["strike"] >= min_strike]
chains_df = chains_df[chains_df["strike"] <= max_strike]
if ns_parser.export:
# Note the extra dirname needed due to the subfolder in options
export_data(
ns_parser.export,
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
f"chains_{ticker}_{expiry}",
chains_df,
)
calls_df = chains_df[chains_df.option_type == "call"].drop(
columns=["option_type"]
)
puts_df = chains_df[chains_df.option_type == "put"].drop(
columns=["option_type"]
)
if ns_parser.calls_only:
print(
tabulate(
calls_df,
headers=calls_df.columns,
tablefmt="grid",
showindex=False,
floatfmt=".2f",
)
)
elif ns_parser.puts_only:
print(
tabulate(
puts_df,
headers=puts_df.columns,
tablefmt="grid",
showindex=False,
floatfmt=".2f",
)
)
else:
puts_df = puts_df[puts_df.columns[::-1]]
chain_table = calls_df.merge(puts_df, on="strike")
if gtff.USE_COLOR:
call_cols = [col for col in chain_table if col.endswith("_x")]
put_cols = [col for col in chain_table if col.endswith("_y")]
patch_pandas_text_adjustment()
pd.set_option("display.max_colwidth", 0)
| pd.set_option("display.max_rows", None) | pandas.set_option |
"""
Instructions to process/merge SMS and Suncor frame crack data, to be completed monthly for FleetMonthlyReport
1. Load frame cracks from SAP (load_framecracks.vbs) + copy to clipboard
2. Make sure SMS data has been categorized as front, mid, or rear in the EventLog
3. run pre_process_framecracks (copies merged data to clipboard)
- NOTE: make sure d_lower is set correctly for the last time processed
>>> from smseventlog.data import framecracks as frm
df = frm.pre_process_framecracks(d_lower=None)
OR
>>> make framecracks
4. paste data back to working xlsx file and remove duplicates/split rows as needed
5. Paste WO numbers back into the EventLog so future merges work properly
6. Once categorization is complete, reset the values in the _merge column to 'old'
"""
import re
import numpy as np
import pandas as pd
from smseventlog import config as cf
from smseventlog import delta, dt
from smseventlog import functions as f
from smseventlog import getlog
from smseventlog.database import db
from smseventlog.queries import FrameCracks
from smseventlog.utils import fileops as fl
log = getlog(__name__)
p_frm_excel = cf.p_res / 'csv/FH Frame Cracks History.xlsx'
def load_df_smr(d_lower=None):
if d_lower is None:
d_lower = dt(2018, 1, 1)
sql = f"select a.* \
FROM Unitsmr a \
LEFT JOIN UnitID b on a.unit=b.unit \
Where b.MineSite='FortHills' and \
a.datesmr>='{d_lower}'"
return pd.read_sql(sql=sql, con=db.engine) \
.pipe(f.parse_datecols) \
.pipe(f.lower_cols) \
.rename(columns=dict(date_smr='date_added')) \
.sort_values(['date_added'])
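# Illustrative call (assumption, not from the original module): pull FortHills
# SMR records from an arbitrary cutoff date, e.g.
#   df_smr = load_df_smr(d_lower=dt(2020, 1, 1))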
def format_int(df, cols):
for col in cols:
df[col] = | pd.to_numeric(df[col], errors='coerce') | pandas.to_numeric |
import pandas as pd
import numpy as np
def filter_data(data, conditions):
"""
Remove elements that do not match the condition provided.
Takes a DataFrame as input and returns a filtered DataFrame.
Conditions should be a list of strings of the following format:
'<field> <op> <value>'
where the following operations are valid: >, <, >=, <=, ==, !=
Example: ["duration < 15", "city == 'San Francisco'"]
"""
for condition in conditions:
field, op, value = condition.split(" ", 2)
# check if field is valid
if field not in data.columns.values:
raise Exception("'{}' is not a feature of the dataframe. Did you spell something wrong?".format(field))
# convert value into number or strip excess quotes if string
try:
value = float(value)
except:
value = value.strip("\'\"")
# get booleans for filtering
if op == ">":
matches = data[field] > value
elif op == "<":
matches = data[field] < value
elif op == ">=":
matches = data[field] >= value
elif op == "<=":
matches = data[field] <= value
elif op == "==":
matches = data[field] == value
elif op == "!=":
matches = data[field] != value
else: # catch invalid operation codes
raise Exception("Invalid comparison operator. Only >, <, >=, <=, ==, != allowed.")
# filter the data to the rows matching this condition
data = data[matches].reset_index(drop=True)
return data
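# Hedged usage sketch (illustrative only, not part of the original module),
# showing the '<field> <op> <value>' condition format documented above:
#   example_df = pd.DataFrame({"duration": [5, 20, 12],
#                              "city": ["San Francisco", "Boston", "San Francisco"]})
#   filter_data(example_df, ["duration < 15", "city == 'San Francisco'"])
#   # -> keeps only the rows where duration < 15 and city == 'San Francisco'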
def dest_process(dest):
if dest == '1':
dest_processed = '希思罗机场'
elif dest == '2':
dest_processed = '曼彻斯特机场'
elif dest == '3':
dest_processed = '爱丁堡机场'
else:
dest_processed = '伯明翰机场'
return dest_processed
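# dest_process above maps option codes to Chinese airport names:
# '希思罗机场' = Heathrow Airport, '曼彻斯特机场' = Manchester Airport,
# '爱丁堡机场' = Edinburgh Airport, '伯明翰机场' = Birmingham Airport.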
def data_process(dataframe):
dataframe.date = | pd.to_datetime(dataframe.date) | pandas.to_datetime |
import os
import pandas as pd
from fbprophet import Prophet
from matplotlib import pyplot
def forecast_volume(data_file, days_forward):
"""
Forecast hourly order volume based on days_forward.
:param data_file: ( String ) Input CSV file w/ order hours + number of orders.
:param days_forward: ( Integer ) Number of days forward.
:return: ( DataFrame ) Forecast Timestamp + Predicted Order Volumes.
"""
# Read input data
hourly_volume_df = pd.read_csv(f"{os.getcwd()}/../data/{data_file}", usecols = ["order_hour","num_orders"])
# Convert order_hour into datetime
hourly_volume_df["order_hour"] = | pd.to_datetime(hourly_volume_df["order_hour"]) | pandas.to_datetime |
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt import settings
from vectorbt.utils.random import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
settings.returns['year_freq'] = '252 days' # same as empyrical
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
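# Shared fixtures for the tests below: `price` is a five-day close series,
# `price_wide` tiles it into three columns ('a', 'b', 'c'), and `big_price` /
# `big_price_wide` provide a larger random frame for heavier scenarios.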
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
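# NaN-aware tuple comparison: two positions match if they are equal or both NaN,
# e.g. assert_same_tuple((1.0, np.nan), (1.0, np.nan)) passes, while
# assert_same_tuple((1.0, 2.0), (1.0, 3.0)) raises an AssertionError.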
def test_process_order_nb():
# Errors, ignored and rejected orders
log_record = np.empty(1, dtype=log_dt)[0]
log_record[0] = 0
log_record[1] = 0
log_record[2] = 0
log_record[3] = 0
log_record[-1] = 0
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=0))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=1))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
-100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.nan, 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.inf, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.nan, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., -100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=0), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=np.nan), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=2), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., np.nan,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., -10.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., np.inf, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., -10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., np.nan, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 10., 10., 1100.,
nb.create_order_nb(size=0, price=10), log_record)
assert cash_now == 100.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1., raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1.), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.All), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False,
raise_reject=True),
log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 180.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 909.
assert shares_now == -100.
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 50.
assert shares_now == 4.9
assert_same_tuple(order_result, OrderResult(
size=4.9, price=10.0, fees=1., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 49.
assert shares_now == 5.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=1., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 100.,
nb.create_order_nb(size=1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., -10., 10., 100.,
nb.create_order_nb(size=-1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == -20.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
150., -5., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=0., side=1, status=0, status_info=-1))
# Logging
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.nan, 0, 2, np.nan, 0., 0., 0., 0., np.inf, 0.,
True, False, True, 100., 0., np.nan, np.nan, np.nan, -1, 1, 0, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 0., 10., 10., 10., 0., 0, 0, -1, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., -np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 200., -10., 10., 10., 0., 1, 0, -1, 0
))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_all(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='all', **kwargs)
def from_signals_longonly(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(price=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(price, entries, exits, direction='shortonly', **kwargs)
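# The three wrappers above pin the `direction` argument of
# vbt.Portfolio.from_signals so each test can run the same entry/exit series
# under 'all', 'longonly' and 'shortonly' rules.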
class TestFromSignals:
def test_one_column(self):
record_arrays_close(
from_signals_all().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_signals_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 200., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 100., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0),
(2, 0, 1, 100., 1., 0., 1), (3, 3, 1, 50., 4., 0., 0),
(4, 0, 2, 100., 1., 0., 1), (5, 3, 2, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_size(self):
record_arrays_close(
from_signals_all(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 2.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 3, 2, 1.0, 4.0, 0.0, 0), (4, 0, 3, 100.0, 1.0, 0.0, 1), (5, 3, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=0.5, size_type='percent')
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True, accumulate=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 3, 0, 31.25, 4., 0., 1), (3, 4, 0, 15.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
price=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 25., 1., 0., 0),
(2, 0, 2, 12.5, 1., 0., 0), (3, 3, 0, 50., 4., 0., 1),
(4, 3, 1, 25., 4., 0., 1), (5, 3, 2, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_all(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 3, 0, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 3, 0, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 3, 0, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
def test_fees(self):
record_arrays_close(
from_signals_all(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.8, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.4, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.4, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_all(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.1, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_all(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 2.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 1.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 0.9, 0.0, 1),
(3, 3, 1, 1.0, 4.4, 0.0, 0), (4, 0, 2, 1.0, 0.0, 0.0, 1), (5, 3, 2, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_all(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_all(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 4, 0, 0.5, 5.0, 0.0, 1),
(3, 0, 1, 1.0, 1.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 1),
(6, 0, 2, 1.0, 1.0, 0.0, 0), (7, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1), (4, 0, 2, 1.0, 1.0, 0.0, 0), (5, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 3, 0, 0.5, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_all(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_close_first(self):
record_arrays_close(
from_signals_all(close_first=[[False, True]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1), (4, 4, 1, 80.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(
price=pd.Series(price.values[::-1], index=price.index),
entries=pd.Series(entries.values[::-1], index=price.index),
exits=pd.Series(exits.values[::-1], index=price.index),
close_first=[[False, True]]
).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1), (1, 3, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 20.0, 5.0, 0.0, 1),
(3, 3, 1, 20.0, 2.0, 0.0, 0), (4, 4, 1, 160.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1), (2, 3, 1, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 3, 0, 275.0, 4.0, 0.0, 0), (2, 0, 1, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 3, 0, 50.0, 4.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_accumulate(self):
record_arrays_close(
from_signals_all(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_log(self):
record_arrays_close(
from_signals_all(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 1.0, 100.0, np.inf, 0, 2, 1.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 3, 0, 0, 0.0, 100.0, 4.0, 400.0, -np.inf, 0, 2, 4.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 800.0, -100.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_conflict_mode(self):
kwargs = dict(
price=price.iloc[:3],
entries=pd.DataFrame([
[True, True, True, True, True],
[True, True, True, True, False],
[True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True],
[False, False, False, False, True],
[True, True, True, True, True]
]),
size=1.,
conflict_mode=[[
'ignore',
'entry',
'exit',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_all(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 1, 2, 2.0, 2.0, 0.0, 0), (4, 2, 2, 2.0, 3.0, 0.0, 1), (5, 1, 3, 1.0, 2.0, 0.0, 0),
(6, 2, 3, 2.0, 3.0, 0.0, 1), (7, 1, 4, 1.0, 2.0, 0.0, 1), (8, 2, 4, 2.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 1, 2, 1.0, 2.0, 0.0, 0),
(3, 2, 2, 1.0, 3.0, 0.0, 1), (4, 1, 3, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 1), (2, 1, 2, 1.0, 2.0, 0.0, 1),
(3, 2, 2, 1.0, 3.0, 0.0, 0), (4, 1, 3, 1.0, 2.0, 0.0, 1), (5, 2, 3, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_all(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 3, 0, 1.0, 4.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 3, 1, 2.0, 4.0, 0.0, 1),
(3, 0, 2, 1.0, 1.0, 0.0, 0), (4, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 1, 1.0, 1.0, 0.0, 0), (1, 3, 1, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 0.25, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 0.5, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
def test_cash_sharing(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 3, 1, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
with pytest.raises(Exception) as e_info:
_ = portfolio.regroup(group_by=False)
def test_call_seq(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 3, 1, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = from_signals_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 3, 1, 200.0, 4.0, 0.0, 1), (2, 3, 0, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = from_signals_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 3, 1, 200.0, 4.0, 0.0, 1), (2, 3, 0, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
price=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
portfolio = from_signals_all(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 200.0, 1.0, 0.0, 1), (2, 1, 1, 200.0, 1.0, 0.0, 0),
(3, 2, 1, 400.0, 1.0, 0.0, 1), (4, 2, 0, 400.0, 1.0, 0.0, 0), (5, 3, 0, 800.0, 1.0, 0.0, 1),
(6, 3, 2, 800.0, 1.0, 0.0, 0), (7, 4, 2, 1400.0, 1.0, 0.0, 1), (8, 4, 1, 1400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_signals_longonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_signals_shortonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 1), (1, 1, 1, 200.0, 1.0, 0.0, 1), (2, 1, 2, 100.0, 1.0, 0.0, 0),
(3, 2, 0, 300.0, 1.0, 0.0, 1), (4, 2, 1, 200.0, 1.0, 0.0, 0), (5, 3, 2, 400.0, 1.0, 0.0, 1),
(6, 3, 0, 300.0, 1.0, 0.0, 0), (7, 4, 1, 500.0, 1.0, 0.0, 1), (8, 4, 2, 400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
portfolio = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
def test_max_orders(self):
_ = from_signals_all(price=price_wide)
_ = from_signals_all(price=price_wide, max_orders=6)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(price=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_all(price=price_wide, log=True)
_ = from_signals_all(price=price_wide, log=True, max_logs=6)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(price=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
class TestFromHolding:
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
class TestFromRandom:
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='rand_n')
)
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples([(0.25, 0.25), (0.5, 0.5)], names=['rprob_entry_prob', 'rprob_exit_prob'])
)
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_all(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='all', **kwargs)
def from_orders_longonly(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='longonly', **kwargs)
def from_orders_shortonly(price=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(price, size, direction='shortonly', **kwargs)
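# Analogous to the from_signals helpers above: these wrappers pin `direction`
# for vbt.Portfolio.from_orders, replaying the same size series under 'all',
# 'longonly' and 'shortonly' rules.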
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_all().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = from_orders_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1), (8, 0, 2, 100.0, 1.0, 0.0, 0),
(9, 1, 2, 100.0, 2.0, 0.0, 1), (10, 3, 2, 50.0, 4.0, 0.0, 0), (11, 4, 2, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 0, 2, 100.0, 1.0, 0.0, 1), (5, 1, 2, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = from_orders_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_all(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_all(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 1, 0, 198.01980198019803, 2.02, 0.0, 1),
(2, 3, 0, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 1, 0, 99.00990099009901, 2.02, 0.0, 1),
(2, 3, 0, 49.504950495049506, 4.04, 0.0, 0), (3, 4, 0, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 1, 0, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
def test_fees(self):
record_arrays_close(
from_orders_all(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 3, 1, 1.0, 4.0, 0.4, 0), (7, 4, 1, 1.0, 5.0, 0.5, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 2.0, 1), (10, 3, 2, 1.0, 4.0, 4.0, 0), (11, 4, 2, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 3, 1, 1.0, 4.0, 0.4, 0), (7, 4, 1, 1.0, 5.0, 0.5, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 2.0, 1), (10, 3, 2, 1.0, 4.0, 4.0, 0), (11, 4, 2, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 3, 1, 1.0, 4.0, 0.4, 1), (7, 4, 1, 1.0, 5.0, 0.5, 0), (8, 0, 2, 1.0, 1.0, 1.0, 1),
(9, 1, 2, 1.0, 2.0, 2.0, 0), (10, 3, 2, 1.0, 4.0, 4.0, 1), (11, 4, 2, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_all(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 3, 1, 1.0, 4.0, 0.1, 0), (7, 4, 1, 1.0, 5.0, 0.1, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 1.0, 1), (10, 3, 2, 1.0, 4.0, 1.0, 0), (11, 4, 2, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 3, 1, 1.0, 4.0, 0.1, 0), (7, 4, 1, 1.0, 5.0, 0.1, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 1.0, 1), (10, 3, 2, 1.0, 4.0, 1.0, 0), (11, 4, 2, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 3, 1, 1.0, 4.0, 0.1, 1), (7, 4, 1, 1.0, 5.0, 0.1, 0), (8, 0, 2, 1.0, 1.0, 1.0, 1),
(9, 1, 2, 1.0, 2.0, 1.0, 0), (10, 3, 2, 1.0, 4.0, 1.0, 1), (11, 4, 2, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
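# Slippage moves the fill price against the trader: buys fill above and sells fill below the quoted price.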
def test_slippage(self):
record_arrays_close(
from_orders_all(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 3, 1, 1.0, 4.4, 0.0, 0), (7, 4, 1, 1.0, 4.5, 0.0, 1), (8, 0, 2, 1.0, 2.0, 0.0, 0),
(9, 1, 2, 1.0, 0.0, 0.0, 1), (10, 3, 2, 1.0, 8.0, 0.0, 0), (11, 4, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 3, 1, 1.0, 4.4, 0.0, 0), (7, 4, 1, 1.0, 4.5, 0.0, 1), (8, 0, 2, 1.0, 2.0, 0.0, 0),
(9, 1, 2, 1.0, 0.0, 0.0, 1), (10, 3, 2, 1.0, 8.0, 0.0, 0), (11, 4, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 3, 1, 1.0, 3.6, 0.0, 1), (7, 4, 1, 1.0, 5.5, 0.0, 0), (8, 0, 2, 1.0, 0.0, 0.0, 1),
(9, 1, 2, 1.0, 4.0, 0.0, 0), (10, 3, 2, 1.0, 0.0, 0.0, 1), (11, 4, 2, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
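# Orders below min_size are dropped, so the strictest column produces no records at all.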
def test_min_size(self):
record_arrays_close(
from_orders_all(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 3, 1, 1.0, 4.0, 0.0, 1), (7, 4, 1, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
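# max_size caps the filled size instead of rejecting the order outright.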
def test_max_size(self):
record_arrays_close(
from_orders_all(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1), (2, 3, 0, 0.5, 4.0, 0.0, 0),
(3, 4, 0, 0.5, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1), (8, 0, 2, 1.0, 1.0, 0.0, 0),
(9, 1, 2, 1.0, 2.0, 0.0, 1), (10, 3, 2, 1.0, 4.0, 0.0, 0), (11, 4, 2, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1), (2, 3, 0, 0.5, 4.0, 0.0, 0),
(3, 4, 0, 0.5, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1), (8, 0, 2, 1.0, 1.0, 0.0, 0),
(9, 1, 2, 1.0, 2.0, 0.0, 1), (10, 3, 2, 1.0, 4.0, 0.0, 0), (11, 4, 2, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 1, 0, 0.5, 2.0, 0.0, 0), (2, 3, 0, 0.5, 4.0, 0.0, 1),
(3, 4, 0, 0.5, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 3, 1, 1.0, 4.0, 0.0, 1), (7, 4, 1, 1.0, 5.0, 0.0, 0), (8, 0, 2, 1.0, 1.0, 0.0, 1),
(9, 1, 2, 1.0, 2.0, 0.0, 0), (10, 3, 2, 1.0, 4.0, 0.0, 1), (11, 4, 2, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
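# reject_prob randomly drops orders; the fixed seed keeps the outcome reproducible, and prob=1 rejects every order.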
def test_reject_prob(self):
record_arrays_close(
from_orders_all(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 3, 1, 1.0, 4.0, 0.0, 0),
(6, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 3, 1, 1.0, 4.0, 0.0, 0), (5, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
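# With allow_partial=False, orders that cannot be filled in full are rejected instead of being scaled down to the available cash or position.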
def test_allow_partial(self):
record_arrays_close(
from_orders_all(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 1000.0, 2.0, 0.0, 1), (2, 3, 0, 500.0, 4.0, 0.0, 0),
(3, 4, 0, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 4, 1, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 1, 0, 550.0, 2.0, 0.0, 0), (2, 3, 0, 1000.0, 4.0, 0.0, 1),
(3, 4, 0, 800.0, 5.0, 0.0, 0), (4, 0, 1, 1000.0, 1.0, 0.0, 1), (5, 3, 1, 1000.0, 4.0, 0.0, 1),
(6, 4, 1, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
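# raise_reject=True turns rejected orders into exceptions; partial fills on their own do not raise.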
def test_raise_reject(self):
record_arrays_close(
from_orders_all(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 1000.0, 2.0, 0.0, 1), (2, 3, 0, 500.0, 4.0, 0.0, 0),
(3, 4, 0, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 1, 0, 550.0, 2.0, 0.0, 0), (2, 3, 0, 1000.0, 4.0, 0.0, 1),
(3, 4, 0, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
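# log=True additionally fills log_records with the full order context of every bar.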
def test_log(self):
record_arrays_close(
from_orders_all(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 1.0, 100.0, np.inf, 0, 2, 1.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 1, 0, 0, 0.0, 100.0, 2.0, 200.0, -np.inf, 0, 2, 2.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 400.0, -100.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 2, 0, 0, 400.0, -100.0, 3.0, 100.0, np.nan, 0, 2, 3.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 400.0, -100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 3, 0, 0, 400.0, -100.0, 4.0, 0.0, np.inf, 0, 2, 4.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 4, 0, 0, 0.0, 0.0, 5.0, 0.0, -np.inf, 0, 2, 5.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
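# group_by merges columns into groups; order records stay per column while initial cash is reported per group.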
def test_group_by(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
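# cash_sharing=True lets all columns of a group draw from one shared cash balance.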
def test_cash_sharing(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 1, 1, 200.0, 2.0, 0.0, 1),
(3, 3, 0, 200.0, 4.0, 0.0, 0), (4, 4, 0, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
with pytest.raises(Exception) as e_info:
_ = portfolio.regroup(group_by=False)
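# call_seq controls the order in which grouped columns are processed within each row; 'auto' executes sells before buys so freed-up cash can fund the buys.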
def test_call_seq(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 1, 1, 200.0, 2.0, 0.0, 1),
(3, 3, 0, 200.0, 4.0, 0.0, 0), (4, 4, 0, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = from_orders_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1), (2, 1, 0, 200.0, 2.0, 0.0, 1),
(3, 3, 1, 200.0, 4.0, 0.0, 0), (4, 4, 1, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = from_orders_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1), (2, 3, 1, 100.0, 4.0, 0.0, 0),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
price=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
portfolio = from_orders_all(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 200.0, 1.0, 0.0, 1), (2, 1, 1, 200.0, 1.0, 0.0, 0),
(3, 2, 1, 400.0, 1.0, 0.0, 1), (4, 2, 0, 400.0, 1.0, 0.0, 0), (5, 3, 0, 800.0, 1.0, 0.0, 1),
(6, 3, 2, 800.0, 1.0, 0.0, 0), (7, 4, 2, 1400.0, 1.0, 0.0, 1), (8, 4, 1, 1400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_orders_longonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_orders_shortonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 1), (1, 1, 1, 200.0, 1.0, 0.0, 1), (2, 1, 2, 100.0, 1.0, 0.0, 0),
(3, 2, 0, 300.0, 1.0, 0.0, 1), (4, 2, 1, 200.0, 1.0, 0.0, 0), (5, 3, 2, 400.0, 1.0, 0.0, 1),
(6, 3, 0, 300.0, 1.0, 0.0, 0), (7, 4, 1, 500.0, 1.0, 0.0, 1), (8, 4, 2, 400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
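# 'targetshares' sizes each order so the position ends up holding the requested number of shares.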
def test_target_shares(self):
record_arrays_close(
from_orders_all(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 0, 1, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=75., size_type='targetshares',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
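# 'targetvalue' rebalances towards a fixed holding value, so rising prices trigger partial sells.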
def test_target_value(self):
record_arrays_close(
from_orders_all(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 2.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 1),
(4, 4, 0, 2.5, 5.0, 0.0, 1), (5, 0, 1, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 2, 1, 8.333333333333332, 3.0, 0.0, 0),
(8, 3, 1, 4.166666666666668, 4.0, 0.0, 0), (9, 4, 1, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 2.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 1),
(4, 4, 0, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 1, 0, 25.0, 2.0, 0.0, 0),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 0), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 0),
(4, 4, 0, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 50.0, 1.0, 0.0, 0),
(2, 1, 0, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 1, 2, 25.0, 2.0, 0.0, 0), (5, 2, 0, 8.333333333333332, 3.0, 0.0, 1),
(6, 2, 1, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 3, 0, 4.166666666666668, 4.0, 0.0, 1), (9, 3, 1, 4.166666666666668, 4.0, 0.0, 1),
(10, 3, 2, 4.166666666666668, 4.0, 0.0, 1), (11, 4, 0, 2.5, 5.0, 0.0, 1),
(12, 4, 1, 2.5, 5.0, 0.0, 1), (13, 4, 2, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
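# 'targetpercent' targets a fraction of the current portfolio value rather than an absolute amount.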
def test_target_percent(self):
record_arrays_close(
from_orders_all(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 12.5, 2.0, 0.0, 1), (2, 2, 0, 6.25, 3.0, 0.0, 1),
(3, 3, 0, 3.90625, 4.0, 0.0, 1), (4, 4, 0, 2.734375, 5.0, 0.0, 1), (5, 0, 1, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 2, 1, 6.25, 3.0, 0.0, 0), (8, 3, 1, 2.34375, 4.0, 0.0, 0),
(9, 4, 1, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 12.5, 2.0, 0.0, 1), (2, 2, 0, 6.25, 3.0, 0.0, 1),
(3, 3, 0, 3.90625, 4.0, 0.0, 1), (4, 4, 0, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 1, 0, 37.5, 2.0, 0.0, 0), (2, 2, 0, 6.25, 3.0, 0.0, 0),
(3, 3, 0, 2.34375, 4.0, 0.0, 0), (4, 4, 0, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
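# 'percent' spends the given fraction of the cash available at each bar; note the short-only case produces no orders here.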
def test_percent(self):
record_arrays_close(
from_orders_all(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 2, 0, 4.16666667, 3., 0., 0), (3, 3, 0, 1.5625, 4., 0., 0),
(4, 4, 0, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 2, 0, 4.16666667, 3., 0., 0), (3, 3, 0, 1.5625, 4., 0., 0),
(4, 4, 0, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 0, 1, 2.50000000e+01, 1., 0., 0),
(2, 0, 2, 1.25000000e+01, 1., 0., 0), (3, 1, 0, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 1, 2, 7.81250000e-01, 2., 0., 0),
(6, 2, 0, 2.60416667e-01, 3., 0., 0), (7, 2, 1, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 3, 0, 2.44140625e-02, 4., 0., 0),
(10, 3, 1, 1.22070312e-02, 4., 0., 0), (11, 3, 2, 6.10351562e-03, 4., 0., 0),
(12, 4, 0, 2.44140625e-03, 5., 0., 0), (13, 4, 1, 1.22070312e-03, 5., 0., 0),
(14, 4, 2, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
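# With call_seq='auto' the target allocations are reached exactly, because within each row sells run before buys.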
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_all(
price=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').holding_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_all(
price=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').holding_value(group_by=False),
target_hold_value
)
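# max_orders / max_logs preallocate the record arrays; a buffer smaller than the number of generated records raises.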
def test_max_orders(self):
_ = from_orders_all(price=price_wide)
_ = from_orders_all(price=price_wide, max_orders=9)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(price=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_all(price=price_wide, log=True)
_ = from_orders_all(price=price_wide, log=True, max_logs=15)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(price=price_wide, log=True, max_logs=14)
# ############# from_order_func ############# #
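# Helper order functions for the from_order_func tests: buy `size` on even bars and sell it again on odd bars, optionally with logging.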
@njit
def order_func_nb(oc, size):
return nb.create_order_nb(size=size if oc.i % 2 == 0 else -size, price=oc.close[oc.i, oc.col])
@njit
def log_order_func_nb(oc, size):
return nb.create_order_nb(size=size if oc.i % 2 == 0 else -size, price=oc.close[oc.i, oc.col], log=True)
class TestFromOrderFunc:
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_one_column(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(price.tolist(), order_func_nb, np.inf, row_wise=test_row_wise)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(price, order_func_nb, np.inf, row_wise=test_row_wise)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_multiple_columns(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(price_wide, order_func_nb, np.inf, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 0, 2, 100.0, 1.0, 0.0, 0), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 2, 200.0, 2.0, 0.0, 1),
(6, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(10, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (11, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(12, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (13, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (5, 0, 1, 100.0, 1.0, 0.0, 0),
(6, 1, 1, 200.0, 2.0, 0.0, 1), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(10, 0, 2, 100.0, 1.0, 0.0, 0), (11, 1, 2, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_shape(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5,), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 1), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64', name='iteration_idx')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 1), row_wise=test_row_wise,
keys=pd.Index(['first'], name='custom'))
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['first'], dtype='object', name='custom')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 3), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0, 1, 2], dtype='int64', name='iteration_idx')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 3), row_wise=test_row_wise,
keys=pd.Index(['first', 'second', 'third'], name='custom'))
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['first', 'second', 'third'], dtype='object', name='custom')
)
assert portfolio.wrapper.ndim == 2
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_group_by(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf,
group_by=np.array([0, 0, 1]), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 0, 2, 100.0, 1.0, 0.0, 0), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 2, 200.0, 2.0, 0.0, 1),
(6, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(10, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (11, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(12, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (13, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (5, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(6, 3, 0, 66.66666666666669, 4.0, 0.0, 1), (7, 3, 1, 66.66666666666669, 4.0, 0.0, 1),
(8, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (9, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(10, 0, 2, 100.0, 1.0, 0.0, 0), (11, 1, 2, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_cash_sharing(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf,
group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 0, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 0, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_call_seq(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 0, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 0, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 1, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 1, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 2, 1, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 1, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 1, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 1, 2, 200.0, 2.0, 0.0, 1),
(4, 2, 1, 133.33333333333334, 3.0, 0.0, 0), (5, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (7, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 106.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1),
(2, 2, 1, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 1, 66.66666666666669, 4.0, 0.0, 1),
(4, 3, 0, 66.66666666666669, 4.0, 0.0, 1), (5, 4, 1, 106.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='auto', row_wise=test_row_wise
)
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
@njit
def segment_prep_func_nb(sc, target_hold_value):
order_size = np.copy(target_hold_value[sc.i, sc.from_col:sc.to_col])
order_size_type = np.full(sc.group_len, SizeType.TargetValue)
direction = np.full(sc.group_len, Direction.All)
order_value_out = np.empty(sc.group_len, dtype=np.float_)
sc.last_val_price[sc.from_col:sc.to_col] = sc.close[sc.i, sc.from_col:sc.to_col]
nb.sort_call_seq_nb(sc, order_size, order_size_type, direction, order_value_out)
return order_size, order_size_type, direction
@njit
def pct_order_func_nb(oc, order_size, order_size_type, direction):
col_i = oc.call_seq_now[oc.call_idx]
return nb.create_order_nb(
size=order_size[col_i],
size_type=order_size_type[col_i],
price=oc.close[oc.i, col_i],
direction=direction[col_i]
)
portfolio = vbt.Portfolio.from_order_func(
price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
cash_sharing=True, segment_prep_func_nb=segment_prep_func_nb,
segment_prep_args=(target_hold_value.values,), row_wise=test_row_wise)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 1, 0],
[0, 2, 1],
[1, 0, 2],
[2, 1, 0]
])
)
pd.testing.assert_frame_equal(
portfolio.holding_value(group_by=False),
target_hold_value
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_value(self, test_row_wise):
@njit
def target_val_segment_prep_func_nb(sc, val_price):
sc.last_val_price[sc.from_col:sc.to_col] = val_price[sc.i]
return ()
@njit
def target_val_order_func_nb(oc):
return nb.create_order_nb(size=50., size_type=SizeType.TargetValue, price=oc.close[oc.i, oc.col])
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
segment_prep_func_nb=target_val_segment_prep_func_nb,
segment_prep_args=(price.iloc[:-1].values,), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 4.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 4.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_percent(self, test_row_wise):
@njit
def target_pct_segment_prep_func_nb(sc, val_price):
sc.last_val_price[sc.from_col:sc.to_col] = val_price[sc.i]
return ()
@njit
def target_pct_order_func_nb(oc):
return nb.create_order_nb(size=0.5, size_type=SizeType.TargetPercent, price=oc.close[oc.i, oc.col])
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb,
segment_prep_func_nb=target_pct_segment_prep_func_nb,
segment_prep_args=(price.iloc[:-1].values,), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 3, 0, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 3, 0, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_init_cash(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=[1., 10., np.inf])
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 1.0, 0.0, 0),
(2, 0, 2, 10.0, 1.0, 0.0, 0), (3, 1, 0, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 1, 2, 10.0, 2.0, 0.0, 1),
(6, 2, 0, 6.666666666666667, 3.0, 0.0, 0), (7, 2, 1, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 3, 0, 10.0, 4.0, 0.0, 1),
(10, 3, 1, 10.0, 4.0, 0.0, 1), (11, 3, 2, 10.0, 4.0, 0.0, 1),
(12, 4, 0, 8.0, 5.0, 0.0, 0), (13, 4, 1, 8.0, 5.0, 0.0, 0),
(14, 4, 2, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 2.0, 0.0, 1),
(2, 2, 0, 6.666666666666667, 3.0, 0.0, 0), (3, 3, 0, 10.0, 4.0, 0.0, 1),
(4, 4, 0, 8.0, 5.0, 0.0, 0), (5, 0, 1, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 2, 1, 6.666666666666667, 3.0, 0.0, 0),
(8, 3, 1, 10.0, 4.0, 0.0, 1), (9, 4, 1, 8.0, 5.0, 0.0, 0),
(10, 0, 2, 10.0, 1.0, 0.0, 0), (11, 1, 2, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 3, 2, 10.0, 4.0, 0.0, 1),
(14, 4, 2, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert isinstance(portfolio._init_cash, np.ndarray)
base_portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=np.inf)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=InitCashMode.Auto)
record_arrays_close(
portfolio.order_records,
base_portfolio.orders.values
)
assert portfolio._init_cash == InitCashMode.Auto
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=InitCashMode.AutoAlign)
record_arrays_close(
portfolio.order_records,
base_portfolio.orders.values
)
assert portfolio._init_cash == InitCashMode.AutoAlign
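# The prep/group/segment/order callbacks below share one counter so the test can verify how often and in which order each simulation level is invoked.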
def test_func_calls(self):
@njit
def prep_func_nb(simc, call_i, sim_lst):
call_i[0] += 1
sim_lst.append(call_i[0])
return (call_i,)
@njit
def group_prep_func_nb(gc, call_i, group_lst):
call_i[0] += 1
group_lst.append(call_i[0])
return (call_i,)
@njit
def segment_prep_func_nb(sc, call_i, segment_lst):
call_i[0] += 1
segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(oc, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
group_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
group_prep_func_nb=group_prep_func_nb, group_prep_args=(group_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,)
)
assert call_i[0] == 28
assert list(sim_lst) == [1]
assert list(group_lst) == [2, 18]
assert list(segment_lst) == [3, 6, 9, 12, 15, 19, 21, 23, 25, 27]
assert list(order_lst) == [4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 20, 22, 24, 26, 28]
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
group_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
active_mask = np.array([
[False, True],
[False, False],
[False, True],
[False, False],
[False, True],
])
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
group_prep_func_nb=group_prep_func_nb, group_prep_args=(group_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
active_mask=active_mask
)
assert call_i[0] == 8
assert list(sim_lst) == [1]
assert list(group_lst) == [2]
assert list(segment_lst) == [3, 5, 7]
assert list(order_lst) == [4, 6, 8]
def test_func_calls_row_wise(self):
@njit
def prep_func_nb(simc, call_i, sim_lst):
call_i[0] += 1
sim_lst.append(call_i[0])
return (call_i,)
@njit
def row_prep_func_nb(gc, call_i, row_lst):
call_i[0] += 1
row_lst.append(call_i[0])
return (call_i,)
@njit
def segment_prep_func_nb(sc, call_i, segment_lst):
call_i[0] += 1
segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(oc, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
row_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
row_prep_func_nb=row_prep_func_nb, row_prep_args=(row_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
row_wise=True
)
assert call_i[0] == 31
assert list(sim_lst) == [1]
assert list(row_lst) == [2, 8, 14, 20, 26]
assert list(segment_lst) == [3, 6, 9, 12, 15, 18, 21, 24, 27, 30]
assert list(order_lst) == [4, 5, 7, 10, 11, 13, 16, 17, 19, 22, 23, 25, 28, 29, 31]
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
row_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
active_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
row_prep_func_nb=row_prep_func_nb, row_prep_args=(row_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
active_mask=active_mask,
row_wise=True
)
assert call_i[0] == 14
assert list(sim_lst) == [1]
assert list(row_lst) == [2, 5, 9]
assert list(segment_lst) == [3, 6, 10, 13]
assert list(order_lst) == [4, 7, 8, 11, 12, 14]
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_max_orders(self, test_row_wise):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise, max_orders=15)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise, max_orders=14)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_max_logs(self, test_row_wise):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise, max_logs=15)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise, max_logs=14)
# ############# Portfolio ############# #
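# Fixtures for the Portfolio tests: the same orders are simulated on price data containing NaNs three times: per column (independent), grouped without cash sharing, and grouped with cash sharing.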
price_na = pd.DataFrame({
'a': [np.nan, 2., 3., 4., 5.],
'b': [1., 2., np.nan, 4., 5.],
'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
directions = ['longonly', 'shortonly', 'all']
group_by = pd.Index(['first', 'first', 'second'], name='group')
portfolio = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=None,
init_cash=[100., 100., 100.], freq='1D'
) # independent
portfolio_grouped = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=False,
init_cash=[100., 100., 100.], freq='1D'
) # grouped
portfolio_shared = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=True,
init_cash=[200., 100.], freq='1D'
) # shared
class TestPortfolio:
def test_config(self, tmp_path):
assert vbt.Portfolio.loads(portfolio['a'].dumps()) == portfolio['a']
assert vbt.Portfolio.loads(portfolio.dumps()) == portfolio
portfolio.save(tmp_path / 'portfolio')
assert vbt.Portfolio.load(tmp_path / 'portfolio') == portfolio
def test_wrapper(self):
pd.testing.assert_index_equal(
portfolio.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
price_na.columns
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.grouper.group_by is None
assert portfolio.wrapper.grouper.allow_enable
assert portfolio.wrapper.grouper.allow_disable
assert portfolio.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.columns,
price_na.columns
)
assert portfolio_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.grouper.group_by,
group_by
)
assert portfolio_grouped.wrapper.grouper.allow_enable
assert portfolio_grouped.wrapper.grouper.allow_disable
assert portfolio_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
portfolio_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio_shared.wrapper.columns,
price_na.columns
)
assert portfolio_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
portfolio_shared.wrapper.grouper.group_by,
group_by
)
assert not portfolio_shared.wrapper.grouper.allow_enable
assert portfolio_shared.wrapper.grouper.allow_disable
assert not portfolio_shared.wrapper.grouper.allow_modify
def test_indexing(self):
assert portfolio['a'].wrapper == portfolio.wrapper['a']
assert portfolio['a'].orders == portfolio.orders['a']
assert portfolio['a'].logs == portfolio.logs['a']
assert portfolio['a'].init_cash == portfolio.init_cash['a']
pd.testing.assert_series_equal(portfolio['a'].call_seq, portfolio.call_seq['a'])
assert portfolio['c'].wrapper == portfolio.wrapper['c']
assert portfolio['c'].orders == portfolio.orders['c']
assert portfolio['c'].logs == portfolio.logs['c']
assert portfolio['c'].init_cash == portfolio.init_cash['c']
pd.testing.assert_series_equal(portfolio['c'].call_seq, portfolio.call_seq['c'])
assert portfolio[['c']].wrapper == portfolio.wrapper[['c']]
assert portfolio[['c']].orders == portfolio.orders[['c']]
assert portfolio[['c']].logs == portfolio.logs[['c']]
pd.testing.assert_series_equal(portfolio[['c']].init_cash, portfolio.init_cash[['c']])
pd.testing.assert_frame_equal(portfolio[['c']].call_seq, portfolio.call_seq[['c']])
assert portfolio_grouped['first'].wrapper == portfolio_grouped.wrapper['first']
assert portfolio_grouped['first'].orders == portfolio_grouped.orders['first']
assert portfolio_grouped['first'].logs == portfolio_grouped.logs['first']
assert portfolio_grouped['first'].init_cash == portfolio_grouped.init_cash['first']
pd.testing.assert_frame_equal(portfolio_grouped['first'].call_seq, portfolio_grouped.call_seq[['a', 'b']])
assert portfolio_grouped[['first']].wrapper == portfolio_grouped.wrapper[['first']]
assert portfolio_grouped[['first']].orders == portfolio_grouped.orders[['first']]
assert portfolio_grouped[['first']].logs == portfolio_grouped.logs[['first']]
pd.testing.assert_series_equal(
portfolio_grouped[['first']].init_cash,
portfolio_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(portfolio_grouped[['first']].call_seq, portfolio_grouped.call_seq[['a', 'b']])
assert portfolio_grouped['second'].wrapper == portfolio_grouped.wrapper['second']
assert portfolio_grouped['second'].orders == portfolio_grouped.orders['second']
assert portfolio_grouped['second'].logs == portfolio_grouped.logs['second']
assert portfolio_grouped['second'].init_cash == portfolio_grouped.init_cash['second']
pd.testing.assert_series_equal(portfolio_grouped['second'].call_seq, portfolio_grouped.call_seq['c'])
assert portfolio_grouped[['second']].wrapper == portfolio_grouped.wrapper[['second']]
assert portfolio_grouped[['second']].orders == portfolio_grouped.orders[['second']]
assert portfolio_grouped[['second']].logs == portfolio_grouped.logs[['second']]
pd.testing.assert_series_equal(
portfolio_grouped[['second']].init_cash,
portfolio_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(portfolio_grouped[['second']].call_seq, portfolio_grouped.call_seq[['c']])
assert portfolio_shared['first'].wrapper == portfolio_shared.wrapper['first']
assert portfolio_shared['first'].orders == portfolio_shared.orders['first']
assert portfolio_shared['first'].logs == portfolio_shared.logs['first']
assert portfolio_shared['first'].init_cash == portfolio_shared.init_cash['first']
pd.testing.assert_frame_equal(portfolio_shared['first'].call_seq, portfolio_shared.call_seq[['a', 'b']])
assert portfolio_shared[['first']].wrapper == portfolio_shared.wrapper[['first']]
assert portfolio_shared[['first']].orders == portfolio_shared.orders[['first']]
assert portfolio_shared[['first']].logs == portfolio_shared.logs[['first']]
pd.testing.assert_series_equal(
portfolio_shared[['first']].init_cash,
portfolio_shared.init_cash[['first']])
pd.testing.assert_frame_equal(portfolio_shared[['first']].call_seq, portfolio_shared.call_seq[['a', 'b']])
assert portfolio_shared['second'].wrapper == portfolio_shared.wrapper['second']
assert portfolio_shared['second'].orders == portfolio_shared.orders['second']
assert portfolio_shared['second'].logs == portfolio_shared.logs['second']
assert portfolio_shared['second'].init_cash == portfolio_shared.init_cash['second']
pd.testing.assert_series_equal(portfolio_shared['second'].call_seq, portfolio_shared.call_seq['c'])
assert portfolio_shared[['second']].wrapper == portfolio_shared.wrapper[['second']]
assert portfolio_shared[['second']].orders == portfolio_shared.orders[['second']]
assert portfolio_shared[['second']].logs == portfolio_shared.logs[['second']]
pd.testing.assert_series_equal(
portfolio_shared[['second']].init_cash,
portfolio_shared.init_cash[['second']])
pd.testing.assert_frame_equal(portfolio_shared[['second']].call_seq, portfolio_shared.call_seq[['c']])
def test_regroup(self):
assert portfolio.regroup(None) == portfolio
assert portfolio.regroup(False) == portfolio
assert portfolio.regroup(group_by) != portfolio
pd.testing.assert_index_equal(portfolio.regroup(group_by).wrapper.grouper.group_by, group_by)
assert portfolio_grouped.regroup(None) == portfolio_grouped
assert portfolio_grouped.regroup(False) != portfolio_grouped
assert portfolio_grouped.regroup(False).wrapper.grouper.group_by is None
assert portfolio_grouped.regroup(group_by) == portfolio_grouped
assert portfolio_shared.regroup(None) == portfolio_shared
with pytest.raises(Exception) as e_info:
_ = portfolio_shared.regroup(False)
assert portfolio_shared.regroup(group_by) == portfolio_shared
def test_cash_sharing(self):
assert not portfolio.cash_sharing
assert not portfolio_grouped.cash_sharing
assert portfolio_shared.cash_sharing
def test_call_seq(self):
pd.testing.assert_frame_equal(
portfolio.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
def test_incl_unrealized(self):
assert not vbt.Portfolio.from_orders(price_na, 1000., incl_unrealized=False).incl_unrealized
assert vbt.Portfolio.from_orders(price_na, 1000., incl_unrealized=True).incl_unrealized
def test_orders(self):
record_arrays_close(
portfolio.orders.values,
np.array([
(0, 1, 0, 0.1, 2.02, 0.10202, 0), (1, 2, 0, 0.1, 2.9699999999999998, 0.10297, 1),
(2, 4, 0, 1.0, 5.05, 0.1505, 0), (3, 0, 1, 1.0, 0.99, 0.10990000000000001, 1),
(4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 3, 1, 0.1, 4.04, 0.10404000000000001, 0),
(6, 4, 1, 1.0, 4.95, 0.14950000000000002, 1), (7, 0, 2, 1.0, 1.01, 0.1101, 0),
(8, 1, 2, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
(10, 3, 2, 0.1, 3.96, 0.10396000000000001, 1)
], dtype=order_dt)
)
result = pd.Series(
np.array([3, 4, 4]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.orders.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_orders(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_orders(group_by=False).count(),
result
)
result = pd.Series(
np.array([7, 4]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_orders(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.orders.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.orders.count(),
result
)
def test_logs(self):
record_arrays_close(
portfolio.logs.values,
np.array([
(0, 0, 0, 0, 100.0, 0.0, np.nan, 100.0, 1.0, 0, 0, np.nan, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.0, 0.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(1, 1, 0, 0, 100.0, 0.0, 2.0, 100.0, 0.1, 0, 0, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.69598, 0.1, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
(2, 2, 0, 0, 99.69598, 0.1, 3.0, 99.99598, -1.0, 0, 0, 3.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.89001, 0.0, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
(3, 3, 0, 0, 99.89001, 0.0, 4.0, 99.89001, -0.1, 0, 0, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.89001, 0.0, np.nan, np.nan, np.nan, -1, 2, 8, -1),
(4, 4, 0, 0, 99.89001, 0.0, 5.0, 99.89001, 1.0, 0, 0, 5.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 94.68951, 1.0, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
(5, 0, 1, 1, 100.0, 0.0, 1.0, 100.0, 1.0, 0, 1, 1.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.8801, -1.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
(6, 1, 1, 1, 100.8801, -1.0, 2.0, 98.8801, 0.1, 0, 1, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.97612, -1.1, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
(7, 2, 1, 1, 100.97612, -1.1, np.nan, np.nan, -1.0, 0, 1, np.nan, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.97612, -1.1, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(8, 3, 1, 1, 100.97612, -1.1, 4.0, 96.57611999999999, -0.1, 0, 1, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.46808, -1.0, 0.1, 4.04, 0.10404000000000001, 0, 0, -1, 5),
(9, 4, 1, 1, 100.46808, -1.0, 5.0, 95.46808, 1.0, 0, 1, 5.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 105.26858, -2.0, 1.0, 4.95, 0.14950000000000002, 1, 0, -1, 6),
(10, 0, 2, 2, 100.0, 0.0, 1.0, 100.0, 1.0, 0, 2, 1.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 98.8799, 1.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
(11, 1, 2, 2, 98.8799, 1.0, 2.0, 100.8799, 0.1, 0, 2, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 98.57588000000001, 1.1, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
(12, 2, 2, 2, 98.57588000000001, 1.1, 3.0, 101.87588000000001, -1.0, 0, 2, 3.0,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, True, False, True, 101.41618000000001,
0.10000000000000009, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
(13, 3, 2, 2, 101.41618000000001, 0.10000000000000009, 4.0, 101.81618000000002,
-0.1, 0, 2, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, True, False, True,
101.70822000000001, 0.0, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
(14, 4, 2, 2, 101.70822000000001, 0.0, np.nan, 101.70822000000001, 1.0, 0, 2, np.nan, 0.01, 0.1, 0.01,
1e-08, np.inf, 0.0, True, False, True, 101.70822000000001, 0.0, np.nan, np.nan, np.nan, -1, 1, 1, -1)
], dtype=log_dt)
)
result = pd.Series(
np.array([5, 5, 5]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.logs.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_logs(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_logs(group_by=False).count(),
result
)
result = pd.Series(
np.array([10, 5]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_logs(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.logs.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.logs.count(),
result
)
def test_trades(self):
record_arrays_close(
portfolio.trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
(3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
-4.312118181818182, -0.7151108095884214, 1, 0, 2),
(4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
(5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 2, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.trades.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([4, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.trades.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.trades.count(),
result
)
def test_positions(self):
record_arrays_close(
portfolio.positions.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0),
(2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0),
(3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1)
], dtype=position_dt)
)
result = pd.Series(
np.array([2, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.positions.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_positions(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_positions(group_by=False).count(),
result
)
result = pd.Series(
np.array([3, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_positions(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.positions.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.positions.count(),
result
)
def test_drawdowns(self):
record_arrays_close(
portfolio.drawdowns.values,
np.array([
(0, 0, 0, 4, 4, 0), (1, 1, 0, 4, 4, 0), (2, 2, 2, 3, 4, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(portfolio.close, price_na)
pd.testing.assert_frame_equal(portfolio_grouped.close, price_na)
pd.testing.assert_frame_equal(portfolio_shared.close, price_na)
def test_fill_close(self):
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=False, bfill=False),
price_na
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=True, bfill=False),
price_na.ffill()
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=False, bfill=True),
price_na.bfill()
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=True, bfill=True),
price_na.ffill().bfill()
)
def test_share_flow(self):
pd.testing.assert_frame_equal(
portfolio.share_flow(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 0.1],
[-0.1, 0., -1.],
[0., 0., -0.1],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.share_flow(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 0.1, 0.],
[0., 0., 0.],
[0., -0.1, 0.],
[0., 1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -0.1, 0.1],
[-0.1, 0., -1.],
[0., 0.1, -0.1],
[1., -1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.share_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.share_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.share_flow(),
result
)
def test_shares(self):
pd.testing.assert_frame_equal(
portfolio.shares(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 1.1],
[0., 0., 0.1],
[0., 0., 0.],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.shares(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 1.1, 0.],
[0., 1.1, 0.],
[0., 1., 0.],
[0., 2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -1.1, 1.1],
[0., -1.1, 0.1],
[0., -1., 0.],
[1., -2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.shares(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.shares(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.shares(),
result
)
def test_pos_mask(self):
pd.testing.assert_frame_equal(
portfolio.pos_mask(direction='longonly'),
pd.DataFrame(
np.array([
[False, False, True],
[True, False, True],
[False, False, True],
[False, False, False],
[True, False, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(direction='shortonly'),
pd.DataFrame(
np.array([
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[False, True, True],
[True, True, True],
[False, True, True],
[False, True, False],
[True, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.pos_mask(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.pos_mask(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[True, True],
[True, True],
[True, True],
[True, False],
[True, False]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.pos_mask(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.pos_mask(),
result
)
def test_pos_coverage(self):
pd.testing.assert_series_equal(
portfolio.pos_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('pos_coverage')
)
pd.testing.assert_series_equal(
portfolio.pos_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('pos_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('pos_coverage')
pd.testing.assert_series_equal(
portfolio.pos_coverage(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.pos_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.pos_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('pos_coverage')
pd.testing.assert_series_equal(
portfolio.pos_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.pos_coverage(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.pos_coverage(),
result
)
def test_cash_flow(self):
pd.testing.assert_frame_equal(
portfolio.cash_flow(short_cash=False),
pd.DataFrame(
np.array([
[0., -1.0999, -1.1201],
[-0.30402, -0.29998, -0.30402],
[0.19403, 0., 2.8403],
[0., 0.29996, 0.29204],
[-5.2005, -5.0995, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., 0.8801, -1.1201],
[-0.30402, 0.09602, -0.30402],
[0.19403, 0., 2.8403],
[0., -0.50804, 0.29204],
[-5.2005, 4.8005, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.cash_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash_flow(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash_flow(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0.8801, -1.1201],
[-0.208, -0.30402],
[0.19403, 2.8403],
[-0.50804, 0.29204],
[-0.4, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.cash_flow(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash_flow(),
result
)
def test_init_cash(self):
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
portfolio_grouped.get_init_cash(group_by=False),
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
portfolio_shared.get_init_cash(group_by=False),
pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
)
result = pd.Series(
np.array([200., 100.]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
pd.testing.assert_series_equal(
portfolio.get_init_cash(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.init_cash,
result
)
pd.testing.assert_series_equal(
portfolio_shared.init_cash,
result
)
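# InitCashMode.Auto infers the minimum initial cash needed to fulfill all orders per
# column/group; AutoAlign (further below) additionally aligns that amount to the largest
# requirement across columns/groups.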
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
pd.Series(
np.array([14000., 12000., 10000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=None).init_cash,
pd.Series(
np.array([14000., 14000., 14000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
def test_cash(self):
pd.testing.assert_frame_equal(
portfolio.cash(short_cash=False),
pd.DataFrame(
np.array([
[100., 98.9001, 98.8799],
[99.69598, 98.60012, 98.57588],
[99.89001, 98.60012, 101.41618],
[99.89001, 98.90008, 101.70822],
[94.68951, 93.80058, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[100., 100.8801, 98.8799],
[99.69598, 100.97612, 98.57588],
[99.89001, 100.97612, 101.41618],
[99.89001, 100.46808, 101.70822],
[94.68951, 105.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.cash(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash(group_by=False),
pd.DataFrame(
np.array([
[200., 200.8801, 98.8799],
[199.69598, 200.97612, 98.57588],
[199.89001, 200.97612, 101.41618],
[199.89001, 200.46808, 101.70822],
[194.68951, 205.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
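# With in_sim_order=True the shared cash balance is reported in the exact order the
# columns were processed during the simulation, rather than as per-column snapshots.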
pd.testing.assert_frame_equal(
portfolio_shared.cash(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[200.8801, 200.8801, 98.8799],
[200.6721, 200.97612, 98.57588000000001],
[200.86613, 200.6721, 101.41618000000001],
[200.35809, 200.35809, 101.70822000000001],
[199.95809, 205.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200.8801, 98.8799],
[200.6721, 98.57588],
[200.86613, 101.41618],
[200.35809, 101.70822],
[199.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.cash(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash(),
result
)
def test_holding_value(self):
pd.testing.assert_frame_equal(
portfolio.holding_value(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.2, 0., 2.2],
[0., 0., 0.3],
[0., 0., 0.],
[5., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.holding_value(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 2.2, 0.],
[0., np.nan, 0.],
[0., 4., 0.],
[0., 10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.2, -2.2, 2.2],
[0., np.nan, 0.3],
[0., -4., 0.],
[5., -10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.holding_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.holding_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.holding_value(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-1., 1.],
[-2., 2.2],
[np.nan, 0.3],
[-4., 0.],
[-5., 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.holding_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.holding_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.holding_value(),
result
)
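# Gross exposure is based on the absolute position value relative to capital, while net
# exposure (next test) keeps the sign, so short positions enter negatively.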
def test_gross_exposure(self):
pd.testing.assert_frame_equal(
portfolio.gross_exposure(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 0.01001202],
[0.00200208, 0., 0.02183062],
[0., 0., 0.00294938],
[0., 0., 0.],
[0.05015573, 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 0.01001, 0.],
[0., 0.02182537, 0.],
[0., np.nan, 0.],
[0., 0.03887266, 0.],
[0., 0.09633858, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -0.01021449, 0.01001202],
[0.00200208, -0.02282155, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.0421496, 0.],
[0.05015573, -0.11933092, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.gross_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.gross_exposure(group_by=False),
pd.DataFrame(
np.array([
[0., -0.00505305, 0.01001202],
[0.00100052, -0.01120162, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.02052334, 0.],
[0.02503887, -0.05440679, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.005003, 0.01001202],
[-0.01006684, 0.02183062],
[np.nan, 0.00294938],
[-0.02037095, 0.],
[-0.02564654, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.gross_exposure(),
result
)
def test_net_exposure(self):
result = pd.DataFrame(
np.array([
[0., -0.01001, 0.01001202],
[0.00200208, -0.02182537, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.03887266, 0.],
[0.05015573, -0.09633858, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.net_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.net_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.net_exposure(group_by=False),
pd.DataFrame(
np.array([
[0., -0.0050025, 0.01001202],
[0.00100052, -0.01095617, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.01971414, 0.],
[0.02503887, -0.04906757, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.00495344, 0.01001202],
[-0.00984861, 0.02183062],
[np.nan, 0.00294938],
[-0.01957348, 0.],
[-0.02323332, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.net_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.net_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.net_exposure(),
result
)
def test_value(self):
result = pd.DataFrame(
np.array([
[100., 99.8801, 99.8799],
[99.89598, 98.77612, 100.77588],
[99.89001, np.nan, 101.71618],
[99.89001, 96.46808, 101.70822],
[99.68951, 95.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.value(group_by=False),
pd.DataFrame(
np.array([
[200., 199.8801, 99.8799],
[199.89598, 198.77612, 100.77588],
[199.89001, np.nan, 101.71618],
[199.89001, 196.46808, 101.70822],
[199.68951, 195.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.value(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[199.8801, 199.8801, 99.8799],
[198.6721, 198.77612000000002, 100.77588000000002],
[np.nan, np.nan, 101.71618000000001],
[196.35809, 196.35809, 101.70822000000001],
[194.95809, 195.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[199.8801, 99.8799],
[198.6721, 100.77588],
[np.nan, 101.71618],
[196.35809, 101.70822],
[194.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.value(),
result
)
def test_total_profit(self):
result = pd.Series(
np.array([-0.31049, -4.73142, 1.70822]),
index=price_na.columns
).rename('total_profit')
pd.testing.assert_series_equal(
portfolio.total_profit(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_profit(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_profit(group_by=False),
result
)
result = pd.Series(
np.array([-5.04191, 1.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_profit')
pd.testing.assert_series_equal(
portfolio.total_profit(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_profit(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_profit(),
result
)
def test_final_value(self):
result = pd.Series(
np.array([99.68951, 95.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
pd.testing.assert_series_equal(
portfolio.final_value(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.final_value(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.final_value(group_by=False),
pd.Series(
np.array([199.68951, 195.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
)
result = pd.Series(
np.array([194.95809, 101.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('final_value')
pd.testing.assert_series_equal(
portfolio.final_value(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.final_value(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.final_value(),
result
)
def test_total_return(self):
result = pd.Series(
np.array([-0.0031049, -0.0473142, 0.0170822]),
index=price_na.columns
).rename('total_return')
pd.testing.assert_series_equal(
portfolio.total_return(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_return(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_return(group_by=False),
pd.Series(
np.array([-0.00155245, -0.0236571, 0.0170822]),
index=price_na.columns
).rename('total_return')
)
result = pd.Series(
np.array([-0.02520955, 0.0170822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_return')
pd.testing.assert_series_equal(
portfolio.total_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_return(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_return(),
result
)
def test_returns(self):
result = pd.DataFrame(
np.array([
[0.00000000e+00, -1.19900000e-03, -1.20100000e-03],
[-1.04020000e-03, -1.10530526e-02, 8.97057366e-03],
[-5.97621646e-05, np.nan, 9.33060570e-03],
[0.00000000e+00, np.nan, -7.82569695e-05],
[-2.00720773e-03, -1.24341648e-02, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(group_by=False),
pd.DataFrame(
np.array([
[0.00000000e+00, -5.99500000e-04, -1.20100000e-03],
[-5.20100000e-04, -5.52321117e-03, 8.97057366e-03],
[-2.98655331e-05, np.nan, 9.33060570e-03],
[0.00000000e+00, np.nan, -7.82569695e-05],
[-1.00305163e-03, -6.10531746e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[0.0, -0.0005995000000000062, -1.20100000e-03],
[-0.0005233022960706736, -0.005523211165093367, 8.97057366e-03],
[np.nan, np.nan, 9.33060570e-03],
[0.0, np.nan, -7.82569695e-05],
[-0.0010273695869600474, -0.0061087373583639994, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-5.99500000e-04, -1.20100000e-03],
[-6.04362315e-03, 8.97057366e-03],
[np.nan, 9.33060570e-03],
[np.nan, -7.82569695e-05],
[-7.12983101e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(),
result
)
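# Active returns relate P&L to the capital tied up in positions rather than to total
# equity; the -inf entries appear where a position is entered from zero prior exposure.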
def test_active_returns(self):
result = pd.DataFrame(
np.array([
[0., -np.inf, -np.inf],
[-np.inf, -1.10398, 0.89598],
[-0.02985, np.nan, 0.42740909],
[0., np.nan, -0.02653333],
[-np.inf, -0.299875, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.active_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.active_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.active_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-np.inf, -np.inf],
[-1.208, 0.89598],
[np.nan, 0.42740909],
[np.nan, -0.02653333],
[-0.35, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.active_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.active_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.active_returns(),
result
)
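# market_value tracks a buy-and-hold benchmark: the initial capital invested in the asset
# and revalued with its (NaN-tolerant) returns at every bar.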
def test_market_value(self):
result = pd.DataFrame(
np.array([
[100., 100., 100.],
[100., 200., 200.],
[150., 200., 300.],
[200., 400., 400.],
[250., 500., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.market_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_value(group_by=False),
pd.DataFrame(
np.array([
[200., 200., 100.],
[200., 400., 200.],
[300., 400., 300.],
[400., 800., 400.],
[500., 1000., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200., 100.],
[300., 200.],
[350., 300.],
[600., 400.],
[750., 400.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.market_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_value(),
result
)
def test_market_returns(self):
result = pd.DataFrame(
np.array([
[0., 0., 0.],
[0., 1., 1.],
[0.5, 0., 0.5],
[0.33333333, 1., 0.33333333],
[0.25, 0.25, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.market_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0., 0.],
[0.5, 1.],
[0.16666667, 0.5],
[0.71428571, 0.33333333],
[0.25, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.market_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_returns(),
result
)
def test_total_market_return(self):
result = pd.Series(
np.array([1.5, 4., 3.]),
index=price_na.columns
).rename('total_market_return')
pd.testing.assert_series_equal(
portfolio.total_market_return(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_market_return(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_market_return(group_by=False),
result
)
result = pd.Series(
np.array([2.75, 3.]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_market_return')
pd.testing.assert_series_equal(
portfolio.total_market_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_market_return(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_market_return(),
result
)
def test_return_method(self):
pd.testing.assert_frame_equal(
portfolio_shared.cumulative_returns(),
pd.DataFrame(
np.array([
[-0.0005995, -0.001201],
[-0.0066395, 0.0077588],
[-0.0066395, 0.0171618],
[-0.0066395, 0.0170822],
[-0.01372199, 0.0170822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
)
pd.testing.assert_frame_equal(
portfolio_shared.cumulative_returns(group_by=False),
pd.DataFrame(
np.array([
[0., -0.0005995, -0.001201],
[-0.0005201, -0.0061194, 0.0077588],
[-0.00054995, -0.0061194, 0.0171618],
[-0.00054995, -0.0061194, 0.0170822],
[-0.00155245, -0.01218736, 0.0170822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(),
pd.Series(
np.array([-20.82791491, 10.2576347]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(risk_free=0.01),
pd.Series(
np.array([-66.19490297745766, -19.873024060759022]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(year_freq='365D'),
pd.Series(
np.array([-25.06639947, 12.34506527]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(group_by=False),
pd.Series(
np.array([-11.058998255347488, -21.39151322377427, 10.257634695847853]),
index=price_na.columns
).rename('sharpe_ratio')
)
def test_stats(self):
pd.testing.assert_series_equal(
portfolio.stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -1.1112299999999966,
-1.1112299999999966, 283.3333333333333, 66.66666666666667,
1.6451238489727062, 1.6451238489727062,
pd.Timedelta('3 days 08:00:00'), pd.Timedelta('3 days 08:00:00'),
1.3333333333333333, 33.333333333333336, -98.38058805880588,
-100.8038553855386, -99.59222172217225,
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 04:00:00'),
0.10827272727272726, 1.2350921335789007, -0.01041305691622876,
-7.373390156195147, 25.695952942372134, 5717.085878360386
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='stats_mean')
)
pd.testing.assert_series_equal(
portfolio['a'].stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -0.3104900000000015,
-0.3104900000000015, 150.0, 40.0, 0.3104900000000015,
0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
pd.Timedelta('4 days 00:00:00'), 1, 0.0, -54.450495049504966,
-54.450495049504966, -54.450495049504966,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
-0.10999000000000003, np.nan, 0.010431562217554364,
-11.057783842772304, -9.75393669809172, -46.721467294341814
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='a')
)
pd.testing.assert_series_equal(
portfolio['a'].stats(required_return=0.1, risk_free=0.01),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -0.3104900000000015,
-0.3104900000000015, 150.0, 40.0, 0.3104900000000015,
0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
pd.Timedelta('4 days 00:00:00'), 1, 0.0, -54.450495049504966,
-54.450495049504966, -54.450495049504966,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
-0.10999000000000003, np.nan, 0.010431562217554364,
-188.9975847831419, -15.874008737030774, -46.721467294341814
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='a')
)
pd.testing.assert_series_equal(
portfolio['a'].stats(active_returns=True),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -0.3104900000000015,
-0.3104900000000015, 150.0, 40.0, 0.3104900000000015,
0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
pd.Timedelta('4 days 00:00:00'), 1, 0.0, -54.450495049504966,
-54.450495049504966, -54.450495049504966,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('1 days 00:00:00'),
-0.10999000000000003, np.nan, 0.010431562217554364, np.nan, np.nan, np.nan
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='a')
)
pd.testing.assert_series_equal(
portfolio['a'].stats(incl_unrealized=True),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -0.3104900000000015,
-0.3104900000000015, 150.0, 40.0, 0.3104900000000015,
0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
pd.Timedelta('4 days 00:00:00'), 2, 0.0, -3.9702970297029667,
-54.450495049504966, -29.210396039603964,
pd.Timedelta('1 days 00:00:00'), pd.Timedelta('0 days 12:00:00'),
-0.1552449999999999, -3.43044967406917, 0.010431562217554364,
-11.057783842772304, -9.75393669809172, -46.721467294341814
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='a')
)
pd.testing.assert_series_equal(
portfolio_grouped['first'].stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 200.0, -5.0419100000000014,
-2.5209550000000007, 275.0, 70.0, 2.46248125751388,
2.46248125751388, pd.Timedelta('4 days 00:00:00'),
pd.Timedelta('4 days 00:00:00'), 2, 0.0, -54.450495049504966,
-388.2424242424243, -221.34645964596461,
pd.Timedelta('3 days 00:00:00'), pd.Timedelta('2 days 00:00:00'),
-0.2646459090909091, -1.711191707103453, -0.015271830375806438,
-20.827914910501114, -13.477807138901431, -38.202477209943744
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='first')
)
pd.testing.assert_series_equal(
portfolio['c'].stats(),
portfolio.stats(column='c')
)
pd.testing.assert_series_equal(
portfolio['c'].stats(),
portfolio_grouped.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
portfolio_grouped['second'].stats(),
portfolio_grouped.stats(column='second')
)
def test_returns_stats(self):
pd.testing.assert_series_equal(
portfolio.returns_stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), -0.3514495165378495,
283.3333333333333, 16.311169116434524, 6.502016923413218,
-7.373390156195147, 5717.085878360386, -0.8844312951385028,
4.768700318817701, 25.695952942372134, 0.3292440354159287,
-1.5661463405418332, 3.21984512467388, 7.438903089386976,
-0.005028770371222715, -0.3453376142959778, 0.0014301079570843596
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Total Return [%]', 'Benchmark Return [%]',
'Annual Return [%]', 'Annual Volatility [%]', 'Sharpe Ratio',
'Calmar Ratio', 'Max. Drawdown [%]', 'Omega Ratio', 'Sortino Ratio',
'Skew', 'Kurtosis', 'Tail Ratio', 'Common Sense Ratio', 'Value at Risk',
'Alpha', 'Beta'
], dtype='object'),
name='stats_mean')
)
pd.testing.assert_series_equal(
portfolio['a'].returns_stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), | pd.Timestamp('2020-01-05 00:00:00') | pandas.Timestamp |
"""xgboost"""
from __future__ import annotations
import logging
import pathlib
import typing
import numpy as np
import pandas as pd
import sklearn.metrics
import pytoolkit as tk
from .core import Model
logger = logging.getLogger(__name__)
class XGBModel(Model):
"""XGBoost model.
Args:
params: XGBoost parameters
nfold: number of CV folds
models_dir: directory to save the models to
early_stopping_rounds: parameter passed to xgboost.cv
num_boost_round: parameter passed to xgboost.cv
verbose_eval: parameter passed to xgboost.cv
callbacks: parameter passed to xgboost.cv
cv_params: additional parameters passed to xgboost.cv (kwargs)
"""
def __init__(
self,
params: dict[str, typing.Any],
nfold: int,
models_dir: tk.typing.PathLike,
early_stopping_rounds: int = 200,
num_boost_round: int = 9999,
verbose_eval: int = 100,
callbacks: list[typing.Callable[[typing.Any], None]] = None,
cv_params: dict[str, typing.Any] = None,
preprocessors: tk.pipeline.EstimatorListType = None,
postprocessors: tk.pipeline.EstimatorListType = None,
):
import xgboost
super().__init__(nfold, models_dir, preprocessors, postprocessors)
self.params = params
self.early_stopping_rounds = early_stopping_rounds
self.num_boost_round = num_boost_round
self.verbose_eval = verbose_eval
self.callbacks = callbacks
self.cv_params = cv_params
self.gbms_: list[xgboost.Booster] | None = None
self.best_ntree_limit_: int | None = None
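# Rough usage sketch (illustrative values; the public train/predict entry points are
# expected to come from the `Model` base class in .core, which calls into the
# _cv/_save/_load/_predict hooks implemented below):
#   model = XGBModel(params={"objective": "binary:logistic", "eval_metric": "logloss"},
#                    nfold=5, models_dir="models/xgb")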
def _save(self, models_dir: pathlib.Path):
assert self.gbms_ is not None
tk.utils.dump(self.gbms_, models_dir / "model.pkl")
tk.utils.dump(self.best_ntree_limit_, models_dir / "best_ntree_limit.pkl")
# Also dump the feature importance alongside the model.
df_importance = self.feature_importance()
df_importance.to_excel(str(models_dir / "feature_importance.xlsx"))
def _load(self, models_dir: pathlib.Path):
self.gbms_ = tk.utils.load(models_dir / "model.pkl")
self.best_ntree_limit_ = tk.utils.load(models_dir / "best_ntree_limit.pkl")
assert self.gbms_ is not None
assert len(self.gbms_) == self.nfold
def _cv(self, dataset: tk.data.Dataset, folds: tk.validation.FoldsType) -> None:
import xgboost
assert isinstance(dataset.data, pd.DataFrame)
train_set = xgboost.DMatrix(
data=dataset.data,
label=dataset.labels,
weight=dataset.weights,
feature_names=dataset.data.columns.values,
)
self.gbms_ = []
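# xgboost.cv trains one booster per fold; this callback copies them out of the callback
# environment on every round so the fitted boosters remain accessible after cv() finishes.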
def model_extractor(env):
assert self.gbms_ is not None
self.gbms_.clear()
self.gbms_.extend([f.bst for f in env.cvfolds])
eval_hist = xgboost.cv(
self.params,
dtrain=train_set,
folds=folds,
callbacks=(self.callbacks or []) + [model_extractor],
num_boost_round=self.num_boost_round,
early_stopping_rounds=self.early_stopping_rounds,
verbose_eval=self.verbose_eval,
**(self.cv_params or {}),
)
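# eval_hist holds per-round metrics; keep the final "-mean" value of each metric as the
# CV score and use the number of rounds as the ntree limit for prediction.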
scores = {}
for k, v in eval_hist.items():
if k.endswith("-mean"):
name, score = k[:-5], v.values[-1]
scores[name] = score
logger.info(f"cv {name}: {score:,.3f}")
self.best_ntree_limit_ = len(v)
def _predict(self, dataset: tk.data.Dataset, fold: int) -> np.ndarray:
import xgboost
assert self.gbms_ is not None
assert self.best_ntree_limit_ is not None
assert isinstance(dataset.data, pd.DataFrame)
data = xgboost.DMatrix(
data=dataset.data, feature_names=dataset.data.columns.values
)
return self.gbms_[fold].predict(data, ntree_limit=self.best_ntree_limit_)
def feature_importance(self, importance_type: str = "total_gain"):
"""Return the feature importance as a DataFrame."""
assert self.gbms_ is not None
columns = self.gbms_[0].feature_names
for gbm in self.gbms_:
assert tuple(columns) == tuple(gbm.feature_names)
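# Sum the requested importance type over all fold models; features a model never split on
# are missing from get_score() and count as 0.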
fi = np.zeros((len(columns),), dtype=np.float32)
for gbm in self.gbms_:
d = gbm.get_score(importance_type=importance_type)
fi += [d.get(c, 0) for c in columns]
return | pd.DataFrame(data={"importance": fi}, index=columns) | pandas.DataFrame |
import json
import logging
import warnings
import os
import pathlib
import platform
import tempfile
import shutil
import pandas as pd
from cellpy.exceptions import UnderDefined
from cellpy.parameters import prms
from cellpy.readers import dbreader
from cellpy.parameters.internal_settings import (
get_headers_journal,
keys_journal_session,
)
from cellpy.parameters.legacy.internal_settings import (
headers_journal_v0 as hdr_journal_old,
)
from cellpy.utils.batch_tools.batch_core import BaseJournal
from cellpy.utils.batch_tools.engines import simple_db_engine
hdr_journal = get_headers_journal()
trans_dict = {}
missing_keys = []
for key in hdr_journal:
if key in hdr_journal_old:
trans_dict[hdr_journal_old[key]] = hdr_journal[key]
else:
missing_keys.append(key)
class LabJournal(BaseJournal):
def __init__(self, db_reader="default"):
super().__init__()
if db_reader == "default":
self.db_reader = dbreader.Reader()
else:
logging.debug(f"Remark! db_reader: {db_reader}")
self.db_reader = db_reader
self.batch_col = "b01"
def _check_file_name(self, file_name, to_project_folder=False):
if file_name is None:
if not self.file_name:
self.generate_file_name()
file_name = pathlib.Path(self.file_name)
else:
file_name = pathlib.Path(file_name)
if to_project_folder:
file_name = file_name.with_suffix(".json")
file_name = pathlib.Path(self.project_dir) / file_name
self.file_name = file_name # updates object (maybe not smart)
return file_name
def from_db(self, project=None, name=None, batch_col=None, **kwargs):
logging.debug("creating journal from db")
if batch_col is None:
batch_col = self.batch_col
if project is not None:
self.project = project
if name is None:
name = self.name
else:
self.name = name
logging.debug(f"batch_name, batch_col: {name}, {batch_col}")
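# Look up the cell serial numbers registered for this batch in the db and let
# simple_db_engine assemble the journal pages from them.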
if self.db_reader is not None:
srnos = self.db_reader.select_batch(name, batch_col)
self.pages = simple_db_engine(self.db_reader, srnos, **kwargs)
if self.pages.empty:
logging.critical(
f"EMPTY JOURNAL: are you sure you have provided correct input to batch?"
)
logging.critical(f"name: {name}")
logging.critical(f"project: {self.project}")
logging.critical(f"batch_col: {batch_col}")
else:
logging.debug("creating empty journal pages")
self.pages = pd.DataFrame()
self.generate_empty_session()
self.generate_folder_names()
self.paginate()
def generate_empty_session(self):
self.session = {}
for item in keys_journal_session:
self.session[item] = None
@staticmethod
def _fix_cellpy_paths(p):
if platform.system() != "Windows":
if p.find("\\") >= 0:
# convert from win to posix
p = pathlib.PureWindowsPath(p)
else:
if p.find("/") >= 0:
# convert from posix to win
p = pathlib.PurePosixPath(p)
return pathlib.Path(p)
@classmethod
def read_journal_jason_file(cls, file_name):
logging.debug(f"json loader starting on {file_name}")
with open(file_name, "r") as infile:
top_level_dict = json.load(infile)
pages_dict = top_level_dict["info_df"]
meta = top_level_dict["metadata"]
session = top_level_dict.get("session", None)
pages = pd.DataFrame(pages_dict)
if pages.empty:
logging.critical("could not find any pages in the journal")
raise UnderDefined
pages = cls._clean_pages(pages)
if session is None:
logging.debug(f"no session - generating empty one")
session = dict()
session, pages = cls._clean_session(session, pages)
return pages, meta, session
@classmethod
def read_journal_excel_file(cls, file_name, **kwargs):
sheet_names = {"meta": "meta", "pages": "pages", "session": "session"}
project = kwargs.pop("project", "NaN")
name = kwargs.pop("batch", pathlib.Path(file_name).stem)
_meta = {
"name": name,
"project": project,
"project_dir": pathlib.Path("."),
"batch_dir": pathlib.Path("."),
"raw_dir": pathlib.Path("."),
}
logging.debug(f"xlsx loader starting on {file_name}")
meta_sheet_name = sheet_names["meta"] # not tested yet
pages_sheet_name = sheet_names["pages"]
session_sheet_name = sheet_names["session"] # not tested yet
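# Read from a temporary copy of the workbook, presumably so a file that is still open
# (and locked) in Excel can be loaded anyway.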
temporary_directory = tempfile.mkdtemp()
temporary_file_name = shutil.copy(file_name, temporary_directory)
try:
pages = pd.read_excel(
temporary_file_name, engine="openpyxl", sheet_name=pages_sheet_name
)
except KeyError:
print(f"Worksheet '{pages_sheet_name}' does not exist.")
return None
try:
session = pd.read_excel(
temporary_file_name,
sheet_name=session_sheet_name,
engine="openpyxl",
header=[0, 1],
)
except (KeyError, ValueError):
print(f"Worksheet '{session_sheet_name}' does not exist.")
session = None
try:
meta = pd.read_excel(
temporary_file_name, sheet_name=meta_sheet_name, engine="openpyxl"
)
except (KeyError, ValueError):
print(f"Worksheet '{meta_sheet_name}' does not exist.")
meta = None
if pages.empty:
logging.critical("could not find any pages in the journal")
raise UnderDefined
pages = cls._clean_pages(pages)
pages = pages.set_index(hdr_journal.filename)
if meta is None:
meta = _meta
else:
meta = cls._unpack_meta(meta)
if session is None:
logging.debug(f"no session - generating empty one")
session = dict()
else:
session = cls._unpack_session(session)
session, pages = cls._clean_session(session, pages)
return pages, meta, session
@classmethod
def _unpack_session(cls, session):
try:
bcn2 = {
l: list(sb["cycle_index"].values)
for l, sb in session["bad_cycles"].groupby("cell_name")
}
except KeyError:
bcn2 = []
try:
bc2 = list(session["bad_cells"]["cell_name"].dropna().values.flatten())
except KeyError:
bc2 = []
try:
s2 = list(session["starred"]["cell_name"].dropna().values.flatten())
except KeyError:
s2 = []
try:
n2 = list(session["notes"]["txt"].dropna().values.flatten())
except KeyError:
n2 = []
session = {"bad_cycles": bcn2, "bad_cells": bc2, "starred": s2, "notes": n2}
return session
@classmethod
def _unpack_meta(cls, meta):
# TODO: add try-except
meta = meta.loc[:, ["parameter", "value"]]
meta = meta.set_index("parameter")
return meta.to_dict()["value"]
@classmethod
def _clean_session(cls, session, pages):
# include steps for cleaning up the session dict here
if not session:
logging.critical("no session found in your journal file")
for item in keys_journal_session:
session[item] = session.get(item, None)
return session, pages
@classmethod
def _clean_pages(cls, pages):
logging.debug("removing empty rows")
pages = pages.dropna(how="all")
logging.debug("checking path-names")
try:
pages[hdr_journal.cellpy_file_name] = pages[
hdr_journal.cellpy_file_name
].apply(cls._fix_cellpy_paths)
except KeyError:
# assumes it is a old type journal file
print(f"The key '{hdr_journal.cellpy_file_name}' is missing!")
print(f"Assumes that this is an old-type journal file.")
try:
pages.rename(columns=trans_dict, inplace=True)
pages[hdr_journal.cellpy_file_name] = pages[
hdr_journal.cellpy_file_name
].apply(cls._fix_cellpy_paths)
logging.warning("old journal file - updating")
except KeyError as e:
print("Error! Could still not parse the pages.")
print(f"Missing key: {hdr_journal.cellpy_file_name}")
pages[hdr_journal.cellpy_file_name] = None
# only keep selected cells if keep column exists
if "keep" in pages.columns:
logging.debug("Journal contains 'keep' - selecting only 'keep' > 0.")
pages = pages.loc[pages.keep > 0, :]
for column_name in missing_keys:
if column_name not in pages.columns:
logging.debug(f"wrong journal format - missing: {column_name}")
pages[column_name] = None
for column_name in hdr_journal:
if column_name not in pages.columns:
pages[column_name] = None
return pages
def from_file(self, file_name=None, paginate=True, **kwargs):
"""Loads a DataFrame with all the needed info about the experiment"""
file_name = self._check_file_name(file_name)
logging.debug(f"reading {file_name}")
if pathlib.Path(file_name).suffix.lower() == ".xlsx":
file_loader = self.read_journal_excel_file
else:
file_loader = self.read_journal_jason_file
try:
out = file_loader(file_name, **kwargs)
if out is None:
raise IOError(f"Error reading {file_name}.")
pages, meta_dict, session = out
except UnderDefined as e:
logging.critical(f"could not load {file_name}")
raise UnderDefined from e
logging.debug(f"got pages and meta_dict")
self.pages = pages
self.session = session
self.file_name = file_name
self._prm_packer(meta_dict)
if paginate:
self.generate_folder_names()
self.paginate()
def from_file_old(self, file_name=None):
"""Loads a DataFrame with all the needed info about the experiment"""
file_name = self._check_file_name(file_name)
with open(file_name, "r") as infile:
top_level_dict = json.load(infile)
pages_dict = top_level_dict["info_df"]
pages = pd.DataFrame(pages_dict)
pages[hdr_journal.cellpy_file_name] = pages[hdr_journal.cellpy_file_name].apply(
self._fix_cellpy_paths
)
self.pages = pages
self.file_name = file_name
self._prm_packer(top_level_dict["metadata"])
self.generate_folder_names()
self.paginate()
def create_empty_pages(self, description=None):
if description is not None:
print(f"Creating from {type(description)} is not implemented yet")
logging.debug("Creating an empty journal")
logging.debug(f"name: {self.name}")
logging.debug(f"project: {self.project}")
col_names = list(hdr_journal.values())
pages = pd.DataFrame(columns=col_names)
pages.set_index(hdr_journal.filename, inplace=True)
return pages
def to_file(self, file_name=None, paginate=True, to_project_folder=True):
"""Saves a DataFrame with all the needed info about the experiment.
Args:
file_name (str or pathlib.Path): journal file name
paginate (bool): make project folders
to_project_folder (bool): save journal file to the folder containing your cellpy projects
Returns:
"""
file_name = self._check_file_name(
file_name, to_project_folder=to_project_folder
)
pages = self.pages
session = self.session
meta = self._prm_packer()
top_level_dict = {
"info_df": pages,
"metadata": meta,
"session": session,
}
is_json = False
is_xlsx = False
if file_name.suffix == ".xlsx":
is_xlsx = True
if file_name.suffix == ".json":
is_json = True
if is_xlsx:
df_session = self._pack_session(session)
df_meta = self._pack_meta(meta)
try:
with pd.ExcelWriter(file_name, mode="w", engine="openpyxl") as writer:
pages.to_excel(writer, sheet_name="pages", engine="openpyxl")
# writing without the index is not supported for multi-index columns (switch to index=False once pandas implements it):
df_session.to_excel(writer, sheet_name="session", engine="openpyxl")
df_meta.to_excel(
writer, sheet_name="meta", engine="openpyxl", index=False
)
except PermissionError as e:
print(f"Could not load journal to xlsx ({e})")
if is_json:
jason_string = json.dumps(
top_level_dict,
default=lambda info_df: json.loads(
info_df.to_json(default_handler=str)
),
)
with open(file_name, "w") as outfile:
outfile.write(jason_string)
self.file_name = file_name
logging.info(f"Saved file to {file_name}")
if paginate:
self.paginate()
@staticmethod
def _pack_session(session):
frames = []
keys = []
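# Pack the per-cell lists of bad cycle numbers into one long-format frame with
# (cell_name, cycle_index) columns; the remaining session items become single-column frames.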
try:
l_bad_cycle_numbers = []
for k, v in session["bad_cycles"].items():
l_bad_cycle_numbers.append(pd.DataFrame(data=v, columns=[k]))
df_bad_cycle_numbers = (
pd.concat(l_bad_cycle_numbers, axis=1)
.melt(var_name="cell_name", value_name="cycle_index")
.dropna()
)
frames.append(df_bad_cycle_numbers)
keys.append("bad_cycles")
except KeyError:
logging.debug("missing bad cycle numbers")
df_bad_cells = pd.DataFrame(session["bad_cells"], columns=["cell_name"])
frames.append(df_bad_cells)
keys.append("bad_cells")
df_starred = pd.DataFrame(session["starred"], columns=["cell_name"])
frames.append(df_starred)
keys.append("starred")
df_notes = pd.DataFrame(session["notes"], columns=["txt"])
frames.append(df_notes)
keys.append("notes")
session = pd.concat(frames, axis=1, keys=keys)
return session
@staticmethod
def _pack_meta(meta):
meta = | pd.DataFrame(meta, index=[0]) | pandas.DataFrame |
"""
"""
"""
>>> # ---
>>> # SETUP
>>> # ---
>>> import os
>>> import logging
>>> logger = logging.getLogger('PT3S.Rm')
>>> # ---
>>> # path
>>> # ---
>>> if __name__ == "__main__":
... try:
... dummy=__file__
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ','path = os.path.dirname(__file__)'," ."))
... path = os.path.dirname(__file__)
... except NameError:
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ',"path = '.' because __file__ not defined and: "," from Rm import Rm"))
... path = '.'
... from Rm import Rm
... else:
... path = '.'
... logger.debug("{0:s}{1:s}".format('Not __main__ Context: ',"path = '.' ."))
>>> try:
... from PT3S import Mx
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Mx: ImportError: ","trying import Mx instead ... maybe pip install -e . is active ..."))
... import Mx
>>> try:
... from PT3S import Xm
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Xm: ImportError: ","trying import Xm instead ... maybe pip install -e . is active ..."))
... import Xm
>>> # ---
>>> # testDir
>>> # ---
>>> # globs={'testDir':'testdata'}
>>> try:
... dummy= testDir
... except NameError:
... testDir='testdata'
>>> # ---
>>> # dotResolution
>>> # ---
>>> # globs={'dotResolution':''}
>>> try:
... dummy= dotResolution
... except NameError:
... dotResolution=''
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> pd.set_option('display.max_columns',None)
>>> pd.set_option('display.width',666666666)
>>> # ---
>>> # LocalHeatingNetwork SETUP
>>> # ---
>>> xmlFile=os.path.join(os.path.join(path,testDir),'LocalHeatingNetwork.XML')
>>> xm=Xm.Xm(xmlFile=xmlFile)
>>> mx1File=os.path.join(path,os.path.join(testDir,'WDLocalHeatingNetwork\B1\V0\BZ1\M-1-0-1'+dotResolution+'.MX1'))
>>> mx=Mx.Mx(mx1File=mx1File,NoH5Read=True,NoMxsRead=True)
>>> mx.setResultsToMxsFile(NewH5Vec=True)
5
>>> xm.MxSync(mx=mx)
>>> rm=Rm(xm=xm,mx=mx)
>>> # ---
>>> # Plot 3Classes False
>>> # ---
>>> plt.close('all')
>>> ppi=72 # matplotlib default
>>> dpi_screen=2*ppi
>>> fig=plt.figure(dpi=dpi_screen,linewidth=1.)
>>> timeDeltaToT=mx.df.index[2]-mx.df.index[0]
>>> # 3Classes und FixedLimits sind standardmaessig Falsch; RefPerc ist standardmaessig Wahr
>>> # die Belegung von MCategory gemaess FixedLimitsHigh/Low erfolgt immer ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66,pFWVBGCategory=['BLNZ1u5u7'],pVICsDf=pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']}))
>>> # ---
>>> # Check pFWVB Return
>>> # ---
>>> f=lambda x: "{0:8.5f}".format(x)
>>> print(pFWVB[['Measure','MCategory','GCategory','VIC']].round(2).to_string(formatters={'Measure':f}))
Measure MCategory GCategory VIC
0 0.81000 Top BLNZ1u5u7 NaN
1 0.67000 Middle NaN
2 0.66000 Middle BLNZ1u5u7 NaN
3 0.66000 Bottom BLNZ1u5u7 VIC1
4 0.69000 Middle NaN
>>> # ---
>>> # Print
>>> # ---
>>> (wD,fileName)=os.path.split(xm.xmlFile)
>>> (base,ext)=os.path.splitext(fileName)
>>> plotFileName=wD+os.path.sep+base+'.'+'pdf'
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
>>> plt.savefig(plotFileName,dpi=2*dpi_screen)
>>> os.path.exists(plotFileName)
True
>>> # ---
>>> # Plot 3Classes True
>>> # ---
>>> plt.close('all')
>>> # FixedLimits wird automatisch auf Wahr gesetzt wenn 3Classes Wahr ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasure3Classes=True,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66)
>>> # ---
>>> # LocalHeatingNetwork Clean Up
>>> # ---
>>> if os.path.exists(mx.h5File):
... os.remove(mx.h5File)
>>> if os.path.exists(mx.mxsZipFile):
... os.remove(mx.mxsZipFile)
>>> if os.path.exists(mx.h5FileVecs):
... os.remove(mx.h5FileVecs)
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
"""
__version__='172.16.58.3.dev1'
import warnings # 3.6
#...\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
# from ._conv import register_converters as _register_converters
warnings.simplefilter(action='ignore', category=FutureWarning)
#C:\Users\Wolters\Anaconda3\lib\site-packages\matplotlib\cbook\deprecation.py:107: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
# warnings.warn(message, mplDeprecation, stacklevel=1)
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
import os
import sys
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import pandas as pd
import h5py
from collections import namedtuple
from operator import attrgetter
import subprocess
import tables
import math
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.colorbar import make_axes
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
from matplotlib import markers
from matplotlib.path import Path
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import scipy
import networkx as nx
from itertools import chain
from copy import deepcopy
from scipy.signal import savgol_filter
import logging
# ---
# --- PT3S Imports
# ---
logger = logging.getLogger('PT3S')
if __name__ == "__main__":
logger.debug("{0:s}{1:s}".format('in MODULEFILE: __main__ Context','.'))
else:
logger.debug("{0:s}{1:s}{2:s}{3:s}".format('in MODULEFILE: Not __main__ Context: ','__name__: ',__name__," ."))
try:
from PT3S import Mx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Mx - trying import Mx instead ... maybe pip install -e . is active ...'))
import Mx
try:
from PT3S import Xm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Xm - trying import Xm instead ... maybe pip install -e . is active ...'))
import Xm
try:
from PT3S import Am
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Am - trying import Am instead ... maybe pip install -e . is active ...'))
import Am
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
# ---
# --- main Imports
# ---
import argparse
import unittest
import doctest
from itertools import tee
# --- Parameter Allgemein
# -----------------------
DINA6 = (4.13 , 5.83)
DINA5 = (5.83 , 8.27)
DINA4 = (8.27 , 11.69)
DINA3 = (11.69 , 16.54)
DINA2 = (16.54 , 23.39)
DINA1 = (23.39 , 33.11)
DINA0 = (33.11 , 46.81)
DINA6q = ( 5.83, 4.13)
DINA5q = ( 8.27, 5.83)
DINA4q = ( 11.69, 8.27)
DINA3q = ( 16.54,11.69)
DINA2q = ( 23.39,16.54)
DINA1q = ( 33.11,23.39)
DINA0q = ( 46.81,33.11)
dpiSize=72
DINA4_x=8.2677165354
DINA4_y=11.6929133858
DINA3_x=DINA4_x*math.sqrt(2)
DINA3_y=DINA4_y*math.sqrt(2)
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
ylimpD=(-5,70)
ylimpDmlc=(600,1350) #(300,1050)
ylimQD=(-75,300)
ylim3rdD=(0,3)
yticks3rdD=[0,1,2,3]
yGridStepsD=30
yticksALD=[0,3,4,10,20,30,40]
ylimALD=(yticksALD[0],yticksALD[-1])
yticksRD=[0,2,4,10,15,30,45]
ylimRD=(-yticksRD[-1],yticksRD[-1])
ylimACD=(-5,5)
yticksACD=[-5,0,5]
yticksTVD=[0,100,135,180,200,300]
ylimTVD=(yticksTVD[0],yticksTVD[-1])
plotTVAmLabelD='TIMER u. AM [Sek. u. (N)m3*100]'
def getDerivative(df,col,shiftSize=1,windowSize=60,fct=None,savgol_polyorder=None):
"""
returns a df
df: the df
col: the col of df to be derived
shiftsize: the Difference between 2 indices for dValue and dt
windowSize: size for rolling mean or window_length of savgol_filter; choosen filtertechnique is applied after fct
windowsSize must be an even number
for savgol_filter windowsSize-1 is used
fct: function to be applied on dValue/dt
savgol_polyorder: if not None savgol_filter is applied; pandas' rolling.mean() is applied otherwise
new cols:
dt (with shiftSize)
dValue (from col)
dValueDt (from col); fct applied
dValueDtFiltered; choosen filtertechnique is applied
"""
mDf=df.dropna().copy(deep=True)
try:
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
if savgol_polyorder == None:
mDf['dValueDtFiltered']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
else:
mDf['dValueDtFiltered']=savgol_filter(mDf['dValueDt'].values,windowSize-1, savgol_polyorder)
mDf=mDf.iloc[windowSize//2+1+savgol_polyorder-1:]
#mDf=mDf.iloc[windowSize-1:]
except Exception as e:
raise e
finally:
return mDf
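# A minimal usage sketch for getDerivative (added for illustration; the DataFrame built
# here is synthetic and not part of the original module):
def _exampleGetDerivative():
    # 5 minutes of 1 Hz data with a linearly increasing value
    idx = pd.date_range('2021-03-19 01:02:00', periods=300, freq='S')
    df = pd.DataFrame({'value': np.linspace(0., 300., 300)}, index=idx)
    # rate of change of 'value' per second, smoothed with a rolling mean over 60 samples
    return getDerivative(df, 'value', shiftSize=1, windowSize=60)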
def fCVDNodesFromName(x):
Nodes=x.replace('°','~')
Nodes=Nodes.split('~')
Nodes =[Node.lstrip().rstrip() for Node in Nodes if len(Node)>0]
return Nodes
def fgetMaxpMinFromName(CVDName,dfSegsNodesNDataDpkt):
"""
returns the max. pMin over all NODEs in CVDName
"""
nodeLst=fCVDNodesFromName(CVDName)
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['NODEsName'].isin(nodeLst)][['pMin','pMinMlc']]
s=df.max()
return s.pMin
# --- Funktionen Allgemein
# -----------------------
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def genTimespans(timeStart
,timeEnd
,timeSpan=pd.Timedelta('12 Minutes')
,timeOverlap=pd.Timedelta('0 Seconds')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
):
# generates timeSpan-Sections
# if timeStart is
# an int, it is considered as the number of desired Sections before timeEnd; timeEnd must be a time
# a time, it is considered as timeStart
# if timeEnd is
# an int, it is considered as the number of desired Sections after timeStart; timeStart must be a time
# a time, it is considered as timeEnd
# if timeSpan is
# an int, it is considered as the number of desired Sections
# a time, it is considered as timeSpan
# returns an array of tuples
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
xlims=[]
try:
if type(timeStart) == int:
numOfDesiredSections=timeStart
timeStartEff=timeEnd+timeEndPostfix-numOfDesiredSections*timeSpan+(numOfDesiredSections-1)*timeOverlap-timeStartPraefix
else:
timeStartEff=timeStart-timeStartPraefix
logger.debug("{0:s}timeStartEff: {1:s}".format(logStr,str(timeStartEff)))
if type(timeEnd) == int:
numOfDesiredSections=timeEnd
timeEndEff=timeStart-timeStartPraefix+numOfDesiredSections*timeSpan-(numOfDesiredSections-1)*timeOverlap+timeEndPostfix
else:
timeEndEff=timeEnd+timeEndPostfix
logger.debug("{0:s}timeEndEff: {1:s}".format(logStr,str(timeEndEff)))
if type(timeSpan) == int:
numOfDesiredSections=timeSpan
dt=timeEndEff-timeStartEff
timeSpanEff=dt/numOfDesiredSections+(numOfDesiredSections-1)*timeOverlap
else:
timeSpanEff=timeSpan
logger.debug("{0:s}timeSpanEff: {1:s}".format(logStr,str(timeSpanEff)))
logger.debug("{0:s}timeOverlap: {1:s}".format(logStr,str(timeOverlap)))
timeStartAct = timeStartEff
while timeStartAct < timeEndEff:
logger.debug("{0:s}timeStartAct: {1:s}".format(logStr,str(timeStartAct)))
timeEndAct=timeStartAct+timeSpanEff
xlim=(timeStartAct,timeEndAct)
xlims.append(xlim)
timeStartAct = timeEndAct - timeOverlap
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
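# A minimal usage sketch for genTimespans (added for illustration; the timestamps are
# arbitrary examples):
def _exampleGenTimespans():
    timeStart = pd.Timestamp('2021-03-19 01:00:00')
    timeEnd = pd.Timestamp('2021-03-19 02:00:00')
    # 5 consecutive 12-minute sections between timeStart and timeEnd
    return genTimespans(timeStart, timeEnd, timeSpan=pd.Timedelta('12 Minutes'))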
def gen2Timespans(
timeStart # Anfang eines "Prozesses"
,timeEnd # Ende eines "Prozesses"
,timeSpan=pd.Timedelta('12 Minutes')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
,roundStr=None # i.e. '5min': timeStart.round(roundStr) und timeEnd dito
):
"""
erzeugt 2 gleich lange Zeitbereiche
1 um timeStart herum
1 um timeEnd herum
"""
#print("timeStartPraefix: {:s}".format(str(timeStartPraefix)))
#print("timeEndPostfix: {:s}".format(str(timeEndPostfix)))
xlims=[]
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if roundStr != None:
timeStart=timeStart.round(roundStr)
timeEnd=timeEnd.round(roundStr)
xlims.append((timeStart-timeStartPraefix,timeStart-timeStartPraefix+timeSpan))
xlims.append((timeEnd+timeEndPostfix-timeSpan,timeEnd+timeEndPostfix))
return xlims
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
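# A minimal usage sketch for gen2Timespans (added for illustration): two 12-minute
# windows, one around the (rounded) start and one around the (rounded) end of a process.
def _exampleGen2Timespans():
    timeStart = pd.Timestamp('2021-03-19 01:03:00')
    timeEnd = pd.Timestamp('2021-03-19 02:57:00')
    return gen2Timespans(timeStart, timeEnd, timeSpan=pd.Timedelta('12 Minutes'), roundStr='5min')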
def fTotalTimeFromPairs(
x
,denominator=None # i.e. pd.Timedelta('1 minute') for totalTime in Minutes
,roundToInt=True # round to and return as int if denominator is specified; otherwise td is rounded to 2 decimal places
):
tdTotal=pd.Timedelta('0 seconds')
for idx,tPairs in enumerate(x):
t1,t2=tPairs
if idx==0:
tLast=t2
else:
if t1 <= tLast:
print("Zeitpaar überlappt?!")
td=t2-t1
if td < pd.Timedelta('1 seconds'):
pass
#print("Zeitpaar < als 1 Sekunde?!")
tdTotal=tdTotal+td
if denominator==None:
return tdTotal
else:
td=tdTotal / denominator
if roundToInt:
td=int(round(td,0))
else:
td=round(td,2)
return td
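# A minimal usage sketch for fTotalTimeFromPairs (added for illustration): two disjoint
# 30-second intervals add up to 1 minute.
def _exampleFTotalTimeFromPairs():
    t = pd.Timestamp('2021-03-19 01:02:00')
    tPairs = [(t, t + pd.Timedelta('30 seconds'))
             ,(t + pd.Timedelta('60 seconds'), t + pd.Timedelta('90 seconds'))]
    return fTotalTimeFromPairs(tPairs, denominator=pd.Timedelta('1 minute'), roundToInt=True)  # -> 1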
def findAllTimeIntervalls(
df
,fct=lambda row: True if row['col'] == 46 else False
,tdAllowed=None
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
rows,cols=df.shape
if df.empty:
logger.debug("{:s}df ist leer".format(logStr))
elif rows == 1:
logger.debug("{:s}df hat nur 1 Zeile: {:s}".format(logStr,df.to_string()))
rowValue=fct(df.iloc[0])
if rowValue:
tPair=(df.index[0],df.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
# paarweise über alle Zeilen
for (i1, row1), (i2, row2) in pairwise(df.iterrows()):
row1Value=fct(row1)
row2Value=fct(row2)
# wenn 1 nicht x und 2 x tEin=t2 "geht Ein"
if not row1Value and row2Value:
tEin=i2
# wenn 1 x und 2 nicht x tAus=t2 "geht Aus"
elif row1Value and not row2Value:
if tEin != None:
# Paar speichern
tPair=(tEin,i1)
tPairs.append(tPair)
else:
pass # sonst: Bed. ist jetzt Aus und war nicht Ein
# Bed. kann nur im ersten Fall Ein gehen
# wenn 1 x und 2 x
elif row1Value and row2Value:
if tEin != None:
pass
else:
# im ersten Wertepaar ist der Bereich Ein
tEin=i1
# letztes Paar
if row1Value and row2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
if tdAllowed != None:
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
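# A minimal usage sketch for findAllTimeIntervalls (added for illustration; the default
# fct marks rows with col == 46):
def _exampleFindAllTimeIntervalls():
    idx = pd.date_range('2021-03-19 01:02:00', periods=4, freq='S')
    df = pd.DataFrame({'col': [0, 46, 46, 0]}, index=idx)
    # -> one pair: (01:02:01, 01:02:02)
    return findAllTimeIntervalls(df)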
def findAllTimeIntervallsSeries(
s=pd.Series()
,fct=lambda x: True if x == 46 else False
,tdAllowed=None # if not None all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=True
):
"""
# if fct is given:
#     find all [time ranges] for which fct is True; these time ranges are returned; only pairs are returned; solitary True values are not lost but are returned as a pair (t,t)
#     solitary True values are ONLY included if s contains just 1 value and this value is True; the single returned pair then carries the solitary timestamp for both times
#     tdAllowed can be specified
#         afterwards, time ranges that lie no more than tdAllowed apart are combined; these combined time ranges are then returned
# if fct is None:
#     tdAllowed must be specified
#     s is split into time ranges that lie no more than the threshold tdAllowed apart; these time ranges are returned
#     in general every returned time range has a start and an end (i.e. 2 times), even if the threshold has to be ignored once or several times for this
#     because no time range contained in s shall be lost
#     if s contains only 1 value, 1 time pair with the same timestamp for both times is returned, provided the value is not Null
# returns array of Time-Pair-Tuples
>>> import pandas as pd
>>> t=pd.Timestamp('2021-03-19 01:02:00')
>>> t1=t +pd.Timedelta('1 second')
>>> t2=t1+pd.Timedelta('1 second')
>>> t3=t2+pd.Timedelta('1 second')
>>> t4=t3+pd.Timedelta('1 second')
>>> t5=t4+pd.Timedelta('1 second')
>>> t6=t5+pd.Timedelta('1 second')
>>> t7=t6+pd.Timedelta('1 second')
>>> d = {t1: 46, t2: 0} # geht aus - kein Paar
>>> s1PaarGehtAus=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 46} # geht ein - kein Paar
>>> s1PaarGehtEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t5: 46, t6: 0} # geht ausE - kein Paar
>>> s1PaarGehtAusE=pd.Series(data=d, index=[t5, t6])
>>> d = {t5: 0, t6: 46} # geht einE - kein Paar
>>> s1PaarGehtEinE=pd.Series(data=d, index=[t5, t6])
>>> d = {t1: 46, t2: 46} # geht aus - ein Paar
>>> s1PaarEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 0} # geht aus - kein Paar
>>> s1PaarAus=pd.Series(data=d, index=[t1, t2])
>>> s2PaarAus=pd.concat([s1PaarGehtAus,s1PaarGehtAusE])
>>> s2PaarEin=pd.concat([s1PaarGehtEin,s1PaarGehtEinE])
>>> s2PaarAusEin=pd.concat([s1PaarGehtAus,s1PaarGehtEinE])
>>> s2PaarEinAus=pd.concat([s1PaarGehtEin,s1PaarGehtAusE])
>>> # 1 Wert
>>> d = {t1: 46} # 1 Wert - Wahr
>>> s1WertWahr=pd.Series(data=d, index=[t1])
>>> d = {t1: 44} # 1 Wert - Falsch
>>> s1WertFalsch=pd.Series(data=d, index=[t1])
>>> d = {t1: None} # 1 Wert - None
>>> s1WertNone=pd.Series(data=d, index=[t1])
>>> ###
>>> # 46 0
>>> # 0 46
>>> # 0 0
>>> # 46 46 !1 Paar
>>> # 46 0 46 0
>>> # 46 0 0 46
>>> # 0 46 0 46
>>> # 0 46 46 0 !1 Paar
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus)
[]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin)
[]
>>> findAllTimeIntervallsSeries(s1PaarEin)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarAusEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarEinAus)
[(Timestamp('2021-03-19 01:02:02'), Timestamp('2021-03-19 01:02:05'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertFalsch)
[]
>>> ###
>>> # 46 0 !1 Paar
>>> # 0 46 !1 Paar
>>> # 0 0 !1 Paar
>>> # 46 46 !1 Paar
>>> # 46 0 46 0 !2 Paare
>>> # 46 0 0 46 !2 Paare
>>> # 0 46 0 46 !2 Paare
>>> # 0 46 46 0 !2 Paare
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s2PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarAusEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEinAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr,fct=None)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertNone,fct=None)
[]
>>> ###
>>> d = {t1: 0, t3: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t1, t3])
>>> findAllTimeIntervallsSeries(s1PaarmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:03'))]
>>> d = {t4: 0, t5: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t4, t5])
>>> s2PaarmZoZ=pd.concat([s1PaarmZ,s1PaaroZ])
>>> findAllTimeIntervallsSeries(s2PaarmZoZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t1: 0, t2: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t1, t2])
>>> d = {t3: 0, t5: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t3, t5])
>>> s2PaaroZmZ=pd.concat([s1PaaroZ,s1PaarmZ])
>>> findAllTimeIntervallsSeries(s2PaaroZmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t6: 0, t7: 0}
>>> s1PaaroZ2=pd.Series(data=d, index=[t6, t7])
>>> d = {t4: 0}
>>> solitaer=pd.Series(data=d, index=[t4])
>>> s5er=pd.concat([s1PaaroZ,solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s5er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
>>> s3er=pd.concat([s1PaaroZ,solitaer])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:04'))]
>>> s3er=pd.concat([solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
if s.empty:
logger.debug("{:s}Series {!s:s} ist leer".format(logStr,s.name))
elif s.size == 1:
logger.debug("{:s}Series {!s:s} hat nur 1 Element: {:s}".format(logStr,s.name,s.to_string()))
if fct != None:
# 1 Paar mit selben Zeiten wenn das 1 Element Wahr
sValue=fct(s.iloc[0])
if sValue:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
# 1 Paar mit selben Zeiten wenn das 1 Element nicht None
sValue=s.iloc[0]
if sValue != None:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
if fct != None:
# paarweise über alle Zeiten
for idx,((i1, s1), (i2, s2)) in enumerate(pairwise(s.iteritems())):
s1Value=fct(s1)
s2Value=fct(s2)
# wenn 1 nicht x und 2 x tEin=t2 "geht Ein"
if not s1Value and s2Value:
tEin=i2
if idx > 0: # Info
pass
else:
# beim ersten Paar "geht Ein"
pass
# wenn 1 x und 2 nicht x tAus=t2 "geht Aus"
elif s1Value and not s2Value:
if tEin != None:
if tEin<i1:
# Paar speichern
tPair=(tEin,i1)
tPairs.append(tPair)
else:
# singulaeres Ereignis
# Paar mit selben Zeiten
tPair=(tEin,i1)
tPairs.append(tPair)
pass
else: # geht Aus ohne Ein zu sein
if idx > 0: # Info
pass
else:
# im ersten Paar
pass
# wenn 1 x und 2 x
elif s1Value and s2Value:
if tEin != None:
pass
else:
# im ersten Wertepaar ist der Bereich Ein
tEin=i1
# Behandlung letztes Paar
# bleibt Ein am Ende der Series: Paar speichern
if s1Value and s2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
# Behandlung tdAllowed
if tdAllowed != None:
if debugOutput:
logger.debug("{:s}Series {!s:s}: Intervalle werden mit {!s:s} zusammengefasst ...".format(logStr,s.name,tdAllowed))
tPairsOld=tPairs.copy()
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed,debugOutput=debugOutput)
if debugOutput:
tPairsZusammengefasst=sorted(list(set(tPairsOld) - set(tPairs)))
if len(tPairsZusammengefasst)>0:
logger.debug("{:s}Series {!s:s}: Intervalle wurden wg. {!s:s} zusammengefasst. Nachfolgend die zusgefassten Intervalle: {!s:s}. Sowie die entsprechenden neuen: {!s:s}".format(
logStr
,s.name
,tdAllowed
,tPairsZusammengefasst
,sorted(list(set(tPairs) - set(tPairsOld)))
))
else:
# paarweise über alle Zeiten
# neues Paar beginnen
anzInPair=1 # Anzahl der Zeiten in aktueller Zeitspanne
for (i1, s1), (i2, s2) in pairwise(s.iteritems()):
td=i2-i1
if td > tdAllowed: # Zeit zwischen 2 Zeiten > als Schwelle: Zeitspanne ist abgeschlossen
if tEin==None:
# erstes Paar liegt bereits > als Schwelle auseinander
# Zeitspannenabschluss wird ignoriert, denn sonst Zeitspanne mit nur 1 Wert
# aktuelle Zeitspanne beginnt beim 1. Wert und geht über Schwellwert
tEin=i1
anzInPair=2
else:
if anzInPair>=2:
# Zeitspanne abschließen
tPair=(tEin,i1)
tPairs.append(tPair)
# neue Zeitspanne beginnen
tEin=i2
anzInPair=1
else:
# Zeitspannenabschluss wird ignoriert, denn sonst Zeitspanne mit nur 1 Wert
anzInPair=2
else: # Zeitspanne zugelassen, weiter ...
if tEin==None:
tEin=i1
anzInPair=anzInPair+1
# letztes Zeitpaar behandeln
if anzInPair>=2:
tPair=(tEin,i2)
tPairs.append(tPair)
else:
# ein letzter Wert wuerde ueber bleiben, letzte Zeitspanne verlängern ...
tPair=tPairs[-1]
tPair=(tPair[0],i2)
tPairs[-1]=tPair
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
def fCombineSubsequenttPairs(
tPairs
,tdAllowed=pd.Timedelta('1 second') # all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=False
):
# returns tPairs
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
for idx,(tp1,tp2) in enumerate(pairwise(tPairs)):
t1Ende=tp1[1]
t2Start=tp2[0]
if t2Start-t1Ende <= tdAllowed:
if debugOutput:
logger.debug("{:s} t1Ende: {!s:s} t2Start: {!s:s} Gap: {!s:s}".format(logStr,t1Ende,t2Start,t2Start-t1Ende))
tPairs[idx]=(tp1[0],tp2[1]) # Folgepaar in vorheriges Paar integrieren
tPairs.remove(tp2) # Folgepaar löschen
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed) # Rekursion
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
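# A minimal usage sketch for fCombineSubsequenttPairs (added for illustration): two
# intervals separated by a 1-second gap are merged into one.
def _exampleFCombineSubsequenttPairs():
    t = pd.Timestamp('2021-03-19 01:02:00')
    tPairs = [(t, t + pd.Timedelta('10 seconds'))
             ,(t + pd.Timedelta('11 seconds'), t + pd.Timedelta('20 seconds'))]
    return fCombineSubsequenttPairs(tPairs, tdAllowed=pd.Timedelta('1 second'))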
class RmError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
# --- Parameter und Funktionen LDS Reports
# ----------------------------------------
def pltMakeCategoricalColors(color,nOfSubColorsReq=3,reversedOrder=False):
"""
Returns an array of rgb colors derived from color.
Parameter:
color: a rgb color
nOfSubColorsReq: number of SubColors requested
Raises:
RmError
>>> import matplotlib
>>> color='red'
>>> c=list(matplotlib.colors.to_rgb(color))
>>> import Rm
>>> Rm.pltMakeCategoricalColors(c)
array([[1. , 0. , 0. ],
[1. , 0.375, 0.375],
[1. , 0.75 , 0.75 ]])
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
rgb=None
try:
chsv = matplotlib.colors.rgb_to_hsv(color[:3])
arhsv = np.tile(chsv,nOfSubColorsReq).reshape(nOfSubColorsReq,3)
arhsv[:,1] = np.linspace(chsv[1],0.25,nOfSubColorsReq)
arhsv[:,2] = np.linspace(chsv[2],1,nOfSubColorsReq)
rgb = matplotlib.colors.hsv_to_rgb(arhsv)
if reversedOrder:
rgb=list(reversed(rgb))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return rgb
# Farben fuer Druecke
SrcColorp='green'
SrcColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorp)),nOfSubColorsReq=4,reversedOrder=False)
# erste Farbe ist Original-Farbe
SnkColorp='blue'
SnkColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorp)),nOfSubColorsReq=4,reversedOrder=True)
# letzte Farbe ist Original-Farbe
# Farben fuer Fluesse
SrcColorQ='red'
SrcColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorQ)),nOfSubColorsReq=4,reversedOrder=False)
# erste Farbe ist Original-Farbe
SnkColorQ='orange'
SnkColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorQ)),nOfSubColorsReq=4,reversedOrder=True)
# letzte Farbe ist Original-Farbe
lwBig=4.5
lwSmall=2.5
attrsDct={ 'p Src':{'color':SrcColorp,'lw':lwBig,'where':'post'}
,'p Snk':{'color':SnkColorp,'lw':lwSmall+1.,'where':'post'}
,'p Snk 2':{'color':'mediumorchid','where':'post'}
,'p Snk 3':{'color':'darkviolet','where':'post'}
,'p Snk 4':{'color':'plum','where':'post'}
,'Q Src':{'color':SrcColorQ,'lw':lwBig,'where':'post'}
,'Q Snk':{'color':SnkColorQ,'lw':lwSmall+1.,'where':'post'}
,'Q Snk 2':{'color':'indianred','where':'post'}
,'Q Snk 3':{'color':'coral','where':'post'}
,'Q Snk 4':{'color':'salmon','where':'post'}
,'Q Src RTTM':{'color':SrcColorQ,'lw':matplotlib.rcParams['lines.linewidth']+1.,'ls':'dotted','where':'post'}
,'Q Snk RTTM':{'color':SnkColorQ,'lw':matplotlib.rcParams['lines.linewidth'] ,'ls':'dotted','where':'post'}
,'Q Snk 2 RTTM':{'color':'indianred','ls':'dotted','where':'post'}
,'Q Snk 3 RTTM':{'color':'coral','ls':'dotted','where':'post'}
,'Q Snk 4 RTTM':{'color':'salmon','ls':'dotted','where':'post'}
,'p ISrc 1':{'color':SrcColorsp[-1],'ls':'dashdot','where':'post'}
,'p ISrc 2':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 3':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'} # ab hier selbe Farbe
,'p ISrc 4':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 5':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 6':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISnk 1':{'color':SnkColorsp[0],'ls':'dashdot','where':'post'}
,'p ISnk 2':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 3':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'} # ab hier selbe Farbe
,'p ISnk 4':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 5':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 6':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'Q xSrc 1':{'color':SrcColorsQ[-1],'ls':'dashdot','where':'post'}
,'Q xSrc 2':{'color':SrcColorsQ[-2],'ls':'dashdot','where':'post'}
,'Q xSrc 3':{'color':SrcColorsQ[-3],'ls':'dashdot','where':'post'}
,'Q xSnk 1':{'color':SnkColorsQ[0],'ls':'dashdot','where':'post'}
,'Q xSnk 2':{'color':SnkColorsQ[1],'ls':'dashdot','where':'post'}
,'Q xSnk 3':{'color':SnkColorsQ[2],'ls':'dashdot','where':'post'}
,'Q (DE) Me':{'color': 'indigo','ls': 'dashdot','where': 'post','lw':1.5}
,'Q (DE) Re':{'color': 'cyan','ls': 'dashdot','where': 'post','lw':3.5}
,'p (DE) SS Me':{'color': 'magenta','ls': 'dashdot','where': 'post'}
,'p (DE) DS Me':{'color': 'darkviolet','ls': 'dashdot','where': 'post'}
,'p (DE) SS Re':{'color': 'magenta','ls': 'dotted','where': 'post'}
,'p (DE) DS Re':{'color': 'darkviolet','ls': 'dotted','where': 'post'}
,'p OPC LDSErgV':{'color':'olive'
,'lw':lwSmall-.5
,'ms':matplotlib.rcParams['lines.markersize']
,'marker':'x'
,'mec':'olive'
,'mfc':'olive'
,'where':'post'}
,'p OPC Src':{'color':SrcColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorp
,'mfc':SrcColorQ
,'where':'post'}
,'p OPC Snk':{'color':SnkColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorp
,'mfc':SnkColorQ
,'where':'post'}
,'Q OPC Src':{'color':SrcColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorQ
,'mfc':SrcColorp
,'where':'post'}
,'Q OPC Snk':{'color':SnkColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorQ
,'mfc':SnkColorp
,'where':'post'}
}
attrsDctLDS={
'Seg_AL_S_Attrs':{'color':'blue','lw':3.,'where':'post'}
,'Druck_AL_S_Attrs':{'color':'blue','lw':3.,'ls':'dashed','where':'post'}
,'Seg_MZ_AV_Attrs':{'color':'orange','zorder':3,'where':'post'}
,'Druck_MZ_AV_Attrs':{'color':'orange','zorder':3,'ls':'dashed','where':'post'}
,'Seg_LR_AV_Attrs':{'color':'green','zorder':1,'where':'post'}
,'Druck_LR_AV_Attrs':{'color':'green','zorder':1,'ls':'dashed','where':'post'}
,'Seg_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'where':'post'}
,'Druck_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'ls':'dashed','where':'post'}
,'Seg_NG_AV_Attrs':{'color':'red','zorder':2,'where':'post'}
,'Druck_NG_AV_Attrs':{'color':'red','zorder':2,'ls':'dashed','where':'post'}
,'Seg_SB_S_Attrs':{'color':'black','alpha':.5,'where':'post'}
,'Druck_SB_S_Attrs':{'color':'black','ls':'dashed','alpha':.75,'where':'post','lw':1.0}
,'Seg_AC_AV_Attrs':{'color':'indigo','where':'post'}
,'Druck_AC_AV_Attrs':{'color':'indigo','ls':'dashed','where':'post'}
,'Seg_ACF_AV_Attrs':{'color':'blueviolet','where':'post','lw':1.0}
,'Druck_ACF_AV_Attrs':{'color':'blueviolet','ls':'dashed','where':'post','lw':1.0}
,'Seg_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_TIMER_AV_Attrs':{'color':'chartreuse','where':'post'}
,'Druck_TIMER_AV_Attrs':{'color':'chartreuse','ls':'dashed','where':'post'}
,'Seg_AM_AV_Attrs':{'color':'chocolate','where':'post'}
,'Druck_AM_AV_Attrs':{'color':'chocolate','ls':'dashed','where':'post'}
#
,'Seg_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_DPDT_AV_Attrs':{'color':'fuchsia','where':'post','lw':2.0}
,'Druck_DPDT_AV_Attrs':{'color':'fuchsia','ls':'dashed','where':'post','lw':2.0}
,'Seg_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[6][1],'where':'post','lw':1.0} # 'loosely dashdotted'
,'Druck_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[10][1],'where':'post','lw':1.0} # 'loosely dashdotdotted'
}
pSIDEvents=re.compile('(?P<Prae>IMDI\.)?Objects\.(?P<colRegExMiddle>3S_FBG_ESCHIEBER|FBG_ESCHIEBER{1})\.(3S_)?(?P<colRegExSchieberID>[a-z,A-Z,0-9,_]+)\.(?P<colRegExEventID>(In\.ZUST|In\.LAEUFT|In\.LAEUFT_NICHT|In\.STOER|Out\.AUF|Out\.HALT|Out\.ZU)$)')
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
eventCCmds={ 'Out.AUF':0
,'Out.ZU':1
,'Out.HALT':2}
eventCStats={'In.LAEUFT':3
,'In.LAEUFT_NICHT':4
,'In.ZUST':5
,'Out.AUF':6
,'Out.ZU':7
,'Out.HALT':8
,'In.STOER':9}
valRegExMiddleCmds='3S_FBG_ESCHIEBER' # colRegExMiddle-Auspraegung fuer Befehle (==> eventCCmds)
LDSParameter=[
'ACC_SLOWTRANSIENT'
,'ACC_TRANSIENT'
,'DESIGNFLOW'
,'DT'
,'FILTERWINDOW'
#,'L_PERCENT'
,'L_PERCENT_STDY'
,'L_PERCENT_STRAN'
,'L_PERCENT_TRANS'
,'L_SHUTOFF'
,'L_SLOWTRANSIENT'
,'L_SLOWTRANSIENTQP'
,'L_STANDSTILL'
,'L_STANDSTILLQP'
,'L_TRANSIENT'
,'L_TRANSIENTQP'
,'L_TRANSIENTVBIGF'
,'L_TRANSIENTPDNTF'
,'MEAN'
,'NAME'
,'ORDER'
,'TIMER'
,'TTIMERTOALARM'
,'TIMERTOLISS'
,'TIMERTOLIST'
]
LDSParameterDataD={
'ACC_SLOWTRANSIENT':0.1
,'ACC_TRANSIENT':0.8
,'DESIGNFLOW':250.
,'DT':1
,'FILTERWINDOW':180
#,'L_PERCENT':1.6
,'L_PERCENT_STDY':1.6
,'L_PERCENT_STRAN':1.6
,'L_PERCENT_TRANS':1.6
,'L_SHUTOFF':2.
,'L_SLOWTRANSIENT':4.
,'L_SLOWTRANSIENTQP':4.
,'L_STANDSTILL':2.
,'L_STANDSTILLQP':2.
,'L_TRANSIENT':10.
,'L_TRANSIENTQP':10.
,'L_TRANSIENTVBIGF':3.
,'L_TRANSIENTPDNTF':1.5
,'MEAN':1
,'ORDER':1
,'TIMER':180
,'TTIMERTOALARM':45 # TIMER/4
,'TIMERTOLISS':180
,'TIMERTOLIST':180
,'NAME':''
}
def fSEGNameFromPV_2(Beschr):
# fSEGNameFromSWVTBeschr
# 2,3,4,5
if Beschr in ['',None]:
return None
m=re.search(Lx.pID,Beschr)
if m == None:
return Beschr
return m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')
def fSEGNameFromPV_3(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
def fSEGNameFromPV_3m(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
#print("C4: {:s} C6: {:s}".format(m.group('C4'),m.group('C6')))
if m.group('C4')=='AAD' and m.group('C6')=='_OHN':
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_OHV1'
elif m.group('C4')=='OHN' and m.group('C6')=='_NGD':
return m.group('C3')+'_'+'OHV2'+'_'+m.group('C5')+m.group('C6')
else:
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
# Ableitung eines DIVPipelineNamens von PV
def fDIVNameFromPV(PV):
m=re.search(Lx.pID,PV)
return m.group('C2')+'-'+m.group('C4')
# Ableitung eines DIVPipelineNamens von SEGName
def fDIVNameFromSEGName(SEGName):
if pd.isnull(SEGName):
return None
# dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
m=re.search('(\d+)_(\w+)_(\w+)_(\w+)',SEGName)
if m == None:
return SEGName
return m.group(1)+'_'+m.group(3)
#def getNamesFromOPCITEM_ID(dfSegsNodesNDataDpkt
# ,OPCITEM_ID):
# """
# Returns tuple (DIVPipelineName,SEGName) from OPCITEM_ID PH
# """
# df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['OPCITEM_ID']==OPCITEM_ID]
# if not df.empty:
# return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def fGetBaseIDFromResID(
ID='Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.MW.value'
):
"""
Returns 'Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.'
works in principle for SEG and Druck results: every result PV of a vector yields the base valid for all result PVs of that vector
i.e. the result PVs of a vector differ only in their suffix
see also fGetSEGBaseIDFromSEGName
"""
if pd.isnull(ID):
return None
m=re.search(Lx.pID,ID)
if m == None:
return None
try:
base=m.group('A')+'.'+m.group('B')\
+'.'+m.group('C1')\
+'_'+m.group('C2')\
+'_'+m.group('C3')\
+'_'+m.group('C4')\
+'_'+m.group('C5')\
+m.group('C6')
#print(m.groups())
#print(m.groupdict())
if 'C7' in m.groupdict().keys():
if m.group('C7') != None:
base=base+m.group('C7')
base=base+'.'+m.group('D')\
+'.'
#print(base)
except:
base=m.group(0)+' (Fehler in fGetBaseIDFromResID)'
return base
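# A minimal usage sketch for fGetBaseIDFromResID (added for illustration; the ID is the
# default value from the signature above):
def _exampleFGetBaseIDFromResID():
    # expected result per the docstring: 'Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.'
    return fGetBaseIDFromResID('Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.MW.value')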
def fGetSEGBaseIDFromSEGName(
SEGName='6_AAD_41_OHV1'
):
"""
Returns 'Objects.3S_FBG_SEG_INFO.3S_L_'+SEGName+'.In.'
In some cases SEGName is manipulated ...
see also fGetBaseIDFromResID
"""
if SEGName == '6_AAD_41_OHV1':
x='6_AAD_41_OHN'
elif SEGName == '6_OHV2_41_NGD':
x='6_OHN_41_NGD'
else:
x=SEGName
return 'Objects.3S_FBG_SEG_INFO.3S_L_'+x+'.In.'
def getNamesFromSEGResIDBase(dfSegsNodesNDataDpkt
,SEGResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName) from SEGResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==SEGResIDBase]
if not df.empty:
return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt
,DruckResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) from DruckResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase']==DruckResIDBase]
if not df.empty:
#return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0],df['SEGResIDBase'].iloc[0])
tupleLst=[]
for index,row in df.iterrows():
tupleItem=(row['DIVPipelineName'],row['SEGName'],row['SEGResIDBase'],row['SEGOnlyInLDSPara'])
tupleLst.append(tupleItem)
return tupleLst
else:
return []
def fGetErgIDsFromBaseID(
baseID='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
,dfODI=pd.DataFrame() # df mit ODI Parametrierungsdaten
,strSep=' '
,patternPat='^IMDI.' #
,pattern=True # nur ergIDs, fuer die 2ndPatternPat zutrifft liefern
):
"""
returns string
mit strSep getrennten IDs aus dfODI, welche baseID enthalten (und bei pattern WAHR patternPat matchen)
baseID (und group(0) von patternPat bei pattern WAHR) sind in den IDs entfernt
"""
if baseID in [None,'']:
return None
df=dfODI[dfODI.index.str.contains(baseID)]
if df.empty:
return None
if pattern:
ergIDs=''.join([e.replace(baseID,'').replace(re.search(patternPat,e).group(0),'')+' ' for e in df.index if re.search(patternPat,e) != None])
else:
ergIDs=''.join([e.replace(baseID,'')+' ' for e in df.index if re.search(patternPat,e) == None])
return ergIDs
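# A minimal usage sketch for fGetErgIDsFromBaseID (added for illustration): dfODI is
# assumed to be the ODI parametrization read via Lx.getDfFromODI(...); the baseID is the
# default value from the signature above.
def _exampleFGetErgIDsFromBaseID(dfODI):
    return fGetErgIDsFromBaseID(
        baseID='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
        ,dfODI=dfODI
        ,pattern=False # non-IMDI result IDs only
    )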
def dfSegsNodesNDataDpkt(
VersionsDir=r"C:\3s\Projekte\Projekt\04 - Versionen\Version82.3"
,Model=r"MDBDOC\FBG.mdb" # a Access Model
,am=None # a Access Model already processed
,SEGsDefPattern='(?P<SEG_Ki>\S+)~(?P<SEG_Kk>\S+)$' # RSLW-Beschreibung: liefert die Knotennamen der Segmentdefinition ()
,RIDefPattern='(?P<Prae>\S+)\.(?P<Post>RICHT.S)$' # SWVT-Beschreibung (RICHT-DP): liefert u.a. SEGName
,fSEGNameFromPV_2=fSEGNameFromPV_2 # Funktion, die von SWVT-Beschreibung (RICHT-DP) u.a. SEGName liefert
,fGetBaseIDFromResID=fGetBaseIDFromResID # Funktion, die von OPCITEM-ID des PH-Kanals eines KNOTens den Wortstamm der Knotenergebnisse liefert
,fGetSEGBaseIDFromSEGName=fGetSEGBaseIDFromSEGName # Funktion, die aus SEGName den Wortstamm der Segmentergebnisse liefert
,LDSPara=r"App LDS\Modelle\WDFBG\B1\V0\BZ1\LDS_Para.xml"
,LDSParaPT=r"App LDS\SirOPC\AppLDS_DPDTParams.csv"
,ODI=r"App LDS\SirOPC\AppLDS_ODI.csv"
,LDSParameter=LDSParameter
,LDSParameterDataD=LDSParameterDataD
):
"""
all segments with path data (edge sequences), with edge and node data as well as parametrization data
returns df:
DIVPipelineName
SEGName
SEGNodes (Ki~Kk; Schluessel in LDSPara)
SEGOnlyInLDSPara
NODEsRef
NODEsRef_max
NODEsSEGLfdNr
NODEsSEGLfdNrType
NODEsName
OBJTYPE
ZKOR
Blockname
ATTRTYPE (PH)
CLIENT_ID
OPCITEM_ID
NAME (der DPKT-Gruppe)
DruckResIDBase
SEGResIDBase
SEGResIDs
SEGResIDsIMDI
DruckResIDs
DruckResIDsIMDI
NODEsSEGDruckErgLfdNr
# LDSPara
ACC_SLOWTRANSIENT
ACC_TRANSIENT
DESIGNFLOW
DT
FILTERWINDOW
L_PERCENT_STDY
L_PERCENT_STRAN
L_PERCENT_TRANS
L_SHUTOFF
L_SLOWTRANSIENT
L_SLOWTRANSIENTQP
L_STANDSTILL
L_STANDSTILLQP
L_TRANSIENT
L_TRANSIENTPDNTF
L_TRANSIENTQP
L_TRANSIENTVBIGF
MEAN
ORDER
TIMER
TIMERTOLISS
TIMERTOLIST
TTIMERTOALARM
# LDSParaPT
#ID
pMin
DT_Vorhaltemass
TTimer_PMin
Faktor_PMin
MaxL_PMin
pMinMlc
pMinMlcMinSEG
pMinMlcMaxSEG
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfSegsNodesNDataDpkt=pd.DataFrame()
try:
###### --- LDSPara
LDSParaFile=os.path.join(VersionsDir,LDSPara)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSPara',LDSPara))
with open(LDSParaFile) as f:
xml = f.read()
xmlWellFormed='<root>'+xml+'</root>'
root=ET.fromstring(xmlWellFormed)
LDSParameterData={}
for key in LDSParameterDataD.keys():
LDSParameterData[key]=[]
logger.debug("{:s}LDSParameter: {!s:s}.".format(logStr,LDSParameter))
for idx,element in enumerate(root.iter(tag='LDSI')):
attribKeysMute=[]
for key,value in element.attrib.items():
if key not in LDSParameter:
logger.warning("{:s}{:s}: Parameter: {:s} undefiniert.".format(logStr,element.attrib['NAME'],key))
attribKeysMute.append(key)
keysIst=element.attrib.keys()
keysSoll=set(LDSParameter)
keysExplizitFehlend=keysSoll-keysIst
LDSIParaDct=element.attrib
for key in keysExplizitFehlend:
if key=='ORDER':
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
elif key=='TTIMERTOALARM':
LDSIParaDct[key]=int(LDSIParaDct['TIMER'])/4
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
else:
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
keyListToProcess=[key for key in LDSIParaDct.keys() if key not in attribKeysMute]
for key in keyListToProcess:
LDSParameterData[key].append(LDSIParaDct[key])
df=pd.DataFrame.from_dict(LDSParameterData)
df=df.set_index('NAME').sort_index()
df.index.rename('SEGMENT', inplace=True)
df=df[sorted(df.columns.to_list())]
df = df.apply(pd.to_numeric)
#logger.debug("{:s}df: {:s}".format(logStr,df.to_string()))
logger.debug("{:s}Parameter, die nicht auf Standardwerten sind:".format(logStr))
for index, row in df.iterrows():
for colName, colValue in zip(df.columns.to_list(),row):
if colValue != LDSParameterDataD[colName]:
logger.debug("Segment: {:30s}: Parameter: {:20s} Wert: {:10s} (Standard: {:s})".format(index,colName,str(colValue),str(LDSParameterDataD[colName])))
dfPara=df
# --- Einlesen Modell
if am == None:
accFile=os.path.join(VersionsDir,Model)
logger.info("{:s}###### {:10s}: {:s}: Lesen und verarbeiten ...".format(logStr,'Modell',Model))
am=Am.Am(accFile=accFile)
V_BVZ_RSLW=am.dataFrames['V_BVZ_RSLW']
V_BVZ_SWVT=am.dataFrames['V_BVZ_SWVT']
V3_KNOT=am.dataFrames['V3_KNOT']
V3_VBEL=am.dataFrames['V3_VBEL']
V3_DPKT=am.dataFrames['V3_DPKT']
V3_RSLW_SWVT=am.dataFrames['V3_RSLW_SWVT']
# --- Segmente ermitteln
# --- per Modell
SEGsDefinesPerRICHT=V3_RSLW_SWVT[
(V3_RSLW_SWVT['BESCHREIBUNG'].str.match(SEGsDefPattern).isin([True])) # Muster Ki~Kk ...
& #!
(V3_RSLW_SWVT['BESCHREIBUNG_SWVT'].str.match(RIDefPattern).isin([True])) # Muster Förderrichtungs-PV ...
].copy(deep=True)
SEGsDefinesPerRICHT=SEGsDefinesPerRICHT[['BESCHREIBUNG','BESCHREIBUNG_SWVT']]
# --- nur per LDS Para
lSEGOnlyInLDSPara=[str(SEGNodes) for SEGNodes in dfPara.index if str(SEGNodes) not in SEGsDefinesPerRICHT['BESCHREIBUNG'].values]
for SEGNodes in lSEGOnlyInLDSPara:
logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
# --- zusammenfassen
SEGsDefines=pd.concat([SEGsDefinesPerRICHT,pd.DataFrame(lSEGOnlyInLDSPara,columns=['BESCHREIBUNG'])])
# Knotennamen der SEGDef ergänzen
df=SEGsDefines['BESCHREIBUNG'].str.extract(SEGsDefPattern,expand=True)
dfCols=df.columns.to_list()
SEGsDefines=pd.concat([SEGsDefines,df],axis=1)
# ausduennen
SEGsDefines=SEGsDefines[dfCols+['BESCHREIBUNG_SWVT','BESCHREIBUNG']]
# sortieren
SEGsDefines=SEGsDefines.sort_values(by=['BESCHREIBUNG_SWVT','BESCHREIBUNG']).reset_index(drop=True)
# SEGName
SEGsDefines['BESCHREIBUNG_SWVT']=SEGsDefines.apply(lambda row: row['BESCHREIBUNG_SWVT'] if not pd.isnull(row['BESCHREIBUNG_SWVT']) else row['BESCHREIBUNG'] ,axis=1)
#print(SEGsDefines)
SEGsDefines['SEGName']=SEGsDefines['BESCHREIBUNG_SWVT'].apply(lambda x: fSEGNameFromPV_2(x))
# --- Segmentkantenzuege ermitteln
dfSegsNodeLst={} # nur zu Kontrollzwecken
dfSegsNode=[]
for index,row in SEGsDefines[~SEGsDefines[dfCols[-1]].isnull()].iterrows():
df=Xm.Xm.constructShortestPathFromNodeList(df=V3_VBEL.reset_index()
,sourceCol='NAME_i'
,targetCol='NAME_k'
,nl=[row[dfCols[0]],row[dfCols[-1]]]
,weight=None,query=None,fmask=None,filterNonQ0Rows=True)
s=pd.concat([pd.Series([row[dfCols[0]]]),df['nextNODE']])
s.name=row['SEGName']
dfSegsNodeLst[row['SEGName']]=s.reset_index(drop=True)
df2=pd.DataFrame(s.reset_index(drop=True)).rename(columns={s.name:'NODEs'})
df2['SEGName']=s.name
df2=df2[['SEGName','NODEs']]
sObj=pd.concat([pd.Series(['None']),df['OBJTYPE']])
sObj.name='OBJTYPE'
df3=pd.concat([df2,pd.DataFrame(sObj.reset_index(drop=True))],axis=1)
df4=df3.reset_index().rename(columns={'index':'NODEsLfdNr','NODEs':'NODEsName'})[['SEGName','NODEsLfdNr','NODEsName','OBJTYPE']]
df4['NODEsType']=df4.apply(lambda row: row['NODEsLfdNr'] if row['NODEsLfdNr'] < df4.index[-1] else -1, axis=1)
df4=df4[['SEGName','NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
df4['SEGNodes']=row[dfCols[0]]+'~'+row[dfCols[-1]]
dfSegsNode.append(df4)
dfSegsNodes=pd.concat(dfSegsNode).reset_index(drop=True)
# ---
dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes['NODEsRef']=dfSegsNodes.sort_values(
by=['NODEsName','SEGOnlyInLDSPara','NODEsType','SEGName']
,ascending=[True,True,False,True]).groupby(['NODEsName']).cumcount() + 1
dfSegsNodes=pd.merge(dfSegsNodes,dfSegsNodes.groupby(['NODEsName']).max(),left_on='NODEsName',right_index=True,suffixes=('','_max'))
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
dfSegsNodes=dfSegsNodes.rename(columns={'NODEsLfdNr':'NODEsSEGLfdNr','NODEsType':'NODEsSEGLfdNrType'})
### # ---
### dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsSEGLfdNr','NODEsSEGLfdNrType','NODEsName','OBJTYPE']]
# --- Knotendaten ergaenzen
dfSegsNodesNData=pd.merge(dfSegsNodes,V3_KNOT, left_on='NODEsName',right_on='NAME',suffixes=('','KNOT'))
dfSegsNodesNData=dfSegsNodesNData.filter(items=dfSegsNodes.columns.to_list()+['ZKOR','NAME_CONT','NAME_VKNO','pk'])
dfSegsNodesNData=dfSegsNodesNData.rename(columns={'NAME_CONT':'Blockname','NAME_VKNO':'Bl.Kn. fuer Block'})
# --- Knotendatenpunktdaten ergänzen
V3_DPKT_KNOT=pd.merge(V3_DPKT,V3_KNOT,left_on='fkOBJTYPE',right_on='pk',suffixes=('','_KNOT'))
V3_DPKT_KNOT_PH=V3_DPKT_KNOT[V3_DPKT_KNOT['ATTRTYPE'].isin(['PH'])]
# Mehrfacheintraege sollte es nicht geben ...
# V3_DPKT_KNOT_PH[V3_DPKT_KNOT_PH.duplicated(subset=['fkOBJTYPE'])]
df=pd.merge(dfSegsNodesNData,V3_DPKT_KNOT_PH,left_on='pk',right_on='fkOBJTYPE',suffixes=('','_DPKT'),how='left')
cols=dfSegsNodesNData.columns.to_list()
cols.remove('pk')
df=df.filter(items=cols+['ATTRTYPE','CLIENT_ID','OPCITEM_ID','NAME'])
dfSegsNodesNDataDpkt=df
#dfSegsNodesNDataDpkt
# ---
colList=dfSegsNodesNDataDpkt.columns.to_list()
dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fDIVNameFromSEGName(x))
### dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.filter(items=['DIVPipelineName']+colList)
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.sort_values(by=['DIVPipelineName','SEGName','NODEsSEGLfdNr']).reset_index(drop=True)
dfSegsNodesNDataDpkt['DruckResIDBase']=dfSegsNodesNDataDpkt['OPCITEM_ID'].apply(lambda x: fGetBaseIDFromResID(x) )
dfSegsNodesNDataDpkt['SEGResIDBase']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fGetSEGBaseIDFromSEGName(x) )
###### --- ODI
ODIFile=os.path.join(VersionsDir,ODI)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'ODI',ODI))
dfODI=Lx.getDfFromODI(ODIFile)
dfSegsNodesNDataDpkt['SEGResIDs']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['SEGResIDsIMDI']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
dfSegsNodesNDataDpkt['DruckResIDs']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['DruckResIDsIMDI']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
# --- lfd. Nr. der Druckmessstelle im Segment ermitteln
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase'].notnull()].copy()
df['NODEsSEGDruckErgLfdNr']=df.groupby('SEGName').cumcount() + 1
df['NODEsSEGDruckErgLfdNr']=df['NODEsSEGDruckErgLfdNr'].astype(int)
cols=dfSegsNodesNDataDpkt.columns.to_list()
cols.append('NODEsSEGDruckErgLfdNr')
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt
,df
,left_index=True
,right_index=True
,how='left'
,suffixes=('','_df')
).filter(items=cols)
dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr']=dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr'].astype(int,errors='ignore')
# LDSPara ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfPara,left_on='SEGNodes',right_index=True,suffixes=('','_LDSPara'),how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
#for SEGNodes in [str(SEGNodes) for SEGNodes in df.index if str(SEGNodes) not in dfSegsNodesNDataDpkt['SEGNodes'].values]:
# logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
###### --- LDSParaPT
LDSParaPTFile=os.path.join(VersionsDir,LDSParaPT)
if os.path.exists(LDSParaPTFile):
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSParaPT',LDSParaPT))
dfDPDTParams=pd.read_csv(LDSParaPTFile,delimiter=';',error_bad_lines=False,warn_bad_lines=True)
dfMehrfach=dfDPDTParams.groupby(by='#ID').filter(lambda x: len(x) > 1)
rows,cols=dfMehrfach.shape
if rows > 0:
logger.warning("{:s}Mehrfachkonfigurationen:".format(logStr))
logger.warning("{:s}".format(dfMehrfach.to_string()))
dfDPDTParams=dfDPDTParams.groupby(by='#ID').first()
# LDSParaPT ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfDPDTParams,left_on='CLIENT_ID',right_on='#ID',how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfOhne=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID']) & dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0 ) & (pd.isnull(dfSegsNodesNDataDpkt['pMin'])) ][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID']].reset_index(drop=True)
rows,cols=dfOhne.shape
if rows > 0:
logger.debug("{:s}Druckmessstellen ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(dfOhne.to_string()))
dfSegsNodesNDataDpkt['pMinMlc']=dfSegsNodesNDataDpkt.apply(lambda row: row['ZKOR']+row['pMin']*100000/(794.*9.81),axis=1)
g=dfSegsNodesNDataDpkt.groupby(by='SEGName')
df=g.pMinMlc.agg(pMinMlcMinSEG=np.min,pMinMlcMaxSEG=np.max)
# pMinMlcMinSEG, pMinMlcMaxSEG ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,df,left_on='SEGName',right_index=True,how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
# Segmente ohne Mindestdruecke ausgeben
df=dfSegsNodesNDataDpkt.groupby(['SEGName']).first()
df=df[pd.isnull(df['pMinMlcMinSEG'])][['DIVPipelineName','SEGNodes']]
rows,cols=df.shape
if rows > 0:
logger.debug("{:s}ganze Segmente ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(df.to_string()))
# Mindestdruecke ausgeben
df=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID']) & dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0 ) & (~pd.isnull(dfSegsNodesNDataDpkt['pMin'])) ][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID','pMin']].reset_index(drop=True)
logger.debug("{:s}dfSegsNodesNDataDpkt: Mindestdrücke: {!s:s}".format(logStr,df.to_string()))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfSegsNodesNDataDpkt
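# A hedged usage sketch for dfSegsNodesNDataDpkt (added for illustration; versionsDir is
# a hypothetical placeholder for a project version directory):
def _exampleDfSegsNodesNDataDpkt(versionsDir):
    df = dfSegsNodesNDataDpkt(VersionsDir=versionsDir)
    # pressure measuring points only, one row per segment and measuring point
    return df[df['DruckResIDBase'].notnull()][['DIVPipelineName','SEGName','NODEsName','NODEsSEGDruckErgLfdNr']]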
def fResValidSeriesSTAT_S(x): # STAT_S
if pd.isnull(x)==False:
if x >=0:
return True
else:
return False
else:
return False
def fResValidSeriesSTAT_S601(x): # STAT_S
if pd.isnull(x)==False:
if x==601:
return True
else:
return False
else:
return False
def fResValidSeriesAL_S(x,value=20): # AL_S
if pd.isnull(x)==False:
if x==value:
return True
else:
return False
else:
return False
def fResValidSeriesAL_S10(x):
return fResValidSeriesAL_S(x,value=10)
def fResValidSeriesAL_S4(x):
return fResValidSeriesAL_S(x,value=4)
def fResValidSeriesAL_S3(x):
return fResValidSeriesAL_S(x,value=3)
ResChannelFunctions=[fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601]
ResChannelResultNames=['Zustaendig','Alarm','Stoerung']
ResChannelTypes=['STAT_S','AL_S','STAT_S']
# (fast) alle verfuegbaren Erg-Kanaele
ResChannelTypesAll=['AL_S','STAT_S','SB_S','MZ_AV','LR_AV','NG_AV','LP_AV','AC_AV','ACCST_AV','ACCTR_AV','ACF_AV','TIMER_AV','AM_AV','DNTD_AV','DNTP_AV','DPDT_AV'
,'DPDT_REF_AV'
,'DPDT_REF' # Workaround
,'QM_AV','ZHKNR_S']
baseColorsSchieber=[ # Schieberfarben
'g' # 1
,'b' # 2
,'m' # 3
,'r' # 4
,'c' # 5
# alle Basisfarben außer y gelb
,'tab:blue' # 6
,'tab:orange' # 7
,'tab:green' # 8
,'tab:red' # 9
,'tab:purple' # 10
,'tab:brown' # 11
,'tab:pink' # 12
,'gold' # 13
,'fuchsia' # 14
,'coral' # 15
]
markerDefSchieber=[ # Schiebersymbole
'^' # 0 Auf
,'v' # 1 Zu
,'>' # 2 Halt
# ab hier Zustaende
,'4' # 3 Laeuft
,'3' # 4 Laeuft nicht
,'P' # 5 Zust
,'1' # 6 Auf
,'2' # 7 Zu
,'+' # 8 Halt
,'x' # 9 Stoer
]
# --- Reports LDS: Funktionen und Hilfsfunktionen
# -----------------------------------------------
def getLDSResVecDf(
ResIDBase='ID.' # i.e. for Segs Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In. / i.e. for Drks Objects.3S_XYZ_DRUCK.3S_6_EL1_39_PTI_02_E.In.
,LDSResBaseType='SEG' # or Druck
,lx=None
,timeStart=None,timeEnd=None
,ResChannelTypes=ResChannelTypesAll
,timeShiftPair=None
):
"""
returns a df: the specified LDSResChannels (AL_S, ...) for an ResIDBase
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfResVec=pd.DataFrame()
try:
# zu lesende IDs basierend auf ResIDBase bestimmen
ErgIDs=[ResIDBase+ext for ext in ResChannelTypes]
IMDIErgIDs=['IMDI.'+ID for ID in ErgIDs]
ErgIDsAll=[*ErgIDs,*IMDIErgIDs]
# Daten lesen von TC-H5s
dfFiltered=lx.getTCsFromH5s(timeStart=timeStart,timeEnd=timeEnd,LDSResOnly=True,LDSResColsSpecified=ErgIDsAll,LDSResTypeSpecified=LDSResBaseType,timeShiftPair=timeShiftPair)
# Spalten umbenennen
colDct={}
for col in dfFiltered.columns:
m=re.search(Lx.pID,col)
colDct[col]=m.group('E')
dfResVec=dfFiltered.rename(columns=colDct)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfResVec
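# --- Added usage sketch (editorial addition, not part of the original module) -----------------
# Minimal example of how getLDSResVecDf is typically called; the h5 file name and the ResIDBase
# are hypothetical placeholders (the ID is taken from the docstring example above).
def _exampleGetLDSResVecDf(h5File='a.h5'):
    lx=Lx.AppLog(h5File=h5File) # "connect" to the App Logs
    # read the SEG result vector; restrict the channels to alarm and state
    dfSegReprVec=getLDSResVecDf(
        ResIDBase='Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In.'
        ,LDSResBaseType='SEG'
        ,lx=lx
        ,ResChannelTypes=['AL_S','STAT_S']
    )
    return dfSegReprVec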
def fGetResTimes(
ResIDBases=[] # Liste der Wortstaemme der Ergebnisvektoren
,df=pd.DataFrame() # TCsLDSRes...
,ResChannelTypes=ResChannelTypes # ['STAT_S','AL_S','STAT_S'] # Liste der Ergebnisvektoren Postfixe
    ,ResChannelFunctions=ResChannelFunctions # [fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601] # Liste der Ergebnisvektoren Funktionen
,ResChannelResultNames=ResChannelResultNames # ['Zustaendig','Alarm','Stoerung'] # Liste der key-Namen der Ergebnisse
,tdAllowed=pd.Timedelta('1 second') # erlaubte Zeitspanne zwischen geht und kommt (die beiden an diese Zeitspanne angrenzenden Zeitbereiche werden als 1 Zeit gewertet)
):
"""
Return: dct
key: ResIDBase
value: dct:
key: ResChannelResultName
Value: Liste mit Zeitpaaren (oder leere Liste)
"""
resTimesDct={}
for ResIDBase in ResIDBases:
tPairsDct={}
for idx,ext in enumerate(ResChannelTypes):
ID=ResIDBase+ext
if ext == 'AL_S':
debugOutput=True
else:
debugOutput=False
if ID in df:
#print("{:s} in Ergliste".format(ID))
tPairs=findAllTimeIntervallsSeries(
s=df[ID].dropna() #!
,fct=ResChannelFunctions[idx]
,tdAllowed=tdAllowed#pd.Timedelta('1 second')
,debugOutput=debugOutput
)
else:
#print("{:s} nicht in Ergliste".format(ID))
tPairs=[]
tPairsDct[ResChannelResultNames[idx]]=tPairs
resTimesDct[ResIDBase]=tPairsDct
return resTimesDct
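# --- Added usage sketch (editorial addition, not part of the original module) -----------------
# fGetResTimes combines the predicate functions defined above (fResValidSeriesSTAT_S,
# fResValidSeriesAL_S, fResValidSeriesSTAT_S601) with a result-vector dataframe; the ResIDBase
# below is a hypothetical placeholder and TCsLDSRes1 is expected to be read elsewhere.
def _exampleFGetResTimes(TCsLDSRes1=pd.DataFrame()):
    ResIDBase='Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In.'
    resTimesDct=fGetResTimes(ResIDBases=[ResIDBase],df=TCsLDSRes1)
    # resTimesDct[ResIDBase]['Alarm'] is a (possibly empty) list of (tA,tE) time pairs
    return resTimesDct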
def getAlarmStatistikData(
h5File='a.h5'
,dfSegsNodesNDataDpkt=pd.DataFrame()
,timeShiftPair=None # z.B. (1,'H') bei Replay
):
"""
Returns TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
TCsLDSRes1=pd.DataFrame()
TCsLDSRes2=pd.DataFrame()
try:
# "connect" to the App Logs
lx=Lx.AppLog(h5File=h5File)
if hasattr(lx, 'h5FileLDSRes'):
logger.error("{0:s}{1:s}".format(logStr,'In den TCs nur Res und nicht Res1 und Res2?!'))
raise RmError
# zu lesende Daten ermitteln
l=dfSegsNodesNDataDpkt['DruckResIDBase'].unique()
l = l[~pd.isnull(l)]
DruckErgIDs=[*[ID+'AL_S' for ID in l],*[ID+'STAT_S' for ID in l],*[ID+'SB_S' for ID in l],*[ID+'ZHKNR_S' for ID in l]]
#
l=dfSegsNodesNDataDpkt['SEGResIDBase'].unique()
l = l[~pd.isnull(l)]
SEGErgIDs=[*[ID+'AL_S' for ID in l],*[ID+'STAT_S' for ID in l],*[ID+'SB_S' for ID in l],*[ID+'ZHKNR_S' for ID in l]]
ErgIDs=[*DruckErgIDs,*SEGErgIDs]
# Daten lesen
TCsLDSRes1,TCsLDSRes2=lx.getTCsFromH5s(LDSResOnly=True,LDSResColsSpecified=ErgIDs,timeShiftPair=timeShiftPair)
        if timeShiftPair != None:
            (period,freq)=timeShiftPair
            timeDeltaStr="{:d} {:s}".format(period,freq)
            timeDelta=pd.Timedelta(timeDeltaStr)
        else:
            timeDelta=pd.Timedelta('0 Seconds')
dfCVDataOnly=lx.getCVDFromH5(timeDelta=timeDelta,returnDfCVDataOnly=True)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
def processAlarmStatistikData(
TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,tdAllowed=None # pd.Timedelta('1 second')
# Alarm geht ... Alarm kommt (wieder): wenn Zeitspanne ... <= tdAllowed, dann wird dies _gewertet als dieselbe Alarmzeitspanne
# d.h. es handelt sich _gewertet inhaltlich um denselben Alarm
# None zählt die Alarme strikt getrennt
):
"""
Returns: SEGResDct,DruckResDct
ResDct:
        key: baseID (i.e. Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_SPV.In.
value: dct
key: Zustaendig: value: Zeitbereiche, in denen der Ergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen der Ergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen der Ergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
key: AL_S_SB_S: value: Liste mit Listen (den verschiedenen SB_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
            key: AL_S_ZHKNR_S: value: Liste mit Listen (den verschiedenen ZHKNR_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Zeiten SEGErgs mit zustaendig und Alarm ...
l=[baseID for baseID in dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]['SEGResIDBase'].unique() if not pd.isnull(baseID)]
SEGResDct=fGetResTimes(ResIDBases=l,df=TCsLDSRes1,tdAllowed=tdAllowed)
logger.debug("{:s}SEGResDct: {!s:s}".format(logStr,SEGResDct))
# Zeiten DruckErgs mit zustaendig und Alarm ...
l=[baseID for baseID in dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]['DruckResIDBase'].unique() if not pd.isnull(baseID)]
DruckResDct=fGetResTimes(ResIDBases=l,df=TCsLDSRes2,tdAllowed=tdAllowed)
logger.debug("{:s}DruckResDct: {!s:s}".format(logStr,DruckResDct))
# verschiedene Auspraegungen pro Alarmzeit ermitteln
for ResDct, ResSrc, LDSResBaseType in zip([SEGResDct, DruckResDct],[TCsLDSRes1,TCsLDSRes2],['SEG','Druck']):
for idxID,(ID,IDDct) in enumerate(ResDct.items()):
# IDDct: das zu erweiternde Dct
# Alarme
tPairs=IDDct['Alarm']
                for keyStr, colExt in zip(['AL_S_SB_S','AL_S_ZHKNR_S'],['SB_S','ZHKNR_S']):
lGes=[]
if tPairs != []:
for tPair in tPairs:
col=ID+colExt
lSingle=ResSrc.loc[tPair[0]:tPair[1],col]
lSingle=[int(x) for x in lSingle if pd.isnull(x)==False]
lSingle=[lSingle[0]]+[lSingle[i] for i in range(1,len(lSingle)) if lSingle[i]!=lSingle[i-1]]
lGes.append(lSingle)
IDDct[keyStr]=lGes
# das erweiterte Dct zuweisen
ResDct[ID]=IDDct
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGResDct,DruckResDct
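# --- Added usage sketch (editorial addition, not part of the original module) -----------------
# Typical chaining of getAlarmStatistikData and processAlarmStatistikData; the h5 file name and
# the 1-hour replay shift are hypothetical, dfSegsNodesNDataDpkt comes from the preprocessing step.
def _exampleAlarmStatistikSteps(dfSegsNodesNDataDpkt=pd.DataFrame()):
    TCsLDSRes1,TCsLDSRes2,dfCVDataOnly=getAlarmStatistikData(
        h5File='a.h5'
        ,dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt
        ,timeShiftPair=(1,'H') # e.g. replay shifted by 1 hour
    )
    SEGResDct,DruckResDct=processAlarmStatistikData(
        TCsLDSRes1=TCsLDSRes1
        ,TCsLDSRes2=TCsLDSRes2
        ,dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt
        ,tdAllowed=pd.Timedelta('1 second') # merge alarm intervals separated by <= 1 second
    )
    return TCsLDSRes1,TCsLDSRes2,dfCVDataOnly,SEGResDct,DruckResDct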
def addNrToAlarmStatistikData(
SEGResDct={}
,DruckResDct={}
,dfAlarmEreignisse=pd.DataFrame()
):
"""
Returns: SEGResDct,DruckResDct added with key AL_S_NR
ResDct:
key: baseID (i.e. Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_SPV.In.
value: dct
key: Zustaendig: value: Zeitbereiche, in denen der Ergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen der Ergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen der Ergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
key: AL_S_SB_S: value: Liste mit Listen (den verschiedenen SB_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
key: AL_S_ZHKNR_S: value: Liste mit Listen (den verschiedenen ZHKNR_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
# ergänzt:
key: AL_S_NR: value: Liste mit der Nr. (aus dfAlarmEreignisse) pro Alarm (Länge der Liste == Länge der Liste von Alarm)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
#
for ResDct, LDSResBaseType in zip([SEGResDct, DruckResDct],['SEG','Druck']):
for idxID,(ID,IDDct) in enumerate(ResDct.items()):
# IDDct: das zu erweiternde Dct
# Alarme
tPairs=IDDct['Alarm']
lNr=[]
if tPairs != []:
ZHKNRnListen=IDDct['AL_S_ZHKNR_S']
for idxAlarm,tPair in enumerate(tPairs):
ZHKNRnListe=ZHKNRnListen[idxAlarm]
ZHKNR=ZHKNRnListe[0]
ae=AlarmEvent(tPair[0],tPair[1],ZHKNR,LDSResBaseType)
Nr=dfAlarmEreignisse[dfAlarmEreignisse['AlarmEvent']==ae]['Nr'].iloc[0]
lNr.append(Nr)
IDDct['AL_S_NR']=lNr
# das erweiterte Dct zuweisen
ResDct[ID]=IDDct
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGResDct,DruckResDct
def processAlarmStatistikData2(
DruckResDct=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
):
"""
Druckergebnisaussagen auf Segmentergebnisaussagen geeignet verdichtet
Returns: SEGDruckResDct
ResDct:
key: baseID
value: dct
            sortiert und direkt angrenzende oder gar ueberlappende Zeiten aus Druckergebnissen zusammengefasst
key: Zustaendig: value: Zeitbereiche, in denen ein Druckergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen ein Druckergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen ein Druckergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
            voneinander verschiedene Ausprägungen (sortiert) aus Druckergebnissen
key: AL_S_SB_S: Liste
key: AL_S_ZHKNR_S: Liste
key: AL_S_NR: Liste
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Druckergebnisaussagen auf Segmentergebnisaussagen geeignet verdichtet
SEGDruckResDct={}
# merken, ob eine ID bereits bei einem SEG gezählt wurde; die Alarme einer ID sollen nur bei einem SEG gezaehlt werden
IDBereitsGezaehlt={}
# über alle DruckErgs
for idx,(ID,tPairsDct) in enumerate(DruckResDct.items()):
# SEG ermitteln
# ein DruckErg kann zu mehreren SEGs gehoeren z.B. gehoert ein Verzweigungsknoten i.d.R. zu 3 versch. SEGs
tupleLst=getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt,ID)
for idxTuple,(DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) in enumerate(tupleLst):
# wenn internes SEG
if SEGOnlyInLDSPara:
logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt da dieses SEG intern.".format(logStr,ID,SEGName))
continue
# ID wurde bereits gezählt
if ID in IDBereitsGezaehlt.keys():
logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt sondern wurde bereits bei SEGName {:s} gezaehlt.".format(logStr,ID,SEGName,IDBereitsGezaehlt[ID]))
continue
else:
# ID wurde noch nicht gezaehlt
IDBereitsGezaehlt[ID]=SEGName
#if idxTuple>0:
# logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt sondern nur bei SEGName {:s}.".format(logStr,ID,SEGName,tupleLst[0][1]))
# continue
if len(tPairsDct['Alarm'])>0:
logger.debug("{:s}SEGName {:20s}: durch ID {:40s} mit Alarm. Nr des Verweises von ID auf ein Segment: {:d}".format(logStr,SEGName,ID, idxTuple+1))
if SEGResIDBase not in SEGDruckResDct.keys():
# auf dieses SEG wurde noch nie verwiesen
SEGDruckResDct[SEGResIDBase]=deepcopy(tPairsDct) # das Segment erhält die Ergebnisse des ersten Druckvektors der zum Segment gehört
else:
# ergaenzen
# Zeitlisten ergänzen
for idx2,ext in enumerate(ResChannelTypes):
tPairs=tPairsDct[ResChannelResultNames[idx2]]
for idx3,tPair in enumerate(tPairs):
if True: #tPair not in SEGDruckResDct[SEGResIDBase][ResChannelResultNames[idx2]]: # keine identischen Zeiten mehrfach zaehlen
# die Ueberlappung von Zeiten wird weiter unten behandelt
SEGDruckResDct[SEGResIDBase][ResChannelResultNames[idx2]].append(tPair)
# weitere Listen ergaenzen
for ext in ['AL_S_SB_S','AL_S_ZHKNR_S','AL_S_NR']:
SEGDruckResDct[SEGResIDBase][ext]=SEGDruckResDct[SEGResIDBase][ext]+tPairsDct[ext]
# Ergebnis: sortieren und dann direkt angrenzende oder gar ueberlappende Zeiten zusammenfassen
for idx,(ID,tPairsDct) in enumerate(SEGDruckResDct.items()):
for idx2,ext in enumerate(tPairsDct.keys()):
if ext in ['AL_S_SB_S','AL_S_ZHKNR_S','AL_S_NR']: # keine Zeiten
pass
else:
tPairs=tPairsDct[ResChannelResultNames[idx2]]
tPairs=sorted(tPairs,key=lambda tup: tup[0])
tPairs=fCombineSubsequenttPairs(tPairs)
SEGDruckResDct[ID][ResChannelResultNames[idx2]]=tPairs
        # voneinander verschiedene Ausprägungen (sortiert)
for idx,(ID,tPairsDct) in enumerate(SEGDruckResDct.items()):
for idx2,ext in enumerate(tPairsDct.keys()):
v=tPairsDct[ext]
if ext in ['AL_S_SB_S','AL_S_ZHKNR_S']: # Liste von Listen
l=[*{*chain.from_iterable(v)}]
l=sorted(pd.unique(l))
SEGDruckResDct[ID][ext]=l
elif ext in ['AL_S_NR']: # Liste
l=sorted(pd.unique(v))
SEGDruckResDct[ID][ext]=l
else:
pass
logger.debug("{:s}SEGDruckResDct: {!s:s}".format(logStr,SEGDruckResDct))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGDruckResDct
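# --- Added usage sketch (editorial addition, not part of the original module) -----------------
# Condensing the pressure results to segment level; note that the DruckResDct entries must
# already carry the AL_S_NR key (added by addNrToAlarmStatistikData), as is the case inside
# buildAlarmDataframes below.
def _exampleProcessAlarmStatistikData2(DruckResDct,TCsLDSRes2,dfSegsNodesNDataDpkt):
    SEGDruckResDct=processAlarmStatistikData2(
        DruckResDct=DruckResDct
        ,TCsLDSRes2=TCsLDSRes2
        ,dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt
    )
    # per SEGResIDBase: merged Zustaendig/Alarm/Stoerung time pairs plus the sorted
    # AL_S_SB_S / AL_S_ZHKNR_S / AL_S_NR lists
    return SEGDruckResDct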
def buildAlarmDataframes(
TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,dfCVDataOnly=pd.DataFrame()
,SEGResDct={}
,DruckResDct={}
,replaceTup=('2021-','')
,NrBy=['LDSResBaseType','SEGName','Ort','tA','ZHKNR']
,NrAsc=[False]+4*[True]
):
"""
Returns dfAlarmStatistik,dfAlarmEreignisse,SEGDruckResDct
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfAlarmStatistik=pd.DataFrame()
dfAlarmEreignisse=pd.DataFrame()
try:
# Ereignisse
dfAlarmEreignisse=buildDfAlarmEreignisse(
SEGResDct=SEGResDct
,DruckResDct=DruckResDct
,TCsLDSRes1=TCsLDSRes1
,TCsLDSRes2=TCsLDSRes2
,dfCVDataOnly=dfCVDataOnly
,dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt
,replaceTup=replaceTup
,NrBy=NrBy
,NrAsc=NrAsc
)
# in dfAlarmEreignisse erzeugte Alarm-Nr. an Dct merken
SEGResDct,DruckResDct=addNrToAlarmStatistikData(
SEGResDct
,DruckResDct
,dfAlarmEreignisse
)
# BZKat der Alarme
def fGetAlarmKat(row):
"""
"""
# baseID des Alarms
baseID=row['OrteIDs'][0]
# dct des Alarms
if row['LDSResBaseType']=='SEG':
dct=SEGResDct[baseID]
else:
dct=DruckResDct[baseID]
# Nrn der baseID
Nrn=dct['AL_S_NR']
# idx dieses Alarms innerhalb der Alarme der baseID
idxAl=Nrn.index(row['Nr'])
# Zustaende dieses alarms
SB_S=dct['AL_S_SB_S'][idxAl]
kat=''
if 3 in SB_S:
kat='instationär'
else:
if 2 in SB_S:
kat = 'schw. instationär'
else:
if 1 in SB_S:
kat = 'stat. Fluss'
elif 4 in SB_S:
kat = 'stat. Ruhe'
return kat
dfAlarmEreignisse['BZKat']=dfAlarmEreignisse.apply(lambda row: fGetAlarmKat(row),axis=1)
# Segment-verdichtete Druckergebnisse
SEGDruckResDct=processAlarmStatistikData2(
DruckResDct
,TCsLDSRes2
,dfSegsNodesNDataDpkt
)
# Alarmstatistik bilden
dfAlarmStatistik=dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]
dfAlarmStatistik=dfAlarmStatistik[['DIVPipelineName','SEGName','SEGNodes','SEGResIDBase']].drop_duplicates(keep='first').reset_index(drop=True)
dfAlarmStatistik['Nr']=dfAlarmStatistik.apply(lambda row: "{:2d}".format(int(row.name)),axis=1)
# SEG
dfAlarmStatistik['FörderZeiten']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Zustaendig'])
dfAlarmStatistik['FörderZeitenAl']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Alarm'])
dfAlarmStatistik['FörderZeitenSt']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Stoerung'])
dfAlarmStatistik['FörderZeitenAlAnz']=dfAlarmStatistik['FörderZeitenAl'].apply(lambda x: len(x))
dfAlarmStatistik['FörderZeitenAlSbs']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['AL_S_SB_S'])
dfAlarmStatistik['FörderZeitenAlNrn']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['AL_S_NR'])
# Druck (SEG-verdichtet)
dfAlarmStatistik['RuheZeiten']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Zustaendig'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAl']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Alarm'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenSt']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Stoerung'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAlSbs']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['AL_S_SB_S'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAlNrn']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['AL_S_NR'] if x in SEGDruckResDct.keys() else [])
#dfAlarmStatistik['RuheZeitenAlAnz']=dfAlarmStatistik['RuheZeitenAl'].apply(lambda x: len(x))
dfAlarmStatistik['RuheZeitenAlAnz']=dfAlarmStatistik['RuheZeitenAlNrn'].apply(lambda x: len(x))
# je 3 Zeiten bearbeitet
dfAlarmStatistik['FörderZeit']=dfAlarmStatistik['FörderZeiten'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeit']=dfAlarmStatistik['RuheZeiten'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['FörderZeitAl']=dfAlarmStatistik['FörderZeitenAl'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeitAl']=dfAlarmStatistik['RuheZeitenAl'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['FörderZeitSt']=dfAlarmStatistik['FörderZeitenSt'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeitSt']=dfAlarmStatistik['RuheZeitenSt'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse, dfAlarmStatistik,SEGDruckResDct
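# --- Added usage sketch (editorial addition, not part of the original module) -----------------
# Building the Ereignis- and Statistik-dataframes from the previously read and condensed data;
# all inputs are produced by the functions above, replaceTup strips the (hypothetical) year
# prefix from printed timestamps.
def _exampleBuildAlarmDataframes(TCsLDSRes1,TCsLDSRes2,dfSegsNodesNDataDpkt,dfCVDataOnly,SEGResDct,DruckResDct):
    dfAlarmEreignisse,dfAlarmStatistik,SEGDruckResDct=buildAlarmDataframes(
        TCsLDSRes1=TCsLDSRes1
        ,TCsLDSRes2=TCsLDSRes2
        ,dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt
        ,dfCVDataOnly=dfCVDataOnly
        ,SEGResDct=SEGResDct
        ,DruckResDct=DruckResDct
        ,replaceTup=('2021-','')
    )
    return dfAlarmEreignisse,dfAlarmStatistik,SEGDruckResDct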
def plotDfAlarmStatistik(
dfAlarmStatistik=pd.DataFrame()
):
"""
Returns the plt.table
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
df=dfAlarmStatistik[[
'Nr'
,'DIVPipelineName'
,'SEGName'
,'FörderZeit'
,'FörderZeitenAlAnz'
,'FörderZeitAl'
,'FörderZeitSt'
,'RuheZeit'
,'RuheZeitenAlAnz'
,'RuheZeitAl'
,'RuheZeitSt'
]].copy()
    # diese Zeiten mit (Störzeiten) annotieren
df['FörderZeit']=df.apply(lambda row: "{!s:s} ({!s:s})".format(row['FörderZeit'],row['FörderZeitSt']) if row['FörderZeitSt'] > 0. else row['FörderZeit'] ,axis=1)
df['RuheZeit']=df.apply(lambda row: "{!s:s} ({!s:s})".format(row['RuheZeit'],row['RuheZeitSt']) if row['RuheZeitSt'] > 0. else row['RuheZeit'],axis=1)
# LfdNr. annotieren
df['LfdNr']=df.apply(lambda row: "{:2d} - {:s}".format(int(row.Nr)+1,str(row.DIVPipelineName)),axis=1)
# Zeiten Alarm um Alarm-Nrn annotieren
def fAddZeitMitNrn(zeit,lAlNr):
if len(lAlNr) > 0:
if len(lAlNr) <= 3:
return "{!s:s} (Nrn.: {!s:s})".format(zeit,lAlNr)
else:
# mehr als 3 Alarme...
return "{!s:s} (Nrn.: {!s:s}, ...)".format(zeit,lAlNr[0])
else:
return zeit
df['FörderZeitAl']=dfAlarmStatistik.apply(lambda row: fAddZeitMitNrn(row['FörderZeitAl'],row['FörderZeitenAlNrn']),axis=1)
df['RuheZeitAl']=dfAlarmStatistik.apply(lambda row: fAddZeitMitNrn(row['RuheZeitAl'],row['RuheZeitenAlNrn']),axis=1)
df=df[[
'LfdNr'
,'SEGName'
,'FörderZeit'
,'FörderZeitenAlAnz'
,'FörderZeitAl'
,'RuheZeit'
,'RuheZeitenAlAnz'
,'RuheZeitAl'
]]
try:
t=plt.table(cellText=df.values, colLabels=df.columns, loc='center')
cols=df.columns.to_list()
colIdxLfdNr=cols.index('LfdNr')
colIdxFoerderZeit=cols.index('FörderZeit')
colIdxFoerderZeitenAlAnz=cols.index('FörderZeitenAlAnz')
colIdxFoerderZeitAl=cols.index('FörderZeitAl')
colIdxRuheZeit=cols.index('RuheZeit')
colIdxRuheZeitenAlAnz=cols.index('RuheZeitenAlAnz')
colIdxRuheZeitAl=cols.index('RuheZeitAl')
cells = t.properties()["celld"]
for cellTup,cellObj in cells.items():
cellObj.set_text_props(ha='left')
            row,col=cellTup # row: 0 fuer die Ueberschrift; col beginnt mit 0
if row == 0:
if col in [colIdxRuheZeit,colIdxRuheZeitenAlAnz,colIdxRuheZeitAl]:
pass
cellObj.set_text_props(backgroundcolor='plum')
elif col in [colIdxFoerderZeit,colIdxFoerderZeitenAlAnz,colIdxFoerderZeitAl]:
pass
cellObj.set_text_props(backgroundcolor='lightsteelblue')
if col == colIdxLfdNr:
if row==0:
continue
if 'color' in dfAlarmStatistik.columns.to_list():
color=dfAlarmStatistik['color'].iloc[row-1]
cellObj.set_text_props(backgroundcolor=color)
if col == colIdxFoerderZeit:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'FörderZeit']==0:
pass
else:
if dfAlarmStatistik.loc[row-1,'FörderZeitSt']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
cellObj.set_text_props(backgroundcolor='goldenrod')
if col == colIdxFoerderZeitenAlAnz:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'FörderZeit']==0:
cellObj.set_text_props(backgroundcolor='lightgrey')
else: # hat Förderzeit
if df.loc[row-1,'FörderZeitenAlAnz']==0:
cellObj.set_text_props(backgroundcolor='springgreen')
else:
cellObj.set_text_props(ha='center')
cellObj.set_text_props(backgroundcolor='navajowhite') # palegoldenrod
#if df.loc[row-1,'FörderZeitAl']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
if dfAlarmStatistik.loc[row-1,'FörderZeitAl']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
cellObj.set_text_props(backgroundcolor='tomato')
if col == colIdxRuheZeit:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'RuheZeit']==0:
pass
else:
if dfAlarmStatistik.loc[row-1,'RuheZeitSt']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
cellObj.set_text_props(backgroundcolor='goldenrod')
if col == colIdxRuheZeitenAlAnz:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'RuheZeit']==0:
cellObj.set_text_props(backgroundcolor='lightgrey')
else: # hat Ruhezeit
if df.loc[row-1,'RuheZeitenAlAnz']==0:
cellObj.set_text_props(backgroundcolor='springgreen')
else:
pass
cellObj.set_text_props(ha='center')
cellObj.set_text_props(backgroundcolor='navajowhite') # # palegoldenrod
#if df.loc[row-1,'RuheZeitAl']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
if dfAlarmStatistik.loc[row-1,'RuheZeitAl']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
cellObj.set_text_props(backgroundcolor='tomato')
plt.axis('off')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return t
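# --- Added usage sketch (editorial addition, not part of the original module) -----------------
# Rendering the statistik table into a figure; DINA4q and dpiSize are the module-level defaults
# also used by the report functions below, the output file name is hypothetical.
def _examplePlotDfAlarmStatistik(dfAlarmStatistik):
    fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
    plotDfAlarmStatistik(dfAlarmStatistik=dfAlarmStatistik)
    fig.tight_layout(pad=2.)
    plt.savefig('AlarmStatistik.png') # hypothetical file name
    plt.close()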
def fOrteStripped(LDSResBaseType,OrteIDs):
"""
returns Orte stripped
"""
    if LDSResBaseType == 'SEG': # i.e. Objects.3S_FBG_SEG_INFO.3S_L_6_MHV_02_FUD.In.
orteStripped=[]
for OrtID in OrteIDs:
pass
m=re.search(Lx.pID,OrtID+'dummy')
ortStripped=m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_'+m.group('C6')
orteStripped.append(ortStripped)
return orteStripped
elif LDSResBaseType == 'Druck': # Objects.3S_FBG_DRUCK.3S_6_BNV_01_PTI_01.In
orteStripped=[]
for OrtID in OrteIDs:
pass
m=re.search(Lx.pID,OrtID+'dummy')
ortStripped=m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
orteStripped.append(ortStripped)
return orteStripped
else:
return None
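# --- Added usage sketch (editorial addition, not part of the original module) -----------------
# fOrteStripped shortens full result IDs to compact location names; the IDs below are the
# examples from the comments above, the exact short form depends on the Lx.pID regex groups.
def _exampleFOrteStripped():
    seg=fOrteStripped('SEG',['Objects.3S_FBG_SEG_INFO.3S_L_6_MHV_02_FUD.In.'])
    druck=fOrteStripped('Druck',['Objects.3S_FBG_DRUCK.3S_6_BNV_01_PTI_01.In.'])
    return seg,druck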
def fCVDTime(row,dfSEG,dfDruck,replaceTup=('2021-','')):
"""
in:
dfSEG/dfDruck: TCsLDSRes1/TCsLDSRes2
row: Zeile aus dfAlarmEreignisse
von row verwendet:
LDSResBaseType: SEG (dfSEG) oder nicht (dfDruck)
OrteIDs: ==> ID von ZHKNR_S in dfSEG/dfDruck
ZHKNR: ZHKNR
returns:
string: xZeitA - ZeitEx
ZeitA: erste Zeit in der ZHKNR_S in dfSEG/dfDruck den Wert von ZHKNR trägt
ZeitE: letzte Zeit in der ZHKNR_S in dfSEG/dfDruck den Wert von ZHKNR trägt
xZeitA, wenn ZeitA die erste Zeit in dfSEG/dfDruck ist mit einem von Null verschiedenen Wert
xZeitE, wenn ZeitE die letzte Zeit in dfSEG/dfDruck ist mit einem von Null verschiedenen Wert
in Zeit wurde replaceTup angewendet
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
Time=""
ID=row['OrteIDs'][0]+'ZHKNR_S'
ZHKNR=row['ZHKNR']
if row['LDSResBaseType']=='SEG':
df=dfSEG
else:
df=dfDruck
s=df[df[ID]==ZHKNR][ID] # eine Spalte; Zeilen in denen ZHKNR_S den Wert von ZHKNR trägt
tA=s.index[0] # 1. Zeit
tE=s.index[-1] # letzte Zeit
Time=" {!s:s} - {!s:s} ".format(tA,tE)
try:
if tA==df[ID].dropna().index[0]:
Time='x'+Time.lstrip()
except:
logger.debug("{0:s}Time: {1:s}: x-tA Annotation Fehler; keine Annotation".format(logStr,Time))
try:
if tE==df[ID].dropna().index[-1]:
Time=Time.rstrip()+'x'
except:
logger.debug("{0:s}Time: {1:s}: x-tE Annotation Fehler; keine Annotation".format(logStr,Time))
Time=Time.replace(replaceTup[0],replaceTup[1])
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return Time
def buildDfAlarmEreignisse(
SEGResDct={}
,DruckResDct={}
,TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfCVDataOnly=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,replaceTup=('2021-','')
,NrBy=['LDSResBaseType','SEGName','Ort','tA','ZHKNR'] # Sortierspalten für die Nr. der Ereignisse
,NrAsc=[False]+4*[True] # aufsteigend j/n für die o.g. Sortierspalten
):
"""
Returns dfAlarmEreignisse:
Nr: lfd. Nr (gebildet gem. NrBy und NrAsc)
tA: Anfangszeit
tE: Endezeit
tD: Dauer des Alarms
ZHKNR: ZHKNR (die zeitlich 1., wenn der Alarm sich über mehrere ZHKNRn erstreckt)
tD_ZHKNR: Lebenszeit der ZHKNR; x-Annotationen am Anfang/Ende, wenn ZHK beginnt bei Res12-Anfang / andauert bei Res12-Ende; '-1', wenn Lebenszeit nicht ermittelt werden konnte
ZHKNRn: sortierte Liste der ZHKNRn des Alarms; eine davon ist ZHKNR; typischerweise die 1. der Liste
LDSResBaseType: SEG oder Druck
OrteIDs: OrteIDs des Alarms
Orte: Kurzform von OrteIDs des Alarms
Ort: der 1. Ort von Orte
SEGName: Segment zu dem der 1. Ort des Alarms gehört
DIVPipelineName:
        Voralarm: ermittelter Voralarm des Alarms; -1, wenn kein Voralarm in Res12 gefunden werden konnte
        Type: Typ des Kontrollraums; z.B. p-p für vollständige Flussbilanzen; '', wenn kein Typ gefunden werden konnte
Name: Name des Bilanzraumes
NrSD: lfd. Nr Alarm BaseType
NrName: lfd. Nr Alarm Name
NrSEGName: lfd. Nr Alarm SEGName
AlarmEvent: AlarmEvent-Objekt
###BZKat: Betriebszustandskategorie des Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfAlarmEreignisse=pd.DataFrame()
try:
AlarmEvents=[] # Liste von AlarmEvent
AlarmEventsOrte={} # dct der Orte, die diesen (key) AlarmEvent melden
AlarmEventsZHKNRn={} # dct der ZHKNRn, die zu diesem (key) gehoeren
# über SEG- und Druck-Ergebnisvektoren
for ResDct, ResSrc, LDSResBaseType in zip([SEGResDct, DruckResDct],[TCsLDSRes1,TCsLDSRes2],['SEG','Druck']):
for ResIDBase,dct in ResDct.items():
AL_S=dct['Alarm']
if len(AL_S) > 0:
# eine Erg-ID weist Alarme [(tA,tE),...] auf
                    # korrespondierende Liste der ZHKs: [(999,1111),...]
ZHKNRnListen=dct['AL_S_ZHKNR_S']
ID=ResIDBase+'ZHKNR_S' # fuer nachfolgende Ausgabe
# ueber alle Alarme der Erg-ID
for idx,AL_S_Timepair in enumerate(AL_S):
(t1,t2)=AL_S_Timepair # tA, tE
ZHKNR_S_Lst=ZHKNRnListen[idx] # Liste der ZHKs in dieser Zeit
if len(ZHKNR_S_Lst) != 1:
logger.warning(("{:s}ID:\n\t {:s}: Alarm {:d} der ID\n\t Zeit von {!s:s} bis {!s:s}:\n\t Anzahl verschiedener ZHKNRn !=1: {:d} {:s}:\n\t ZHKNR eines Alarms wechselt waehrend eines Alarms. Alarm wird identifiziert mit 1. ZHKNR.".format(logStr,ID
,idx
,t1
,t2
,len(ZHKNR_S_Lst)
,str(ZHKNR_S_Lst)
)))
# die erste wird verwendet
ZHKNR=int(ZHKNR_S_Lst[0])
# AlarmEvent erzeugen
alarmEvent=AlarmEvent(t1,t2,ZHKNR,LDSResBaseType)
if alarmEvent not in AlarmEvents:
# diesen Alarm gibt es noch nicht in der Ereignisliste ...
AlarmEvents.append(alarmEvent)
AlarmEventsOrte[alarmEvent]=[]
AlarmEventsZHKNRn[alarmEvent]=[]
else:
pass
# Ort ergaenzen (derselbe Alarm wird erst ab V83.5.3 nur an einem Ort - dem lexikalisch kleinsten des Bilanzraumes - ausgegeben; zuvor konnte derselbe Alarm an mehreren Orten auftreten)
AlarmEventsOrte[alarmEvent].append(ResIDBase)
# ZHKNR(n) ergaenzen (ein Alarm wird unter 1 ZHKNR geführt)
AlarmEventsZHKNRn[alarmEvent].append(ZHKNR_S_Lst)
# df erzeugen
dfAlarmEreignisse=pd.DataFrame.from_records(
[alarmEvent for alarmEvent in AlarmEvents],
columns=AlarmEvent._fields
)
# Liste der EventOrte erstellen, zuweisen
l=[]
for idx,alarmEvent in enumerate(AlarmEvents):
l.append(AlarmEventsOrte[alarmEvent])
dfAlarmEreignisse['OrteIDs']=l
# abgekuerzte Orte
dfAlarmEreignisse['Orte']=dfAlarmEreignisse.apply(lambda row: fOrteStripped(row.LDSResBaseType,row.OrteIDs),axis=1)
dfAlarmEreignisse['Ort']=dfAlarmEreignisse['Orte'].apply(lambda x: x[0])
# Liste der ZHKNRn erstellen, zuweisen
l=[]
for idx,alarmEvent in enumerate(AlarmEvents):
lOfZl=AlarmEventsZHKNRn[alarmEvent]
lOfZ=[*{*chain.from_iterable(lOfZl)}]
lOfZ=sorted(pd.unique(lOfZ))
l.append(lOfZ)
dfAlarmEreignisse['ZHKNRn']=l
# Segmentname eines Ereignisses
dfAlarmEreignisse['SEGName']=dfAlarmEreignisse.apply(lambda row:
dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==row['OrteIDs'][0]]['SEGName'].iloc[0] if row['LDSResBaseType']=='SEG'
else [tuple for tuple in getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt,row['OrteIDs'][0]) if not tuple[-1]][0][1],axis=1)
# DIVPipelineName eines Ereignisses
dfAlarmEreignisse['DIVPipelineName']=dfAlarmEreignisse.apply(lambda row:
dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGName']==row['SEGName']]['DIVPipelineName'].iloc[0]
,axis=1)
# Alarm: ---
#tA: Anfangszeit
#tE: Endezeit
#ZHKNR: ZHKNR (1. bei mehreren Alarmen)
#LDSResBaseType: SEG oder Druck
# Orte: ---
#OrteIDs: OrteIDs des Alarms
#Orte: Kurzform von OrteIDs des Alarms
#ZHKNRn:
#SEGName: Segmentname
#DIVPipelineName
## Nr.
dfAlarmEreignisse.sort_values(by=NrBy,ascending=NrAsc,inplace=True)
dfAlarmEreignisse['Nr']=dfAlarmEreignisse.index+1
#dfAlarmEreignisse['Nr']=dfAlarmEreignisse['Nr']+1
logger.debug("{0:s}{1:s}: {2:s}".format(logStr,'dfAlarmEreignisse',dfAlarmEreignisse.to_string()))
# Voralarm
VoralarmTypen=[]
for index, row in dfAlarmEreignisse.iterrows():
# zur Information bei Ausgaben
OrteIDs=row['OrteIDs']
OrtID=OrteIDs[0]
VoralarmTyp=None
try:
if row['LDSResBaseType']=='SEG':
VoralarmTyp=TCsLDSRes1.loc[:row['tA']-pd.Timedelta('1 second'),OrtID+'AL_S'].iloc[-1]
elif row['LDSResBaseType']=='Druck':
VoralarmTyp=TCsLDSRes2.loc[:row['tA']-pd.Timedelta('1 second'),OrtID+'AL_S'].iloc[-1]
except:
pass
if pd.isnull(VoralarmTyp): # == None: #?! - ggf. Nachfolger eines neutralen Bilanzraumwechsels
VoralarmTyp=-1
logger.warning("{:s}PV: {:40s} Alarm Nr. {:d} ZHKNR {:d}\n\t tA {!s:s}: kein (isnull) Vorlalarm gefunden?! (ggf. neutraler BRWechsel) - Voralarm gesetzt auf: {:d}".format(logStr
,row['OrteIDs'][0]
,int(row['Nr'])
,row['ZHKNR']
,row['tA'],int(VoralarmTyp)))
if int(VoralarmTyp)==0: # == 0: #?! - ggf. Nachfolger eines neutralen Bilanzraumwechsels
VoralarmTyp=0
logger.warning("{:s}PV: {:40s} Alarm Nr. {:d} ZHKNR {:d}\n\t tA {!s:s}: Vorlalarm 0?! (ggf. war Bilanz in Stoerung)".format(logStr
,row['OrteIDs'][0]
,int(row['Nr'])
,row['ZHKNR']
,row['tA']))
if int(VoralarmTyp) not in [-1,0,3,4,10]:
logger.warning("{:s}PV: {:s} Alarm Nr. {:d} {:d} tA {!s:s}: unbekannter Vorlalarm gefunden: {:d}".format(logStr,row['OrteIDs'][0],int(row['Nr']),row['ZHKNR'],row['tA'],int(VoralarmTyp)))
logger.debug("{:s}{:d} {!s:s} VoralarmTyp:{:d}".format(logStr,int(row['Nr']),row['tA'],int(VoralarmTyp)))
VoralarmTypen.append(VoralarmTyp)
dfAlarmEreignisse['Voralarm']=[int(x) for x in VoralarmTypen]
# Type (aus dfCVDataOnly) und Erzeugungszeit (aus dfCVDataOnly) und Name (aus dfCVDataOnly)
dfAlarmEreignisse['ZHKNR']=dfAlarmEreignisse['ZHKNR'].astype('int64')
dfAlarmEreignisse['ZHKNRStr']=dfAlarmEreignisse['ZHKNR'].astype('string')
dfCVDataOnly['ZHKNRStr']=dfCVDataOnly['ZHKNR'].astype('string')
# wg. aelteren App-Log Versionen in denen ZHKNR in dfCVDataOnly nicht ermittelt werden konnte
# Type,ScenTime,Name sind dann undefiniert
dfAlarmEreignisse=pd.merge(dfAlarmEreignisse,dfCVDataOnly,on='ZHKNRStr',suffixes=('','_CVD'),how='left').filter(items=dfAlarmEreignisse.columns.to_list()+['Type'
#,'ScenTime'
,'Name'])
dfAlarmEreignisse=dfAlarmEreignisse.drop(['ZHKNRStr'],axis=1)
dfAlarmEreignisse=dfAlarmEreignisse.fillna(value='')
# lfd. Nummern
dfAlarmEreignisse['NrSD']=dfAlarmEreignisse.groupby(['LDSResBaseType']).cumcount() + 1
dfAlarmEreignisse['NrName']=dfAlarmEreignisse.groupby(['Name']).cumcount() + 1
dfAlarmEreignisse['NrSEGName']=dfAlarmEreignisse.groupby(['SEGName']).cumcount() + 1
# Lebenszeit der ZHKNR
try:
dfAlarmEreignisse['tD_ZHKNR']=dfAlarmEreignisse.apply(lambda row: fCVDTime(row,TCsLDSRes1,TCsLDSRes2,replaceTup),axis=1)
except:
logger.debug("{:s}Spalte tD_ZHKNR (Lebenszeit einer ZHKNR) konnte nicht ermittelt werden. Vmtl. aeltere App-Log Version.".format(logStr))
dfAlarmEreignisse['tD_ZHKNR']='-1'
# Dauer des Alarms
dfAlarmEreignisse['tD']=dfAlarmEreignisse.apply(lambda row: row['tE']-row['tA'],axis=1)
dfAlarmEreignisse['tD']= dfAlarmEreignisse['tD'].apply(lambda x: "{!s:s}".format(x).replace('days','Tage').replace('0 Tage','').replace('Tage','T'))
# AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
dfAlarmEreignisse=dfAlarmEreignisse[['Nr','tA', 'tE','tD','ZHKNR','tD_ZHKNR','ZHKNRn','LDSResBaseType'
,'OrteIDs', 'Orte', 'Ort', 'SEGName','DIVPipelineName'
,'Voralarm', 'Type', 'Name'
,'NrSD', 'NrName', 'NrSEGName'
]]
dfAlarmEreignisse['AlarmEvent']=dfAlarmEreignisse.apply(lambda row: AlarmEvent(row['tA'],row['tE'],row['ZHKNR'],row['LDSResBaseType']),axis=1)
# unklar, warum erforderlich
dfAlarmEreignisse['Nr']=dfAlarmEreignisse.index+1
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse
def fCVDName(Name):
"""
"""
lName=len(Name)
if len(Name)==0:
Name='ZHKName vmtl. nicht in Log'
lNameMaxH=20
if lName > 2*lNameMaxH:
Name=Name[:lNameMaxH-2]+'....'+Name[lName-lNameMaxH+2:]
Name=Name.replace('°','|')
return Name
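# --- Added usage sketch (editorial addition, not part of the original module) -----------------
# fCVDName shortens long ZHK names for the table output and replaces '°' by '|'; the name below
# is a purely hypothetical example.
def _exampleFCVDName():
    longName='Leck Segment 3S_L_6_EL1_39 TUD Szenario 2021-08-15 °Replay°'
    # names longer than 40 characters are shortened to the first and last 18 characters
    # joined by '....'
    return fCVDName(longName)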
def plotDfAlarmEreignisse(
dfAlarmEreignisse=pd.DataFrame()
,sortBy=[]
,replaceTup=('2021-','')
,replaceTuptD=('0 days','')
):
"""
Returns the plt.table
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
df=dfAlarmEreignisse[['Nr','LDSResBaseType','Voralarm','Type','NrSD','tA','tE','tD','ZHKNR','Name','Orte','tD_ZHKNR','NrName','NrSEGName','SEGName','BZKat']].copy()
df['tA']=df['tA'].apply(lambda x: str(x).replace(replaceTup[0],replaceTup[1]))
df['tE']=df['tE'].apply(lambda x: str(x).replace(replaceTup[0],replaceTup[1]))
###df['Anz']=df['Orte'].apply(lambda x: len(x))
##df['Orte']=df['Orte'].apply(lambda x: str(x).replace('[','').replace(']','').replace("'",""))
df['Orte']=df['Orte'].apply(lambda x: str(x[0]))
df['LDSResBaseType']=df.apply(lambda row: "{:s} {:s} - {:d}".format(row['LDSResBaseType'],row['Type'],row['Voralarm']),axis=1)
df=df[['Nr','LDSResBaseType','NrSD','tA','tE','tD','ZHKNR','Name','NrName','NrSEGName','SEGName','tD_ZHKNR','Orte','BZKat']]
df.rename(columns={'LDSResBaseType':'ResTyp - Voralarm'},inplace=True)
df.rename(columns={'tD_ZHKNR':'ZHKZeit','Name':'ZHKName'},inplace=True)
###df['ZHKName']=df['ZHKName'].apply(lambda x: fCVDName(x))
####df['ZHKName']=df['Orte'].apply(lambda x: x[0])
df['NrSEGName (SEGName)']=df.apply(lambda row: "{!s:2s} ({!s:s})".format(row['NrSEGName'],row['SEGName']),axis=1)
df=df[['Nr','ResTyp - Voralarm','NrSD','tA','tD','ZHKNR'
,'Orte' #'ZHKName'
,'BZKat'
,'NrName','NrSEGName (SEGName)','ZHKZeit']]
df.rename(columns={'Orte':'ID'},inplace=True)
df['tD']=df['tD'].apply(lambda x: str(x).replace(replaceTuptD[0],replaceTuptD[1]))
def fGetZHKNRStr(row,dfOrig):
"""
returns:
ZHKNStr in Abhängigkeit der aktuellen Zeile und dfOrig
"""
s=dfOrig[dfOrig['Nr']==row['Nr']].iloc[0]
if len(s.ZHKNRn)>1:
if len(s.ZHKNRn)==2:
return "{:d} ({!s:s})".format(row['ZHKNR'],s.ZHKNRn[1:])
else:
return "{:d} (+{:d})".format(row['ZHKNR'],len(s.ZHKNRn)-1)
else:
return "{:d}".format(row['ZHKNR'])
df['ZHKNR']=df.apply(lambda row: fGetZHKNRStr(row,dfAlarmEreignisse),axis=1)
if sortBy!=[]:
df=df.sort_values(by=sortBy)
t=plt.table(cellText=df.values, colLabels=df.columns
,colWidths=[.03,.1 # Nr ResTyp-Voralarm
,.04 # NrSD
,.08,.08 # tA tD
,.085 # ZHKNR
,.1125,.07 #.1125 # ID BZKat
,.04 # NrName
,.14 # NrSEGName (SEGName)
,.2125] # ZHKZeit
, cellLoc='left'
, loc='center')
t.auto_set_font_size(False)
t.set_fontsize(10)
cols=df.columns.to_list()
#colIdxOrte=cols.index('Orte')
#colIdxName=cols.index('ZHKName')
colIdxNrSD=cols.index('NrSD')
colIdxNrSEG=cols.index('NrSEGName (SEGName)')
# ResTyp - Voralarm
colIdxResTypVA=cols.index('ResTyp - Voralarm')
cells = t.properties()["celld"]
for cellTup,cellObj in cells.items():
cellObj.set_text_props(ha='left')
            row,col=cellTup # row: 0 fuer die Ueberschrift; col beginnt mit 0
#if col == colIdxName:
# cellObj.set_text_props(ha='left')
if col == colIdxNrSD:
if row > 0:
if dfAlarmEreignisse.loc[row-1,'LDSResBaseType']=='SEG':
cellObj.set_text_props(backgroundcolor='lightsteelblue')
else:
cellObj.set_text_props(backgroundcolor='plum')
elif col == colIdxNrSEG:
if row==0:
continue
if 'color' in dfAlarmEreignisse.columns.to_list():
color=dfAlarmEreignisse['color'].iloc[row-1]
cellObj.set_text_props(backgroundcolor=color)
elif col == colIdxResTypVA and row > 0:
pass
if dfAlarmEreignisse.loc[row-1,'Voralarm'] in [10]:
cellObj.set_text_props(backgroundcolor='sandybrown')
elif dfAlarmEreignisse.loc[row-1,'Voralarm'] in [4]:
cellObj.set_text_props(backgroundcolor='pink')
elif dfAlarmEreignisse.loc[row-1,'Voralarm'] in [3]:
cellObj.set_text_props(backgroundcolor='lightcoral')
else:
pass
#cellObj.set_text_props(fontsize=16)
plt.axis('off')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return t
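# --- Added usage sketch (editorial addition, not part of the original module) -----------------
# Rendering the Ereignis table; dfAlarmEreignisse must already carry the BZKat column (added in
# buildAlarmDataframes), the output file name is hypothetical.
def _examplePlotDfAlarmEreignisse(dfAlarmEreignisse):
    fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
    plotDfAlarmEreignisse(dfAlarmEreignisse=dfAlarmEreignisse,sortBy=['Nr'])
    fig.tight_layout(pad=2.)
    plt.savefig('AlarmEreignisse.png') # hypothetical file name
    plt.close()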
def plotDfAlarmStatistikReportsSEGErgs(
h5File='a.h5'
,dfAlarmStatistik=pd.DataFrame()
,SEGResDct={}
,timeStart=None,timeEnd=None
,SEGErgsFile='SEGErgs.pdf'
,stopAtSEGNr=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,byhour=[0,3,6,9,12,15,18,21]
,byminute=None
,bysecond=None
,timeFloorCeilStr=None #'1H' # Runden (1 Stunde)
    ,timeFloorCeilStrDetailPre='6T' # Runden (6 Minuten)
,timeFloorCeilStrDetailPost='3T'
,timeShiftPair=None
):
"""
Creates PDF for all SEGs with FörderZeitenAlAnz>0
1 Base Plot and Detail Plots for the Alarms
Creates corresponding Single-PNGs
Returns xlimsDct:
key: BaseID
value: list of Timepairs of the Detail Plots for the Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
lx=Lx.AppLog(h5File=h5File)
firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal=lx.getTotalLogTime()
if timeStart==None:
if timeFloorCeilStr != None:
timeStart = firstTime.floor(freq=timeFloorCeilStr)
else:
timeStart = firstTime
if timeEnd==None:
if timeFloorCeilStr != None:
timeEnd = lastTime.ceil(freq=timeFloorCeilStr)
else:
timeEnd = lastTime
logger.debug("{0:s}timeStart (ohne timeShift): {1:s} timeEnd (ohne timeShift): {2:s}".format(logStr,str(timeStart),str(timeEnd)))
xlimsDct={}
pdf=PdfPages(SEGErgsFile)
(fileNameBase,ext)= os.path.splitext(SEGErgsFile)
if timeShiftPair != None:
            (period,freq)=timeShiftPair
            timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
else:
timeDelta=pd.Timedelta('0 Seconds')
idxSEGPlotted=0
for idx,(index,row) in enumerate(dfAlarmStatistik.iterrows()):
if stopAtSEGNr != None:
if idxSEGPlotted>=stopAtSEGNr:
break
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,row['SEGResIDBase'])
if row['FörderZeitenAlAnz']==0: # and row['RuheZeitenAlAnz']==0:
logger.info("{:s}: FörderZeitenAlAnz: 0".format(titleStr))
continue # keine SEGs ohne Alarme drucken
# Erg lesen
ResIDBase=row['SEGResIDBase']
dfSegReprVec=getLDSResVecDf(ResIDBase=ResIDBase,LDSResBaseType='SEG',lx=lx,timeStart=timeStart,timeEnd=timeEnd,timeShiftPair=timeShiftPair)
ID='AL_S'
if ID not in dfSegReprVec.keys():
continue
idxSEGPlotted=idxSEGPlotted+1
xlimsDct[ResIDBase]=[]
logger.debug("{:s}ResIDBase: {:s} dfSegReprVec: Spalten: {!s:s}".format(logStr,ResIDBase,dfSegReprVec.columns.to_list()))
# Plot Basis ###########################################################
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStart+timeDelta,timeEnd+timeDelta)
,dateFormat=dateFormat
,byhour=byhour
,byminute=byminute
,bysecond=bysecond
,plotLegend=True
)
backgroundcolor='white'
if row['FörderZeit']==0:
backgroundcolor='lightgrey'
else: # hat Förderzeit
if row['FörderZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['FörderZeitAl']/row['FörderZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: FörderZeit: {:8.2f} FörderZeitenAlAnz: {:d}".format(row['SEGNodes'],row['FörderZeit'],row['FörderZeitenAlAnz'])
if row['FörderZeitenAlAnz'] > 0:
if row['FörderZeitenAlAnz'] <= 3:
txtNr=" Nrn.: {!s:s}".format(row['FörderZeitenAlNrn'])
else:
txtNr=" Nrn.: {!s:s} u.w.".format(row['FörderZeitenAlNrn'][0])
txt=txt+txtNr
else:
txtNr=''
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['FörderZeitSt']>0:
backgroundcolor='white'
if row['FörderZeitSt']/row['FörderZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: FörderZeitSt: {:8.2f})".format(row['FörderZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
fileName="{:s} {:2d} - {:s} {:s} {:s}.png".format(fileNameBase
,int(row.Nr)+1
,str(row.DIVPipelineName)
,row['SEGName']
,txtNr.replace('Nrn.: ','Nrn ').replace(',','').replace('[','').replace(']','').replace('u.w.','u w'))
plt.savefig(fileName)
plt.show()
###plt.clf()
plt.close()
# Plot Alarme ###########################################################
dct=SEGResDct[row['SEGResIDBase']]
timeFirstAlarmStarts,dummy=dct['Alarm'][0]
dummy,timeLastAlarmEnds=dct['Alarm'][-1]
for idxAl,AlNr in enumerate(row['FörderZeitenAlNrn']):
timeAlarmStarts,timeAlarmEnds=dct['Alarm'][idxAl]
timeStartDetail = timeAlarmStarts.floor(freq=timeFloorCeilStrDetailPre)
timeEndDetail = timeAlarmEnds.ceil(freq=timeFloorCeilStrDetailPost)
# wenn AlarmRand - PlotRand < 3 Minuten: um 3 Minuten erweitern
if timeAlarmStarts-timeStartDetail<pd.Timedelta('3 Minutes'):
timeStartDetail=timeStartDetail-pd.Timedelta('3 Minutes')
if timeEndDetail-timeAlarmEnds<pd.Timedelta('3 Minutes'):
timeEndDetail=timeEndDetail+pd.Timedelta('3 Minutes')
xlimsDct[ResIDBase].append((timeStartDetail,timeEndDetail))
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStartDetail,timeEndDetail) # wenn die dct-Zeiten time-geshifted sind ist das korrekt
,dateFormat=dateFormat
,byhour=None#byhour
,byminute=list(np.arange(0,60))#byminute
,bysecond=None#bysecond
)
backgroundcolor='white'
if row['FörderZeit']==0:
backgroundcolor='lightgrey'
else: # hat Förderzeit
if row['FörderZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['FörderZeitAl']/row['FörderZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: FörderZeit: {:8.2f} FörderZeitenAlAnz: {:d}".format(row['SEGNodes'],row['FörderZeit'],row['FörderZeitenAlAnz'])
txtNr=" Nr.: {!s:s}".format(AlNr)
txt=txt+txtNr
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['FörderZeitSt']>0:
backgroundcolor='white'
if row['FörderZeitSt']/row['FörderZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: FörderZeitSt: {:8.2f})".format(row['FörderZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
ax.set_title( titleStr,loc='left')
#logger.info("{:s}".format(titleStr))
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
#(fileName,ext)= os.path.splitext(SEGErgsFile)
fileNameAlarm="{:s} {:s}.png".format(fileName.replace('.png','')
,txtNr.replace('Nr.: ','Nr ').replace(',','').replace('[','').replace(']',''))
plt.savefig(fileNameAlarm)
plt.show()
###plt.clf()
plt.close()
###plt.close()
pdf.close()
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlimsDct
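# --- Added usage sketch (editorial addition, not part of the original module) -----------------
# Producing the per-SEG report PDF; file names are hypothetical and the timeShiftPair mirrors
# the replay example used above; stopAtSEGNr=1 limits the run to the first SEG with alarms,
# which is useful for a quick check.
def _exampleSEGReports(dfAlarmStatistik,SEGResDct):
    xlimsDct=plotDfAlarmStatistikReportsSEGErgs(
        h5File='a.h5'
        ,dfAlarmStatistik=dfAlarmStatistik
        ,SEGResDct=SEGResDct
        ,SEGErgsFile='SEGErgs.pdf'
        ,stopAtSEGNr=1
        ,timeShiftPair=(1,'H')
    )
    return xlimsDct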
def plotDfAlarmStatistikReportsDruckErgs(
h5File='a.h5'
,dfAlarmStatistik=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,DruckResDct={}
,timeStart=None,timeEnd=None
,DruckErgsFile='DruckErgs.pdf'
,stopAtSEGNr=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,byhour=[0,3,6,9,12,15,18,21]
,byminute=None
,bysecond=None
,timeFloorCeilStr=None #'1H'
    ,timeFloorCeilStrDetailPre='6T' # Runden (6 Minuten)
,timeFloorCeilStrDetailPost='3T'
,timeShiftPair=None
):
"""
Creates PDF for all SEGs with RuheZeitenAlAnz>0
1 Base Plot for a Druck with an Alarm and Detail Plots for the Alarms
Creates corresponding Single-PNGs
Returns xlimsDct:
key: BaseID
value: list of Timepairs of the Detail Plots for the Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
lx=Lx.AppLog(h5File=h5File)
firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal=lx.getTotalLogTime()
logger.debug("{0:s}firstTime (ohne TimeShift): {1:s} lastTime (ohne TimeShift): {2:s}".format(logStr,str(firstTime),str(lastTime)))
if timeStart==None:
if timeFloorCeilStr != None:
timeStart = firstTime.floor(freq=timeFloorCeilStr) # https://stackoverflow.com/questions/35339139/where-is-the-documentation-on-pandas-freq-tags
else:
timeStart = firstTime
if timeEnd==None:
if timeFloorCeilStr != None:
timeEnd = lastTime.ceil(freq=timeFloorCeilStr)
else:
timeEnd = lastTime
if timeShiftPair != None:
            (period,freq)=timeShiftPair
            timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
else:
timeDelta=pd.Timedelta('0 Seconds')
logger.debug("{0:s}timeStart abgerundet (ohne TimeShift): {1:s} timeEnd aufgerundet (ohne TimeShift): {2:s} TimeShift: {3:s}".format(logStr
,str(timeStart)
,str(timeEnd)
,str(timeDelta)))
xlimsDct={}
pdf=PdfPages(DruckErgsFile)
(fileNameBase,ext)= os.path.splitext(DruckErgsFile)
# über alle Segmente der Alarmstatistik (die DruckIDs sollen in der Reihenfolge der Alarmstatistik abgearbeitet werden)
idxSEGPlotted=0
for idx,(index,row) in enumerate(dfAlarmStatistik.iterrows()):
if row['RuheZeitenAlAnz']==0: # and row['RuheZeitenAlAnz']==0:
logger.info("LfdNr {:2d} - {:s}: {:s}: RuheZeitenAlAnz: 0".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']))
continue # keine SEGs ohne Alarme drucken
if stopAtSEGNr != None:
if idxSEGPlotted>=stopAtSEGNr:
break
idxSEGPlotted=idxSEGPlotted+1
# DruckIDs eines Segmentes
DruckIDs=sorted([ID for ID in dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGName']==row['SEGName']]['DruckResIDBase'].unique() if not pd.isnull(ID)])
for idxDruckID,DruckResIDBase in enumerate(DruckIDs):
dct=DruckResDct[DruckResIDBase]
if len(dct['Alarm'])==0:
# nur DruckIDs mit Alarmen plotten
continue
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
# Erg lesen
ResIDBase=DruckResIDBase
dfDruckReprVec=getLDSResVecDf(ResIDBase=ResIDBase,LDSResBaseType='Druck',lx=lx,timeStart=timeStart,timeEnd=timeEnd,timeShiftPair=timeShiftPair)
logger.debug("{:s}ResIDBase: {:s} dfDruckReprVec: Spalten: {!s:s}".format(logStr,ResIDBase,dfDruckReprVec.columns.to_list()))
logger.debug("{:s}ID: {:s}: timeStart (mit TimeShift): {:s} timeEnd (mit TimeShift): {:s}".format(logStr
,DruckResIDBase
,str(dfDruckReprVec.index[0])
,str(dfDruckReprVec.index[-1])
))
ID='AL_S'
if ID not in dfDruckReprVec.keys():
continue
xlimsDct[ResIDBase]=[]
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=pd.DataFrame() # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=dfDruckReprVec # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStart+timeDelta,timeEnd+timeDelta)
,dateFormat=dateFormat
,byhour=byhour
,byminute=byminute
,bysecond=bysecond
)
backgroundcolor='white'
if row['RuheZeit']==0:
backgroundcolor='lightgrey'
else: # hat Ruhezeit
if row['RuheZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['RuheZeitAl']/row['RuheZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: LfdNr {:2d}: RuheZeit: {:8.2f} RuheZeitenAlAnz: {:d}".format(
row['SEGNodes']
,int(row.Nr)+1
,row['RuheZeit']
,row['RuheZeitenAlAnz'])
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['RuheZeitSt']>0:
backgroundcolor='white'
if row['RuheZeitSt']/row['RuheZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: RuheZeitSt: {:8.2f})".format(row['RuheZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
RuheZeiten=DruckResDct[DruckResIDBase]['Zustaendig']
RuheZeit=fTotalTimeFromPairs(RuheZeiten,pd.Timedelta('1 minute'),False)
AlarmZeiten=DruckResDct[DruckResIDBase]['Alarm']
AlarmZeit=fTotalTimeFromPairs(AlarmZeiten,pd.Timedelta('1 minute'),False)
RuheZeitenSt=DruckResDct[DruckResIDBase]['Stoerung']
RuheZeitSt=fTotalTimeFromPairs(RuheZeitenSt,pd.Timedelta('1 minute'),False)
txt="Druck: RuheZeit: {:8.2f} (davon St: {:8.2f}) RuheZeitenAlAnz: {:3d}".format(
RuheZeit
,RuheZeitSt
,len(AlarmZeiten))
ax.text(.98, .15,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor='white',
transform=ax.transAxes)
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,DruckResIDBase)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# png
fileName="{:s} {:2d} - {:s} {:s} {:s}.png".format(fileNameBase
,int(row.Nr)+1
,str(row.DIVPipelineName)
,row['SEGName']
,fOrteStripped('Druck',[DruckResIDBase])[0]
)
plt.savefig(fileName)
plt.show()
pdf.savefig(fig)
plt.close()
# Plot Alarme ###########################################################
dct=DruckResDct[DruckResIDBase]
timeFirstAlarmStarts,dummy=dct['Alarm'][0]
dummy,timeLastAlarmEnds=dct['Alarm'][-1]
for idxAl,AlNr in enumerate(row['RuheZeitenAlNrn']):
timeAlarmStarts,timeAlarmEnds=dct['Alarm'][idxAl]
timeStartDetail = timeAlarmStarts.floor(freq=timeFloorCeilStrDetailPre)
timeEndDetail = timeAlarmEnds.ceil(freq=timeFloorCeilStrDetailPost)
if timeAlarmStarts-timeStartDetail<pd.Timedelta('3 Minutes'):
timeStartDetail=timeStartDetail-pd.Timedelta('3 Minutes')
if timeEndDetail-timeAlarmEnds<pd.Timedelta('3 Minutes'):
timeEndDetail=timeEndDetail+pd.Timedelta('3 Minutes')
xlimsDct[ResIDBase].append((timeStartDetail,timeEndDetail))
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=pd.DataFrame() # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=dfDruckReprVec # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStartDetail,timeEndDetail) # wenn die dct-Zeiten time-geshifted sind ist das korrekt
,dateFormat=dateFormat
,byhour=None#byhour
,byminute=list(np.arange(0,60))#byminute
,bysecond=None#bysecond
)
backgroundcolor='white'
if row['RuheZeit']==0:
backgroundcolor='lightgrey'
else: # hat Ruhezeit
if row['RuheZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['RuheZeitAl']/row['RuheZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: LfdNr {:2d}: RuheZeit: {:8.2f} RuheZeitenAlAnz: {:d}".format(
row['SEGNodes']
,int(row.Nr)+1
,row['RuheZeit']
,row['RuheZeitenAlAnz'])
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['RuheZeitSt']>0:
backgroundcolor='white'
if row['RuheZeitSt']/row['RuheZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: RuheZeitSt: {:8.2f})".format(row['RuheZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
RuheZeiten=DruckResDct[DruckResIDBase]['Zustaendig']
RuheZeit=fTotalTimeFromPairs(RuheZeiten,pd.Timedelta('1 minute'),False)
AlarmZeiten=DruckResDct[DruckResIDBase]['Alarm']
AlarmZeit=fTotalTimeFromPairs(AlarmZeiten,pd.Timedelta('1 minute'),False)
RuheZeitenSt=DruckResDct[DruckResIDBase]['Stoerung']
RuheZeitSt=fTotalTimeFromPairs(RuheZeitenSt,pd.Timedelta('1 minute'),False)
txt="Druck: RuheZeit: {:8.2f} (davon St: {:8.2f}) RuheZeitenAlAnz: {:3d} Nr. {:4d}".format(
RuheZeit
,RuheZeitSt
,len(AlarmZeiten)
,AlNr)
ax.text(.98, .15,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor='white',
transform=ax.transAxes)
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,DruckResIDBase)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
fileNameAlarm="{:s} Nr {:d}.png".format(fileName.replace('.png',''),AlNr)
plt.savefig(fileNameAlarm)
plt.show()
plt.close()
#plt.close()
pdf.close()
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlimsDct
def plotTimespans(
xlims # list of sections
,orientation='landscape' # oben HYD unten LDS; 'portrait': # links HYD rechts LDS
,pad=3.5 # tight_layout() can take keyword arguments of pad, w_pad and h_pad. These control the extra padding around the figure border and between subplots. The pads are specified in fraction of fontsize.
,w_pad=0.5
,h_pad=0.5
# 'portrait' # links HYD rechts LDS
,rectSpalteLinks=[0, 0, 0.5, 1]
,rectSpalteRechts=[0.325, 0, 1, 1]
# 'landscape': # oben HYD unten LDS
,rectZeileOben=[0, .5, 1, 1]
,rectZeileUnten=[0, 0, 1, .5]
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None #[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,figTitle='' #!
,figSave=False #!
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,sectionTitlesLDS=None # list of section titles to be used
,sectionTextsLDS=None # list of section texts to be used
,vLinesX=[] # plotted in each HYD section if X-time fits
,hLinesY=[] # plotted in each HYD section
,vAreasX=[] # for each HYD section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXLDS=None # plotted in each LDS section if X-time fits
,vAreasXLDS=None # for each LDS section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
,vLinesXColorLDS=None
,vAreasXColorLDS=None
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,ySpanMin=0.9
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSErgVec selbst
,plotLegend1stOnly=True
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
# --- Args Fct. HYD ---:
,TCsLDSIn=pd.DataFrame() # es werden nur die aDct-definierten geplottet
,TCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorstehenden Dcts ist die ID (der Spaltenname) in den TCs
,TCsOPCScenTimeShift=pd.Timedelta('1 hour')
,TCsSIDEvents=pd.DataFrame() # es werden alle Schieberevents geplottet
,TCsSIDEventsTimeShift=pd.Timedelta('1 hour')
,TCsSIDEventsInXlimOnly=True # es werden nur die Spalten geplottet, die in xlim vorkommen und dort mindestens 1x nicht Null sind (sonst sind alle (zumindest in der Legende) dargestellt)
,TCsSIDEventsyOffset=.05 # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden)
,QDct={}
,pDct={}
,QDctOPC={}
,pDctOPC={}
,IDPltKey='IDPlt' # Schluesselbezeichner in den vorstehenden 4 Dcts; Wert ist Referenz auf das folgende Layout-Dct und das folgende Fcts-Dct; Werte muessen eindeutig sein
,attrsDct=attrsDct
,fctsDct={}
,plotRTTM=True
# p y-Achse
,ylimp=ylimpD #wenn undef., dann min/max
,ylimpxlim=False #wenn Wahr und ylim undef., dann wird xlim beruecksichtigt bei min/max
,yticksp=None #[0,50,100] #wenn undef., dann aus ylimp
,ylabelp='[bar]'
# Q y-Achse
,ylimQ=ylimQD
,ylimQxlim=False
,yticksQ=None
,ylabelQ='[Nm³/h]'
# 3. Achse
,ylim3rd=ylim3rdD
,yticks3rd=yticks3rdD
,yGridSteps=yGridStepsD
# SchieberEvents
,pSIDEvents=pSIDEvents
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
,eventCCmds=eventCCmds
,eventCStats=eventCStats
,valRegExMiddleCmds=valRegExMiddleCmds
# es muessen soviele Farben definiert sein wie Schieber
,baseColorsDef=baseColorsSchieber
,markerDef=markerDefSchieber
# --- Args Fct. LDS ---:
,dfSegReprVec=pd.DataFrame()
,dfDruckReprVec=pd.DataFrame()
,ylimAL=ylimALD
,yticksAL=yticksALD
,ylimR=ylimRD #can be a list #None #(-10,10) #wenn undef., dann min/max dfSegReprVec
,ylimRxlim=False # can be a list #wenn Wahr und ylimR undef. (None), dann wird xlim beruecksichtigt bei min/max dfSegReprVec
,yticksR=yticksRD # can be a list of lists #[0,2,4,10,15,30,40] #wenn undef. (None), dann aus ylimR; matplotlib "vergrößert" mit dem Setzen von yTicks ein ebenfalls gesetztes ylim wenn die Ticks außerhalb des ylims liegen
# dito Beschl.
,ylimAC=ylimACD
,ylimACxlim=False
,yticksAC=yticksACD
,attrsDctLDS=attrsDctLDS
,plotLPRate=True
,plotR2FillSeg=True
,plotR2FillDruck=True
,plotAC=True
,plotACCLimits=True
,highlightAreas=True
,Seg_Highlight_Color='cyan'
,Seg_Highlight_Alpha=.1
,Seg_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Seg_HighlightError_Color='peru'
,Seg_Highlight_Alpha_Error=.3
,Seg_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,Druck_Highlight_Color='cyan'
,Druck_Highlight_Alpha=.1
,Druck_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Druck_HighlightError_Color='peru'
,Druck_Highlight_Alpha_Error=.3
,Druck_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,plotTV=True
,plotTVTimerFct=None
,plotTVAmFct=lambda x: x*100
,plotTVAmLabel=plotTVAmLabelD
,ylimTV=ylimTVD
,yticksTV=yticksTVD
,plotDPDT=True
,plotSB_S=True
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
fig=plt.gcf()
if orientation=='landscape':
# oben HYD unten LDS
gsHYD = gridspec.GridSpec(1,len(xlims),figure=fig)
axLstHYD=[fig.add_subplot(gsHYD[idx]) for idx in np.arange(gsHYD.ncols)]
gsLDS = gridspec.GridSpec(1,len(xlims),figure=fig)
axLstLDS=[fig.add_subplot(gsLDS[idx]) for idx in np.arange(gsLDS.ncols)]
else:
# links HYD rechts LDS
gsHYD = gridspec.GridSpec(len(xlims),1,figure=fig)
axLstHYD=[fig.add_subplot(gsHYD[idx]) for idx in np.arange(gsHYD.nrows)]
gsLDS = gridspec.GridSpec(len(xlims),1,figure=fig)
axLstLDS=[fig.add_subplot(gsLDS[idx]) for idx in np.arange(gsLDS.nrows)]
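# Note: two full-figure GridSpecs are created on the same figure here; they only stop
# overlapping once the tight_layout() calls further down confine each of them to its
# rect (rectZeileOben/rectZeileUnten resp. rectSpalteLinks/rectSpalteRechts).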
pltLDSpQAndEventsResults=plotTimespansHYD(
axLst=axLstHYD
,xlims=xlims
,figTitle=figTitle # ''
,figSave=figSave # False
,sectionTitles=sectionTitles
,sectionTexts=sectionTexts
,vLinesX=vLinesX
,hLinesY=hLinesY
,vAreasX=vAreasX
,vLinesXColor=vLinesXColor
,vAreasXColor=vAreasXColor
,plotLegend=plotLegend
,plotLegend1stOnly=plotLegend1stOnly
# --- Args Fct. ---:
,dfTCsLDSIn=TCsLDSIn
,dfTCsOPC=TCsOPC
,dfTCsOPCScenTimeShift=TCsOPCScenTimeShift
,dfTCsSIDEvents=TCsSIDEvents
,dfTCsSIDEventsTimeShift=TCsSIDEventsTimeShift
,dfTCsSIDEventsInXlimOnly=TCsSIDEventsInXlimOnly
,QDct=QDct
,pDct=pDct
,QDctOPC=QDctOPC
,pDctOPC=pDctOPC
,attrsDct=attrsDct
,fctsDct=fctsDct
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,plotRTTM=plotRTTM
,ylimp=ylimp
,ylabelp=ylabelp
,yticksp=yticksp
,ylimQ=ylimQ
,yticksQ=yticksQ
,yGridSteps=yGridSteps
,ylim3rd=ylim3rd
,yticks3rd=yticks3rd
)
if orientation=='landscape':
# oben HYD unten LDS
gsHYD.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectZeileOben)
else:
# links HYD rechts LDS
gsHYD.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectSpalteLinks)
if sectionTitlesLDS==None:
sectionTitlesLDS=sectionTitles
if sectionTextsLDS==None:
sectionTextsLDS=sectionTexts
if vLinesXLDS==None:
vLinesXLDS=vLinesX
if vAreasXLDS==None:
vAreasXLDS=vAreasX
if vLinesXColorLDS==None:
vLinesXColorLDS=vLinesXColor
if vAreasXColorLDS==None:
vAreasXColorLDS=vAreasXColor
pltLDSErgVecResults=plotTimespansLDS(
axLst=axLstLDS
,xlims=xlims
,figTitle=figTitle # ''
,figSave=figSave # False
,sectionTitles=sectionTitlesLDS
,sectionTexts=sectionTextsLDS
,vLinesX=vLinesXLDS
,vAreasX=vAreasXLDS
,vLinesXColor=vLinesXColorLDS
,vAreasXColor=vAreasXColorLDS
,plotLegend=plotLegend
,plotLegend1stOnly=plotLegend1stOnly
# --- Args Fct. ---:
,dfSegReprVec=dfSegReprVec
,dfDruckReprVec=dfDruckReprVec
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,ylimR=ylimR
,ylimRxlim=ylimRxlim
,yticksR=yticksR
,plotLPRate=plotLPRate
,plotR2FillSeg=plotR2FillSeg
,plotR2FillDruck=plotR2FillDruck
,plotAC=plotAC
,ylimAC=ylimAC
,ylimACxlim=ylimACxlim
,yticksAC=yticksAC
,plotTV=plotTV
,plotTVTimerFct=plotTVTimerFct
,plotTVAmFct=plotTVAmFct
,plotTVAmLabel=plotTVAmLabel
,ylimTV=ylimTV
,yticksTV=yticksTV
,plotDPDT=plotDPDT
,plotSB_S=plotSB_S
)
# wenn weniger als 5 Achsen geplottet werden stimmt der erste Wert von rectSpalteRechts nicht
#(axes,lines)=pltLDSErgVecResults[0]
#
# numOfYAxes=len(axes)
#corFac=5-numOfYAxes
#rectSpalteRechtsCor=rectSpalteRechts #[0.325, 0, 1, 1]
#rectSpalteRechtsCor[0]=rectSpalteRechtsCor[0]+0.06*corFac
if orientation=='landscape':
# oben HYD unten LDS
gsLDS.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectZeileUnten)
else:
# links HYD rechts LDS
gsLDS.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectSpalteRechts)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return gsHYD,gsLDS,pltLDSpQAndEventsResults,pltLDSErgVecResults
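# Illustrative call sketch for plotTimespans (not from the original source; the figure
# size, timestamps and DataFrames below are hypothetical placeholders):
#
#   fig = plt.figure(figsize=(16., 9.), dpi=100)
#   xlims = [(t0, t1), (t1, t2)]                  # two sections; t0..t2 are pd.Timestamps
#   gsHYD, gsLDS, resHYD, resLDS = plotTimespans(
#        xlims
#       ,orientation='landscape'                  # HYD row on top, LDS row below
#       ,TCsLDSIn=dfTCsLDSIn                      # hypothetical p/Q time curves
#       ,pDct=pDct, QDct=QDct                     # which IDs to plot and how to label them
#       ,dfSegReprVec=dfSegReprVec                # hypothetical LDS result vector (SEG)
#       ,dfDruckReprVec=dfDruckReprVec            # hypothetical LDS result vector (DRUCK)
#   )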
def plotTimespansHYD(
axLst # list of axes to be used
,xlims # list of sections
,figTitle='' # the title of the plot; will be extended by min. and max. time calculated over all sections; will be also the pdf and png fileName
,figSave=False #True # creates pdf and png
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,vLinesX=[] # plotted in each section if X-time fits
,hLinesY=[] # plotted in each section
,vAreasX=[] # for each section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
# --- Args Fct. ---:
,dfTCsLDSIn=pd.DataFrame() # es werden nur die aDct-definierten geplottet
,dfTCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorstehenden Dcts ist die ID (der Spaltenname) in den TCs
,dfTCsOPCScenTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEvents=pd.DataFrame() # es werden alle Schieberevents geplottet
,dfTCsSIDEventsTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEventsInXlimOnly=True # es werden nur die Spalten geplottet, die in xlim vorkommen und dort mindestens 1x nicht Null sind (sonst sind alle (zumindest in der Legende) dargestellt)
,dfTCsSIDEventsyOffset=.05 # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden)
,QDct={ # Example
'Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value':{'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,'Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value':{'IDPlt':'Q Snk','RTTM':'IMDI.Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value'}
}
,pDct={ # Example
'Objects.FBG_HPS_M.6_KED_39_PTI_01_E.In.MW.value':{'IDPlt':'p Src'}
,'Objects.FBG_HPS_M.6_TUD_39_PTI_01_E.In.MW.value':{'IDPlt':'p Snk'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_01_E.In.MW.value':{'IDPlt':'p ISrc 1'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_02_E.In.MW.value':{'IDPlt':'p ISnk 2'}
}
,QDctOPC={ # Example
'Objects.FBG_MESSW.6_EL1_39_FT_01.In.MW.value':{'IDPlt':'Q xSrc 1'}
}
,pDctOPC={}
,IDPltKey='IDPlt' # Schluesselbezeichner in den vorstehenden 4 Dcts; Wert ist Referenz auf das folgende Layout-Dct und das folgende Fcts-Dct; Werte muessen eindeutig sein
,attrsDct=attrsDct
,fctsDct={} # a Dct with Fcts
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None#[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,plotRTTM=True
# p y-Achse
,ylimp=ylimpD #wenn undef., dann min/max
,ylimpxlim=False #wenn Wahr und ylim undef., dann wird xlim beruecksichtigt bei min/max
,yticksp=None #[0,50,100] #wenn undef., dann aus ylimp
,ylabelp='[bar]'
# Q y-Achse
,ylimQ=ylimQD
,ylimQxlim=False
,yticksQ=None
,ylabelQ='[Nm³/h]'
# 3. Achse
,ylim3rd=ylim3rdD
,yticks3rd=yticks3rdD
,yGridSteps=yGridStepsD
,ySpanMin=0.9 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSpQAndEvents selbst
,plotLegend1stOnly=True # diese Funktion plottet wenn plotLegend=True die Legende nur im ersten Plot
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
# SchieberEvents
,pSIDEvents=pSIDEvents
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
,eventCCmds=eventCCmds
,eventCStats=eventCStats
,valRegExMiddleCmds=valRegExMiddleCmds # colRegExMiddle-Auspraegung fuer Befehle (==> eventCCmds)
# es muessen soviele Farben definiert sein wie Schieber
,baseColorsDef=baseColorsSchieber
,markerDef=markerDefSchieber
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
# plots pltLDSpQAndEvents-Sections
# returns a Lst of pltLDSpQAndEvents-Results, a Lst of (axes,lines,scatters)
try:
if sectionTitles==[] or sectionTitles==None:
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if not isinstance(sectionTitles, list):
logger.warning("{0:s}sectionTitles muss eine Liste von strings sein.".format(logStr))
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if len(sectionTitles)!=len(xlims):
logger.debug("{0:s}sectionTitles muss dieselbe Laenge haben wie xlims.".format(logStr))
if len(sectionTitles) == 1:
sectionTitles=len(xlims)*[sectionTitles[0]]
else:
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if sectionTexts==[] or sectionTexts==None:
sectionTexts=len(xlims)*['']
if not isinstance(sectionTexts, list):
logger.warning("{0:s}sectionTexts muss eine Liste von strings sein.".format(logStr))
sectionTexts=len(xlims)*['']
if len(sectionTexts)!=len(xlims):
logger.warning("{0:s}sectionTexts muss dieselbe Laenge haben wie xlims.".format(logStr))
sectionTexts=len(xlims)*['']
if plotLegend:
plotLegendFct=False
else:
plotLegendFct=True
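# Note: plotLegend is interpreted by this wrapper; the per-section call below gets the
# inverse so that pltLDSpQAndEvents does not draw its own legend and the combined p/Q
# legends are added once per section further down.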
pltLDSpQAndEventsResults=[]
for idx,xlim in enumerate(xlims):
ax = axLst[idx]
if isinstance(dateFormat, list):
dateFormatIdx=dateFormat[idx]
else:
dateFormatIdx=dateFormat
bysecondIdx=bysecond
if isinstance(bysecond, list):
if any(isinstance(el, list) for el in bysecond):
bysecondIdx=bysecond[idx]
byminuteIdx=byminute
if isinstance(byminute, list):
if any(isinstance(el, list) for el in byminute):
byminuteIdx=byminute[idx]
byhourIdx=byhour
if isinstance(byhour, list):
if any(isinstance(el, list) for el in byhour):
byhourIdx=byhour[idx]
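# dateFormat/bysecond/byminute/byhour may be given once (applied to every section) or
# as a list resp. a list of lists (one entry per section); the *Idx variables hold the
# resolved per-section settings.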
(axes,lines,scatters)=pltLDSpQAndEvents(
ax
,dfTCsLDSIn=dfTCsLDSIn
,dfTCsOPC=dfTCsOPC
,dfTCsOPCScenTimeShift=dfTCsOPCScenTimeShift
,dfTCsSIDEvents=dfTCsSIDEvents
,dfTCsSIDEventsTimeShift=dfTCsSIDEventsTimeShift
,dfTCsSIDEventsInXlimOnly=dfTCsSIDEventsInXlimOnly
,dfTCsSIDEventsyOffset=dfTCsSIDEventsyOffset
,QDct=QDct
,pDct=pDct
,QDctOPC=QDctOPC
,pDctOPC=pDctOPC
,attrsDct=attrsDct
,fctsDct=fctsDct
,xlim=xlim
,dateFormat=dateFormatIdx
,bysecond=bysecondIdx
,byminute=byminuteIdx
,byhour=byhourIdx
,plotRTTM=plotRTTM
,ylimp=ylimp
,ylabelp=ylabelp
,yticksp=yticksp
,ylimQ=ylimQ
,yticksQ=yticksQ
# 3. Achse
,ylim3rd=ylim3rd
,yticks3rd=yticks3rd
,yGridSteps=yGridSteps
,plotLegend=plotLegendFct
,baseColorsDef=baseColorsDef
)
pltLDSpQAndEventsResults.append((axes,lines,scatters))
sectionText=sectionTexts[idx]
ax.text(
0.5, 0.5,
sectionText,
ha='center', va='top',
transform=ax.transAxes
)
(timeStart,timeEnd)=xlim
sectionTitleSingle="{:s}: Plot Nr. {:d} - Zeitspanne: {:s}".format(sectionTitles[idx],idx+1,str(timeEnd-timeStart)).replace('days','Tage')
ax.set_title(sectionTitleSingle)
for vLineX in vLinesX:
if vLineX >= timeStart and vLineX <= timeEnd:
ax.axvline(x=vLineX,ymin=0, ymax=1, color=vLinesXColor,ls=linestyle_tuple[11][1])
for hLineY in hLinesY:
ax.axhline(y=hLineY,xmin=0, xmax=1,color='gray',ls=linestyle_tuple[11][1])
if len(vAreasX) == len(xlims):
vAreasXSection=vAreasX[idx]
if vAreasXSection==[] or vAreasXSection==None:
pass
else:
for vArea in vAreasXSection:
ax.axvspan(vArea[0], vArea[1], alpha=0.6, color=vAreasXColor)
else:
if len(vAreasX)>0:
logger.warning("{0:s}vAreasX muss dieselbe Laenge haben wie xlims.".format(logStr))
# Legend
if plotLegend:
legendHorizontalPos='center'
if len(xlims)>1:
if idx in [0,2,4]: # Anfahren ...
legendHorizontalPos='right'
elif idx in [1,3,5]: # Abfahren ...
legendHorizontalPos='left'
if plotLegend1stOnly:
legendHorizontalPos='center' # wenn nur 1x Legende dann Mitte
if plotLegend1stOnly and idx>0:
pass
else:
patternBCp='^p S(rc|nk)'
patternBCQ='^Q S(rc|nk)'
patternBCpQ='^[pQ] S(rc|nk)'
linesp=[line for line in lines if re.search(patternBCp,line) != None]
linesQ=[line for line in lines if re.search(patternBCQ,line) != None]
linespTxt=tuple([lines[line] for line in linesp])
linesQTxt=tuple([lines[line] for line in linesQ])
moreLines=[line for line in lines if re.search(patternBCpQ,line) == None]
moreLinesp=[line for line in moreLines if re.search('^p',line) != None]
moreLinesQ=[line for line in moreLines if re.search('^Q',line) != None]
moreLinespTxt=tuple([lines[line] for line in moreLinesp])
moreLinesQTxt=tuple([lines[line] for line in moreLinesQ])
axes['p'].add_artist(axes['p'].legend(
linespTxt+moreLinespTxt
,linesp+moreLinesp
,loc='upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
axes['Q'].add_artist(axes['Q'].legend(
linesQTxt+moreLinesQTxt
,linesQ+moreLinesQ
,loc='lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if 'SID' in axes.keys() and len(scatters)>0:
if legendHorizontalPos == 'center':
legendHorizontalPosAct=''
else:
legendHorizontalPosAct=' '+legendHorizontalPos
axes['SID'].legend(loc='center'+legendHorizontalPosAct
,framealpha=legendFramealpha
,facecolor=legendFacecolor)
# Titel
tMin=xlims[0][0]
tMax=xlims[-1][1]
for tPair in xlims:
(t1,t2)=tPair
if t1 < tMin:
tMin=t1
if t2>tMax:
tMax=t2
if figTitle not in ['',None]:
figTitle="{:s} - {:s} - {:s}".format(figTitle,str(tMin),str(tMax)).replace(':',' ')
fig=plt.gcf()
fig.suptitle(figTitle)
# speichern?!
if figSave:
fig.tight_layout(pad=2.) # gs.tight_layout(fig,pad=2.)
plt.savefig(figTitle+'.png')
plt.savefig(figTitle+'.pdf')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return pltLDSpQAndEventsResults
def plotTimespansLDS(
axLst # list of axes to be used
,xlims # list of sections
,figTitle='' # the title of the plot; will be extended by min. and max. time calculated over all sections; will be also the pdf and png fileName
,figSave=False #True # creates pdf and png
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,vLinesX=[] # plotted in each section if X-time fits
,vAreasX=[] # for each section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
# --- Args Fct. ---:
,dfSegReprVec=pd.DataFrame()
,dfDruckReprVec=pd.DataFrame()
#,xlim=None
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None #[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,ylimAL=ylimALD
,yticksAL=yticksALD
,yTwinedAxesPosDeltaHPStart=-0.0125
,yTwinedAxesPosDeltaHP=-0.0875
,ylimR=ylimRD # can be a list
,ylimRxlim=False # can be a list
,yticksR=yticksRD # can be a list
# dito Beschl.
,ylimAC=ylimACD
,ylimACxlim=False
,yticksAC=yticksACD
,ySpanMin=0.9
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSErgVec selbst
,plotLegend1stOnly=True # diese Funktion plottet wenn plotLegend=True die Legende nur im ersten Plot
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
,attrsDctLDS=attrsDctLDS
,plotLPRate=True
,plotR2FillSeg=True
,plotR2FillDruck=True
,plotAC=True
,plotACCLimits=True
,highlightAreas=True
,Seg_Highlight_Color='cyan'
,Seg_Highlight_Alpha=.1
,Seg_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Seg_HighlightError_Color='peru'
,Seg_Highlight_Alpha_Error=.3
,Seg_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,Druck_Highlight_Color='cyan'
,Druck_Highlight_Alpha=.1
,Druck_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Druck_HighlightError_Color='peru'
,Druck_Highlight_Alpha_Error=.3
,Druck_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,plotTV=True
,plotTVTimerFct=None
,plotTVAmFct=lambda x: x*100
,plotTVAmLabel=plotTVAmLabelD
,ylimTV=ylimTVD
,yticksTV=yticksTVD
,plotDPDT=True
,plotSB_S=True
):
# plots pltLDSErgVec-Sections
# returns a Lst of pltLDSErgVec-Results, a Lst of (axes,lines)
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if sectionTitles==[] or sectionTitles ==None:
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if not isinstance(sectionTitles, list):
logger.warning("{0:s}sectionTitles muss eine Liste von strings sein.".format(logStr))
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if len(sectionTitles)!=len(xlims):
logger.debug("{0:s}sectionTitles muss dieselbe Laenge haben wie xlims.".format(logStr))
if len(sectionTitles) == 1:
sectionTitles=len(xlims)*[sectionTitles[0]]
else:
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if sectionTexts==[] or sectionTexts==None:
sectionTexts=len(xlims)*['']
if not isinstance(sectionTexts, list):
logger.warning("{0:s}sectionTexts muss eine Liste von strings sein.".format(logStr))
sectionTexts=len(xlims)*['']
if len(sectionTexts)!=len(xlims):
logger.warning("{0:s}sectionTexts muss dieselbe Laenge haben wie xlims.".format(logStr))
sectionTexts=len(xlims)*['']
if plotLegend:
plotLegendFct=False
else:
plotLegendFct=True
pltLDSErgVecResults=[]
for idx,xlim in enumerate(xlims):
ax = axLst[idx]
if isinstance(dateFormat, list):
dateFormatIdx=dateFormat[idx]
else:
dateFormatIdx=dateFormat
bysecondIdx=bysecond
if isinstance(bysecond, list):
if any(isinstance(el, list) for el in bysecond):
bysecondIdx=bysecond[idx]
byminuteIdx=byminute
if isinstance(byminute, list):
if any(isinstance(el, list) for el in byminute):
byminuteIdx=byminute[idx]
byhourIdx=byhour
if isinstance(byhour, list):
if any(isinstance(el, list) for el in byhour):
byhourIdx=byhour[idx]
ylimRIdx=ylimR
if isinstance(ylimR, list):
ylimRIdx=ylimR[idx]
ylimRxlimIdx=ylimRxlim
if isinstance(ylimRxlim, list):
ylimRxlimIdx=ylimRxlim[idx]
yticksRIdx=yticksR
if isinstance(yticksR, list):
if any(isinstance(el, list) for el in yticksR):
yticksRIdx=yticksR[idx]
(axes,lines)=pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec
,dfDruckReprVec=dfDruckReprVec
,xlim=xlims[idx]
,dateFormat=dateFormatIdx
,bysecond=bysecondIdx
,byminute=byminuteIdx
,byhour=byhourIdx
,ylimAL=ylimAL
,yticksAL=yticksAL
,yTwinedAxesPosDeltaHPStart=yTwinedAxesPosDeltaHPStart
,yTwinedAxesPosDeltaHP=yTwinedAxesPosDeltaHP
,ylimR=ylimRIdx
,ylimRxlim=ylimRxlimIdx
,yticksR=yticksRIdx
,ylimAC=ylimAC
,ylimACxlim=ylimACxlim
,yticksAC=yticksAC
,ySpanMin=ySpanMin
,plotLegend=plotLegendFct
,legendLoc=legendLoc
,legendFramealpha=legendFramealpha
,legendFacecolor=legendFacecolor
,attrsDctLDS=attrsDctLDS
,plotLPRate=plotLPRate
,plotR2FillSeg=plotR2FillSeg
,plotR2FillDruck=plotR2FillDruck
,plotAC=plotAC
,plotACCLimits=plotACCLimits
,highlightAreas=highlightAreas
,Seg_Highlight_Color=Seg_Highlight_Color
,Seg_Highlight_Alpha=Seg_Highlight_Alpha
,Seg_Highlight_Fct=Seg_Highlight_Fct
,Seg_HighlightError_Color=Seg_HighlightError_Color
,Seg_Highlight_Alpha_Error=Seg_Highlight_Alpha_Error #
,Seg_HighlightError_Fct=Seg_HighlightError_Fct
,Druck_Highlight_Color=Druck_Highlight_Color
,Druck_Highlight_Alpha=Druck_Highlight_Alpha
,Druck_Highlight_Fct=Druck_Highlight_Fct
,Druck_HighlightError_Color=Druck_HighlightError_Color
,Druck_Highlight_Alpha_Error=Druck_Highlight_Alpha_Error #
,Druck_HighlightError_Fct=Druck_HighlightError_Fct
,plotTV=plotTV
,plotTVTimerFct=plotTVTimerFct
,plotTVAmFct=plotTVAmFct
,plotTVAmLabel=plotTVAmLabel
,ylimTV=ylimTV
,yticksTV=yticksTV
,plotDPDT=plotDPDT
,plotSB_S=plotSB_S
)
pltLDSErgVecResults.append((axes,lines))
sectionText=sectionTexts[idx]
ax.text(
0.5, 0.5,
sectionText,
ha='center', va='top',
transform=ax.transAxes
)
(timeStart,timeEnd)=xlim
sectionTitleSingle="{:s}: Plot Nr. {:d} - Zeitspanne: {:s}".format(sectionTitles[idx],idx+1,str(timeEnd-timeStart)).replace('days','Tage')
ax.set_title(sectionTitleSingle)
for vLineX in vLinesX:
if vLineX >= timeStart and vLineX <= timeEnd:
ax.axvline(x=vLineX,ymin=0, ymax=1, color=vLinesXColor,ls=linestyle_tuple[11][1])
if len(vAreasX) == len(xlims):
vAreasXSection=vAreasX[idx]
if vAreasXSection==[] or vAreasXSection==None:
pass
else:
for vArea in vAreasXSection:
ax.axvspan(vArea[0], vArea[1], alpha=0.6, color=vAreasXColor)
else:
if len(vAreasX)>0:
logger.warning("{0:s}vAreasX muss dieselbe Laenge haben wie xlims.".format(logStr))
# Legend
if plotLegend:
legendHorizontalPos='center'
if len(xlims)>1:
if idx in [0,2,4]: # Anfahren ...
legendHorizontalPos='right'
elif idx in [1,3,5]: # Abfahren ...
legendHorizontalPos='left'
if plotLegend1stOnly and idx>0:
pass
else:
if not dfSegReprVec.empty:
patternSeg='Seg$'
axes['A'].add_artist(axes['A'].legend(
tuple([lines[line] for line in lines if re.search(patternSeg,line) != None])
,tuple([line for line in lines if re.search(patternSeg,line) != None])
,loc='upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if not dfDruckReprVec.empty:
patternDruck='Drk$'
axes['A'].add_artist(axes['A'].legend(
tuple([lines[line] for line in lines if re.search(patternDruck,line) != None])
,tuple([line for line in lines if re.search(patternDruck,line) != None])
,loc='lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
# Titel
tMin=xlims[0][0]
tMax=xlims[-1][1]
for tPair in xlims:
(t1,t2)=tPair
if t1 < tMin:
tMin=t1
if t2>tMax:
tMax=t2
if figTitle not in ['',None]:
figTitle="{:s} - {:s} - {:s}".format(figTitle,str(tMin),str(tMax)).replace(':',' ')
fig=plt.gcf()
fig.suptitle(figTitle)
# speichern?!
if figSave:
fig.tight_layout(pad=2.) # gs.tight_layout(fig,pad=2.)
plt.savefig(figTitle+'.png')
plt.savefig(figTitle+'.pdf')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return pltLDSErgVecResults
def pltLDSpQAndEvents(
ax
,dfTCsLDSIn # es werden nur die aDct-definierten geplottet
,dfTCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorgenannten Dcts ist die ID (der Spaltenname) in den TCs
,dfTCsOPCScenTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEvents=pd.DataFrame()
,dfTCsSIDEventsTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEventsInXlimOnly=True # es werden nur die Spalten geplottet, die in xlim vorkommen und dort mindestens 1x nicht Null sind (sonst sind alle (zumindest in der Legende) dargestellt)
,dfTCsSIDEventsyOffset=.05 # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden)
,QDct={ # Example
'Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value':{'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,'Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value':{'IDPlt':'Q Snk','RTTM':'IMDI.Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value'}
}
,pDct={# Example
'Objects.FBG_HPS_M.6_KED_39_PTI_01_E.In.MW.value':{'IDPlt':'p Src'}
,'Objects.FBG_HPS_M.6_TUD_39_PTI_01_E.In.MW.value':{'IDPlt':'p Snk'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_01_E.In.MW.value':{'IDPlt':'p ISrc 1'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_02_E.In.MW.value':{'IDPlt':'p ISnk 2'}
}
,QDctOPC={ # Example
'Objects.FBG_MESSW.6_EL1_39_FT_01.In.MW.value':{'IDPlt':'Q xSnk 1'}
}
,pDctOPC={}
,IDPltKey='IDPlt' # Schluesselbezeichner in den vorstehenden 4 Dcts; Wert ist Referenz auf das folgende Layout-Dct und das folgende Fcts-Dct; Werte muessen eindeutig sein
,attrsDct=attrsDct
,fctsDct={} # a Dct with Fcts
,xlim=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,bysecond=None #[0,15,30,45]
,byminute=None
,byhour=None
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,plotRTTM=True # plot RTTM-Echoes
# p y-Achse
,ylimp=ylimpD #wenn undef., dann min/max
,ylimpxlim=False #wenn Wahr und ylim undef., dann wird xlim beruecksichtigt bei min/max
,yticksp=None #wenn undef., dann aus ylimp
,ylabelp='[bar]'
# Q y-Achse
,ylimQ=ylimQD
,ylimQxlim=False
,yticksQ=None #wenn undef., dann aus ylimQ
,ylabelQ='[Nm³/h]'
# 3. Achse
,ylim3rd=ylim3rdD
,yticks3rd=yticks3rdD
,ylabel3rd='Schieber (ZUSTände 0,1,2 jew. + x; Befehle)'
,yGridSteps=30 # 0: das y-Gitter besteht dann bei ylimp=ylimQ=yticksp=yticksQ None nur aus min/max (also 1 Gitterabschnitt)
,ySpanMin=0.9 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,plotLegend=True
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
# SchieberEvents
,pSIDEvents=pSIDEvents
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
,eventCCmds=eventCCmds
,eventCStats=eventCStats
,valRegExMiddleCmds=valRegExMiddleCmds # colRegExMiddle-Auspraegung fuer Befehle (==> eventCCmds)
# es muessen soviele Farben definiert sein wie Schieber
,baseColorsDef=baseColorsSchieber
,markerDef=markerDefSchieber
):
"""
zeichnet pq-Zeitkurven - ggf. ergaenzt durch Events
Returns:
* axes (Dct of axes)
* lines (Dct of lines)
* scatters (List of ax.scatter-Results)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
axes={}
lines={}
scatters=[]
try:
axes['p']=ax
# x-Achse ----------------
if xlim == None:
xlimMin=dfTCsLDSIn.index[0]
xlimMax=dfTCsLDSIn.index[-1]
xlim=(xlimMin,xlimMax)
(xlimMin,xlimMax)=xlim
ax.set_xlim(xlim)
logger.debug("{0:s}dfTCsOPCScenTimeShift: {1:s}".format(logStr,str(dfTCsOPCScenTimeShift)))
logger.debug("{0:s}bysecond: {1:s}".format(logStr,str(bysecond)))
logger.debug("{0:s}byminute: {1:s}".format(logStr,str(byminute)))
logger.debug("{0:s}byhour: {1:s}".format(logStr,str(byhour)))
pltHelperX(
ax
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart
)
# Eindeutigkeit der IDPlts pruefen
keys=[]
keysUneindeutig=[]
for dct in [QDct,pDct,QDctOPC,pDctOPC]:
for key, value in dct.items():
if IDPltKey in value.keys():
IDPltValue=value[IDPltKey]
if IDPltValue in keys:
print("IDPlt {:s} bereits vergeben".format(IDPltValue))
keysUneindeutig.append(IDPltValue)
else:
keys.append(IDPltValue)
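# Duplicate IDPlt values are only reported here; since the plotted lines are keyed by
# their label (built from the IDPlt value), a duplicate would silently overwrite an
# earlier entry in the lines-Dct returned to the caller.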
# 1. Achse p -----------------------
logger.debug("{0:s}{1:s}".format(logStr,'# 1. Achse p'))
for key, value in pDct.items(): # nur die konfigurierten IDs plotten
if key in dfTCsLDSIn.columns: # nur dann, wenn ID als Spalte enthalten
label, linesAct = pltLDSpQHelper(
ax
,TCdf=dfTCsLDSIn
,ID=key # Spaltenname
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct # a Dct with - i.e. {'Q Src':{'color':'red'},...}
,IDPltKey=IDPltKey # Schluesselbezeichner in value
,IDPltValuePostfix=None
,xDctFcts=fctsDct
#,timeShift=pd.Timedelta('1 hour') # pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
if 'RTTM' in value.keys():
if value['RTTM'] in dfTCsLDSIn.columns:
label, linesAct = pltLDSpQHelper(
ax
,TCdf=dfTCsLDSIn
,ID=value['RTTM']
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=' RTTM'
,xDctFcts=fctsDct
#,timeShift=pd.Timedelta('0 hour') #pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,value['RTTM']))
if not dfTCsOPC.empty:
logger.debug("{0:s}{1:s}".format(logStr,'# 1. Achse p OPC'))
for key, value in pDctOPC.items():
if key in dfTCsOPC.columns:
label, linesAct = pltLDSpQHelper(
ax
,TCdf=dfTCsOPC
,ID=key
,xDctValue=value
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=None
,xDctFcts=fctsDct
,timeShift=dfTCsOPCScenTimeShift
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
ylimp,yticksp=pltLDSpQHelperYLimAndTicks(
dfTCsLDSIn
,pDct.keys()
,ylim=ylimp
,yticks=yticksp
,ylimxlim=ylimpxlim
,xlim=xlim
,ySpanMin=ySpanMin
,yGridSteps=yGridSteps
)
ax.set_ylim(ylimp)
ax.set_yticks(yticksp)
ax.grid()
ax.set_zorder(10)
ax.patch.set_visible(False)
ax.set_ylabel(ylabelp)
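# The p-axis is raised above the twinned axes (zorder 10) and its patch is made
# invisible so that the curves drawn on the twinned Q/SID axes stay visible behind it.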
# 2. y-Achse Q ----------------------------------------
logger.debug("{0:s}{1:s}".format(logStr,'# 2. Achse Q'))
ax2 = ax.twinx()
axes['Q']=ax2
pltHelperX(
ax2
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+yTwinedAxesPosDeltaHP
)
for key, value in QDct.items():
if key in dfTCsLDSIn.columns:
label, linesAct = pltLDSpQHelper(
ax2
,TCdf=dfTCsLDSIn
,ID=key
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=None
,xDctFcts=fctsDct
# ,timeShift=pd.Timedelta('0 hour') # pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
if 'RTTM' in value.keys() and plotRTTM:
if value['RTTM'] in dfTCsLDSIn.columns:
label, linesAct = pltLDSpQHelper(
ax2
,TCdf=dfTCsLDSIn
,ID=value['RTTM']
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=' RTTM'
,xDctFcts=fctsDct
#,timeShift=pd.Timedelta('0 hour') #pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,value['RTTM']))
if not dfTCsOPC.empty:
logger.debug("{0:s}{1:s}".format(logStr,'# 2. Achse Q OPC'))
for key, value in QDctOPC.items():
if key in dfTCsOPC.columns:
label, linesAct = pltLDSpQHelper(
ax2
,TCdf=dfTCsOPC
,ID=key
,xDctValue=value
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=None
,xDctFcts=fctsDct
,timeShift=dfTCsOPCScenTimeShift
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
pltLDSHelperY(ax2)
ylimQ,yticksQ=pltLDSpQHelperYLimAndTicks(
dfTCsLDSIn
,QDct.keys()
,ylim=ylimQ
,yticks=yticksQ
,ylimxlim=ylimQxlim
,xlim=xlim
,ySpanMin=ySpanMin
,yGridSteps=yGridSteps
)
ax2.set_ylim(ylimQ)
ax2.set_yticks(yticksQ)
ax2.grid()
ax2.set_ylabel(ylabelQ)
# ggf. 3. Achse
if not dfTCsSIDEvents.empty:
logger.debug("{0:s}{1:s}".format(logStr,'# 3. Achse SID'))
ax3 = ax.twinx()
axes['SID']=ax3
pltHelperX(
ax3
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+2*yTwinedAxesPosDeltaHP
)
if dfTCsSIDEventsInXlimOnly:
# auf xlim beschränken
dfTCsSIDEventsPlot=dfTCsSIDEvents[
(dfTCsSIDEvents.index-dfTCsSIDEventsTimeShift>=xlim[0])
&
(dfTCsSIDEvents.index-dfTCsSIDEventsTimeShift<=xlim[1])
]
# weiter beschränken auf die, die in xlim mind. 1 Eintrag haben
dfTCsSIDEventsPlot=dfTCsSIDEventsPlot.dropna(axis=1,how='all')
else:
dfTCsSIDEventsPlot=dfTCsSIDEvents
# doppelte bzw. mehrfache Spaltennamen eliminieren (das waere ein Aufruf-Fehler)
dfTCsSIDEventsPlot = dfTCsSIDEventsPlot.loc[:,~dfTCsSIDEventsPlot.columns.duplicated()]
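# Duplicated column names are dropped unconditionally (they would indicate a caller
# error); for performance, the rows handed to the plot helper below are additionally
# restricted to the (time-shifted) xlim window.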
logger.debug("{:s}dfTCsSIDEventsPlot.dropna(how='all'): {:s}".format(logStr,dfTCsSIDEventsPlot.dropna(how='all').to_string()))
if not dfTCsSIDEventsPlot.dropna(how='all').empty: # mind. 1 Ereignis in irgendeiner Spalte muss ueberbleiben
# aus Performanzgruenden wird nur zum Plot gegeben, was in xlim auch zu sehen sein wird
dfTCsSIDEventsPlot2=dfTCsSIDEventsPlot[
( dfTCsSIDEventsPlot.index-dfTCsSIDEventsTimeShift>=xlim[0])
&
( dfTCsSIDEventsPlot.index-dfTCsSIDEventsTimeShift<=xlim[1])
]
labelsOneCall,scattersOneCall=pltLDSSIDHelper(
ax3
,dfTCsSIDEventsPlot2
,dfTCsSIDEventsTimeShift
,dfTCsSIDEventsyOffset
,pSIDEvents
,valRegExMiddleCmds
,eventCCmds
,eventCStats
,markerDef
,baseColorsDef
)
scatters=scatters+scattersOneCall
pltLDSHelperY(ax3)
ax3.set_ylim(ylim3rd)
ax3.set_yticks(yticks3rd)
ax3.set_ylabel(ylabel3rd)
if plotLegend:
legendHorizontalPos='center'
patternBCp='^p S(rc|nk)'
patternBCQ='^Q S(rc|nk)'
patternBCpQ='^[pQ] S(rc|nk)'
linesp=[line for line in lines if re.search(patternBCp,line) != None]
linesQ=[line for line in lines if re.search(patternBCQ,line) != None]
linespTxt=tuple([lines[line] for line in linesp])
linesQTxt=tuple([lines[line] for line in linesQ])
moreLines=[line for line in lines if re.search(patternBCpQ,line) == None]
moreLinesp=[line for line in moreLines if re.search('^p',line) != None]
moreLinesQ=[line for line in moreLines if re.search('^Q',line) != None]
moreLinespTxt=tuple([lines[line] for line in moreLinesp])
moreLinesQTxt=tuple([lines[line] for line in moreLinesQ])
axes['p'].add_artist(axes['p'].legend(
linespTxt+moreLinespTxt
,linesp+moreLinesp
,loc='upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
axes['Q'].add_artist(axes['Q'].legend(
linesQTxt+moreLinesQTxt
,linesQ+moreLinesQ
,loc='lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if 'SID' in axes.keys() and len(scatters)>0:
if legendHorizontalPos == 'center':
legendHorizontalPosAct=''
else:
legendHorizontalPosAct=' '+legendHorizontalPos
axes['SID'].legend(loc='center'+legendHorizontalPosAct
,framealpha=legendFramealpha
,facecolor=legendFacecolor)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return axes,lines,scatters
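# Minimal call sketch for pltLDSpQAndEvents (illustrative only; dfTCsLDSIn, pDct/QDct
# and the timestamps are hypothetical placeholders):
#
#   ax = plt.figure(figsize=(16., 9.)).add_subplot(111)
#   axesDct, linesDct, scatterLst = pltLDSpQAndEvents(
#        ax
#       ,dfTCsLDSIn=dfTCsLDSIn                    # p/Q time curves, IDs as columns
#       ,pDct=pDct, QDct=QDct                     # which column IDs to plot / label
#       ,xlim=(pd.Timestamp('2021-01-01 00:00'), pd.Timestamp('2021-01-01 06:00'))
#   )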
def pltLDSErgVec(
ax=None # Axes auf die geplottet werden soll (und aus der neue axes ge-twinx-ed werden; plt.gcf().gca() wenn undef.
,dfSegReprVec=pd.DataFrame() # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=None # tuple (xmin,xmax); wenn undef. gelten min/max aus vorgenannten Daten als xlim; wenn Seg angegeben, gilt Seg
,dateFormat='%y.%m.%d: %H:%M:%S'
,bysecond=None #[0,15,30,45]
,byminute=None
,byhour=None
,ylimAL=ylimALD
,yticksAL=yticksALD #[0,10,20,30,40]
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,ylimR=ylimRD #None #(-10,10) #wenn undef., dann min/max dfSegReprVec
,ylimRxlim=False #wenn Wahr und ylimR undef. (None), dann wird xlim beruecksichtigt bei min/max dfSegReprVec
,yticksR=yticksRD #[0,2,4,10,15,30,40] #wenn undef. (None), dann aus ylimR; matplotlib "vergrößert" mit dem Setzen von yTicks ein ebenfalls gesetztes ylim wenn die Ticks außerhalb des ylims liegen
# dito Beschl.
,ylimAC=ylimACD
,ylimACxlim=False
,yticksAC=yticksACD
,ySpanMin=0.9 # wenn ylim R/AC undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,plotLegend=True
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
,attrsDctLDS=attrsDctLDS
,plotLPRate=True
,plotR2FillSeg=True
,plotR2FillDruck=True
,plotAC=True
,plotACCLimits=True
,highlightAreas=True
,Seg_Highlight_Color='cyan'
,Seg_Highlight_Alpha=.1
,Seg_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Seg_HighlightError_Color='peru'
,Seg_Highlight_Alpha_Error=.3
,Seg_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,Druck_Highlight_Color='cyan'
,Druck_Highlight_Alpha=.1
,Druck_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Druck_HighlightError_Color='peru'
,Druck_Highlight_Alpha_Error=.3
,Druck_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,plotTV=True
,plotTVTimerFct=None
,plotTVAmFct=lambda x: x*100
,plotTVAmLabel=plotTVAmLabelD
,ylimTV=ylimTVD
,yticksTV=yticksTVD
,plotDPDT=True
,plotSB_S=True
):
"""
zeichnet Zeitkurven von App LDS Ergebnisvektoren auf ax
return: axes (Dct der Achsen), yLines (Dct der Linien)
Dct der Achsen: 'A': Alarm etc.; 'R': m3/h; 'a': ACC; 'TV': Timer und Leckvolumen
#! Lücken (nicht plotten) wenn keine Zeiten
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
axes={}
yLines={}
try:
if dfSegReprVec.empty and dfDruckReprVec.empty:
logger.error("{0:s}{1:s}".format(logStr,'dfSegReprVec UND dfDruckReprVec leer?! Return.'))
return
if not dfSegReprVec.empty:
# keine komplett leeren Zeilen
dfSegReprVec=dfSegReprVec[~dfSegReprVec.isnull().all(1)]
# keine doppelten Indices
dfSegReprVec=dfSegReprVec[~dfSegReprVec.index.duplicated(keep='last')] # dfSegReprVec.groupby(dfSegReprVec.index).last() # df[~df.index.duplicated(keep='last')]
if not dfDruckReprVec.empty:
# keine komplett leeren Zeilen
dfDruckReprVec=dfDruckReprVec[~dfDruckReprVec.isnull().all(1)]
# keine doppelten Indices
dfDruckReprVec=dfDruckReprVec[~dfDruckReprVec.index.duplicated(keep='last')] # dfDruckReprVec.groupby(dfDruckReprVec.index).last() # df[~df.index.duplicated(keep='last')]
if ax==None:
ax=plt.gcf().gca()
axes['A']=ax
# x-Achse ----------------
if xlim == None:
if not dfSegReprVec.empty:
xlimMin=dfSegReprVec.index[0]
xlimMax=dfSegReprVec.index[-1]
elif not dfDruckReprVec.empty:
xlimMin=dfDruckReprVec.index[0]
xlimMax=dfDruckReprVec.index[-1]
xlim=(xlimMin,xlimMax)
(xlimMin,xlimMax)=xlim
ax.set_xlim(xlim)
logger.debug("{0:s}bysecond: {1:s}".format(logStr,str(bysecond)))
logger.debug("{0:s}byminute: {1:s}".format(logStr,str(byminute)))
logger.debug("{0:s}byhour: {1:s}".format(logStr,str(byhour)))
logger.debug("{0:s}dateFormat: {1:s}".format(logStr,dateFormat))
pltHelperX(
ax
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart
)
# 1. Achse Alarm -----------------------
if not dfSegReprVec.empty and highlightAreas:
tPairs=findAllTimeIntervalls(dfSegReprVec,Seg_Highlight_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Seg_Highlight_Alpha, color=Seg_Highlight_Color)
tPairs=findAllTimeIntervalls(dfSegReprVec,Seg_HighlightError_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Seg_Highlight_Alpha_Error, color=Seg_HighlightError_Color)
if not dfDruckReprVec.empty and highlightAreas:
tPairs=findAllTimeIntervalls(dfDruckReprVec,Druck_Highlight_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Druck_Highlight_Alpha, color=Druck_Highlight_Color)
tPairs=findAllTimeIntervalls(dfDruckReprVec,Druck_HighlightError_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Druck_Highlight_Alpha_Error, color=Druck_HighlightError_Color)
if not dfSegReprVec.empty:
lines = pltLDSErgVecHelper(ax,dfSegReprVec,'AL_S',attrsDctLDS['Seg_AL_S_Attrs'])
yLines['AL_S Seg']=lines[0]
if not dfDruckReprVec.empty:
lines = pltLDSErgVecHelper(ax,dfDruckReprVec,'AL_S',attrsDctLDS['Druck_AL_S_Attrs'])
yLines['AL_S Drk']=lines[0]
if not dfSegReprVec.empty and plotSB_S:
lines = pltLDSErgVecHelper(ax,dfSegReprVec,'SB_S',attrsDctLDS['Seg_SB_S_Attrs'],fct=lambda x: x*10)
yLines['SB_S Seg']=lines[0]
if not dfDruckReprVec.empty and plotSB_S:
lines = pltLDSErgVecHelper(ax,dfDruckReprVec,'SB_S',attrsDctLDS['Druck_SB_S_Attrs'],fct=lambda x: x*10)
yLines['SB_S Drk']=lines[0]
ax.set_ylim(ylimAL)
ax.set_yticks(yticksAL)
ax.grid()
ax.set_zorder(10)
ax.patch.set_visible(False)
ax.set_ylabel('A [0/10/20] u. 10x B [0/1/2/3/4]')
# 2. y-Achse R (Raten) ----------------------------------------
ax2 = ax.twinx()
axes['R']=ax2
pltHelperX(
ax2
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+yTwinedAxesPosDeltaHP
)
pltLDSHelperY(ax2)
if not dfSegReprVec.empty:
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'MZ_AV',attrsDctLDS['Seg_MZ_AV_Attrs'])
yLines['MZ_AV (R1) Seg']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'LR_AV',attrsDctLDS['Seg_LR_AV_Attrs'])
yLines['LR_AV (R2) Seg']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'NG_AV',attrsDctLDS['Seg_NG_AV_Attrs'])
yLines['NG_AV Seg']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'QM_AV',attrsDctLDS['Seg_QM16_AV_Attrs'],fct=lambda x: x*1.6/100.)
yLines['QM16_AV Seg']=lines[0]
if plotLPRate:
# R2 = R1 - LP
# R2 - R1 = -LP
# LP = R1 - R2
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'LP_AV',attrsDctLDS['Seg_LP_AV_Attrs'])
yLines['LP_AV Seg']=lines[0]
if plotR2FillSeg:
df=dfSegReprVec
df=df.reindex(pd.date_range(start=df.index[0], end=df.index[-1], freq='1s'))
df=df.fillna(method='ffill').fillna(method='bfill')
# R2 unter 0
dummy=ax2.fill_between(df.index, df['LR_AV'],0
,where=df['LR_AV']<0,color='grey',alpha=.2)
# zwischen R2 und 0
dummy=ax2.fill_between(df.index, 0, df['LR_AV']
,where=df['LR_AV']>0
#,color='yellow',alpha=.1
,color='red',alpha=.1
)
# R2 über 0 aber unter NG
dummy=ax2.fill_between(df.index, df['LR_AV'], df['NG_AV']
,where=(df['LR_AV']>0) & (df['LR_AV']<df['NG_AV'])
#,color='red',alpha=.1
,color='yellow',alpha=.1
)
# R2 über NG
dummy=ax2.fill_between(df.index, df['LR_AV'], df['NG_AV']
,where=df['LR_AV']>df['NG_AV']
,color='red',alpha=.2)
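# Colour bands for the SEG residual R2 (LR_AV): grey between R2 and 0 while R2 is
# negative, red between 0 and R2 while R2 is positive, yellow between R2 and NG while
# R2 stays below NG, and a stronger red between NG and R2 once R2 exceeds NG.
# The same banding is repeated for the DRUCK result vector below.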
if not dfDruckReprVec.empty:
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'MZ_AV',attrsDctLDS['Druck_MZ_AV_Attrs'])
yLines['MZ_AV (R1) Drk']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'LR_AV',attrsDctLDS['Druck_LR_AV_Attrs'])
yLines['LR_AV (R2) Drk']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'NG_AV',attrsDctLDS['Druck_NG_AV_Attrs'])
yLines['NG_AV Drk']=lines[0]
if plotLPRate:
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'LP_AV',attrsDctLDS['Druck_LP_AV_Attrs'])
yLines['LP_AV Drk']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'QM_AV',attrsDctLDS['Druck_QM16_AV_Attrs'],fct=lambda x: x*1.6/100.)
yLines['QM16_AV Drk']=lines[0]
if plotR2FillDruck:
df=dfDruckReprVec
df=df.reindex(pd.date_range(start=df.index[0], end=df.index[-1], freq='1s'))
df=df.fillna(method='ffill').fillna(method='bfill')
# R2 unter 0
dummy=ax2.fill_between(df.index, df['LR_AV'],0
,where=df['LR_AV']<0,color='grey',alpha=.4)
# zwischen R2 und 0
dummy=ax2.fill_between(df.index, 0, df['LR_AV']
,where=df['LR_AV']>0
#,color='yellow',alpha=.1
,color='red',alpha=.1
)
# R2 über 0 aber unter NG
dummy=ax2.fill_between(df.index, df['LR_AV'], df['NG_AV']
,where=(df['LR_AV']>0) & (df['LR_AV']<df['NG_AV'])
#,color='red',alpha=.1
,color='yellow',alpha=.1
)
# R2 über NG
dummy=ax2.fill_between(df.index, df['LR_AV'], df['NG_AV']
,where=df['LR_AV']>df['NG_AV']
,color='red',alpha=.2)
ylimSeg,yticksSeg=pltLDSErgVecHelperYLimAndTicks(
dfSegReprVec
,'LR_AV'
,ylim=ylimR
,yticks=yticksR
,ylimxlim=ylimRxlim
,xlim=xlim
,ySpanMin=ySpanMin
)
logger.debug("{0:s}ylimRSeg: {1:s} yticksRSeg: {2:s}".format(logStr,str(ylimSeg),str(yticksSeg)))
ylimDrk,yticksDrk=pltLDSErgVecHelperYLimAndTicks(
dfDruckReprVec
,'LR_AV'
,ylim=ylimR
,yticks=yticksR
,ylimxlim=ylimRxlim
,xlim=xlim
,ySpanMin=ySpanMin
)
logger.debug("{0:s}ylimRDrk: {1:s} yticksRDrk: {2:s}".format(logStr,str(ylimDrk),str(yticksDrk)))
if ylimSeg[1]>=ylimDrk[1]:
ylimR=ylimSeg
yticksR=yticksSeg
else:
ylimR=ylimDrk
yticksR=yticksDrk
logger.debug("{0:s}ylimR: {1:s} yticksR: {2:s}".format(logStr,str(ylimR),str(yticksR)))
ax2.set_ylim(ylimR)
ax2.set_yticks(yticksR)
ax2.grid()
ax2.set_ylabel('R1, R2, NG, LP (R1-R2), QM 1.6% [Nm³/h]')
# 3. y-Achse Beschleunigung ----------------------------------------
if plotAC:
# 3. y-Achse Beschleunigung -------------------------------------------------
ax3 = ax.twinx()
axes['a']=ax3
pltHelperX(
ax3
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+2*yTwinedAxesPosDeltaHP
)
pltLDSHelperY(ax3)
if not dfSegReprVec.empty:
lines = pltLDSErgVecHelper(ax3,dfSegReprVec,'AC_AV',attrsDctLDS['Seg_AC_AV_Attrs'])
yLines['AC_AV Seg']=lines[0]
lines = pltLDSErgVecHelper(ax3,dfSegReprVec,'ACF_AV',attrsDctLDS['Seg_ACF_AV_Attrs'])
yLines['ACF_AV Seg']=lines[0]
if not dfDruckReprVec.empty:
lines = pltLDSErgVecHelper(ax3,dfDruckReprVec,'AC_AV',attrsDctLDS['Druck_AC_AV_Attrs'])
yLines['AC_AV Drk']=lines[0]
lines = pltLDSErgVecHelper(ax3,dfDruckReprVec,'ACF_AV',attrsDctLDS['Druck_ACF_AV_Attrs'])
yLines['ACF_AV Drk']=lines[0]
# ACC Limits
if plotACCLimits:
if not dfSegReprVec.empty:
# +
line=ax3.axhline(y=dfSegReprVec['ACCST_AV'].max())
for prop,value in attrsDctLDS['Seg_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
line=ax3.axhline(y=dfSegReprVec['ACCTR_AV'].max())
for prop,value in attrsDctLDS['Seg_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
if not dfDruckReprVec.empty:
# +
line=ax3.axhline(y=dfDruckReprVec['ACCST_AV'].max())
for prop,value in attrsDctLDS['Druck_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
line=ax3.axhline(y=dfDruckReprVec['ACCTR_AV'].max())
for prop,value in attrsDctLDS['Druck_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
if not dfSegReprVec.empty:
# -
line=ax3.axhline(y=-dfSegReprVec['ACCST_AV'].max())
for prop,value in attrsDctLDS['Seg_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
line=ax3.axhline(y=-dfSegReprVec['ACCTR_AV'].max())
for prop,value in attrsDctLDS['Seg_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
if not dfDruckReprVec.empty:
# -
line=ax3.axhline(y=-dfDruckReprVec['ACCST_AV'].max())
for prop,value in attrsDctLDS['Druck_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
line=ax3.axhline(y=-dfDruckReprVec['ACCTR_AV'].max())
for prop,value in attrsDctLDS['Druck_ACC_Limits_Attrs'].items():
plt.setp(line,"{:s}".format(prop),value)
ylimSeg,yticksSeg=pltLDSErgVecHelperYLimAndTicks(
dfSegReprVec
,'AC_AV'
,ylim=ylimAC
,yticks=yticksAC
,ylimxlim=ylimACxlim
,xlim=xlim
,ySpanMin=ySpanMin
)
logger.debug("{0:s}ylimACSeg: {1:s} yticksACSeg: {2:s}".format(logStr,str(ylimSeg),str(yticksSeg)))
ylimDrk,yticksDrk=pltLDSErgVecHelperYLimAndTicks(
dfDruckReprVec
,'AC_AV'
,ylim=ylimAC
,yticks=yticksAC
,ylimxlim=ylimACxlim
,xlim=xlim
,ySpanMin=ySpanMin
)
logger.debug("{0:s}ylimACDrk: {1:s} yticksACDrk: {2:s}".format(logStr,str(ylimDrk),str(yticksDrk)))
if ylimSeg[1]>=ylimDrk[1]:
ylimAC=ylimSeg
yticksAC=yticksSeg
else:
ylimAC=ylimDrk
yticksAC=yticksDrk
logger.debug("{0:s}ylimAC: {1:s} yticksAC: {2:s}".format(logStr,str(ylimAC),str(yticksAC)))
ax3.set_ylim(ylimAC)
ax3.set_yticks(yticksAC)
ax3.set_ylabel('a [mm/s²]')
# 4. y-Achse Timer und Volumen ----------------------------------------
if plotTV:
# 4. y-Achse Timer und Volumen ----------------------------------------
ax4 = ax.twinx()
axes['TV']=ax4
yPos=yTwinedAxesPosDeltaHPStart+2*yTwinedAxesPosDeltaHP
if plotAC:
yPos=yPos+yTwinedAxesPosDeltaHP
pltHelperX(
ax4
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yPos
)
pltLDSHelperY(ax4)
if not dfSegReprVec.empty:
# TIMER_AV
lines = pltLDSErgVecHelper(ax4,dfSegReprVec,'TIMER_AV',attrsDctLDS['Seg_TIMER_AV_Attrs'],fct=plotTVTimerFct)
yLines['TIMER_AV Seg']=lines[0]
# AM_AV
lines = pltLDSErgVecHelper(ax4,dfSegReprVec,'AM_AV',attrsDctLDS['Seg_AM_AV_Attrs'],fct=plotTVAmFct)
yLines['AM_AV Seg']=lines[0]
if not dfDruckReprVec.empty:
# TIMER_AV
lines = pltLDSErgVecHelper(ax4,dfDruckReprVec,'TIMER_AV',attrsDctLDS['Druck_TIMER_AV_Attrs'],fct=plotTVTimerFct)
yLines['TIMER_AV Drk']=lines[0]
# AM_AV
lines = pltLDSErgVecHelper(ax4,dfDruckReprVec,'AM_AV',attrsDctLDS['Druck_AM_AV_Attrs'],fct=plotTVAmFct)
yLines['AM_AV Drk']=lines[0]
if not dfSegReprVec.empty or not dfDruckReprVec.empty:
ax4.set_ylim(ylimTV)
ax4.set_yticks(yticksTV)
ax4.set_ylabel(plotTVAmLabel)
ax4.grid()
# 5. y-Achse DPDT ----------------------------------------
if plotDPDT and (not dfSegReprVec.empty or not dfDruckReprVec.empty):
# Min. ermitteln
DPDT_REF_MinSEG=0
if not dfSegReprVec.empty:
if 'DPDT_REF' in dfSegReprVec.columns.to_list():
s=dfSegReprVec.loc[xlim[0]:xlim[1],'DPDT_REF'].dropna()
if not s.empty:
DPDT_REF_MinSEG=s.min()
DPDT_REF_MinDruck=0
if not dfDruckReprVec.empty:
if 'DPDT_REF' in dfDruckReprVec.columns.to_list():
s=dfDruckReprVec.loc[xlim[0]:xlim[1],'DPDT_REF'].dropna()
if not s.empty:
DPDT_REF_MinDruck=s.min()
DPDT_REF_Min=min(DPDT_REF_MinSEG,DPDT_REF_MinDruck)
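# The DPDT axis is only drawn if a negative DPDT_REF exists inside xlim; otherwise
# there is nothing to plot (the ticks further down are all derived from DPDT_REF_Min < 0).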
if DPDT_REF_Min >= 0:
pass # es gibt nichts zu plotten
else:
# Max ermitteln
maxSeg=DPDT_REF_Min
if not dfSegReprVec.empty:
if 'DPDT_REF' in dfSegReprVec.columns.to_list():
s=dfSegReprVec.loc[xlim[0]:xlim[1],'DPDT_REF'].dropna()
s=s[s<0]
if not s.empty:
maxSeg=s.max()
maxDruck=DPDT_REF_Min
if not dfDruckReprVec.empty:
if 'DPDT_REF' in dfDruckReprVec.columns.to_list():
s=dfDruckReprVec.loc[xlim[0]:xlim[1],'DPDT_REF'].dropna()
s=s[s<0]
if not s.empty:
maxDruck=s.max()
DPDT_REF_Max=max(maxSeg,maxDruck)
# 5. y-Achse DPDT ----------------------------------------
ax5 = ax.twinx()
axes['DPDT']=ax5
yPos=yTwinedAxesPosDeltaHPStart+2*yTwinedAxesPosDeltaHP
if plotAC:
yPos=yPos+yTwinedAxesPosDeltaHP
if plotTV:
yPos=yPos+yTwinedAxesPosDeltaHP
pltHelperX(
ax5
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yPos
)
pltLDSHelperY(ax5)
if not dfSegReprVec.empty:
if 'DPDT_REF' in dfSegReprVec.columns.to_list():
lines = pltLDSErgVecHelper(ax5,dfSegReprVec,'DPDT_REF',attrsDctLDS['Seg_DPDT_REF_Attrs'],fct=None)
yLines['DPDT_REF Seg']=lines[0]
df=dfSegReprVec.loc[xlim[0]:xlim[1],:].query('DPDT_AV < 0')
#logger.debug("{0:s}df (neg. DPDT): {1:s}".format(logStr,df.to_string()))
lines = pltLDSErgVecHelper(ax5,df,'DPDT_AV',attrsDctLDS['Seg_DPDT_AV_Attrs'],fct=None)
yLines['DPDT_AV Seg']=lines[0]
if not dfDruckReprVec.empty:
if 'DPDT_REF' in dfDruckReprVec.columns.to_list():
lines = pltLDSErgVecHelper(ax5,dfDruckReprVec,'DPDT_REF',attrsDctLDS['Druck_DPDT_REF_Attrs'],fct=None)
yLines['DPDT_REF Drk']=lines[0]
df=dfDruckReprVec.loc[xlim[0]:xlim[1],:].query('DPDT_AV < 0')
#logger.debug("{0:s}df (neg. DPDT): {1:s}".format(logStr,df.to_string()))
lines = pltLDSErgVecHelper(ax5,df,'DPDT_AV',attrsDctLDS['Druck_DPDT_AV_Attrs'],fct=None)
yLines['DPDT_AV Drk']=lines[0]
yTickList=[DPDT_REF_Min*10
,DPDT_REF_Min/0.9 # bei einem Vorhaltemass von 0.9 steht hier x; 0.9 X x kann man an den anderen beiden Ticks ablesen
#,DPDT_REF_Min
,0
,DPDT_REF_Min*-1
]
ax5.set_ylim(yTickList[0],yTickList[-1])
ax5.set_yticks([round(yTick,2) for yTick in yTickList])
if DPDT_REF_Max > DPDT_REF_Min:
ax5.set_ylabel("bar/Minute (max. Wert: {:6.3f})".format(DPDT_REF_Max))
else:
ax5.set_ylabel('bar/Minute')
ax5.grid()
if plotLegend:
legendHorizontalPos='center'
if not dfSegReprVec.empty:
if dfDruckReprVec.empty:
loc=legendLoc # Vorgabe
else:
loc='upper '+legendHorizontalPos # beide: fix
patternSeg='Seg$'
axes['A'].add_artist(axes['A'].legend(
tuple([yLines[line] for line in yLines if re.search(patternSeg,line) != None])
,tuple([line for line in yLines if re.search(patternSeg,line) != None])
,loc=loc #'upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if not dfDruckReprVec.empty:
if dfSegReprVec.empty:
loc=legendLoc # Vorgabe
else:
loc='lower '+legendHorizontalPos # beide: fix
patternDruck='Drk$'
axes['A'].add_artist(axes['A'].legend(
tuple([yLines[line] for line in yLines if re.search(patternDruck,line) != None])
,tuple([line for line in yLines if re.search(patternDruck,line) != None])
,loc=loc #'lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return axes,yLines
def pltHelperX(
ax
,dateFormat='%d.%m.%y: %H:%M:%S'
,bysecond=None # [0,15,30,45]
,byminute=None
,byhour=None
,yPos=-0.0125 #: (i.d.R. negativer) Abstand der y-Achse von der Zeichenfläche; default: -0.0125
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
logger.debug("{0:s}bysecond: {1:s}".format(logStr,str(bysecond)))
logger.debug("{0:s}byminute: {1:s}".format(logStr,str(byminute)))
logger.debug("{0:s}byhour: {1:s}".format(logStr,str(byhour)))
logger.debug("{0:s}dateFormat: {1:s}".format(logStr,dateFormat))
if bysecond != None:
majLocatorTmp=mdates.SecondLocator(bysecond=bysecond)
elif byminute != None:
majLocatorTmp=mdates.MinuteLocator(byminute=byminute)
elif byhour != None:
majLocatorTmp=mdates.HourLocator(byhour=byhour)
else:
majLocatorTmp=mdates.HourLocator(byhour=[0,12])
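# Locator precedence: bysecond beats byminute beats byhour; if none of them is given,
# the major ticks default to full hours at 00:00 and 12:00.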
majFormatterTmp=mdates.DateFormatter(dateFormat)
logger.debug("{0:s}ax.xaxis.set_major_locator ...".format(logStr))
ax.xaxis.set_major_locator(majLocatorTmp)
logger.debug("{0:s}ax.xaxis.set_major_formatter ...".format(logStr))
ax.xaxis.set_major_formatter(majFormatterTmp)
#logger.debug("{0:s}ax.get_xticks(): {1:s}".format(logStr,str(ax.get_xticks())))
logger.debug("{0:s}setp(ax.xaxis.get_majorticklabels() ...".format(logStr))
dummy=plt.setp(ax.xaxis.get_majorticklabels(),rotation='vertical',ha='center')
ax.spines["left"].set_position(("axes",yPos))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltLDSHelperY(
ax
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
pltMakePatchSpinesInvisible(ax)
ax.spines['left'].set_visible(True)
ax.yaxis.set_label_position('left')
ax.yaxis.set_ticks_position('left')
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
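# Illustrative usage sketch (editor's addition, not part of the original module API):
# shows how pltHelperX and pltLDSHelperY might be combined to format a time axis.
# The sample data, the 15-minute tick grid and the date format are assumptions made
# for illustration only.
def _pltHelperXUsageSketch():
    # build a small time-indexed series and plot it
    idx = pd.date_range('2021-01-01 00:00', periods=240, freq='min')
    s = pd.Series(np.sin(np.linspace(0., 2.*np.pi, len(idx))), index=idx)
    fig, ax = plt.subplots()
    ax.plot(idx, s.values)
    # major ticks every 15 minutes, labels rotated vertically by pltHelperX
    pltHelperX(ax, dateFormat='%d.%m.%y: %H:%M:%S', byminute=[0, 15, 30, 45])
    # keep only the left spine visible (ticks and label on the left)
    pltLDSHelperY(ax)
    return fig, ax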
def pltLDSErgVecHelperYLimAndTicks(
dfReprVec
,dfReprVecCol
,ylim=None #(-10,10) # wenn undef., dann min/max dfReprVec
,yticks=None #[-10,0,10] # wenn undef., dann aus dem Ergebnis von ylim
,ylimxlim=False #wenn Wahr und ylim undef., dann wird nachf. xlim beruecksichtigt bei min/max dfReprVec
,xlim=None
,ySpanMin=0.1 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
):
"""
Returns: ylim,yticks
    The y-value range ylim is determined symmetrically about the x-axis;
    yticks plays no role in that determination.
    If ylim or yticks are already defined, they are returned unchanged.
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
if ylim != None:
# der y-Wertebereich ist explizit definiert
pass
else:
if not dfReprVec.empty and not dfReprVec.loc[:,dfReprVecCol].isnull().all().all():
if not ylimxlim:
ylimmin=dfReprVec.loc[:,dfReprVecCol].min()
ylimmax=dfReprVec.loc[:,dfReprVecCol].max()
else:
(xlimMin,xlimMax)=xlim
if not dfReprVec.loc[xlimMin:xlimMax,dfReprVecCol].isnull().all().all():
ylimmin=dfReprVec.loc[xlimMin:xlimMax,dfReprVecCol].min()
ylimmax=dfReprVec.loc[xlimMin:xlimMax,dfReprVecCol].max()
else:
ylimmin=0
ylimmax=0
ylimminR=round(ylimmin,0)
ylimmaxR=round(ylimmax,0)
if ylimminR > ylimmin:
ylimminR=ylimminR-1
if ylimmaxR < ylimmax:
ylimmaxR=ylimmaxR+1
ylimminAbsR=math.fabs(ylimminR)
# B auf den extremaleren Wert
ylimB=max(ylimminAbsR,ylimmaxR)
if ylimB < ySpanMin:
# B auf Mindestwert
ylimB=ySpanMin
## Differenz < Mindestwert: B+
#if math.fabs(ylimmax-ylimmin) < ySpanMin:
# ylimB=.5*(ylimminAbs+ylimmax)+ySpanMin
ylim=(-ylimB,ylimB)
else:
ylim=(-ySpanMin,ySpanMin)
if yticks != None:
# die y-Ticks sind explizit definiert
pass
else:
# aus Wertebereich
(ylimMin,ylimMax)=ylim
yticks=[ylimMin,0,ylimMax]
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return ylim,yticks
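# Illustrative sketch (editor's addition): demonstrates the symmetric ylim/yticks
# determination of pltLDSErgVecHelperYLimAndTicks; the column name and the values
# are assumptions for illustration only.
def _pltLDSErgVecHelperYLimAndTicksSketch():
    # values between -2.3 and 3.7: the extremes are rounded outwards to -3/+4,
    # the larger magnitude (4) is mirrored about the x-axis
    df = pd.DataFrame({'AL_S': [-2.3, 0.0, 3.7]})
    ylim, yticks = pltLDSErgVecHelperYLimAndTicks(df, 'AL_S')
    # expected result: ylim=(-4.0, 4.0), yticks=[-4.0, 0, 4.0]
    return ylim, yticks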
def pltLDSpQHelperYLimAndTicks(
dfReprVec
,dfReprVecCols
,ylim=None # wenn undef., dann min/max dfReprVec
,yticks=None # wenn undef., dann aus ylimR
,ylimxlim=False # wenn Wahr und ylim undef., dann wird nachf. xlim beruecksichtigt bei min/max dfReprVec
,xlim=None # x-Wertebereich
,ySpanMin=0.1 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,yGridSteps=yGridStepsD
):
"""
Returns: ylim,yticks
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if ylim != None:
# der y-Wertebereich ist explizit definiert
pass
else:
df=dfReprVec.loc[:,[col for col in dfReprVecCols]]
if not ylimxlim:
# Extremalwerte Analysebereich
ylimmin=df.min().min()
ylimmax=df.max().max()
else:
if xlim == None:
logger.error("{0:s} xlim muss angegeben sein wenn ylimxlim Wahr gesetzt wird. Weiter mit ylimxlim Falsch.".format(logStr))
ylimmin=df.min().min()
ylimmax=df.max().max()
else:
# Extremalwerte x-Wertebereich
(xlimMin,xlimMax)=xlim
# Extremalwerte Analysebereich
ylimmin=df.loc[xlimMin:xlimMax,:].min().min()
ylimmax=df.loc[xlimMin:xlimMax,:].max().max()
logger.debug("{0:s} ylimmin={1:10.2f} ylimmax={2:10.2f}.".format(logStr,ylimmin,ylimmax))
if math.fabs(ylimmax-ylimmin) < ySpanMin:
ylimmax=ylimmin+ySpanMin
logger.debug("{0:s} ylimmin={1:10.2f} ylimmax={2:10.2f}.".format(logStr,ylimmin,ylimmax))
ylimMinR=round(ylimmin,0)
ylimMaxR=round(ylimmax,0)
if ylimMinR>ylimmin:
ylimMinR=ylimMinR-1
if ylimMaxR<ylimmax:
ylimMaxR=ylimMaxR+1
logger.debug("{0:s} ylimMinR={1:10.2f} ylimMaxR={2:10.2f}.".format(logStr,ylimMinR,ylimMaxR))
ylim=(ylimMinR,ylimMaxR)
if yticks != None:
# die y-Ticks sind explizit definiert
pass
else:
# aus Wertebereich
(ylimMin,ylimMax)=ylim
if yGridSteps==0:
yticks=[ylimMin,ylimMax]
else:
dYGrid=(ylimMax-ylimMin)/yGridSteps
y=np.arange(ylimMin,ylimMax,dYGrid)
if y[-1]<ylimMax:
y=np.append(y,y[-1]+dYGrid)
yticks=y
logger.debug("{0:s} yticks={1:s}.".format(logStr,str(yticks)))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return ylim,yticks
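# Illustrative sketch (editor's addition): pltLDSpQHelperYLimAndTicks rounds the
# common min/max of several columns outwards and builds an evenly spaced tick grid.
# Column names, values and the grid step count are assumptions for illustration only.
def _pltLDSpQHelperYLimAndTicksSketch():
    df = pd.DataFrame({'p Src': [2.2, 3.1, 4.6], 'p Snk': [1.4, 2.0, 2.9]})
    # min/max over both columns is 1.4/4.6 -> ylim=(1.0, 5.0);
    # with yGridSteps=4 the range is divided into 4 equal steps: [1,2,3,4,5]
    ylim, yticks = pltLDSpQHelperYLimAndTicks(df, ['p Src', 'p Snk'], yGridSteps=4)
    return ylim, yticks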
def pltLDSErgVecHelper(
ax
,dfReprVec=pd.DataFrame()
,ID='AL_S' # Spaltenname in dfReprVec
,attrs={}
,fct=None # Function
):
"""
Helper
Returns:
lines: ax.plot-Ergebnis
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
lines=[]
label=ID
x=dfReprVec.index.values
if ID in dfReprVec.columns.to_list():
if fct==None:
y=dfReprVec[ID].values
else:
y=dfReprVec[ID].apply(fct).values
if 'where' in attrs.keys():
logger.debug("{0:s}ID: {1:s}: step-Plot".format(logStr,ID))
lines = ax.step(x,y,label=label
,where=attrs['where'])
else:
lines = ax.plot(x,y,label=label
)
for prop,propValue in [(prop,value) for (prop, value) in attrs.items() if prop not in ['where']]:
plt.setp(lines[0],"{:s}".format(prop),propValue)
else:
logger.warning("{0:s}Spalte: {1:s}: nicht vorhanden?!".format(logStr,ID))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return lines
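# Illustrative sketch (editor's addition): plotting one result-vector column with
# pltLDSErgVecHelper. The 'where' key in attrs switches to a step plot; all other
# keys are applied to the resulting line via plt.setp. The column name, the
# attribute values and the scaling function are assumptions for illustration only.
def _pltLDSErgVecHelperSketch(ax, dfReprVec):
    attrs = {'where': 'post', 'color': 'red', 'lw': 0.8, 'ls': '-.'}
    lines = pltLDSErgVecHelper(ax, dfReprVec, ID='AL_S', attrs=attrs
                               ,fct=lambda x: x*100.)
    return lines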
def pltLDSpQHelper(
ax
,TCdf=pd.DataFrame()
,ID='' # Spaltenname
,xDctValue={} # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs={} # a Dct with - i.e. {'Q Src':{'color':'red'},...}
,IDPltKey='IDPlt' # Schluesselbezeichner in xDctValue (Key in xDctAttrs und xDctFcts)
,IDPltValuePostfix=None # SchluesselPostfix in xDctAttrs und xDctFcts - i.e. ' RTTM'
,xDctFcts={} # a Dct with Fcts - i.e. {'p Src': lambda x: 134.969 + x*10^5/(794.*9.81)}
,timeShift=pd.Timedelta('0 seconds')
):
"""
Helper
Returns:
label: Bezeichner
lines: ax.plot-Ergebnis
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
logger.debug("{:s}Echo der Parameter: xDctFcts: {!s:s}".format(logStr,xDctFcts))
label=''
lines=[]
# nur Not Null plotten
s=TCdf[ID][TCdf[ID].notnull()]
logger.debug("{0:s}timeShift: {1:s}".format(logStr,str(timeShift)))
x=s.index.values+timeShift #TCdf.index.values+timeShift
IDPltValue=None
if IDPltKey in xDctValue.keys():
# es liegt ein Schluessel fuer eine Layout-Informationen vor
IDPltValue=xDctValue[IDPltKey] # koennte auch None sein ... {'IDPlt':None}
if IDPltValue != None and IDPltValuePostfix != None:
IDPltValue=IDPltValue+IDPltValuePostfix
if IDPltValue in xDctFcts.keys():
logger.debug("{:s}Fcts fuer: {:s}".format(logStr,IDPltValue))
fct=xDctFcts[IDPltValue]
y=s.apply(fct).values#TCdf[ID].apply(fct).values
else:
y=s.values #TCdf[ID].values
if IDPltValue != None:
label=IDPltValue+' '+ID
if IDPltValue in xDctAttrs.keys():
if 'where' in xDctAttrs[IDPltValue].keys():
logger.debug("{0:s}ID: {1:s}: step-Plot".format(logStr,ID))
lines = ax.step(x,y
,label=label
,where=xDctAttrs[IDPltValue]['where'])
else:
lines = ax.plot(x,y
,label=label
)
for prop,propValue in [(prop,value) for (prop, value) in xDctAttrs[IDPltValue].items() if prop not in ['where']]:
plt.setp(lines[0],"{:s}".format(prop),propValue)
else:
# es ist kein Layout definiert - einfach plotten
logger.debug("{0:s}IDPltValue: {1:s}: es ist kein Layout definiert - einfach plotten ...".format(logStr,IDPltValue))
lines = ax.plot(x,y
,label=label
)
else:
# es liegt kein Schluessel (oder Wert None) fuer eine Layout-Informationen vor - einfach plotten
label=ID
logger.debug("{0:s}ID: {1:s}: es liegt kein Schluessel (oder kein Wert) fuer eine Layout-Informationen vor - einfach plotten ...".format(logStr,ID))
lines = ax.plot(x,y)
logger.debug("{0:s}label: '{1:s}' len(lines): {2:d}".format(logStr,label,len(lines)))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return label, lines
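# Illustrative sketch (editor's addition): pltLDSpQHelper looks up layout attributes
# and an optional conversion function via the IDPlt key. The column name 'pColumn',
# the RTTM address and the Pa->bar conversion below are assumptions for illustration only.
def _pltLDSpQHelperSketch(ax, TCdf):
    xDctValue = {'IDPlt': 'p Src', 'RTTM': 'IMDI.Objects.X.In.MW.value'}
    xDctAttrs = {'p Src': {'color': 'blue', 'lw': 1.0}}
    xDctFcts = {'p Src': lambda x: x/1.e5}  # assumed conversion Pa -> bar
    label, lines = pltLDSpQHelper(ax, TCdf=TCdf, ID='pColumn'
                                  ,xDctValue=xDctValue, xDctAttrs=xDctAttrs
                                  ,IDPltKey='IDPlt', xDctFcts=xDctFcts
                                  ,timeShift=pd.Timedelta('5 seconds'))
    return label, lines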
def pltLDSSIDHelper(
ax
,dfTCsSIDEvents
,dfTCsScenTimeShift
,dfTCsSIDEventsyOffset # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden); max. Erhöhung: 0.9
,pSIDEvents
,valRegExMiddleCmds
,eventCCmds
,eventCStats
,markerDef
,baseColorsDef
):
"""
Helper
Returns:
labels: Bezeichner
scatters: ax.scatter-Ergebnisse
eventCCmds={ 'Out.AUF':0
,'Out.ZU':1
,'Out.HALT':2}
eventCStats={'In.LAEUFT':3
,'In.LAEUFT_NICHT':4
,'In.ZUST':5
,'Out.AUF':6
,'Out.ZU':7
,'Out.HALT':8
,'In.STOER':9}
    markerDefSchieber=[ # Schiebersymbole
'^' # 0 Auf
,'v' # 1 Zu
,'>' # 2 Halt
# ab hier Zustaende
,'4' # 3 Laeuft
,'3' # 4 Laeuft nicht
,'P' # 5 Zust (gefülltes "dickes" Kreuz)
,'1' # 6 Auf
,'2' # 7 Zu
,'+' # 8 Halt
    ,'x' # 9 Stoer
    ]
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
labels=[]
scatters=[]
# Anzahl der verschiedenen Schieber ermitteln
idxKat={}
idxSchieberLfd=0
for col in dfTCsSIDEvents.columns:
m=re.search(pSIDEvents,col)
valRegExSchieberID=m.group('colRegExSchieberID')
if valRegExSchieberID not in idxKat.keys():
idxKat[valRegExSchieberID]=idxSchieberLfd
idxSchieberLfd=idxSchieberLfd+1
logger.debug("{0:s}Dct idxKat: keys (versch. Schieber - meint versch. Kategorien): {1:s}: values (Index der jeweiligen Kategorie): {2:s}".format(logStr,str(idxKat.keys()),str(idxKat.values())))
dfTCsSIDEventsPlot = dfTCsSIDEvents # hier keine Veränderungen mehr
for col in dfTCsSIDEventsPlot.columns:
m=re.search(pSIDEvents,col)
valRegExSchieberID=m.group('colRegExSchieberID')
idxSchieberLfd=idxKat[valRegExSchieberID]
valRegExEventID=m.group('colRegExEventID')
valRegExMiddle=m.group('colRegExMiddle')
# Markersize
s=plt.rcParams['lines.markersize']**2
# Marker
if valRegExMiddle == valRegExMiddleCmds:
idxMarker=eventCCmds[valRegExEventID]
else:
idxMarker=eventCStats[valRegExEventID]
if valRegExEventID in ['In.ZUST']:
s=s*2.5
if idxMarker < len(markerDef):
m=markerDef[idxMarker]
else:
m=markerDef[-1]
logger.debug("{0:s}{1:s}: idxMarker: Soll: {2:d} MarkerIdx gewählt: {3:d}".format(logStr,col,idxMarker,len(markerDef)-1))
if idxSchieberLfd < len(baseColorsDef):
c=baseColorsDef[idxSchieberLfd]
else:
c=baseColorsDef[-1]
logger.debug("{0:s}{1:s}: idxSchieberLfd: Ist: {2:d} FarbenIdx gewählt: {3:d}".format(logStr,col,idxSchieberLfd,len(baseColorsDef)-1))
colors=[c for idx in range(len(dfTCsSIDEventsPlot.index))] # alle Ereignisse (der Spalte) haben dieselbe Farbe
label=col # alle Ereignisse (der Spalte) haben dasselbe Label
#sDefault=plt.rcParams['lines.markersize']**2
x=dfTCsSIDEventsPlot.index.values+dfTCsScenTimeShift
y=dfTCsSIDEventsPlot[col].values+min(idxSchieberLfd*dfTCsSIDEventsyOffset,.9)
logger.debug("{:s}{:s}: erste und letzte Werte: x:{!s:s} y:{!s:s}...".format(logStr,col,x[::len(x)-1],y[::len(y)-1]))
scatter = ax.scatter(
x
,y
,c=colors
,marker=m
,label=label
,s=s#Default
)
# scatter ist eine PathCollection; Attribut u.a. get_label(): Return the label used for this artist in the legend
# auch wenn y durchgehend Null wird ein scatter zurueckgegeben (d.h. ist ein Legendeneintrag vorhanden)
labels.append(label)
scatters.append(scatter)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return labels, scatters
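# Illustrative sketch (editor's addition): a minimal call of pltLDSSIDHelper.
# The column naming scheme '<ValveID> <Middle>.<Event>' and the regex below are
# assumptions for illustration only; the real pattern pSIDEvents and the event
# dictionaries are defined elsewhere in this module.
def _pltLDSSIDHelperSketch(ax, dfTCsSIDEvents):
    # nested named groups: colRegExEventID carries the '<Middle>.<Event>' part,
    # colRegExMiddle only the '<Middle>' part used to distinguish commands from states
    pSIDEventsSketch = r'(?P<colRegExSchieberID>\S+) (?P<colRegExEventID>(?P<colRegExMiddle>\S+)\.\S+)'
    labels, scatters = pltLDSSIDHelper(
        ax
        ,dfTCsSIDEvents
        ,dfTCsScenTimeShift=pd.Timedelta('0 seconds')
        ,dfTCsSIDEventsyOffset=0.05
        ,pSIDEvents=pSIDEventsSketch
        ,valRegExMiddleCmds='Out'
        ,eventCCmds={'Out.AUF': 0, 'Out.ZU': 1, 'Out.HALT': 2}
        ,eventCStats={'In.LAEUFT': 3, 'In.LAEUFT_NICHT': 4, 'In.ZUST': 5
                      ,'Out.AUF': 6, 'Out.ZU': 7, 'Out.HALT': 8, 'In.STOER': 9}
        ,markerDef=['^', 'v', '>', '4', '3', 'P', '1', '2', '+', 'x']
        ,baseColorsDef=['red', 'blue', 'green'])
    return labels, scatters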
# --- PLOT: Funktionen und Hilfsfunktionen
# -----------------------------------------------
def pltMakeCategoricalCmap(baseColorsDef="tab10",catagoryColors=None,nOfSubCatsReq=3,reversedSubCatOrder=False):
"""
Returns a cmap with nOfCatsReq * nOfSubCatsReq discrete colors.
Parameter:
baseColorsDef: a (discrete) cmap defining the "base"colors
default: tab10
            if baseColorsDef cannot be resolved via get_cmap to a matplotlib.colors.ListedColormap, it is interpreted via to_rgb as a list of colors
in this case catagoryColors is ignored
catagoryColors: a list of "base"colors indices for this cmap
the length of the list is the number of Categories requested: nOfCatsReq
            the cmap's nOfColors must be greater than or equal to nOfCatsReq
default: None (==> nOfCatsReq = cmap's nOfColors)
i.e. [2,8,3] for tab10 is green, yellow (ocher), red
nOfSubCatsReq: number of Subcategories requested
        reversedSubCatOrder: False (default); if True, the last color of a category is the "base"color from baseColorsDef
            reversedSubCatOrder can also be given as a list (one entry per category)
Returns:
cmap with nOfCatsReq * nOfSubCatsReq discrete colors; None if an error occurs
one "base"color per category
nOfSubCatsReq "sub"colors per category
so each category consists of nOfSubCatsReq colors
Raises:
RmError
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import Rm
>>> Rm.pltMakeCategoricalCmap().N
30
>>> Rm.pltMakeCategoricalCmap(catagoryColors=[2,8,3]).N # 2 8 3 in tab10: grün gelb rot
9
>>> baseColorsDef="tab10"
>>> catagoryColors=[2,8,3]
>>> nOfSubCatsReq=4
>>> # grün gelb rot mit je 4 Farben von hell nach dunkel
>>> cm=Rm.pltMakeCategoricalCmap(baseColorsDef=baseColorsDef,catagoryColors=catagoryColors,nOfSubCatsReq=nOfSubCatsReq,reversedSubCatOrder=True)
>>> cm.colors
array([[0.75 , 1. , 0.75 ],
[0.51819172, 0.87581699, 0.51819172],
[0.32570806, 0.75163399, 0.32570806],
[0.17254902, 0.62745098, 0.17254902],
[0.9983871 , 1. , 0.75 ],
[0.91113148, 0.91372549, 0.51165404],
[0.82408742, 0.82745098, 0.30609849],
[0.7372549 , 0.74117647, 0.13333333],
[1. , 0.75 , 0.75142857],
[0.94640523, 0.53069452, 0.53307001],
[0.89281046, 0.33167491, 0.3348814 ],
[0.83921569, 0.15294118, 0.15686275]])
>>> cm2=Rm.pltMakeCategoricalCmap(baseColorsDef=baseColorsDef,catagoryColors=catagoryColors,nOfSubCatsReq=nOfSubCatsReq,reversedSubCatOrder=[False]+2*[True])
>>> cm.colors[nOfSubCatsReq-1]==cm2.colors[0]
array([ True, True, True])
>>> plt.close()
>>> size_DINA6quer=(5.8,4.1)
>>> fig, ax = plt.subplots(figsize=size_DINA6quer)
>>> fig.subplots_adjust(bottom=0.5)
>>> norm=matplotlib.colors.Normalize(vmin=0, vmax=100)
>>> cb=matplotlib.colorbar.ColorbarBase(ax, cmap=cm2,norm=norm,orientation='horizontal')
>>> cb.set_label('baseColorsDef was (via get_cmap) a matplotlib.colors.ListedColormap')
>>> #plt.show()
>>> cm3=Rm.pltMakeCategoricalCmap(baseColorsDef=['b','c','m'],nOfSubCatsReq=nOfSubCatsReq,reversedSubCatOrder=True)
>>> cm3.colors
array([[0.75 , 0.75 , 1. ],
[0.5 , 0.5 , 1. ],
[0.25 , 0.25 , 1. ],
[0. , 0. , 1. ],
[0.75 , 1. , 1. ],
[0.45833333, 0.91666667, 0.91666667],
[0.20833333, 0.83333333, 0.83333333],
[0. , 0.75 , 0.75 ],
[1. , 0.75 , 1. ],
[0.91666667, 0.45833333, 0.91666667],
[0.83333333, 0.20833333, 0.83333333],
[0.75 , 0. , 0.75 ]])
>>> plt.close()
>>> fig, ax = plt.subplots(figsize=size_DINA6quer)
>>> fig.subplots_adjust(bottom=0.5)
>>> norm=matplotlib.colors.Normalize(vmin=0, vmax=100)
>>> cb=matplotlib.colorbar.ColorbarBase(ax, cmap=cm3,norm=norm,orientation='horizontal')
>>> cb.set_label('baseColorsDef was (via to_rgb) a list of colors')
>>> #plt.show()
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
cmap=None
try:
try:
# Farben, "base"colors, welche die cmap hat
nOfColors=plt.get_cmap(baseColorsDef).N
            if catagoryColors is None:
catagoryColors=np.arange(nOfColors,dtype=int)
# verlangte Kategorien
nOfCatsReq=len(catagoryColors)
if nOfCatsReq > nOfColors:
logStrFinal="{0:s}: nOfCatsReq: {1:d} > cmap's nOfColors: {2:d}!".format(logStr,nOfCatsReq,nOfColors)
raise RmError(logStrFinal)
if max(catagoryColors) > nOfColors-1:
logStrFinal="{0:s}: max. Idx of catsReq: {1:d} > cmap's nOfColors-1: {2:d}!".format(logStr,max(catagoryColors),nOfColors-1)
raise RmError(logStrFinal)
# alle Farben holen, welche die cmap hat
ccolors = plt.get_cmap(baseColorsDef)(np.arange(nOfColors,dtype=int))
# die gewuenschten Kategorie"Basis"farben extrahieren
ccolors=[ccolors[idx] for idx in catagoryColors]
except:
listOfColors=baseColorsDef
nOfColors=len(listOfColors)
nOfCatsReq=nOfColors
ccolors=[]
for color in listOfColors:
ccolors.append(list(matplotlib.colors.to_rgb(color)))
finally:
pass
logger.debug("{0:s}ccolors: {1:s}".format(logStr,str(ccolors)))
logger.debug("{0:s}nOfCatsReq: {1:s}".format(logStr,str((nOfCatsReq))))
logger.debug("{0:s}nOfSubCatsReq: {1:s}".format(logStr,str((nOfSubCatsReq))))
# Farben bauen -------------------------------------
# resultierende Farben vorbelegen
cols = np.zeros((nOfCatsReq*nOfSubCatsReq, 3))
# ueber alle Kategoriefarben
if type(reversedSubCatOrder) is not list:
reversedSubCatOrderLst=nOfCatsReq*[reversedSubCatOrder]
else:
reversedSubCatOrderLst=reversedSubCatOrder
logger.debug("{0:s}reversedSubCatOrderLst: {1:s}".format(logStr,str((reversedSubCatOrderLst))))
for i, c in enumerate(ccolors):
rgb=pltMakeCategoricalColors(c,nOfSubColorsReq=nOfSubCatsReq,reversedOrder=reversedSubCatOrderLst[i])
cols[i*nOfSubCatsReq:(i+1)*nOfSubCatsReq,:] = rgb
cmap = matplotlib.colors.ListedColormap(cols)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return cmap
def pltMakePatchSpinesInvisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
def pltHlpAlignMarker(marker,halign='center',valign='middle'):
"""
create markers with specified alignment.
Parameters
----------
marker : a valid marker specification.
See mpl.markers
halign : string, float {'left', 'center', 'right'}
Specifies the horizontal alignment of the marker. *float* values
specify the alignment in units of the markersize/2 (0 is 'center',
-1 is 'right', 1 is 'left').
valign : string, float {'top', 'middle', 'bottom'}
Specifies the vertical alignment of the marker. *float* values
specify the alignment in units of the markersize/2 (0 is 'middle',
-1 is 'top', 1 is 'bottom').
    Returns
    -------
    marker : matplotlib.path.Path
        The marker path, shifted so that the marker is drawn with the requested
        alignment relative to the plot target point at (0, 0).
    Notes
    -----
    The returned Path can be passed directly to ax.plot and ax.scatter, e.g.::
        ax.plot(1, 1, marker=pltHlpAlignMarker('>', 'left'))
"""
if isinstance(halign,str):
halign = {'right': -1.,
'middle': 0.,
'center': 0.,
'left': 1.,
}[halign]
if isinstance(valign,str):
valign = {'top': -1.,
'middle': 0.,
'center': 0.,
'bottom': 1.,
}[valign]
# Define the base marker
bm = markers.MarkerStyle(marker)
# Get the marker path and apply the marker transform to get the
# actual marker vertices (they should all be in a unit-square
# centered at (0, 0))
m_arr = bm.get_path().transformed(bm.get_transform()).vertices
# Shift the marker vertices for the specified alignment.
m_arr[:, 0] += halign / 2
m_arr[:, 1] += valign / 2
return Path(m_arr, bm.get_path().codes)
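# Illustrative sketch (editor's addition): using pltHlpAlignMarker so that a scatter
# symbol is drawn left-aligned to its anchor point instead of centered on it.
def _pltHlpAlignMarkerSketch(ax):
    # the returned Path is passed directly as marker
    sc = ax.scatter([0.5], [0.5], s=20**2
                    ,marker=pltHlpAlignMarker('o', halign='left')
                    ,clip_on=False)
    return sc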
def pltNetFigAx(pDf,**kwds):
"""
Erzeugt eine für die Netzdarstellung verzerrungsfreie Axes-Instanz.
* verwendet gcf() (will return an existing figure if one is open, or it will make a new one if there is no active figure)
* an already existing figure might be created this way: fig=plt.figure(dpi=2*72,linewidth=1.)
* errechnet die verzerrungsfreie Darstellung unter Berücksichtigung einer zukünftigen horizontalen Farblegende
* erzeugt eine Axes-Instanz
* setzt Attribute der Axes-Instanz
* setzt Attribute der Figure-Instanz
Args:
pDf: dataFrame
Coordinates:
* pXCor_i: colName in pDf (default: 'pXCor_i'): x-Start Coordinate of all Edges to be plotted
* pYCor_i: colName in pDf (default: 'pYCor_i'): y-Start Coordinate of all Edges to be plotted
* pXCor_k: colName in pDf (default: 'pXCor_k'): x-End Coordinate of all Edges to be plotted
* pYCor_k: colName in pDf (default: 'pYCor_k'): y-End Coordinate of all Edges to be plotted
Colorlegend:
* CBFraction: fraction of original axes to use for colorbar (default: 0.05)
* CBHpad: fraction of original axes between colorbar and new image axes (default: 0.0275)
Figure:
* pltTitle: title [not suptitle] (default: 'pltNetFigAx')
* figFrameon: figure frame (background): displayed or invisible (default: True)
* figEdgecolor: edge color of the Figure rectangle (default: 'black')
* figFacecolor: face color of the Figure rectangle (default: 'white')
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# Coordinates
if 'pXCor_i' not in keys:
kwds['pXCor_i']='pXCor_i'
if 'pYCor_i' not in keys:
kwds['pYCor_i']='pYCor_i'
if 'pXCor_k' not in keys:
kwds['pXCor_k']='pXCor_k'
if 'pYCor_k' not in keys:
kwds['pYCor_k']='pYCor_k'
# Colorlegend
if 'CBFraction' not in keys:
kwds['CBFraction']=0.05
if 'CBHpad' not in keys:
kwds['CBHpad']=0.0275
# Figure
if 'pltTitle' not in keys:
kwds['pltTitle']='pltNetFigAx'
if 'figFrameon' not in keys:
kwds['figFrameon']=True
if 'figEdgecolor' not in keys:
kwds['figEdgecolor']='black'
if 'figFacecolor' not in keys:
kwds['figFacecolor']='white'
except:
pass
try:
dx=max(pDf[kwds['pXCor_i']].max(),pDf[kwds['pXCor_k']].max())
dy=max(pDf[kwds['pYCor_i']].max(),pDf[kwds['pYCor_k']].max())
# erf. Verhältnis bei verzerrungsfreier Darstellung
dydx=dy/dx
if(dydx>=1):
dxInch=DINA4_x # Hochformat
else:
dxInch=DINA4_y # Querformat
figwidth=dxInch
#verzerrungsfrei: Blattkoordinatenverhaeltnis = Weltkoordinatenverhaeltnis
factor=1-(kwds['CBFraction']+kwds['CBHpad'])
# verzerrungsfreie Darstellung sicherstellen
figheight=figwidth*dydx*factor
# Weltkoordinatenbereich
xlimLeft=0
ylimBottom=0
xlimRight=dx
ylimTop=dy
# plt.figure(dpi=, facecolor=, edgecolor=, linewidth=, frameon=True)
fig = plt.gcf() # This will return an existing figure if one is open, or it will make a new one if there is no active figure.
fig.set_figwidth(figwidth)
fig.set_figheight(figheight)
logger.debug("{:s}dx={:10.2f} dy={:10.2f}".format(logStr,dx,dy))
logger.debug("{:s}figwidth={:10.2f} figheight={:10.2f}".format(logStr,figwidth,figheight))
ax=plt.subplot()
ax.set_xlim(left=xlimLeft)
ax.set_ylim(bottom=ylimBottom)
ax.set_xlim(right=xlimRight)
ax.set_ylim(top=ylimTop)
xTicks=ax.get_xticks()
dxTick = xTicks[1]-xTicks[0]
yTicks=ax.set_yticks([idx*dxTick for idx in range(math.floor(dy/dxTick)+1)])
plt.title(kwds['pltTitle'])
fig.set_frameon(kwds['figFrameon'])
fig.set_edgecolor(kwds['figEdgecolor'])
fig.set_facecolor(kwds['figFacecolor'])
# https://stackoverflow.com/questions/14827650/pyplot-scatter-plot-marker-size
# Size in pts:
# the argument markersize in plot denotes the markersize (i.e. diameter) in points
# the argument s in scatter denotes the markersize**2 in points^2
# so a given plot-marker with markersize=x needs a scatter-marker with s=x**2 if the scatter-marker shall cover the same "area" in points^2
# the "area" of the scatter-marker is proportional to the s param
# What are points - pts:
# the standard size of points in matplotlib is 72 ppi
# 1 point is hence 1/72 inches (1 inch = 1 Zoll = 2.54 cm)
# 1 point = 0.352777.... mm
# points and pixels - px:
# 1 point = dpi/ppi
# the standard dpi in matplotlib is 100
# a scatter-marker whos "area" covers always 10 pixel:
# s=(10*ppi/dpi)**2
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
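# Illustrative sketch (editor's addition): pltNetFigAx only needs the edge start/end
# coordinates to size the figure distortion-free; the coordinates below are made up.
def _pltNetFigAxSketch():
    pDf = pd.DataFrame({'pXCor_i': [0., 10., 20.], 'pYCor_i': [0., 5., 5.]
                        ,'pXCor_k': [10., 20., 30.], 'pYCor_k': [5., 5., 15.]})
    plt.close('all')
    pltNetFigAx(pDf, pltTitle='pltNetFigAx sketch')
    return plt.gca()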
def pltNetNodes(pDf,**kwds):
"""
Scatters NODEs on gca().
Args:
pDf: dataFrame
NODE: Size (Attribute)
* pAttribute: colName (default: 'Attribute') in pDf
            * pSizeFactor: (default: 1.)
* scatter Sy-Area in pts^2 = pSizeFactor * Attribute
NODE: Color (Measure)
* pMeasure: colName (default: 'Measure') in pDf
* pMeasureColorMap (default: plt.cm.autumn)
* pMeasureAlpha (default: 0.9)
* pMeasureClip (default: False)
* CBFixedLimits (default: True)
* CBFixedLimitLow (default: 0.)
* CBFixedLimitHigh (default: 1.)
NODE: 3Classes
* pMeasure3Classes (default: True)
* pMCategory: colName (default: 'MCategory') in pDf
* pMCatTopTxt (default: 'Top')
* pMCatMidTxt (default: 'Middle')
* pMCatBotTxt (default: 'Bottom')
* pMCatTopColor (default: 'palegreen')
* pMCatTopAlpha (default: 0.9)
* pMCatTopClip (default: False)
* pMCatMidColorMap (default: plt.cm.autumn)
* pMCatMidAlpha (default: 0.9)
* pMCatMidClip (default: False)
* pMCatBotColor (default: 'violet')
* pMCatBotAlpha (default: 0.9)
* pMCatBotClip (default: False)
NODE:
* pXCor: colName (default: 'pXCor_i') in pDf
* pYCor: colName (default: 'pYCor_i') in pDf
Returns:
(pcN, vmin, vmax)
* pcN: die mit Farbskala gezeichneten Symbole
* vmin/vmax: die für die Farbskala verwendeten Extremalwerte
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# NODE: Size (Attribute)
if 'pAttribute' not in keys:
kwds['pAttribute']='Attribute'
if 'pSizeFactor' not in keys:
kwds['pSizeFactor']=1.
# NODE: Color (Measure)
if 'pMeasure' not in keys:
kwds['pMeasure']='Measure'
if 'pMeasureColorMap' not in keys:
kwds['pMeasureColorMap']=plt.cm.autumn
if 'pMeasureAlpha' not in keys:
kwds['pMeasureAlpha']=0.9
if 'pMeasureClip' not in keys:
kwds['pMeasureClip']=False
if 'CBFixedLimits' not in keys:
kwds['CBFixedLimits']=True
if 'CBFixedLimitLow' not in keys:
kwds['CBFixedLimitLow']=0.
if 'CBFixedLimitHigh' not in keys:
kwds['CBFixedLimitHigh']=1.
# NODE: 3Classes
if 'pMeasure3Classes' not in keys:
kwds['pMeasure3Classes']=True
if 'pMCategory' not in keys:
kwds['pMCategory']='MCategory'
if 'pMCatTopTxt' not in keys:
kwds['pMCatTopTxt']='Top'
if 'pMCatMidTxt' not in keys:
kwds['pMCatMidTxt']='Middle'
if 'pMCatBotTxt' not in keys:
kwds['pMCatBotTxt']='Bottom'
if 'pMCatTopColor' not in keys:
kwds['pMCatTopColor']='palegreen'
if 'pMCatTopAlpha' not in keys:
kwds['pMCatTopAlpha']=0.9
if 'pMCatTopClip' not in keys:
kwds['pMCatTopClip']=False
if 'pMCatMidColorMap' not in keys:
kwds['pMCatMidColorMap']=plt.cm.autumn
if 'pMCatMidAlpha' not in keys:
kwds['pMCatMidAlpha']=0.9
if 'pMCatMidClip' not in keys:
kwds['pMCatMidClip']=False
if 'pMCatBotColor' not in keys:
kwds['pMCatBotColor']='violet'
if 'pMCatBotAlpha' not in keys:
kwds['pMCatBotAlpha']=0.9
if 'pMCatBotClip' not in keys:
kwds['pMCatBotClip']=False
# NODE:
if 'pXCor' not in keys:
kwds['pXCor']='pXCor_i'
if 'pYCor' not in keys:
kwds['pYCor']='pYCor_i'
except:
pass
try:
ax=plt.gca()
if kwds['pMeasure3Classes']:
pN_top=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatTopTxt'])]
pN_mid=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatMidTxt'])]
pN_bot=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatBotTxt'])]
pN_top_Anz,col=pN_top.shape
pN_mid_Anz,col=pN_mid.shape
pN_bot_Anz,col=pN_bot.shape
pcN_top=ax.scatter(
pN_top[kwds['pXCor']],pN_top[kwds['pYCor']]
,s=kwds['pSizeFactor']*pN_top[kwds['pAttribute']]
,color=kwds['pMCatTopColor']
,alpha=kwds['pMCatTopAlpha']
,edgecolors='face'
,clip_on=kwds['pMCatTopClip'])
logger.debug("{:s}Anzahl mit fester Farbe Top gezeichneter Symbole={:d}".format(logStr,pN_top_Anz))
if not kwds['CBFixedLimits']:
vmin=pN_mid[kwds['pMeasure']].min()
vmax=pN_mid[kwds['pMeasure']].max()
else:
vmin=kwds['CBFixedLimitLow']
vmax=kwds['CBFixedLimitHigh']
pcN=ax.scatter(
pN_mid[kwds['pXCor']],pN_mid[kwds['pYCor']]
,s=kwds['pSizeFactor']*pN_mid[kwds['pAttribute']]
# Farbskala
,cmap=kwds['pMCatMidColorMap']
# Normierung Farbe
,vmin=vmin
,vmax=vmax
# Farbwert
,c=pN_mid[kwds['pMeasure']]
,alpha=kwds['pMCatMidAlpha']
,edgecolors='face'
,clip_on=kwds['pMCatMidClip']
)
logger.debug("{:s}Anzahl mit Farbskala gezeichneter Symbole={:d}".format(logStr,pN_mid_Anz))
pcN_bot=ax.scatter(
pN_bot[kwds['pXCor']],pN_bot[kwds['pYCor']]
,s=kwds['pSizeFactor']*pN_bot[kwds['pAttribute']]
,color=kwds['pMCatBotColor']
,alpha=kwds['pMCatBotAlpha']
,edgecolors='face'
,clip_on=kwds['pMCatBotClip'])
logger.debug("{:s}Anzahl mit fester Farbe Bot gezeichneter Symbole={:d}".format(logStr,pN_bot_Anz))
else:
pN_Anz,col=pDf.shape
if not kwds['CBFixedLimits']:
vmin=pDf[kwds['pMeasure']].min()
vmax=pDf[kwds['pMeasure']].max()
else:
vmin=kwds['CBFixedLimitLow']
vmax=kwds['CBFixedLimitHigh']
pcN=ax.scatter(
pDf[kwds['pXCor']],pDf[kwds['pYCor']]
,s=kwds['pSizeFactor']*pDf[kwds['pAttribute']]
# Farbskala
,cmap=kwds['pMeasureColorMap']
# Normierung Farbe
,vmin=vmin
,vmax=vmax
# Farbwert
,c=pDf[kwds['pMeasure']]
,alpha=kwds['pMeasureAlpha']
,edgecolors='face'
,clip_on=kwds['pMeasureClip']
)
logger.debug("{:s}Anzahl mit Farbskala gezeichneter Symbole={:d}".format(logStr,pN_Anz))
logger.debug("{:s}Farbskala vmin={:10.3f} Farbskala vmax={:10.3f}".format(logStr,vmin,vmax))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return (pcN, vmin, vmax)
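# Illustrative sketch (editor's addition): coloring nodes by a 0..1 measure without
# the 3-class split; column names follow the defaults, the values are made up.
def _pltNetNodesSketch():
    pDf = pd.DataFrame({'pXCor_i': [0., 10., 20.], 'pYCor_i': [0., 5., 15.]
                        ,'Attribute': [100., 200., 300.]  # -> symbol area in pts^2
                        ,'Measure': [0.1, 0.5, 0.9]})     # -> color (CB limits 0..1)
    pcN, vmin, vmax = pltNetNodes(pDf, pMeasure3Classes=False)
    return pcN, vmin, vmax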
def pltNetPipes(pDf,**kwds):
"""
Plots Lines with Marker on gca().
Args:
pDf: dataFrame
PIPE-Line:
* pAttribute: column in pDf (default: 'Attribute')
* pAttributeLs (default: '-')
* pAttributeSizeFactor: plot linewidth in pts = pAttributeSizeFactor (default: 1.0) * Attribute
* pAttributeSizeMin (default: None): if set: use pAttributeSizeMin-Value as Attribute for LineSize if Attribute < pAttributeSizeMin
* pAttributeColorMap (default: plt.cm.binary)
* pAttributeColorMapUsageStart (default: 1./3; Wertebereich: [0,1])
* Farbskala nach vorh. min./max. Wert
* die Farbskala wird nur ab UsageStart genutzt
* d.h. Werte die eine "kleinere" Farbe hätten, bekommen die Farbe von UsageStart
PIPE-Marker:
* pMeasure: column in pDf (default: 'Measure')
* pMeasureMarker (default: '.')
* pMeasureSizeFactor: plot markersize in pts = pMeasureSizeFactor (default: 1.0) * Measure
* pMeasureSizeMin (default: None): if set: use pMeasureSizeMin-Value as Measure for MarkerSize if Measure < pMeasureSizeMin
* pMeasureColorMap (default: plt.cm.cool)
* pMeasureColorMapUsageStart (default: 0.; Wertebereich: [0,1])
* Farbskala nach vorh. min./max. Wert
* die Farbskala wird nur ab UsageStart genutzt
* d.h. Werte die eine "kleinere" Farbe hätten, bekommen die Farbe von UsageStart
PIPE:
* pWAYPXCors: column in pDf (default: 'pWAYPXCors')
* pWAYPYCors: column in pDf (default: 'pWAYPYCors')
* pClip (default: False)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# PIPE-Line
if 'pAttribute' not in keys:
kwds['pAttribute']='Attribute'
if 'pAttributeSizeFactor' not in keys:
kwds['pAttributeSizeFactor']=1.
if 'pAttributeSizeMin' not in keys:
kwds['pAttributeSizeMin']=None
if 'pAttributeLs' not in keys:
kwds['pAttributeLs']='-'
if 'pAttributeColorMap' not in keys:
kwds['pAttributeColorMap']=plt.cm.binary
if 'pAttributeColorMapUsageStart' not in keys:
kwds['pAttributeColorMapUsageStart']=1./3.
# PIPE-Marker
if 'pMeasure' not in keys:
kwds['pMeasure']='Measure'
if 'pMeasureSizeFactor' not in keys:
kwds['pMeasureSizeFactor']=1.
if 'pMeasureSizeMin' not in keys:
kwds['pMeasureSizeMin']=None
if 'pMeasureMarker' not in keys:
kwds['pMeasureMarker']='.'
if 'pMeasureColorMap' not in keys:
kwds['pMeasureColorMap']=plt.cm.cool
if 'pMeasureColorMapUsageStart' not in keys:
kwds['pMeasureColorMapUsageStart']=0.
# PIPE
if 'pWAYPXCors' not in keys:
kwds['pWAYPXCors']='pWAYPXCors'
if 'pWAYPYCors' not in keys:
kwds['pWAYPYCors']='pWAYPYCors'
if 'pClip' not in keys:
kwds['pClip']=False
except:
pass
try:
# Line
minLine=pDf[kwds['pAttribute']].min()
maxLine=pDf[kwds['pAttribute']].max()
logger.debug("{:s}minLine (Attribute): {:6.2f}".format(logStr,minLine))
logger.debug("{:s}maxLine (Attribute): {:6.2f}".format(logStr,maxLine))
normLine=colors.Normalize(minLine,maxLine)
usageLineValue=minLine+kwds['pAttributeColorMapUsageStart']*(maxLine-minLine)
usageLineColor=kwds['pAttributeColorMap'](normLine(usageLineValue))
# Marker
minMarker=pDf[kwds['pMeasure']].min()
maxMarker=pDf[kwds['pMeasure']].max()
logger.debug("{:s}minMarker (Measure): {:6.2f}".format(logStr,minMarker))
logger.debug("{:s}maxMarker (Measure): {:6.2f}".format(logStr,maxMarker))
normMarker=colors.Normalize(minMarker,maxMarker)
usageMarkerValue=minMarker+kwds['pMeasureColorMapUsageStart']*(maxMarker-minMarker)
usageMarkerColor=kwds['pMeasureColorMap'](normMarker(usageMarkerValue))
ax=plt.gca()
for xs,ys,vLine,vMarker in zip(pDf[kwds['pWAYPXCors']],pDf[kwds['pWAYPYCors']],pDf[kwds['pAttribute']],pDf[kwds['pMeasure']]):
if vLine >= usageLineValue:
colorLine=kwds['pAttributeColorMap'](normLine(vLine))
else:
colorLine=usageLineColor
if vMarker >= usageMarkerValue:
colorMarker=kwds['pMeasureColorMap'](normMarker(vMarker))
else:
colorMarker=usageMarkerColor
linewidth=kwds['pAttributeSizeFactor']*vLine
if kwds['pAttributeSizeMin'] != None:
if vLine < kwds['pAttributeSizeMin']:
linewidth=kwds['pAttributeSizeFactor']*kwds['pAttributeSizeMin']
mSize=kwds['pMeasureSizeFactor']*vMarker
if kwds['pMeasureSizeMin'] != None:
if vMarker < kwds['pMeasureSizeMin']:
mSize=kwds['pMeasureSizeFactor']*kwds['pMeasureSizeMin']
pcLines=ax.plot(xs,ys
,color=colorLine
,linewidth=linewidth
,ls=kwds['pAttributeLs']
,marker=kwds['pMeasureMarker']
,mfc=colorMarker
,mec=colorMarker
,mfcalt=colorMarker
,mew=0
,ms=mSize #kwds['pMeasureSizeFactor']*vMarker
,markevery=[0,len(xs)-1]
,aa=True
,clip_on=kwds['pClip']
)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
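# Illustrative sketch (editor's addition): the module-level pltNetPipes expects one
# list of waypoint x- and y-coordinates per pipe; the two short pipes below are made up.
def _pltNetPipesSketch():
    pDf = pd.DataFrame({'pWAYPXCors': [[0., 5., 10.], [10., 20.]]
                        ,'pWAYPYCors': [[0., 2., 5.], [5., 5.]]
                        ,'Attribute': [2., 4.]    # -> linewidth
                        ,'Measure': [10., 20.]})  # -> marker size and color
    pltNetPipes(pDf, pAttributeSizeMin=1., pMeasureSizeMin=5.)
    return plt.gca()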
def pltNetLegendColorbar(pc,pDf,**kwds):
"""
Erzeugt eine Axes cax für den Legendenbereich aus ax (=gca()) und zeichnet auf cax die Farblegende (die Farbskala mit allen Eigenschaften).
Args:
pc: (eingefaerbte) PathCollection (aus pltNetNodes); wird für die Erzeugung der Farbskala zwingend benoetigt
pDf: dataFrame (default: None)
Measure:
* pMeasure: colName in pDf (default: 'Measure')
* pMeasureInPerc: Measure wird interpretiert in Prozent [0-1] (default: True)
* pMeasure3Classes (default: False d.h. Measure wird nicht in 3 Klassen dargestellt)
CBFixedLimits (Ticks):
* CBFixedLimits (default: False d.h. Farbskala nach vorh. min./max. Wert)
* CBFixedLimitLow (default: .10)
* CBFixedLimitHigh (default: .95)
Label:
* pMeasureUNIT (default: '[]')
* pMeasureTYPE (default: '')
CB
* CBFraction: fraction of original axes to use for colorbar (default: 0.05)
* CBHpad: fraction of original axes between colorbar and new image axes (default: 0.0275)
* CBLabelPad (default: -50)
* CBTicklabelsHPad (default: 0.)
* CBAspect: ratio of long to short dimension (default: 10.)
* CBShrink: fraction by which to shrink the colorbar (default: 0.3)
* CBAnchorHorizontal: horizontaler Fußpunkt der colorbar in Plot-% (default: 0.)
* CBAnchorVertical: vertikaler Fußpunkt der colorbar in Plot-% (default: 0.2)
Return:
cax
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# Measure
if 'pMeasure' not in keys:
kwds['pMeasure']='Measure'
if 'pMeasureInPerc' not in keys:
kwds['pMeasureInPerc']=True
if 'pMeasure3Classes' not in keys:
kwds['pMeasure3Classes']=False
# Label
if 'pMeasureUNIT' not in keys:
kwds['pMeasureUNIT']='[]'
if 'pMeasureTYPE' not in keys:
kwds['pMeasureTYPE']=''
# CBFixedLimits
if 'CBFixedLimits' not in keys:
kwds['CBFixedLimits']=False
if 'CBFixedLimitLow' not in keys:
kwds['CBFixedLimitLow']=.10
if 'CBFixedLimitHigh' not in keys:
kwds['CBFixedLimitHigh']=.95
# CB
if 'CBFraction' not in keys:
kwds['CBFraction']=0.05
if 'CBHpad' not in keys:
kwds['CBHpad']=0.0275
if 'CBLabelPad' not in keys:
kwds['CBLabelPad']=-50
if 'CBTicklabelsHPad' not in keys:
kwds['CBTicklabelsHPad']=0
if 'CBAspect' not in keys:
kwds['CBAspect']=10.
if 'CBShrink' not in keys:
kwds['CBShrink']=0.3
if 'CBAnchorHorizontal' not in keys:
kwds['CBAnchorHorizontal']=0.
if 'CBAnchorVertical' not in keys:
kwds['CBAnchorVertical']=0.2
except:
pass
try:
ax=plt.gca()
fig=plt.gcf()
# cax
cax=None
cax,kw=make_axes(ax
,location='right'
,fraction=kwds['CBFraction'] # fraction of original axes to use for colorbar
,pad=kwds['CBHpad'] # fraction of original axes between colorbar and new image axes
,anchor=(kwds['CBAnchorHorizontal'],kwds['CBAnchorVertical']) # the anchor point of the colorbar axes
,aspect=kwds['CBAspect'] # ratio of long to short dimension
,shrink=kwds['CBShrink'] # fraction by which to shrink the colorbar
)
# colorbar
colorBar=fig.colorbar(pc
,cax=cax
,**kw
)
# tick Values
if kwds['pMeasure3Classes']: # FixedLimits should be True and FixedLimitHigh/Low should be set ...
minCBtickValue=kwds['CBFixedLimitLow']
maxCBtickValue=kwds['CBFixedLimitHigh']
else:
if kwds['CBFixedLimits'] and isinstance(kwds['CBFixedLimitHigh'],float) and isinstance(kwds['CBFixedLimitLow'],float):
minCBtickValue=kwds['CBFixedLimitLow']
maxCBtickValue=kwds['CBFixedLimitHigh']
else:
minCBtickValue=pDf[kwds['pMeasure']].min()
maxCBtickValue=pDf[kwds['pMeasure']].max()
colorBar.set_ticks([minCBtickValue,minCBtickValue+.5*(maxCBtickValue-minCBtickValue),maxCBtickValue])
# tick Labels
if kwds['pMeasureInPerc']:
if kwds['pMeasure3Classes']:
minCBtickLabel=">{:3.0f}%".format(minCBtickValue*100)
maxCBtickLabel="<{:3.0f}%".format(maxCBtickValue*100)
else:
minCBtickLabel="{:6.2f}%".format(minCBtickValue*100)
maxCBtickLabel="{:6.2f}%".format(maxCBtickValue*100)
else:
if kwds['pMeasure3Classes']:
minCBtickLabel=">{:6.2f}".format(minCBtickValue)
maxCBtickLabel="<{:6.2f}".format(maxCBtickValue)
else:
minCBtickLabel="{:6.2f}".format(minCBtickValue)
maxCBtickLabel="{:6.2f}".format(maxCBtickValue)
logger.debug("{:s}minCBtickLabel={:s} maxCBtickLabel={:s}".format(logStr,minCBtickLabel,maxCBtickLabel))
colorBar.set_ticklabels([minCBtickLabel,'',maxCBtickLabel])
colorBar.ax.yaxis.set_tick_params(pad=kwds['CBTicklabelsHPad'])
# Label
if kwds['pMeasureInPerc']:
CBLabelText="{:s} in [%]".format(kwds['pMeasureTYPE'])
else:
CBLabelText="{:s} in {:s}".format(kwds['pMeasureTYPE'],kwds['pMeasureUNIT'])
colorBar.set_label(CBLabelText,labelpad=kwds['CBLabelPad'])
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return cax
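# Illustrative sketch (editor's addition): attaching the color legend to the node
# PathCollection returned by pltNetNodes; label text and units are assumptions.
def _pltNetLegendColorbarSketch(pcN, pDf):
    cax = pltNetLegendColorbar(pcN, pDf
                               ,pMeasure='Measure'
                               ,pMeasureInPerc=True
                               ,pMeasureTYPE='Auslastung')
    return cax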
def pltNetLegendColorbar3Classes(pDf,**kwds):
"""
Zeichnet auf gca() die ergaenzenden Legendeninformationen bei 3 Klassen.
* scatters the Top-Symbol
* scatters the Bot-Symbol
* the "Mid-Symbol" is the (already existing) colorbar with (already existing) ticks and ticklabels
Args:
pDf: dataFrame
Category:
* pMCategory: colName in pDf (default: 'MCategory')
* pMCatTopText
* pMCatMidText
* pMCatBotText
CBLegend (3Classes) - Parameterization of the representative Symbols
* CBLe3cTopVPad (default: 1+1*1/4)
* CBLe3cMidVPad (default: .5)
* CBLe3cBotVPad (default: 0-1*1/4)
* "1" is the height of the Colorbar
* the VPads (the vertical Sy-Positions) are defined in cax.transAxes Coordinates
* cax is the Colorbar Axes
* CBLe3cSySize=10**2 (Sy-Area in pts^2)
* CBLe3cSyType='o'
Color:
* pMCatBotColor='violet'
* pMCatTopColor='palegreen'
Returns:
(bbTop, bbMid, bbBot): the boundingBoxes of the 3Classes-Symbols
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# Cats
if 'pMCategory' not in keys:
kwds['pMCategory']='MCategory'
if 'pMCatTopText' not in keys:
kwds['pMCatTopText']='Top'
if 'pMCatMidText' not in keys:
kwds['pMCatMidText']='Middle'
if 'pMCatBotText' not in keys:
kwds['pMCatBotText']='Bottom'
# CBLegend3Cats
if 'CBLe3cTopVPad' not in keys:
kwds['CBLe3cTopVPad']=1+1*1/4
if 'CBLe3cMidVPad' not in keys:
kwds['CBLe3cMidVPad']=.5
if 'CBLe3cBotVPad' not in keys:
kwds['CBLe3cBotVPad']=0-1*1/4
if 'CBLe3cSySize' not in keys:
kwds['CBLe3cSySize']=10**2
if 'CBLe3cSyType' not in keys:
kwds['CBLe3cSyType']='o'
# CatAttribs
if 'pMCatTopColor' not in keys:
kwds['pMCatTopColor']='palegreen'
if 'pMCatBotColor' not in keys:
kwds['pMCatBotColor']='violet'
except:
pass
try:
cax=plt.gca()
        pDf_top=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatTopText'])]
        pDf_mid=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatMidText'])]
        pDf_bot=pDf[(pDf[kwds['pMCategory']]==kwds['pMCatBotText'])]
pDf_top_Anz,col=pDf_top.shape
pDf_mid_Anz,col=pDf_mid.shape
pDf_bot_Anz,col=pDf_bot.shape
logger.debug("{:s} pDf_bot_Anz={:d} pDf_mid_Anz={:d} pDf_top_Anz={:d}".format(logStr,pDf_bot_Anz,pDf_mid_Anz,pDf_top_Anz))
logger.debug("{:s} CBLe3cBotVPad={:f} CBLe3cMidVPad={:f} CBLe3cTopVPad={:f}".format(logStr,kwds['CBLe3cBotVPad'],kwds['CBLe3cMidVPad'],kwds['CBLe3cTopVPad']))
bbBot=None
bbMid=None
bbTop=None
if pDf_bot_Anz >= 0:
po=cax.scatter( 0.,kwds['CBLe3cBotVPad']
,s=kwds['CBLe3cSySize']
,c=kwds['pMCatBotColor']
,alpha=0.9
,edgecolors='face'
,clip_on=False
,marker=pltHlpAlignMarker(kwds['CBLe3cSyType'], halign='left')
)
# Text dazu
o=po.findobj(match=None)
p=o[0]
bbBot=p.get_datalim(cax.transAxes)
logger.debug("{:s} bbBot={!s:s}".format(logStr,bbBot))
# a=plt.annotate(pMCatBotText
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0),CBLe3cBotVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatBotColor
# )
# # weiterer Text dazu
# a=plt.annotate("Anz HA: {:6d}".format(pDf_bot_Anz)
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0)+.5,CBLe3cBotVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatBotColor
# )
if pDf_top_Anz >= 0:
po=cax.scatter( 0.,kwds['CBLe3cTopVPad']
,s=kwds['CBLe3cSySize']
,c=kwds['pMCatTopColor']
,alpha=0.9
,edgecolors='face'
,clip_on=False
,marker=pltHlpAlignMarker(kwds['CBLe3cSyType'], halign='left')
)
o=po.findobj(match=None)
p=o[0]
bbTop=p.get_datalim(cax.transAxes)
# #Text dazu
# o=po.findobj(match=None)
# p=o[0]
# bb=p.get_datalim(cax.transAxes)
# bbTop=bb
# a=plt.annotate(pMCatTopText
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0),CBLe3cTopVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatTopColor
# )
# #weiterer Text dazu
# a=plt.annotate("Anz HA: {:6d}".format(pDf_top_Anz)
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad++CBLe3cTextSpaceFactor*(bb.x1-bb.x0)+.5,CBLe3cTopVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatTopColor
# )
if pDf_mid_Anz >= 0:
po=cax.scatter( 0.,kwds['CBLe3cMidVPad']
,s=kwds['CBLe3cSySize']
,c='lightgrey'
,alpha=0.9
,edgecolors='face'
,clip_on=False
                ,visible=False # es werden nur die Koordinaten benoetigt
,marker=pltHlpAlignMarker(kwds['CBLe3cSyType'], halign='left')
)
o=po.findobj(match=None)
p=o[0]
bbMid=p.get_datalim(cax.transAxes)
# #Text dazu
# o=po.findobj(match=None)
# p=o[0]
# bb=p.get_datalim(cax.transAxes)
# a=plt.annotate(pMCatMidText
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0),CBLe3cMidVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatMidColor
# ,visible=False
# )
# #weiterer Text dazu
# a=plt.annotate("Anz HA: {:6d}".format(pDf_mid_Anz)
# ,xy=(CBHpad+CBLe3cHpadSymbol+CBLe3cHpad+CBLe3cTextSpaceFactor*(bb.x1-bb.x0)+.5,CBLe3cMidVpad)
# ,xycoords=cax.transAxes
# ,rotation='vertical' #90
# ,va='center'
# ,ha='center'
# ,color=pMCatMidColor
# ,visible=False
# )
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return (bbTop, bbMid, bbBot)
def pltNetLegendTitleblock(text='',**kwds):
"""
Zeichnet auf gca() ergaenzende Schriftfeldinformationen.
Args:
text
Parametrierung:
* anchorVertical
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
if 'anchorVertical' not in keys:
kwds['anchorVertical']=1.
except:
pass
cax=plt.gca()
try:
a=plt.text( 0.
,kwds['anchorVertical']
,text
,transform=cax.transAxes
,family='monospace'
,size='smaller'
,rotation='vertical'
,va='bottom'
,ha='left'
)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltNetTextblock(text='',**kwds):
"""
Zeichnet einen Textblock auf gca().
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
if 'x' not in keys:
kwds['x']=0.
if 'y' not in keys:
kwds['y']=1.
except:
pass
ax=plt.gca()
try:
a=plt.text( kwds['x']
,kwds['y']
,text
,transform=ax.transAxes
,family='monospace'
,size='smaller'
,rotation='horizontal'
,va='bottom'
,ha='left'
)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
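# Illustrative sketch (editor's addition): placing a title block on the color
# legend axes and a free text block on the net axes; the texts are placeholders.
def _pltNetAnnotationSketch(cax, ax):
    plt.sca(cax)
    pltNetLegendTitleblock(text='Project X\nVariant 1', anchorVertical=1.)
    plt.sca(ax)
    pltNetTextblock(text='draft', x=0.02, y=0.98)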
class Rm():
@classmethod
def pltNetPipes(cls,pDf,**kwds):
"""
Plots colored PIPES.
Args:
DATA:
pDf: dataFrame
* query: query to filter pDf; default: None; Exp.: ="CONT_ID == '1001'"
* fmask: function to filter pDf; default: None; Exp.: =lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
* query and fmask are used both (query 1st) if not None
* sort_values_by: list of colNames defining the plot order; default: None (d.h. die Plotreihenfolge - und damit die z-Order - ist dann die pDF-Reihenfolge)
* sort_values_ascending; default: False (d.h. kleine zuletzt und damit (wenn pAttrLineSize = pAttribute/pAttributeFunc) auch dünne über dicke); nur relevant bei sort_values_by
AXES:
pAx: Axes to be plotted on; if not specified: gca() is used
Colorlegend:
* CBFraction in % (default: 5)
* CBHpad (default: 0.05)
* CBLabel (default: pAttribute/pAttributeFunc)
* CBBinTicks (default: None, d.h. keine Vorgabe von Außen); Vorgabe N: N yTicks; bei diskreten CM gemeint im Sinne von N-1 diskreten Kategorien
* CBBinDiscrete (default: False, d.h. eine gegebene (kontinuierliche) CM wird nicht in eine diskrete gewandelt)
* wenn CBBinDiscrete, dann gilt N aus CBBinTicks fuer die Ticks (bzw. Kategorien); ist CBBinTicks undef. gilt 4 (also 3 Kategorien)
* bei den vorgenannten Kategorien handelt es sich um eine gleichmäßige Unterteilung des definierten Wertebereiches
* CBBinBounds (default: None): wenn die CM eine diskrete ist, dann wird eine vorgegebene BoundaryNorm angewandt; CBBinTicks hat dann keine Bedeutung
* CBTicks: individuell vorgegebene Ticks; wird am Schluss prozessiert, d.h. vorh. (ggf. auch durch CBBinTicks bzw. <=/>= u. v=/^= bereits manipulierte) ...
* ... Ticks werden überschrieben; kann ohne CBTickLabels verwendet werden
* CBTickLabels: individuell vorgegebene Ticklabels; wird danach prozessiert; Länge muss zu dann existierenden Ticks passen; kann auch ohne CBTicks verwendet werden
PIPE-Attribute:
* pAttribute: column in pDf (default: 'Attribute')
* pAttributeFunc:
* function to be used to construct a new col to be plotted
* if pAttributeFunc is not None pAttribute is not used: pAttribute is set to 'pAttributeFunc'
* the new constructed col is named 'pAttributeFunc'; this name can be used in sort_values_by
PIPE-Color:
* pAttributeColorMap (default: plt.cm.cool)
* Farbskalamapping:
* ------------------
* pAttributeColorMapMin (default: pAttribute.min()); ordnet der kleinsten Farbe einen Wert zu; CM: wenn angegeben _und unterschritten: <=
* pAttributeColorMapMax (default: pAttribute.max()); ordnet der größten Farbe einen Wert zu; CM: wenn angegeben _und überschritten: >=
* Standard: Farbskala wird voll ausgenutzt; d.h. der (ggf. mit Min/Max) eingegrenzte Wertebereich wird den Randfarben der Skala zugeordnet
* wenn ein anderer, kleinerer, Wertebereich mit derselben Farbskala geplottet wird, dann sind die Farben in den Plots nicht vergleichbar ...
* ... wenn eine Farbvergleichbarkeit erzielt werden soll, darf dieselbe Farbskala nicht voll ausgenutzt werden
* pAttributeColorMapUsageStart (default: 0.; Wertebereich: [0,1[)
* hier: die Farbskala wird unten nur ab UsageStart genutzt ...
* ... d.h. Werte die eine "kleinere" Farbe hätten, bekommen die Farbe von UsageStart; CM: v=
* pAttributeColorMapUsageEnd (default: 1.; Wertebereich: ]0,1])
* hier: die Farbskala wird oben nur bis UsageEnd genutzt ...
* ... d.h. Werte die eine "größere" Farbe hätten, bekommen die Farbe von UsageEnd; CM: ^=
* etwas anderes ist es, wenn man eine Farbskala an den Rändern nicht voll ausnutzen möchte weil einem die Farben dort nicht gefallen ...
PIPE-Color 2nd:
* um "unwichtige" Bereiche zu "dimmen": Beispiele:
* räumlich: nicht-Schnitt Bereiche; Bestand (2nd) vs. Ausbau; Zonen unwichtig (2nd) vs. Zonen wichtig; Ok (2nd) von NOK
* es werden erst die 2nd-Color Pipes gezeichnet; die (1st-)Color Pipes werden danach gezeichnet, liegen also "über" den "unwichtigen"
* es wird dieselbe Spalte pAttribute/pAttributeFunc für die 2. Farbskala verwendet
* es wird derselbe Linienstil (pAttributeLs) für die 2. Farbskala verwendet
* es wird dieselbe Dicke pAttrLineSize (pAttribute/pAttributeFunc) für die 2. Farbskala verwendet
* nur die Farbskala ist anders sowie ggf. das Farbskalamapping
* pAttributeColorMapFmask: function to filter pDf to decide to plot with colorMap; default: =lambda row: True
* pAttributeColorMap2ndFmask: function to filter pDf to decide to plot with colorMap2nd; default: =lambda row: False
* mit den beiden Funktionsmasken kann eine Filterung zusätzlich zu query und fmask realisiert werden
* die Funktionsmasken sollten schnittmengenfrei sein; wenn nicht: 2nd überschreibt
* pAttributeColorMap2nd (default: plt.cm.binary)
* Farbskalamapping:
* ------------------
* pAttributeColorMap2ndMin (default: pAttributeColorMapMin)
* pAttributeColorMap2ndMax (default: pAttributeColorMapMax)
* die Farbskala wird an den Rändern nicht voll ausgenutzt wenn die Farben dort ggf. nicht gefallen:
* pAttributeColorMap2ndUsageStart (default: 0.; Wertebereich: [0,1[)
* pAttributeColorMap2ndUsageEnd (default: 1.; Wertebereich: ]0,1])
PIPE-Linestyle:
* pAttributeLs (default: '-')
                * same for all colors if multiple colors are specified
PIPE-Linesize:
* pAttrLineSize: column in pDf; if not specified: pAttribute/pAttributeFunc
* pAttrLineSizeFactor (>0): plot linewidth in pts = pAttrLineSizeFactor (default: =...) * fabs(pAttrLineSize)
* ...: 1./(pDf[pAttrLineSize].std()*2.)
                * same for all colors if multiple colors are specified
PIPE-Geometry:
* pWAYPXCors: column in pDf (default: 'pWAYPXCors')
* pWAYPYCors: column in pDf (default: 'pWAYPYCors')
* pClip (default: True)
>>> import pandas as pd
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> import matplotlib.gridspec as gridspec
>>> import math
>>> # ---
>>> try:
... import Rm
... except ImportError:
... from PT3S import Rm
>>> # ---
>>> xm=xms['DHNetwork']
>>> #mx=mxs['DHNetwork']
>>> # ---
>>> plt.close()
>>> size_DINA3quer=(16.5, 11.7)
>>> dpiSize=72
>>> fig=plt.figure(figsize=size_DINA3quer,dpi=dpiSize)
>>> gs = gridspec.GridSpec(4, 2)
>>> # ---
>>> vROHR=xm.dataFrames['vROHR']
>>> # ---
>>> # Attribute (with neg. Values)
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttribute='ROHR~*~*~*~QMAV'
... )
>>> txt=axNfd.set_title('RL QMAV')
>>> # ---
>>> # Function as Attribute
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[1])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... )
>>> txt=axNfd.set_title('RL QMAV Abs')
>>> # --------------------------
>>> # ---
>>> # Mi/MaD zS auf
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[2])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1600.
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... )
>>> txt=axNfd.set_title('Mi/MaD zS auf')
>>> # --------------------------
>>> # ---
>>> # ind. Kategorien
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[3])
>>> cm = matplotlib.colors.ListedColormap(['cyan', 'royalblue', 'magenta', 'coral'])
>>> cm.set_over('0.25')
>>> cm.set_under('0.75')
>>> bounds = [10.,100.,200.,800.,1600.]
>>> norm = matplotlib.colors.BoundaryNorm(bounds, cm.N)
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMap=cm
... ,CBBinBounds=bounds
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... )
>>> txt=axNfd.set_title('ind. Kategorien')
>>> # --------------------------
>>> # ---
>>> # Unwichtiges ausblenden über 2nd Color
>>> # --------------------------
>>> vAGSN=xm.dataFrames['vAGSN']
>>> hpRL=vAGSN[(vAGSN['LFDNR']=='1') & (vAGSN['Layer']==2)]
>>> pDf=pd.merge(vROHR
... ,hpRL[hpRL.IptIdx=='S'] # wg. Innenpunkte
... ,how='left'
... ,left_on='pk'
... ,right_on='OBJID'
... ,suffixes=('','_AGSN')).filter(items=vROHR.columns.tolist()+['OBJID'])
>>> axNfd = fig.add_subplot(gs[4])
>>> Rm.Rm.pltNetPipes(pDf
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1500.
... ,CBBinTicks=7
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... ,pAttributeColorMapFmask=lambda row: True if not pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndFmask=lambda row: True if pd.isnull(row.OBJID) else False
... )
>>> txt=axNfd.set_title('Unwichtiges ausblenden über 2nd Color')
>>> # --------------------------
>>> # ---
>>> # Farbskalen an den Rändern abschneiden
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[5])
>>> Rm.Rm.pltNetPipes(pDf
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1500.
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... ,pAttributeColorMapFmask=lambda row: True if not pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndFmask=lambda row: True if pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndUsageStart=.5/5. # nicht zu weiß
... ,pAttributeColorMap2ndUsageEnd=2.5/5. # nicht zu schwarz
... ,pAttributeColorMapUsageStart=3/15.
... ,pAttributeColorMapUsageEnd=12/15.
... )
>>> txt=axNfd.set_title('Farbskalen an den Rändern abschneiden')
>>> # --------------------------
>>> # ---
>>> # Farbskala diskretisieren
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[6])
>>> Rm.Rm.pltNetPipes(pDf
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1500.
... ,CBBinDiscrete=True
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... ,pAttributeColorMapFmask=lambda row: True if not pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndFmask=lambda row: True if pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndUsageStart=.5/5. # nicht zu weiß
... ,pAttributeColorMap2ndUsageEnd=2.5/5. # nicht zu schwarz
... ,CBTicks=[250,750,1250]
... ,CBTickLabels=['klein','mittel','groß']
... )
>>> txt=axNfd.set_title('Farbskala diskretisieren')
>>> # --------------------------
>>> # ---
>>> # Unterkategorien
>>> # --------------------------
>>> baseColorsDef="tab10"
>>> catagoryColors=[9,6,1]
>>> nOfSubCatsReq=4
>>> cm=Rm.pltMakeCategoricalCmap(baseColorsDef=baseColorsDef,catagoryColors=catagoryColors,nOfSubCatsReq=nOfSubCatsReq,reversedSubCatOrder=True)
>>> axNfd = fig.add_subplot(gs[7])
>>> Rm.Rm.pltNetPipes(pDf
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... ,pAttributeColorMap=cm
... ,pAttributeColorMapMin=0.
... ,pAttributeColorMapMax=1500.
... ,CBBinTicks=16
... ,CBLabel='Q [t/h]'
... ,sort_values_by=['pAttributeFunc']
... ,sort_values_ascending=True
... ,pAttributeColorMapFmask=lambda row: True if not pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndFmask=lambda row: True if pd.isnull(row.OBJID) else False
... ,pAttributeColorMap2ndUsageStart=.5/5. # nicht zu weiß
... ,pAttributeColorMap2ndUsageEnd=2.5/5. # nicht zu schwarz
... )
>>> txt=axNfd.set_title('Unterkategorien')
>>> # --------------------------
>>> gs.tight_layout(fig)
>>> plt.show()
>>> plt.savefig('pltNetPipes.pdf',format='pdf',dpi=dpiSize*2)
>>> # -----
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3q,dpi=dpiSize)
>>> gs = gridspec.GridSpec(1, 1)
>>> # ---
>>> #
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' and row.LTGR_NAME=='NWDUF2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... )
>>> txt=axNfd.set_title('RL QMAV Abs (Ausschnitt)')
>>> gs.tight_layout(fig)
>>> plt.show()
>>> # -----
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3,dpi=dpiSize)
>>> gs = gridspec.GridSpec(1, 1)
>>> # ---
>>> #
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> Rm.Rm.pltNetPipes(vROHR
... ,query="CONT_ID == '1001'"
... ,fmask=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' and row.LTGR_NAME=='NWDUF2' else False
... ,pAx=axNfd
... ,pAttributeFunc=lambda row: math.fabs(row['ROHR~*~*~*~QMAV'])
... )
>>> txt=axNfd.set_title('RL QMAV Abs (Ausschnitt)')
>>> gs.tight_layout(fig)
>>> plt.show()
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# AXES
if 'pAx' not in keys:
kwds['pAx']=plt.gca()
# CB
if 'CBFraction' not in keys:
kwds['CBFraction']=5 # in %
if 'CBHpad' not in keys:
kwds['CBHpad']=0.05
if 'CBLabel' not in keys:
kwds['CBLabel']=None
# CB / color scale
if 'CBBinTicks' not in keys:
kwds['CBBinTicks']=None
if 'CBBinDiscrete' not in keys:
kwds['CBBinDiscrete']=False
if kwds['CBBinDiscrete']:
if kwds['CBBinTicks']==None:
kwds['CBBinTicks']=4 # (d.h. 3 Kategorien)
if 'CBBinBounds' not in keys:
kwds['CBBinBounds']=None
# customized yTicks
if 'CBTicks' not in keys:
kwds['CBTicks'] = None
if 'CBTickLabels' not in keys:
kwds['CBTickLabels'] = None
# DATA
if 'query' not in keys:
kwds['query']=None # Exp.: = "KVR_i=='2' & KVR_k=='2'"
if 'fmask' not in keys:
kwds['fmask']=None # Exp.: =lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
if 'sort_values_by' not in keys:
kwds['sort_values_by']=None
if 'sort_values_ascending' not in keys:
kwds['sort_values_ascending']=False
# PIPE-Attribute
if 'pAttribute' not in keys:
kwds['pAttribute']='Attribute'
if 'pAttributeFunc' not in keys:
logger.debug("{:s}pAttribute: not specified?! 'Attribute' will be used. pAttributeFunc is also not specified?!".format(logStr))
if 'pAttributeFunc' not in keys:
kwds['pAttributeFunc']=None
# PIPE-Color
if 'pAttributeColorMap' not in keys:
kwds['pAttributeColorMap']=plt.cm.cool
if 'pAttributeColorMapMin' not in keys:
kwds['pAttributeColorMapMin']=None
if 'pAttributeColorMapMax' not in keys:
kwds['pAttributeColorMapMax']=None
# Trunc Cmap
if 'pAttributeColorMapUsageStart' not in keys and 'pAttributeColorMapUsageEnd' not in keys:
kwds['pAttributeColorMapTrunc']=False
else:
kwds['pAttributeColorMapTrunc']=True
if 'pAttributeColorMapUsageStart' not in keys:
kwds['pAttributeColorMapUsageStart']=0.
if 'pAttributeColorMapUsageEnd' not in keys:
kwds['pAttributeColorMapUsageEnd']=1.
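# note: the Usage values are fractions in [0,1] of the value range minLine..maxLine; only the corresponding sub-range of the colormap is used (see the truncation further below)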
# PIPE-Color 1st/2nd - FMasks
if 'pAttributeColorMapFmask' not in keys:
kwds['pAttributeColorMapFmask']=lambda row: True
else:
logger.debug("{:s}Color 1st-PIPEs are filtered with fmask: {:s} ...".format(logStr,str(kwds['pAttributeColorMapFmask'])))
if 'pAttributeColorMap2ndFmask' not in keys:
kwds['pAttributeColorMap2ndFmask']=lambda row: False
else:
logger.debug("{:s}Color 2nd-PIPEs are filtered with fmask: {:s} ...".format(logStr,str(kwds['pAttributeColorMap2ndFmask'])))
# PIPE-Color 2nd
if 'pAttributeColorMap2nd' not in keys:
kwds['pAttributeColorMap2nd']=plt.cm.binary
if 'pAttributeColorMap2ndMin' not in keys:
kwds['pAttributeColorMap2ndMin']=kwds['pAttributeColorMapMin']
if 'pAttributeColorMap2ndMax' not in keys:
kwds['pAttributeColorMap2ndMax']=kwds['pAttributeColorMapMax']
# Trunc Cmap
if 'pAttributeColorMap2ndUsageStart' not in keys and 'pAttributeColorMap2ndUsageEnd' not in keys:
kwds['pAttributeColorMap2ndTrunc']=False
else:
kwds['pAttributeColorMap2ndTrunc']=True
if 'pAttributeColorMap2ndUsageStart' not in keys:
kwds['pAttributeColorMap2ndUsageStart']=0.
if 'pAttributeColorMap2ndUsageEnd' not in keys:
kwds['pAttributeColorMap2ndUsageEnd']=1.
# PIPE-Linestyle
if 'pAttributeLs' not in keys:
kwds['pAttributeLs']='-'
# PIPE-Linesize
if 'pAttrLineSize' not in keys:
kwds['pAttrLineSize']=None
if 'pAttrLineSizeFactor' not in keys:
kwds['pAttrLineSizeFactor']=None
# PIPE-Geometry
if 'pWAYPXCors' not in keys:
kwds['pWAYPXCors']='pWAYPXCors'
if 'pWAYPYCors' not in keys:
kwds['pWAYPYCors']='pWAYPYCors'
if 'pClip' not in keys:
kwds['pClip']=True
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
try:
# filter, if requested
if kwds['query'] != None:
logger.debug("{:s}pDf is filtered with query: {:s} ...".format(logStr,str(kwds['query'])))
pDf=pd.DataFrame(pDf.query(kwds['query']).values,columns=pDf.columns)
if kwds['fmask'] != None:
logger.debug("{:s}pDf is filtered with fmask: {:s} ...".format(logStr,str(kwds['fmask'])))
pDf=pd.DataFrame(pDf[pDf.apply(kwds['fmask'],axis=1)].values,columns=pDf.columns)
# if column(s) to plot have to be recalculated or the plot order has to be changed: work on a copy
if kwds['pAttributeFunc'] != None or kwds['sort_values_by'] != None:
# copy!
logger.debug("{:s}pDf is copied ...".format(logStr))
pDf=pDf.copy(deep=True)
# recalculate the column(s) to plot, if requested
if kwds['pAttributeFunc'] != None:
logger.debug("{:s}pAttribute: col '{:s}' is not used: ...".format(logStr,kwds['pAttribute']))
logger.debug("{:s}... pAttributeFunc {:s} is used to calculate a new col named 'pAttributeFunc'".format(logStr,str(kwds['pAttributeFunc'])))
pDf['pAttributeFunc']=pDf.apply(kwds['pAttributeFunc'],axis=1)
kwds['pAttribute']='pAttributeFunc'
logger.debug("{:s}col '{:s}' is used as Attribute.".format(logStr,kwds['pAttribute']))
# label for the CB
if kwds['CBLabel'] == None:
kwds['CBLabel']=kwds['pAttribute']
# determine the column used for the line width
if kwds['pAttrLineSize'] == None:
kwds['pAttrLineSize']=kwds['pAttribute']
logger.debug("{:s}col '{:s}' is used as LineSize.".format(logStr,kwds['pAttrLineSize']))
# scale the line width
if kwds['pAttrLineSizeFactor']==None:
kwds['pAttrLineSizeFactor']=1./(pDf[kwds['pAttrLineSize']].std()*2.)
logger.debug("{:s}Faktor Liniendicke: {:12.6f} - eine Linie mit Attributwert {:6.2f} wird in {:6.2f} Pts Dicke geplottet.".format(logStr
,kwds['pAttrLineSizeFactor']
,pDf[kwds['pAttrLineSize']].std()*2.
,kwds['pAttrLineSizeFactor']*pDf[kwds['pAttrLineSize']].std()*2.
))
logger.debug("{:s}min. Liniendicke: Attributwert {:9.2f} Pts: {:6.2f}.".format(logStr
,math.fabs(pDf[kwds['pAttrLineSize']].min())
,kwds['pAttrLineSizeFactor']*math.fabs(pDf[kwds['pAttrLineSize']].min()))
)
logger.debug("{:s}max. Liniendicke: Attributwert {:9.2f} Pts: {:6.2f}.".format(logStr
,math.fabs(pDf[kwds['pAttrLineSize']].max())
,kwds['pAttrLineSizeFactor']*math.fabs(pDf[kwds['pAttrLineSize']].max()))
)
# change the plot order, if requested
if kwds['sort_values_by'] != None:
logger.debug("{:s}pDf is sorted (=Plotreihenfolge) by {:s} ascending={:s}.".format(logStr,str(kwds['sort_values_by']),str(kwds['sort_values_ascending'])))
pDf.sort_values(by=kwds['sort_values_by'],ascending=kwds['sort_values_ascending'],inplace=True)
# ----------------------------------------------------------------------------------------------------------------------------------------
# x,y axes: determine and set the limits (setting them affects ticks and data_ratio; without setting them these would remain at default values)
# ----------------------------------------------------------------------------------------------------------------------------------------
xMin=923456789
yMin=923456789
xMax=0
yMax=0
for xs,ys in zip(pDf[kwds['pWAYPXCors']],pDf[kwds['pWAYPYCors']]):
xMin=min(xMin,min(xs))
yMin=min(yMin,min(ys))
xMax=max(xMax,max(xs))
yMax=max(yMax,max(ys))
logger.debug("{:s}pWAYPXCors: {:s} Min: {:6.2f} Max: {:6.2f}".format(logStr,kwds['pWAYPXCors'],xMin,xMax))
logger.debug("{:s}pWAYPYCors: {:s} Min: {:6.2f} Max: {:6.2f}".format(logStr,kwds['pWAYPYCors'],yMin,yMax))
dx=xMax-xMin
dy=yMax-yMin
dxdy=dx/dy
dydx=1./dxdy
# i.d.R. "krumme" Grenzen (die Ticks werden von mpl i.d.R. trotzdem "glatt" ermittelt)
kwds['pAx'].set_xlim(xMin,xMax)
kwds['pAx'].set_ylim(yMin,yMax)
# ----------------------------------------------------------------------------------------------------------------------------------------
# x,y axes: determine the ticks but do NOT change them -----------------------------------------------------------------
# even with "odd" limits matplotlib usually sets "round" ticks
# ----------------------------------------------------------------------------------------------------------------------------------------
# determine the ticks
xTicks=kwds['pAx'].get_xticks()
yTicks=kwds['pAx'].get_yticks()
dxTick = xTicks[1]-xTicks[0]
xTickSpan=xTicks[-1]-xTicks[0]
dyTick = yTicks[1]-yTicks[0]
yTickSpan=yTicks[-1]-yTicks[0]
logger.debug("{:s}xTicks : {:s} dx: {:6.2f}".format(logStr,str(xTicks),dxTick))
logger.debug("{:s}yTicks : {:s} dy: {:6.2f}".format(logStr,str(yTicks),dyTick))
# equalize the tick spacing (deactivated; the computed newTicks are not applied)
if dyTick == dxTick:
pass # nichts zu tun
elif dyTick > dxTick:
# set dyTick to dxTick (the smaller one)
dTickW=dxTick
# required number of ticks
numOfTicksErf=math.floor(dy/dTickW)+1
newTicks=[idx*dTickW+yTicks[0] for idx in range(numOfTicksErf)]
#kwds['pAx'].set_yticks(newTicks)
#yTicks=kwds['pAx'].get_yticks()
#dyTick = yTicks[1]-yTicks[0]
#logger.debug("{:s}yTicks NEU: {:s} dy: {:6.2f}".format(logStr,str(yTicks),dyTick))
else:
# set dxTick to dyTick (the smaller one)
dTickW=dyTick
# required number of ticks
numOfTicksErf=math.floor(dx/dTickW)+1
newTicks=[idx*dTickW+xTicks[0] for idx in range(numOfTicksErf)]
#kwds['pAx'].set_xticks(newTicks)
#xTicks=kwds['pAx'].get_xticks()
#dxTick = xTicks[1]-xTicks[0]
#logger.debug("{:s}xTicks NEU: {:s} dx: {:6.2f}".format(logStr,str(xTicks),dxTick))
# ----------------------------------------------------------------------------------------------------------------------------------------
# grid and aspect
# ----------------------------------------------------------------------------------------------------------------------------------------
kwds['pAx'].grid()
kwds['pAx'].set_aspect(aspect='equal') # zur Sicherheit; andere als verzerrungsfreie Darstellungen machen im Netz kaum Sinn
kwds['pAx'].set_adjustable('box')
kwds['pAx'].set_anchor('SW')
## determine x,y aspect ratios ---------------------------------------------------------------------------
## total figure size
#figW, figH = kwds['pAx'].get_figure().get_size_inches()
## Axis pos. on figure
#x0, y0, w, h = kwds['pAx'].get_position().bounds
## Ratio of display units
#disp_ratio = (figH * h) / (figW * w)
#disp_ratioA = (figH) / (figW )
#disp_ratioB = (h) / (w)
## Ratio of data units
#data_ratio=kwds['pAx'].get_data_ratio()
#logger.debug("{:s}figW: {:6.2f} figH: {:6.2f}".format(logStr,figW,figH))
#logger.debug("{:s}x0: {:6.2f} y0: {:6.2f} w: {:6.2f} h: {:6.2f}".format(logStr,x0,y0,w,h))
#logger.debug("{:s}pWAYPCors: Y/X: {:6.2f}".format(logStr,dydx))
#logger.debug("{:s}Ticks: Y/X: {:6.2f}".format(logStr,yTickSpan/xTickSpan))
#logger.debug("{:s}disp_ratio: {:6.2f} data_ratio: {:6.2f}".format(logStr,disp_ratio,data_ratio))
#logger.debug("{:s}disp_ratioA: {:6.2f} disp_ratioB: {:6.2f}".format(logStr,disp_ratioA,disp_ratioB))
# PIPE-Color: color scale mapping:
cMap=plt.cm.get_cmap(kwds['pAttributeColorMap'])
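# if a discrete color scale is requested: sample CBBinTicks-1 colors evenly from the continuous map and build a new colormap restricted to these colors (one color per bin)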
if kwds['CBBinDiscrete'] and hasattr(cMap,'from_list'): # diskrete Farbskala aus kontinuierlicher erzeugen
N=kwds['CBBinTicks']-1
color_list = cMap(np.linspace(0, 1, N))
cmap_name = cMap.name + str(N)
kwds['pAttributeColorMap']=cMap.from_list(cmap_name, color_list, N)
minAttr=pDf[kwds['pAttribute']].min()
maxAttr=pDf[kwds['pAttribute']].max()
if kwds['pAttributeColorMapMin'] != None:
minLine=kwds['pAttributeColorMapMin']
else:
minLine=minAttr
if kwds['pAttributeColorMapMax'] != None:
maxLine=kwds['pAttributeColorMapMax']
else:
maxLine=maxAttr
logger.debug("{:s}Attribute: minLine (used for CM-Scaling): {:8.2f} min (Data): {:8.2f}".format(logStr,minLine,minAttr))
logger.debug("{:s}Attribute: maxLine (used for CM-Scaling): {:8.2f} max (Data): {:8.2f}".format(logStr,maxLine,maxAttr))
# Norm
normLine=colors.Normalize(minLine,maxLine)
# truncated continuous color scale: adjust the color scale and the norm
cMap=plt.cm.get_cmap(kwds['pAttributeColorMap'])
if kwds['pAttributeColorMapTrunc'] and hasattr(cMap,'from_list'):
#
usageStartLineValue=minLine+kwds['pAttributeColorMapUsageStart']*(maxLine-minLine)
usageStartLineColor=kwds['pAttributeColorMap'](normLine(usageStartLineValue))
logger.debug("{:s}pAttributeColorMapUsageStart: {:6.2f} ==> usageStartLineValue: {:8.2f} (minLine: {:8.2f}) color: {:s}".format(logStr
,kwds['pAttributeColorMapUsageStart']
,usageStartLineValue,minLine,str(usageStartLineColor)))
#
usageEndLineValue=maxLine-(1.-kwds['pAttributeColorMapUsageEnd'])*(maxLine-minLine)
usageEndLineColor=kwds['pAttributeColorMap'](normLine(usageEndLineValue))
logger.debug("{:s}pAttributeColorMapUsageEnd: {:6.2f} ==> usageEndLineValue: {:8.2f} (maxLine: {:8.2f}) color: {:s}".format(logStr
,kwds['pAttributeColorMapUsageEnd']
,usageEndLineValue,maxLine,str(usageEndLineColor)))
nColors=100
kwds['pAttributeColorMap'] = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cMap.name, a=kwds['pAttributeColorMapUsageStart'], b=kwds['pAttributeColorMapUsageEnd'])
,cMap(np.linspace(kwds['pAttributeColorMapUsageStart'],kwds['pAttributeColorMapUsageEnd'],nColors)))
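# narrow the norm to the actually used value sub-range so that values below/above it saturate at the first/last color of the truncated map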
normLine=colors.Normalize(max(minLine,usageStartLineValue),min(maxLine,usageEndLineValue))
# discrete color scale with individual categories: adjust the norm
cMap=plt.cm.get_cmap(kwds['pAttributeColorMap'])
if kwds['CBBinBounds'] != None and not hasattr(cMap,'from_list'): # diskrete Farbskala liegt vor und Bounds sind vorgegeben
normLine = colors.BoundaryNorm(kwds['CBBinBounds'],cMap.N)
#CBPropExtend='both'
CBPropExtend='neither'
else:
CBPropExtend='neither'
# PIPE-Color 2nd: color scale mapping:
if kwds['pAttributeColorMap2ndMin'] != None:
minLine2nd=kwds['pAttributeColorMap2ndMin']
else:
minLine2nd=minAttr
if kwds['pAttributeColorMap2ndMax'] != None:
maxLine2nd=kwds['pAttributeColorMap2ndMax']
else:
maxLine2nd=maxAttr
logger.debug("{:s}Attribute: minLine2nd (used for CM-Scaling): {:8.2f} min (Data): {:8.2f}".format(logStr,minLine2nd,minAttr))
logger.debug("{:s}Attribute: maxLine2nd (used for CM-Scaling): {:8.2f} max (Data): {:8.2f}".format(logStr,maxLine2nd,maxAttr))
# Norm
normLine2nd=colors.Normalize(minLine2nd,maxLine2nd)
# truncated continuous color scale: adjust the color scale
cMap=plt.cm.get_cmap(kwds['pAttributeColorMap2nd'])
if kwds['pAttributeColorMap2ndTrunc'] and hasattr(cMap,'from_list'):
#
usageStartLineValue2nd=minLine2nd+kwds['pAttributeColorMap2ndUsageStart']*(maxLine2nd-minLine2nd)
logger.debug("{:s}pAttributeColorMap2ndUsageStart: {:8.2f} ==> usageStartLineValue2nd: {:8.2f} (minLine2nd: {:8.2f})".format(logStr,kwds['pAttributeColorMap2ndUsageStart'],usageStartLineValue2nd,minLine2nd))
usageStartLineColor2nd=kwds['pAttributeColorMap2nd'](normLine2nd(usageStartLineValue2nd))
#
usageEndLineValue2nd=maxLine2nd-(1.-kwds['pAttributeColorMap2ndUsageEnd'])*(maxLine2nd-minLine2nd)
logger.debug("{:s}pAttributeColorMap2ndUsageEnd: {:8.2f} ==> usageEndLineValue2nd: {:8.2f} (maxLine2nd: {:8.2f})".format(logStr,kwds['pAttributeColorMap2ndUsageEnd'],usageEndLineValue2nd,maxLine2nd))
usageEndLineColor2nd=kwds['pAttributeColorMap2nd'](normLine2nd(usageEndLineValue2nd))
nColors=100
kwds['pAttributeColorMap2nd'] = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cMap.name, a=kwds['pAttributeColorMap2ndUsageStart'], b=kwds['pAttributeColorMap2ndUsageEnd'])
,cMap(np.linspace(kwds['pAttributeColorMap2ndUsageStart'],kwds['pAttributeColorMap2ndUsageEnd'],nColors)))
# PIPE-Color 2nd: PLOT
pDfColorMap2nd=pDf[pDf.apply(kwds['pAttributeColorMap2ndFmask'],axis=1)]
(rows ,cols)=pDf.shape
(rows2nd,cols)=pDfColorMap2nd.shape
logger.debug("{:s}Color 2nd-PIPEs: {:d} von {:d}".format(logStr,rows2nd,rows))
for xs,ys,vLine,tLine in zip(pDfColorMap2nd[kwds['pWAYPXCors']],pDfColorMap2nd[kwds['pWAYPYCors']],pDfColorMap2nd[kwds['pAttribute']],pDfColorMap2nd[kwds['pAttrLineSize']]):
#if vLine >= usageStartLineValue2nd and vLine <= usageEndLineValue2nd:
# colorLine=kwds['pAttributeColorMap2nd'](normLine2nd(vLine))
#elif vLine > usageEndLineValue2nd:
# colorLine=usageEndLineColor2nd
#else:
# colorLine=usageStartLineColor2nd
colorLine=kwds['pAttributeColorMap2nd'](normLine2nd(vLine))
pcLines=kwds['pAx'].plot(xs,ys
,color=colorLine
,linewidth=kwds['pAttrLineSizeFactor']*math.fabs(tLine)#(vLine)
,ls=kwds['pAttributeLs']
,solid_capstyle='round'
,aa=True
,clip_on=kwds['pClip']
)
# PIPE-Color: PLOT
pDfColorMap=pDf[pDf.apply(kwds['pAttributeColorMapFmask'],axis=1)]
(rows ,cols)=pDf.shape
(rows1st,cols)=pDfColorMap.shape
colorsCBValues=[]
logger.debug("{:s}Color 1st-PIPEs: {:d} von {:d}".format(logStr,rows1st,rows))
for xs,ys,vLine,tLine in zip(pDfColorMap[kwds['pWAYPXCors']],pDfColorMap[kwds['pWAYPYCors']],pDfColorMap[kwds['pAttribute']],pDfColorMap[kwds['pAttrLineSize']]):
#if vLine >= usageStartLineValue and vLine <= usageEndLineValue:
# colorLine=kwds['pAttributeColorMap'](normLine(vLine))
# value=vLine
#elif vLine > usageEndLineValue:
# colorLine=usageEndLineColor
# value=usageEndLineValue
#else:
# colorLine=usageStartLineColor
# value=usageStartLineValue
colorLine=kwds['pAttributeColorMap'](normLine(vLine))
colorsCBValues.append(vLine)
pcLines=kwds['pAx'].plot(xs,ys
,color=colorLine
,linewidth=kwds['pAttrLineSizeFactor']*math.fabs(tLine)#(vLine)
,ls=kwds['pAttributeLs']
,solid_capstyle='round'
,aa=True
,clip_on=kwds['pClip']
)
# PIPE-Color: plot the pipe start points so that a color scale (colorbar mappable) can be constructed
xScatter=[]
yScatter=[]
for xs,ys in zip(pDfColorMap[kwds['pWAYPXCors']],pDfColorMap[kwds['pWAYPYCors']]):
xScatter.append(xs[0])
yScatter.append(ys[0])
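# scatter sizes are areas in pts^2, therefore the line widths (in pts) are squared so that the start-point markers visually match the line widths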
s=kwds['pAttrLineSizeFactor']*pDfColorMap[kwds['pAttrLineSize']].apply(lambda x: math.fabs(x))
s=s.apply(lambda x: math.pow(x,2)) # https://stackoverflow.com/questions/14827650/pyplot-scatter-plot-marker-size
#pcN=kwds['pAx'].scatter(pDfColorMap['pXCor_i'],pDfColorMap['pYCor_i']
pcN=kwds['pAx'].scatter(xScatter,yScatter
,s=s
,linewidth=0 # the linewidth of the marker edges
# color scale
,cmap=kwds['pAttributeColorMap']
# color normalization
,norm=normLine
# values
,c=colorsCBValues
,edgecolors='none'
,clip_on=kwds['pClip']
)
# CB: Axes
divider = make_axes_locatable(kwds['pAx'])
cax = divider.append_axes('right',size="{:f}%".format(kwds['CBFraction']),pad=kwds['CBHpad'])
x0, y0, w, h = kwds['pAx'].get_position().bounds
#logger.debug("{:s}ohne Änderung?!: x0: {:6.2f} y0: {:6.2f} w: {:6.2f} h: {:6.2f}".format(logStr,x0,y0,w,h))
kwds['pAx'].set_aspect(1.) #!
x0, y0, w, h = kwds['pAx'].get_position().bounds
#logger.debug("{:s}ohne Änderung?!: x0: {:6.2f} y0: {:6.2f} w: {:6.2f} h: {:6.2f}".format(logStr,x0,y0,w,h))
# CB
cB=plt.gcf().colorbar(pcN, cax=cax, orientation='vertical',extend=CBPropExtend,spacing='proportional')
# Label
cB.set_label(kwds['CBLabel'])
# CB Ticks
if kwds['CBBinTicks'] != None:
cB.set_ticks(np.linspace(minLine,maxLine,kwds['CBBinTicks']))
ticks=cB.get_ticks()
try:
ticks=np.unique(np.append(ticks,[usageStartLineValue,usageEndLineValue]))
except:
pass
cB.set_ticks(ticks)
# CB Ticklabels
labels=cB.ax.get_yticklabels()
if kwds['pAttributeColorMapUsageStart'] > 0:
idx=np.where(ticks == usageStartLineValue)
labels[idx[0][0]].set_text(labels[idx[0][0]].get_text()+" v=")
if kwds['pAttributeColorMapUsageEnd'] < 1:
idx=np.where(ticks == usageEndLineValue)
labels[idx[0][0]].set_text(labels[idx[0][0]].get_text()+" ^=")
if kwds['pAttributeColorMapMax'] != None and maxLine<maxAttr:
labels[-1].set_text(labels[-1].get_text()+" >=")
if kwds['pAttributeColorMapMin'] != None and minLine>minAttr:
labels[0].set_text(labels[0].get_text()+" <=")
cB.ax.set_yticklabels(labels)
# customized yTicks --------------------
if kwds['CBTicks'] != None:
cB.set_ticks(kwds['CBTicks'])
if kwds['CBTickLabels'] != None:
labels=cB.ax.get_yticklabels()
if len(labels)==len(kwds['CBTickLabels']):
for label,labelNew in zip(labels,kwds['CBTickLabels']):
label.set_text(labelNew)
cB.ax.set_yticklabels(labels)
else:
logStrFinal="{:s}Error: Anz. CB Ticklabels Ist: {:d} != Anz. Ticklabeles Soll: {:d} ?!".format(logStr,len(labels),len(kwds['CBTickLabels']))
logger.error(logStrFinal)
raise RmError(logStrFinal)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
@classmethod
def pltHP(cls,pDf,**kwds):
"""
Plots a Hydraulic Profile.
Args:
DATA:
pDf: dataFrame
defining the HPLINES (xy-curves) Identification:
the different HPs in pDf are identified by the two cols
NAMECol: default: 'NAME'; set to None if NAMECol is not a criterion for identification ...
and
LayerCol: default: 'Layer'; set to None if LayerCol is not a criterion for identification ...
for each HP several lines (xy-curves) are plotted
... not a criterion ...:
if NAMECol is None, only LayerCol is used
if LayerCol is also None, all rows are treated as "the" HPLINE
defining the HPLINES (xy-curves) Geometry:
* xCol: col in pDf for x; example: 'x'
the col is the same for all HPs and all y
* edgeColSequence: cols to be used for start-node, end-node, next-node; default: ['NAME_i','NAME_k','nextNODE']
* 'NAME'_'Layer' (e.g. Nord-Süd_1), i.e. NAMECol_LayerCol, is used as an Index in hpLineGeoms
* hpLineGeoms - Example - = {
'V-Abzweig_1':{'masterHP':'AGFW Symposium DH_1','masterNode':'V-3107','matchType':'starts'}
}
- masterHP: reference cut (HP) the line is aligned to
- masterNode: must exist in masterHP
- masterNode: must also exist in the cut itself for matchType='matches'; with 'starts' the start is mapped, with 'ends' the end
defining the HPLINES (xy-curves) y-axis types (y-Axes):
* hpLines: list of cols in pDf for y; example: ['P']
each col in hpLines defines a hpLine (a xy-curve) to be plotted
for each identified HP all defined hpLines are plotted
defining the HPLINES (xy-curves) Layout:
# 'NAME'_'Layer'_'hpLineType' (e.g. Nord-Süd_1_P) is used as an Index in hpLineProps
* hpLineProps - Example - = {
'Nord-Süd_1_P':{'label':'VL','color':'red' ,'linestyle':'-','linewidth':3}
,'Nord-Süd_2_P':{'label':'RL','color':'blue','linestyle':'-','linewidth':3}
}
if 'NAME'_'Layer'_'hpLine' not in hpLineProps:
default props are used
if hpLineProps['NAME'_'Layer'_'hpLine'] == None:
HPLINE is not plotted
y-axis types (y-Axes):
* are determined from hpLines
* the column name - e.g. 'P' - is used as the identifier of the axis type
* the axes are created in the order in which they occur in hpLines
* identifiers like 'P','P_1',... are treated as belonging to the same axis type 'P' (i.e. the same y-axis); see the example right after this list
* P_1, P_2, ... can e.g. be P at different times or aggregates over time such as Min/Max
* yAxesDetectionPattern: regExp used to determine the axis types; default: '([\w ]+)(_)(\d+)$'
* yTwinedAxesPosDeltaHPStart: (usually negative) offset of the 1st y-axis from the plot area; default: -0.0125
* yTwinedAxesPosDeltaHP: (usually negative) additional offset of every further y-axis from the plot area; default: -0.05
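Example (illustration only, following the default yAxesDetectionPattern): with hpLines=['bBzg','bBzg_1','bBzg_2','Q'] the detected axis types are ['bBzg','bBzg','bBzg','Q'], i.e. two y-axes ('bBzg' and 'Q') are created.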
AXES:
pAx: Axes to be plotted on; if not specified: gca() is used
Return:
yAxes: dct with AXES; key=y-axis type
yLines: dct with Line2Ds; key=Index from hpLineProps
xNodeInfs: dct with node information; key=Index also used e.g. in hpLineGeoms
key: NAMECol_LayerCol
value: dct
key: node
value: dct
kwds['xCol']: x in HP
kwds['xCol']+'Plot': x in HP-Plot
pDfIdx: Index in pDf
>>> # -q -m 0 -s pltHP -y no -z no -w DHNetwork
>>> import pandas as pd
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> import matplotlib.gridspec as gridspec
>>> import math
>>> try:
... import Rm
... except ImportError:
... from PT3S import Rm
>>> # ---
>>> xm=xms['DHNetwork']
>>> mx=mxs['DHNetwork']
>>> xm.MxAdd(mx=mx,aggReq=['TIME','TMIN','TMAX'],timeReq=3*[mx.df.index[0]],timeReq2nd=3*[mx.df.index[-1]],viewList=['vAGSN'],ForceNoH5Update=True)
>>> vAGSN=xm.dataFrames['vAGSN']
>>> for PH,P,RHO,Z in zip(['PH','PH_1','PH_2'],['P','P_1','P_2'],['RHO','RHO_1','RHO_2'],['Z','Z_1','Z_2']):
... vAGSN[PH]=vAGSN.apply(lambda row: row[P]*math.pow(10.,5.)/(row[RHO]*9.81),axis=1)
... vAGSN[PH]=vAGSN[PH]+vAGSN[Z].astype('float64')
>>> for bBzg,P,RHO,Z in zip(['bBzg','bBzg_1','bBzg_2'],['P','P_1','P_2'],['RHO','RHO_1','RHO_2'],['Z','Z_1','Z_2']):
... vAGSN[bBzg]=vAGSN.apply(lambda row: row[RHO]*9.81/math.pow(10.,5.),axis=1)
... vAGSN[bBzg]=vAGSN[P]+vAGSN[Z].astype('float64')*vAGSN[bBzg]
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3q,dpi=Rm.dpiSize)
>>> gs = gridspec.GridSpec(3, 1)
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> yAxes,yLines,xNodeInfs=Rm.Rm.pltHP(vAGSN,pAx=axNfd
... ,hpLines=['bBzg','bBzg_1','bBzg_2','Q']
... ,hpLineProps={
... 'AGFW Symposium DH_1_bBzg':{'label':'VL','color':'red' ,'linestyle':'-','linewidth':3}
... ,'AGFW Symposium DH_2_bBzg':{'label':'RL','color':'blue','linestyle':'-','linewidth':3}
... ,'AGFW Symposium DH_2_bBzg_1':{'label':'RL min','color':'blue','linestyle':'-.','linewidth':1}
... ,'AGFW Symposium DH_1_bBzg_2':{'label':'VL max','color':'red' ,'linestyle':'-.','linewidth':1}
... ,'AGFW Symposium DH_1_bBzg_1':None
... ,'AGFW Symposium DH_2_bBzg_2':None
... ,'AGFW Symposium DH_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'AGFW Symposium DH_2_Q':{'label':'RL Q','color':'lightblue','linestyle':'--','linewidth':2}
... }
... )
>>> yAxes.keys()
dict_keys(['bBzg', 'Q'])
>>> yLines.keys()
dict_keys(['AGFW Symposium DH_1_bBzg', 'AGFW Symposium DH_1_bBzg_2', 'AGFW Symposium DH_1_Q', 'AGFW Symposium DH_2_bBzg', 'AGFW Symposium DH_2_bBzg_1', 'AGFW Symposium DH_2_Q'])
>>> txt=axNfd.set_title('HP')
>>> gs.tight_layout(fig)
>>> plt.show()
>>> ###
>>> Rcuts=[
... {'NAME':'R-Abzweig','nl':['R-3107','R-3427']}
... ,{'NAME':'R-EndsTest','nl':['R-HWSU','R-HKW3S']}
... ,{'NAME':'R-MatchesTest','nl':['R-HKW1','R-2104']}
... ]
>>> Vcuts=[
... {'NAME':'V-Abzweig','nl':['V-3107','V-3427']}
... ,{'NAME':'V-EndsTest','nl':['V-HWSU','V-HKW3S']}
... ,{'NAME':'V-MatchesTest','nl':['V-HKW1','V-2104']}
... ]
>>> fV=lambda row: True if row.KVR_i=='1' and row.KVR_k=='1' else False
>>> fR=lambda row: True if row.KVR_i=='2' and row.KVR_k=='2' else False
>>> for vcut,rcut in zip(Vcuts,Rcuts):
... ret=xm.vAGSN_Add(nl=vcut['nl'],weight='L',Layer=1,AKTIV=None,NAME=vcut['NAME'],fmask=fV)
... ret=xm.vAGSN_Add(nl=rcut['nl'],weight='L',Layer=2,AKTIV=None,NAME=rcut['NAME'],fmask=fR)
>>> # Schnitte erneut mit Ergebnissen versorgen, da Schnitte neu definiert wurden
>>> xm.MxAdd(mx=mx,ForceNoH5Update=True)
>>> vAGSN=xm.dataFrames['vAGSN']
>>> for PH,P,RHO,Z in zip(['PH'],['P'],['RHO'],['Z']):
... vAGSN[PH]=vAGSN.apply(lambda row: row[P]*math.pow(10.,5.)/(row[RHO]*9.81),axis=1)
... vAGSN[PH]=vAGSN[PH]+vAGSN[Z].astype('float64')
>>> for bBzg,P,RHO,Z in zip(['bBzg'],['P'],['RHO'],['Z']):
... vAGSN[bBzg]=vAGSN.apply(lambda row: row[RHO]*9.81/math.pow(10.,5.),axis=1)
... vAGSN[bBzg]=vAGSN[P]+vAGSN[Z].astype('float64')*vAGSN[bBzg]
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3q,dpi=Rm.dpiSize)
>>> gs = gridspec.GridSpec(3, 1)
>>> # --------------------------
>>> axNfd = fig.add_subplot(gs[0])
>>> yAxes,yLines,xNodeInfs=Rm.Rm.pltHP(vAGSN[vAGSN['NAME'].isin(['R-Abzweig','V-Abzweig','AGFW Symposium DH','R-EndsTest','V-EndsTest','R-MatchesTest','V-MatchesTest'])],pAx=axNfd
... ,hpLines=['bBzg','Q']
... ,hpLineGeoms={
... 'V-Abzweig_1':{'masterHP':'AGFW Symposium DH_1','masterNode':'V-3107','matchType':'starts'}
... ,'R-Abzweig_2':{'masterHP':'AGFW Symposium DH_2','masterNode':'R-3107','matchType':'starts'}
... ,'V-EndsTest_1':{'masterHP':'AGFW Symposium DH_1','masterNode':'V-HKW3S','matchType':'ends'}
... ,'R-EndsTest_2':{'masterHP':'AGFW Symposium DH_2','masterNode':'R-HKW3S','matchType':'ends'}
... ,'V-MatchesTest_1':{'masterHP':'AGFW Symposium DH_1','masterNode':'V-1312','matchType':'matches','offset':-500}
... ,'R-MatchesTest_2':{'masterHP':'AGFW Symposium DH_2','masterNode':'R-1312','matchType':'matches'}
... }
... ,hpLineProps={
... 'AGFW Symposium DH_1_bBzg':{'label':'VL','color':'red' ,'linestyle':'-','linewidth':3}
... ,'AGFW Symposium DH_2_bBzg':{'label':'RL','color':'blue','linestyle':'-','linewidth':3}
... ,'AGFW Symposium DH_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'AGFW Symposium DH_2_Q':{'label':'RL Q','color':'lightblue','linestyle':'--','linewidth':2}
... ,'V-Abzweig_1_bBzg':{'label':'VL','color':'tomato' ,'linestyle':'-','linewidth':3}
... ,'R-Abzweig_2_bBzg':{'label':'RL','color':'plum' ,'linestyle':'-','linewidth':3}
... ,'V-Abzweig_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'R-Abzweig_2_Q':{'label':'VL Q','color':'lightblue' ,'linestyle':'--','linewidth':2}
... ,'V-EndsTest_1_bBzg':{'label':'VL','color':'lightcoral' ,'linestyle':'-','linewidth':3}
... ,'R-EndsTest_2_bBzg':{'label':'RL','color':'aquamarine' ,'linestyle':'-','linewidth':3}
... ,'V-EndsTest_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'R-EndsTest_2_Q':{'label':'VL Q','color':'lightblue' ,'linestyle':'--','linewidth':2}
... #,'V-MatchesTest_1_bBzg':{'label':'VL','color':'orange' ,'linestyle':'-','linewidth':1}
... ,'R-MatchesTest_2_bBzg':{'label':'RL','color':'slateblue' ,'linestyle':'-','linewidth':1}
... ,'V-MatchesTest_1_Q':{'label':'VL Q','color':'magenta' ,'linestyle':'--','linewidth':2}
... ,'R-MatchesTest_2_Q':{'label':'VL Q','color':'lightblue' ,'linestyle':'--','linewidth':2}
... }
... )
>>> txt=axNfd.set_title('HP')
>>> gs.tight_layout(fig)
>>> plt.show()
>>> sorted(xNodeInfs.keys())
['AGFW Symposium DH_1', 'AGFW Symposium DH_2', 'R-Abzweig_2', 'R-EndsTest_2', 'R-MatchesTest_2', 'V-Abzweig_1', 'V-EndsTest_1', 'V-MatchesTest_1']
>>> xNodeInf=xNodeInfs['R-Abzweig_2']
>>> nl=Rcuts[0]['nl']
>>> nodeInfS=xNodeInf[nl[0]]
>>> nodeInfE=xNodeInf[nl[-1]]
>>> sorted(nodeInfS.keys())
['pDfIdx', 'x', 'xPlot']
>>> dxPlot=nodeInfE['xPlot']-nodeInfS['xPlot']
>>> dxHP=nodeInfE['x']-nodeInfS['x']
>>> dxPlot==dxHP
True
>>> nodeInfE['x']=round(nodeInfE['x'],3)
>>> nodeInfE['xPlot']=round(nodeInfE['xPlot'],3)
>>> {key:value for key,value in nodeInfE.items() if key not in ['pDfIdx']}
{'x': 3285.0, 'xPlot': 20312.428}
>>>
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# AXES
if 'pAx' not in keys:
kwds['pAx']=plt.gca()
if 'NAMECol' not in keys:
kwds['NAMECol']='NAME'
if 'LayerCol' not in keys:
kwds['LayerCol']='Layer'
if 'xCol' not in keys:
kwds['xCol']='x'
if 'hpLines' not in keys:
kwds['hpLines']=['P']
if 'hpLineProps' not in keys:
kwds['hpLineProps']={'NAME_1_P':{'label':'HP NAME Layer 1 P','color':'red','linestyle':'-','linewidth':3}}
if 'hpLineGeoms' not in keys:
kwds['hpLineGeoms']=None
if 'edgeColSequence' not in keys:
kwds['edgeColSequence']=['NAME_i','NAME_k','nextNODE']
if 'yTwinedAxesPosDeltaHPStart' not in keys:
# (usually negative) offset of the 1st y-axis from the plot area; default: -0.0125
kwds['yTwinedAxesPosDeltaHPStart']=-0.0125
if 'yTwinedAxesPosDeltaHP' not in keys:
# (usually negative) additional offset of every further y-axis from the plot area; default: -0.05
kwds['yTwinedAxesPosDeltaHP']=-0.05
if 'yAxesDetectionPattern' not in keys:
# regExp used to determine the axis types
kwds['yAxesDetectionPattern']='([\w ]+)(_)(\d+)$'
logger.debug("{:s}xCol: {:s}.".format(logStr,kwds['xCol']))
logger.debug("{:s}hpLines: {:s}.".format(logStr,str(kwds['hpLines'])))
logger.debug("{:s}hpLineProps: {:s}.".format(logStr,str(kwds['hpLineProps'])))
logger.debug("{:s}hpLineGeoms: {:s}.".format(logStr,str(kwds['hpLineGeoms'])))
logger.debug("{:s}edgeColSequence: {:s}.".format(logStr,str(kwds['edgeColSequence'])))
logger.debug("{:s}yTwinedAxesPosDeltaHPStart: {:s}.".format(logStr,str(kwds['yTwinedAxesPosDeltaHPStart'])))
logger.debug("{:s}yTwinedAxesPosDeltaHP: {:s}.".format(logStr,str(kwds['yTwinedAxesPosDeltaHP'])))
logger.debug("{:s}yAxesDetectionPattern: {:s}.".format(logStr,str(kwds['yAxesDetectionPattern'])))
# determine cuts (NAME) and layers (Layer)
if kwds['NAMECol'] != None and kwds['LayerCol'] != None:
hPs=pDf[[kwds['NAMECol'],kwds['LayerCol']]].drop_duplicates()
elif kwds['NAMECol'] != None:
hPs=pDf[[kwds['NAMECol']]].drop_duplicates()
hPs['Layer']=None
elif kwds['LayerCol'] != None:
hPs=pDf[[kwds['LayerCol']]].drop_duplicates()
hPs['NAME']=None
hPs=hPs[['NAME','Layer']]
else:
hPs=pd.DataFrame(data={'NAME':[None],'Layer':[None]})
#logger.debug("{:s}hPs: {:s}.".format(logStr,hPs.to_string()))
# hPs has 2 columns: NAME and Layer
# determine the y-axis types
hpLineTypesSequence=[col if re.search(kwds['yAxesDetectionPattern'],col)==None else re.search(kwds['yAxesDetectionPattern'],col).group(1) for col in kwds['hpLines']]
# construct the y-axes
yAxes={}
colType1st=hpLineTypesSequence[0]
axHP=kwds['pAx']
axHP.spines["left"].set_position(("axes",kwds['yTwinedAxesPosDeltaHPStart'] ))
axHP.set_ylabel(colType1st)
yAxes[colType1st]=axHP
logger.debug("{:s}colType: {:s} is attached to Axes pcAx .".format(logStr,colType1st))
for idx,colType in enumerate(hpLineTypesSequence[1:]):
if colType not in yAxes:
yPos=kwds['yTwinedAxesPosDeltaHPStart']+kwds['yTwinedAxesPosDeltaHP']*len(yAxes)
logger.debug("{:s}colType: {:s}: new Axes_ yPos: {:1.4f} ...".format(logStr,colType,yPos))
# additional y-axis
axHP = axHP.twinx()
axHP.spines["left"].set_position(("axes", yPos))
pltMakePatchSpinesInvisible(axHP)
axHP.spines['left'].set_visible(True)
axHP.yaxis.set_label_position('left')
axHP.yaxis.set_ticks_position('left')
axHP.set_ylabel(colType)
yAxes[colType]=axHP
yLines={}
xNodeInfs={}
for index,row in hPs.iterrows():
# over all cuts (NAME) and layers (Layer)
def getKeyBaseAndDf(dfSource,col1Name,col2Name,col1Value,col2Value):
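# helper: filters dfSource down to the rows of one HP (given by the NAME/Layer column values) and returns the key prefix ('NAME_Layer_') used to look up props/geoms plus the filtered dataFrame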
#logger.debug("{:s}getKeyBaseAndDf: dfSource: {:s} ...".format(logStr,dfSource[[col1Name,col2Name,'nextNODE']].to_string()))
# filter dfSource with respect to the given columns
if col1Name != None and col2Name != None:
dfFiltered=dfSource[
(dfSource[col1Name].astype(str)==str(col1Value))
&
(dfSource[col2Name].astype(str)==str(col2Value))
]
keyBase=str(row[col1Name])+'_'+str(col2Value)+'_'#+hpLine
logger.debug("{:s}getKeyBaseAndDf: Schnitt: {!s:s} Layer: {!s:s} ...".format(logStr,col1Value,col2Value))
elif col1Name != None:
dfFiltered=dfSource[
(dfSource[col1Name].astype(str)==str(col1Value))
]
keyBase=str(col1Value)+'_'#+hpLine
logger.debug("{:s}getKeyBaseAndDf: Schnitt: {!s:s} ...".format(logStr,col1Value))
elif col2Name != None:
dfFiltered=dfSource[
(dfSource[col2Name].astype(str)==str(col2Value))
]
keyBase=str(col2Value)+'_'#+hpLine
logger.debug("{:s}getKeyBaseAndDf: Layer: {!s:s} ...".format(logStr,col2Value))
else:
dfFiltered=dfSource
keyBase=''
#logger.debug("{:s}getKeyBaseAndDf: dfFiltered: {:s} ...".format(logStr,dfFiltered[[col1Name,col2Name,'nextNODE']].to_string()))
return keyBase, dfFiltered
# filter pDf to hPpDf for this cut+layer
keyBase,hPpDf=getKeyBaseAndDf(pDf
,kwds['NAMECol'] # Spaltenname 1
,kwds['LayerCol'] # Spaltenname 2
,row[kwds['NAMECol']] # Spaltenwert 1
,row[kwds['LayerCol']] # Spaltenwert 2
)
if hPpDf.empty:
logger.info("{:s}Schnitt: {!s:s} Layer: {!s:s}: NICHT in pDf ?! ...".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']]))
continue
xOffset=0
xOffsetStatic=0
xFactorStatic=1
if kwds['hpLineGeoms'] != None:
if keyBase.rstrip('_') in kwds['hpLineGeoms'].keys():
hpLineGeom=kwds['hpLineGeoms'][keyBase.rstrip('_')]
logger.debug("{:s}Line: {:s}: hpLineGeom: {:s} ...".format(logStr,keyBase.rstrip('_'),str(hpLineGeom)))
if 'offset' in hpLineGeom.keys():
xOffsetStatic=hpLineGeom['offset']
if 'factor' in hpLineGeom.keys():
xFactorStatic=hpLineGeom['factor']
if 'masterHP' in hpLineGeom.keys():
masterHP=hpLineGeom['masterHP']
name=masterHP.split('_')[0]
layer=masterHP.replace(name,'')
layer=layer.replace('_','')
keyBaseMaster,hPpDfMaster=getKeyBaseAndDf(pDf
,kwds['NAMECol'] # Spaltenname 1
,kwds['LayerCol'] # Spaltenname 2
,name # Spaltenwert 1
,layer # Spaltenwert 2
)
if 'masterNode' in hpLineGeom.keys():
masterNode=hpLineGeom['masterNode']
def fGetMatchingRows(row,cols,matchNode):
for col in cols:
if row[col]==matchNode:
return True
return False
# search the anchor x based on the columns ...
if 'matchAnchorCols' in hpLineGeom.keys():
matchAnchorCols=hpLineGeom['matchAnchorCols']
else:
matchAnchorCols=[kwds['edgeColSequence'][2]]
# anchor node: candidate rows ...
hPpDfMatched=hPpDf[hPpDf.apply(fGetMatchingRows,axis=1,cols=matchAnchorCols,matchNode=masterNode)]
hPpDfMasterMatched=hPpDfMaster[hPpDfMaster.apply(fGetMatchingRows,axis=1,cols=matchAnchorCols,matchNode=masterNode)]
if 'matchType' in hpLineGeom.keys():
matchType=hpLineGeom['matchType']
else:
matchType='starts'
# search the anchor x in the master -------------------------
if 'matchAnchor' in hpLineGeom.keys():
matchAnchor=hpLineGeom['matchAnchor']
else:
matchAnchor='max'
if hPpDfMasterMatched.empty:
logger.info("{:s}Schnitt: {!s:s}_{!s:s} Master: {!s:s}_{!s:s} masterNode: {!s:s}: in Master in den cols {!s:s} NICHT gefunden. Loesung: xMasterOffset=0.".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']],name,layer,masterNode,matchAnchorCols))
xMasterOffset=0
else:
if matchAnchor=='min':
hPpDfMasterMatched=hPpDfMaster.loc[hPpDfMasterMatched[kwds['xCol']].idxmin(),:]
else: # matchAnchor=='max'
hPpDfMasterMatched=hPpDfMaster.loc[hPpDfMasterMatched.iloc[::-1][kwds['xCol']].idxmax(),:]
xMasterOffset=hPpDfMasterMatched[kwds['xCol']]
logger.debug("{:s}Schnitt: {!s:s}_{!s:s} Master: {!s:s}_{!s:s} masterNode: {!s:s} xMasterOffset={:9.3f} ...".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']],name,layer,masterNode,xMasterOffset))
# search the anchor x in the HP itself --------------------------
if 'matchAnchorChild' in hpLineGeom.keys():
matchAnchorChild=hpLineGeom['matchAnchorChild']
else:
matchAnchorChild='max'
if hPpDfMatched.empty:
logStrTmp="{:s}Schnitt: {!s:s}_{!s:s} Master: {!s:s}_{!s:s} masterNode: {!s:s}: in Child in den cols {!s:s} NICHT gefunden.".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']],name,layer,masterNode,matchAnchorCols)
if matchType=='matches':
logger.info(logStrTmp+' Loesung: xChildOffset=0.')
else:
if matchType=='ends':
logger.debug(logStrTmp+' Child endet nicht mit masterNode. xChildOffset=0')
else:
logger.debug(logStrTmp+' Child startet evtl. mit masterNode. xChildOffset=0')
xChildOffset=0
else:
if matchAnchorChild=='min':
hPpDfMatched=hPpDf.loc[hPpDfMatched[kwds['xCol']].idxmin(),:]
else: # matchAnchorChild=='max'
hPpDfMatched=hPpDf.loc[hPpDfMatched.iloc[::-1][kwds['xCol']].idxmax(),:]
xChildOffset=hPpDfMatched[kwds['xCol']]
logger.debug("{:s}Schnitt: {!s:s}_{!s:s} Master: {!s:s}_{!s:s} masterNode: {!s:s} xChildOffset={:9.3f} ...".format(logStr,row[kwds['NAMECol']],row[kwds['LayerCol']],name,layer,masterNode,xChildOffset))
# compute xOffset
if matchType=='starts':
xOffset=xMasterOffset-hPpDf[kwds['xCol']].min() # der Beginn
# matchNode is the start
if hPpDf[kwds['edgeColSequence'][2]].iloc[0] == hPpDf[kwds['edgeColSequence'][1]].iloc[0]:
# nextNode = k
matchNode=hPpDf[kwds['edgeColSequence'][0]].iloc[0]
else:
# nextNode = i
matchNode=hPpDf[kwds['edgeColSequence'][1]].iloc[0]
elif matchType=='ends':
xOffset=xMasterOffset-hPpDf[kwds['xCol']].max() # das Ende
# matchNode is the end
if hPpDf[kwds['edgeColSequence'][2]].iloc[-1] == hPpDf[kwds['edgeColSequence'][1]].iloc[-1]:
# nextNode = k
matchNode=hPpDf[kwds['edgeColSequence'][1]].iloc[-1]
else:
# nextNode = i
matchNode=hPpDf[kwds['edgeColSequence'][0]].iloc[-1]
else: # 'matches'
# via the node
matchNode=masterNode
xOffset=xMasterOffset-xChildOffset
# xOffset has been computed
# masterNode and matchNode are known
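# matchType semantics: 'starts' aligns the start of the child HP (its min x) with the master anchor, 'ends' aligns its end (max x), 'matches' aligns the anchor node's x-position in child and master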
logger.debug("{:s}hPpDfMatched: {:s} ...".format(logStr,hPpDfMatched[[kwds['NAMECol'],kwds['LayerCol'],'nextNODE',kwds['xCol'],'NAME_i','NAME_k','OBJTYPE','IptIdx']].to_string()))
logger.debug("{:s}hPpDfMasterMatched: {:s} ...".format(logStr,hPpDfMasterMatched[[kwds['NAMECol'],kwds['LayerCol'],'nextNODE',kwds['xCol'],'NAME_i','NAME_k','OBJTYPE','IptIdx']].to_string()))
else:
logger.debug("{:s}Line: {:s}: keine Geometrieeigenschaften definiert.".format(logStr,keyBase.rstrip('_')))
# determine xNodeInfs
nodeList=hPpDf[kwds['edgeColSequence'][2]].copy()
if hPpDf[kwds['edgeColSequence'][2]].iloc[0] == hPpDf[kwds['edgeColSequence'][1]].iloc[0]:
# nextNode = k
# 1st node: i
nodeList.iloc[0]=hPpDf[kwds['edgeColSequence'][0]].iloc[0]
else:
# nextNode = i
# 1st node: k
nodeList.iloc[0]=hPpDf[kwds['edgeColSequence'][1]].iloc[0]
nodeList=nodeList.unique()
xNodeInf={}
for idx,node in enumerate(nodeList):
nodeInf={}
if idx==0:
nodeInf[kwds['xCol']]=0
nodeInf['pDfIdx']=hPpDf.index.values[0]
else:
nodeInf[kwds['xCol']]=hPpDf[hPpDf[kwds['edgeColSequence'][2]]==node][kwds['xCol']].max()
nodeInf['pDfIdx']=hPpDf[hPpDf[kwds['edgeColSequence'][2]]==node][kwds['xCol']].idxmax()
nodeInf[kwds['xCol']+'Plot']=nodeInf[kwds['xCol']]*xFactorStatic+xOffset+xOffsetStatic
xNodeInf[node]=nodeInf
xNodeInfs[keyBase.rstrip('_')]=xNodeInf
# over all columns (i.e. the y-values to plot)
for idx,hpLine in enumerate(kwds['hpLines']):
key=keyBase+hpLine
logger.debug("{:s}Line: {:s} ...".format(logStr,key))
if key in kwds['hpLineProps']:
hpLineProp=kwds['hpLineProps'][key]
if hpLineProp == None:
logger.debug("{:s}Line: {:s} ...: kein Plot.".format(logStr,key))
continue # kein Plot
label=key
color='black'
linestyle='-'
linewidth=3
hpLineType=hpLineTypesSequence[idx]
axHP=yAxes[hpLineType]
lines=axHP.plot(hPpDf[kwds['xCol']]*xFactorStatic+xOffset+xOffsetStatic,hPpDf[hpLine],label=label,color=color,linestyle=linestyle,linewidth=linewidth)
yLines[label]=lines[0]
if key in kwds['hpLineProps']:
hpLineProp=kwds['hpLineProps'][key]
logger.debug("{:s}Line: {:s}: hpLineProp: {:s}.".format(logStr,key,str(hpLineProp)))
for prop,value in hpLineProp.items():
plt.setp(yLines[label],"{:s}".format(prop),value)
else:
logger.debug("{:s}Line: {:s}: keine Eigenschaften definiert.".format(logStr,key))
continue
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return yAxes,yLines,xNodeInfs
@classmethod
def pltTC(cls,pDf,tcLines,**kwds):
"""
Plots a Time Curve Diagram.
Args:
DATA:
pDf: dataFrame
index: times
cols: values (with mx.df colnames)
tcLines: dct
defining the Curves and their Layout:
Key:
OBJTYPE~NAME1~NAME2~ATTRTYPE is used as a key, i.e. OBJTYPE_PK is not part of the key
* tcLines - Example - = {
'KNOT~NAME1~~PH':{'label':'VL','color':'red' ,'linestyle':'-','linewidth':3}
}
Definition of the y-axis types (y-Axes):
* are determined from the distinct ATTRTYPEs in tcLines
* the ATTRTYPE - e.g. 'PH' - is used as the identifier of the axis type
* the axes are created in the order in which they occur in tcLines
* yTwinedAxesPosDeltaHPStart: (usually negative) offset of the 1st y-axis from the plot area; default: -0.0125
* yTwinedAxesPosDeltaHP: (usually negative) additional offset of every further y-axis from the plot area; default: -0.05
Attributes:
* all valid Line2D properties
* plus the following special keys:
* forceYType
* offset
* factor
* timeStart
* timeEnd
* legendInfosFmt
* label
AXES:
pAx: Axes to be plotted on; if not specified: gca() is used
x-axis formatting:
majLocator - examples:
mdates.MinuteLocator(interval=5)
mdates.MinuteLocator(byminute=[0,5,10,15,20,25,30,35,40,45,50,55])
majFormatter - examples:
mdates.DateFormatter('%d.%m.%y: %H:%M')
xTicksLabelsOff: if True, no x-axis tick labels
Return:
yAxes: dct with AXES; key=y-axis type
yLines: dct with Line2Ds; key=Index from tcLines
vLines: dct with Line2Ds; key=Index from vLines
yLinesLegendLabels: dct with Legendlabels; key=Index from tcLines
>>> # -q -m 0 -s pltTC -y no -z no -w DHNetwork
>>> import pandas as pd
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> import matplotlib.gridspec as gridspec
>>> import matplotlib.dates as mdates
>>> import math
>>> try:
... import Rm
... except ImportError:
... from PT3S import Rm
>>> # ---
>>> # xm=xms['DHNetwork']
>>> mx=mxs['DHNetwork']
>>> sir3sID=mx.getSir3sIDFromSir3sIDoPK('ALLG~~~LINEPACKGEOM') # 'ALLG~~~5151766074450398225~LINEPACKGEOM'
>>> # mx.df[sir3sID].describe()
>>> # mx.df[sir3sID].iloc[0]
>>> plt.close()
>>> fig=plt.figure(figsize=Rm.DINA3q,dpi=Rm.dpiSize)
>>> gs = gridspec.GridSpec(3, 1)
>>> # --------------------------
>>> axTC = fig.add_subplot(gs[0])
>>> yAxes,yLines,vLines,yLinesLegendLabels=Rm.Rm.pltTC(mx.df
... ,tcLines={
... 'ALLG~~~LINEPACKRATE':{'label':'Linepackrate','color':'red' ,'linestyle':'-','linewidth':3,'drawstyle':'steps','factor':10}
... ,'ALLG~~~LINEPACKGEOM':{'label':'Linepackgeometrie','color':'b' ,'linestyle':'-','linewidth':3,'offset':-mx.df[sir3sID].iloc[0]
... ,'timeStart':mx.df.index[0]+pd.Timedelta('10 Minutes')
... ,'timeEnd':mx.df.index[-1]-pd.Timedelta('10 Minutes')}
... ,'RSLW~wNA~~XA':{'label':'RSLW~wNA~~XA','color':'lime','forceYType':'N'}
... ,'PUMP~R-A-SS~R-A-DS~N':{'label':'PUMP~R-A-SS~R-A-DS~N','color':'aquamarine','linestyle':'--','legendInfosFmt':'{:4.0f}'}
... }
... ,pAx=axTC
... ,vLines={
... 'a vLine Label':{'time': mx.df.index[0] + pd.Timedelta('10 Minutes')
... ,'color':'dimgrey'
... ,'linestyle':'--'
... ,'linewidth':5.}
... }
... ,majLocator=mdates.MinuteLocator(byminute=[0,5,10,15,20,25,30,35,40,45,50,55])
... ,majFormatter=mdates.DateFormatter('%d.%m.%y: %H:%M')
... #,xTicksLabelsOff=True
... )
>>> sorted(yAxes.keys())
['LINEPACKGEOM', 'LINEPACKRATE', 'N']
>>> sorted(yLines.keys())
['ALLG~~~LINEPACKGEOM', 'ALLG~~~LINEPACKRATE', 'PUMP~R-A-SS~R-A-DS~N', 'RSLW~wNA~~XA']
>>> sorted(vLines.keys())
['a vLine Label']
>>> gs.tight_layout(fig)
>>> plt.show()
>>>
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keys = sorted(kwds.keys())
# AXES
if 'pAx' not in keys:
kwds['pAx']=plt.gca()
if 'yTwinedAxesPosDeltaHPStart' not in keys:
# (usually negative) offset of the 1st y-axis from the plot area; default: -0.0125
kwds['yTwinedAxesPosDeltaHPStart']=-0.0125
if 'yTwinedAxesPosDeltaHP' not in keys:
# (usually negative) additional offset of every further y-axis from the plot area; default: -0.05
kwds['yTwinedAxesPosDeltaHP']=-0.05
logger.debug("{:s}tcLines: {:s}.".format(logStr,str(tcLines)))
logger.debug("{:s}yTwinedAxesPosDeltaHPStart: {:s}.".format(logStr,str(kwds['yTwinedAxesPosDeltaHPStart'])))
logger.debug("{:s}yTwinedAxesPosDeltaHP: {:s}.".format(logStr,str(kwds['yTwinedAxesPosDeltaHP'])))
if 'lLoc' not in keys:
kwds['lLoc']='best'
if 'lFramealpha' not in keys:
kwds['lFramealpha']=matplotlib.rcParams["legend.framealpha"]
if 'lFacecolor' not in keys:
kwds['lFacecolor']='white'
if 'lOff' not in keys:
kwds['lOff']=False
yAxes=yLines=vLines=None
# for every pDf column determine the key without OBJTYPE_PK == the key used in tcLines
colFromTcKey={}
for col in pDf.columns.tolist():
if pd.isna(col):
continue
try:
colNew=Mx.getSir3sIDoPKFromSir3sID(col)
colFromTcKey[colNew]=col # merken welche Originalspalte zu dem tcLines Schluessel gehoert
logger.debug("{:s}Zu Spalte ohne Schlüssel: {:s} gehört Spalte: {:s} in pDf.".format(logStr,colNew,col))
except:
logger.debug("{:s}keine Zuordnung gefunden (z.B. kein Mx.getSir3sIDoPKFromSir3sID-match) fuer pDf-Spalte: {:s}. Spaltenname(n) keine vollständigen SIR 3S Schluessel (mehr)?!".format(logStr,col))
# determine the y-axis types
yTypesSequence=[]
for key,props in tcLines.items():
try:
mo=re.match(Mx.reSir3sIDoPKcompiled,key)
yType=mo.group('ATTRTYPE')
if 'forceYType' in props.keys():
yType=props['forceYType']
if yType not in yTypesSequence:
yTypesSequence.append(yType)
logger.debug("{:s}neuer y-Achsentyp: {:s}.".format(logStr,yType))
except:
logger.debug("{:s}kein Achsentyp ermittelt (z.B. kein Mx.reSir3sIDoPKcompiled-match) fuer: {:s}. tcLine(s) Schluessel kein SIR 3S Schluessel oPK?!".format(logStr,key))
# construct the y-axes
yAxes={}
colType1st=yTypesSequence[0]
axTC=kwds['pAx']
axTC.spines["left"].set_position(("axes",kwds['yTwinedAxesPosDeltaHPStart'] ))
axTC.set_ylabel(colType1st)
yAxes[colType1st]=axTC
logger.debug("{:s}colType: {:s}: is attached to 1st Axes.".format(logStr,colType1st))
for idx,colType in enumerate(yTypesSequence[1:]):
# additional y-axis
yPos=kwds['yTwinedAxesPosDeltaHPStart']+kwds['yTwinedAxesPosDeltaHP']*len(yAxes)
logger.debug("{:s}colType: {:s}: is attached to a new Axes: yPos: {:1.4f} ...".format(logStr,colType,yPos))
axTC = axTC.twinx()
axTC.spines["left"].set_position(("axes", yPos))
pltMakePatchSpinesInvisible(axTC)
axTC.spines['left'].set_visible(True)
axTC.yaxis.set_label_position('left')
axTC.yaxis.set_ticks_position('left')
axTC.set_ylabel(colType)
yAxes[colType]=axTC
# over all defined curves
# determine the max. label length before the infos
labels=[]
infos=[]
for key,props in tcLines.items():
label=key
if 'label' in props:
label=props['label']
labels.append(label)
if 'legendInfosFmt' in props:
legendInfosFmt=props['legendInfosFmt']
else:
legendInfosFmt='{:6.2f}'
if key not in colFromTcKey.keys():
logger.debug("{:s}Line: {:s}: es konnte keine Spalte in pDf ermittelt werden. Spaltenname(n) kein SIR 3S Schluessel?! Kein Plot.".format(logStr,key))
continue
else:
col=colFromTcKey[key]
logger.debug("{:s}Line: {:s}: Spalte in pDf: {:s}.".format(logStr,key,col))
if 'timeStart' in props:
timeStart=props['timeStart']
else:
timeStart=pDf.index[0]
if 'timeEnd' in props:
timeEnd=props['timeEnd']
else:
timeEnd=pDf.index[-1]
plotDf=pDf.loc[timeStart:timeEnd,:]
infos.append(legendInfosFmt.format(plotDf[col].min()))
infos.append(legendInfosFmt.format(plotDf[col].max()))
labelsLength=[len(label) for label in labels]
labelsLengthMax=max(labelsLength)
infosLength=[len(info) for info in infos]
infosLengthMax=max(infosLength)
# draw
yLines={}
yLinesLegendLabels={}
# over all defined curves
for key,props in tcLines.items():
if key not in colFromTcKey.keys():
logger.debug("{:s}Line: {:s}: es konnte keine Spalte in pDf ermittelt werden. Spaltenname(n) kein SIR 3S Schluessel?! Kein Plot.".format(logStr,key))
continue
else:
col=colFromTcKey[key]
mo=re.match(Mx.reSir3sIDoPKcompiled,key)
yType=mo.group('ATTRTYPE')
if 'forceYType' in props.keys():
yType=props['forceYType']
axTC=yAxes[yType]
logger.debug("{:s}Line: {:s} on Axes {:s} ...".format(logStr,key,yType))
label=key
color='black'
linestyle='-'
linewidth=3
if 'offset' in props:
offset=props['offset']
else:
offset=0.
if 'factor' in props:
factor=props['factor']
else:
factor=1.
if 'timeStart' in props:
timeStart=props['timeStart']
else:
timeStart=pDf.index[0]
if 'timeEnd' in props:
timeEnd=props['timeEnd']
else:
timeEnd=pDf.index[-1]
if 'legendInfosFmt' in props:
legendInfosFmt=props['legendInfosFmt']
else:
legendInfosFmt='{:6.2f}'
plotDf=pDf.loc[timeStart:timeEnd,:]
lines=axTC.plot(plotDf.index.values,plotDf[col]*factor+offset,label=label,color=color,linestyle=linestyle,linewidth=linewidth)
yLines[key]=lines[0]
if 'label' in props:
label=props['label']
else:
label=label
legendLabelFormat="Anf.: {:s} Ende: {:s} Min: {:s} Max: {:s}"#.format(*4*[legendInfosFmt])
legendLabelFormat="{:s} "+legendLabelFormat
legendInfos=[plotDf[col].iloc[0],plotDf[col].iloc[-1],plotDf[col].min(),plotDf[col].max()]
legendInfos=[factor*legendInfo+offset for legendInfo in legendInfos]
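# the legend label combines the (padded) curve label with the first, last, min and max value of the plotted (factor/offset-scaled) data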
legendLabel=legendLabelFormat.format(label.ljust(labelsLengthMax,' '),
*["{:s}".format(legendInfosFmt).format(legendInfo).rjust(infosLengthMax,' ') for legendInfo in legendInfos]
)
yLinesLegendLabels[key]=legendLabel
logger.debug("{:s}legendLabel: {:s}.".format(logStr,legendLabel))
for prop,value in props.items():
if prop not in ['forceYType','offset','factor','timeStart','timeEnd','legendInfosFmt']:
plt.setp(yLines[key],"{:s}".format(prop),value)
# x-axis
# over all axes
for key,ax in yAxes.items():
ax.set_xlim(pDf.index[0],pDf.index[-1])
if 'majLocator' in kwds.keys():
ax.xaxis.set_major_locator(kwds['majLocator'])
if 'majFormatter' in kwds.keys():
ax.xaxis.set_major_formatter(kwds['majFormatter'])
plt.setp(ax.xaxis.get_majorticklabels(),rotation='vertical',ha='center')
ax.xaxis.grid()
# switch the labelling off
if 'xTicksLabelsOff' in kwds.keys(): # xTicksOff
if kwds['xTicksLabelsOff']:
logger.debug("{:s}Achse: {:s}: x-Achse Labels aus.".format(logStr,key))
#for tic in ax.xaxis.get_major_ticks():
# tic.tick1On = tic.tick2On = False
ax.set_xticklabels([])
# vLines
# over all defined vLines
vLines={}
if 'vLines' in kwds.keys():
for key,props in kwds['vLines'].items():
if 'time' in props.keys():
logger.debug("{:s}vLine: {:s} ....".format(logStr,key))
vLine=ax.axvline(x=props['time'], ymin=0, ymax=1, label=key)
vLines[key]=vLine
for prop,value in props.items():
if prop not in ['time']:
plt.setp(vLine,"{:s}".format(prop),value)
else:
logger.debug("{:s}vLine: {:s}: time nicht definiert.".format(logStr,key))
# Legend
import matplotlib.font_manager as font_manager
font = font_manager.FontProperties(family='monospace'
#weight='bold',
#style='normal',
#size=16
)
if not kwds['lOff']:
l=kwds['pAx'].legend(
tuple([yLines[yline] for yline in yLines])
,
tuple([yLinesLegendLabels[yLine] for yLine in yLinesLegendLabels])
,loc=kwds['lLoc']
,framealpha=kwds['lFramealpha']
,facecolor=kwds['lFacecolor']
,prop=font
)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return yAxes,yLines,vLines,yLinesLegendLabels
def __init__(self,xm=None,mx=None):
"""
Args:
xm: Xm.Xm Object
mx: Mx.Mx Object
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
self.xm=xm
self.mx=mx
try:
vNRCV_Mx1=self.xm.dataFrames['vNRCV_Mx1'] # d.h. Sachdaten bereits annotiert mit MX1-Wissen
except:
logger.debug("{:s}{:s} not in {:s}. Sachdaten mit MX1-Wissen zu annotieren wird nachgeholt ...".format(logStr,'vNRCV_Mx1','dataFrames'))
self.xm.MxSync(mx=self.mx)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def pltNetDHUS(self,**kwds):
"""Plot: Net: DistrictHeatingUnderSupply.
Args (optional):
TIMEs (as TIMEDELTA relative to scenario start):
* timeDeltaToRef: Reference Scenariotime (for MeasureInRefPerc-Calculations) (default: pd.to_timedelta('0 seconds'))
* timeDeltaToT: Scenariotime (default: pd.to_timedelta('0 seconds'))
FWVB
* pFWVBFilterFunction: Filterfunction to be applied to FWVB to determine the FWVB to be plotted
* default: lambda df: (df.CONT_ID.astype(int).isin([1001])) & (df.W0LFK>0)
* CONT_IDisIn: [1001]
                        * to avoid drawing FWVB that belong to blocks (unlikely that such exist)
                    * W0LFK>0:
                        * to avoid trying to draw FWVB with a target load of 0 (pFWVBAttribute default is 'W0LFK')
FWVB Attribute (Size, z-Order) - from vFWVB
* pFWVBAttribute: columnName (default: 'W0LFK')
* the column must be able to be converted to a float
* the conversion is done before FilterFunction
* see ApplyFunction and NaNValue for conversion details:
* pFWVBAttributeApplyFunction: Function to be applied to column pFWVBAttribute
* default: lambda x: pd.to_numeric(x,errors='coerce')
* pFWVBAttributeApplyFunctionNaNValue: Value for NaN-Values produced by pFWVBAttributeApplyFunction if any
* default: 0
                * .fillna(pFWVBAttributeApplyFunctionNaNValue).astype(float) is called after ApplyFunction
            * pFWVBAttributeAsc: z-Order (default: False, i.e. small ones are drawn on top of large ones)
* pFWVBAttributeRefSize: scatter Sy-Area in pts^2 of for RefSizeValue (default: 10**2)
* corresponding RefSizeValue is Attribute.std() or Attribute.mean() if Attribute.std() is < 1
FWVB (plot only large (small, medium) FWVB ...)
* quantil_pFWVBAttributeHigh <= (default: 1.)
* quantil_pFWVBAttributeLow >= (default: .0)
* default: all FWVB are plotted
* note that Attribute >0 is a precondition
FWVB Measure (Color) - from mx
* pFWVBMeasure (default: 'FWVB~*~*~*~W')
* float() must be possible
            * pFWVBMeasureInRefPerc (default: True, i.e. Measure is processed as a percentage of T relative to Ref)
                * 0-1
                * if refValue is 0 then the refPerc result is set to 1
* pFWVBMeasureAlpha/Colormap/Clip
* 3Classes
* pFWVBMeasure3Classes (default: False)
* False:
                        * Measure is not displayed in 3 classes
                        * MCategory is nevertheless assigned according to FixedLimitsHigh/Low
                * CatTexts (used when 3Classes is set to True)
                    * for CBLegend (3Classes) as additional labels on the right
                    * as texts for the MCategory column in the returned pFWVB
                    * pMCatTopText
                    * pMCatMidText
                    * pMCatBotText
                * CatAttribs (used when 3Classes is set to True)
                    * for the node representation
                    * pMCatTopAlpha/Color/Clip
                    * pMCatMidAlpha/Colormap/Clip
                    * pMCatBotAlpha/Color/Clip
                * CBFixedLimits
                    * pFWVBMeasureCBFixedLimits (default: False, i.e. color scale follows the existing min./max. value)
                        * will be set to True when 3Classes is set to True
                        * so that the middle color scale "obeys" the class limits
* pFWVBMeasureCBFixedLimitLow (default: .10)
* pFWVBMeasureCBFixedLimitHigh (default: .95)
CB
* CBFraction: fraction of original axes to use for colorbar (default: 0.05)
* CBHpad: fraction of original axes between colorbar and new image axes (default: 0.0275)
* CBLabelPad (default: -50)
* CBTicklabelsHPad (default: 0.)
* CBAspect: ratio of long to short dimension (default: 10.)
* CBShrink: fraction by which to shrink the colorbar (default: .3)
            * CBAnchorHorizontal: horizontal anchor (foot) of the colorbar in plot-% (default: 0.)
            * CBAnchorVertical: vertical anchor (foot) of the colorbar in plot-% (default: 0.2)
CBLegend (3Classes) - Parameterization of the representative Symbols
* CBLe3cTopVPad (default: 1+1*1/4)
* CBLe3cMidVPad (default: .5)
* CBLe3cBotVPad (default: 0-1*1/4)
* "1" is the height of the Colorbar
* the VPads (the vertical Sy-Positions) are defined in cax.transAxes Coordinates
* cax is the Colorbar Axes
* CBLe3cSySize=10**2 (Sy-Area in pts^2)
* CBLe3cSyType='o'
ROHR
* pROHRFilterFunction: Filterfunction to be applied to PIPEs to determine the PIPEs to be plotted
* default: lambda df: (df.KVR.astype(int).isin([2])) & (df.CONT_ID.astype(int).isin([1001])) & (df.DI.astype(float)>0)
* KVRisIn: [2]
* 1: supply-line
* 2: return-line
* CONT_IDisIn: [1001]
                        * to avoid drawing pipes that belong to blocks (whose coordinates do not match the coordinates of pipes in the view block)
                    * DI>0:
                        * to avoid trying to draw pipes with an inner diameter of 0 (pROHRAttribute default is 'DI')
ROHR (PIPE-Line: Size and Color, z-Order) - from vROHR
* pROHRAttribute: columnName (default: 'DI')
* the column must be able to be converted to a float
* the conversion is done before FilterFunction
* see ApplyFunction and NaNValue for conversion details:
* pROHRAttributeApplyFunction: Function to be applied to column pROHRAttribute
* default: lambda x: pd.to_numeric(x,errors='coerce')
* pROHRAttributeApplyFunctionNaNValue: Value for NaN-Values produced by pROHRAttributeApplyFunction if any
* default: 0
                * .fillna(pROHRAttributeApplyFunctionNaNValue).astype(float) is called after ApplyFunction
            * pROHRAttributeAsc: z-Order (default: False, i.e. small ones are drawn on top of large ones)
* pROHRAttributeLs (default: '-')
* pROHRAttributeRefSize: plot linewidth in pts for RefSizeValue (default: 1.0)
* pROHRAttributeSizeMin (default: None): if set: use pROHRAttributeSizeMin-Value as Attribute for LineSize if Attribute < pROHRAttributeSizeMin
* corresponding RefSizeValue is Attribute.std() or Attribute.mean() if Attribute.std() is < 1
* pROHRAttributeColorMap (default: plt.cm.binary)
* pROHRAttributeColorMapUsageStart (default: 1./3; Wertebereich: [0,1])
                * color scale based on the existing min./max. value
                * the color scale is only used from UsageStart on
                * i.e. values that would get a "smaller" color receive the color of UsageStart
ROHR (plot only large (small, medium) pipes ...)
* quantil_pROHRAttributeHigh <= (default: 1.)
* quantil_pROHRAttributeLow >= (default: .75)
* default: only the largest 25% are plotted
* note that Attribute >0 is a precondition
ROHR (PIPE-Marker: Size and Color) - from mx
* pROHRMeasure columnName (default: 'ROHR~*~*~*~QMAV')
* pROHRMeasureApplyFunction: Function to be applied to column pROHRMeasure (default: lambda x: math.fabs(x))
* pROHRMeasureMarker (default: '.')
* pROHRMeasureRefSize: plot markersize for RefSizeValue in pts (default: 1.0)
* pROHRMeasureSizeMin (default: None): if set: use pROHRMeasureSizeMin-Value as Measure for MarkerSize if Measure < pROHRMeasureSizeMin
* corresponding RefSizeValue is Measure.std() or Measure.mean() if Measure.std() is < 1
* if pROHRMeasureRefSize is None: plot markersize will be plot linewidth
* pROHRMeasureColorMap (default: plt.cm.cool)
* pROHRMeasureColorMapUsageStart (default: 0.; Wertebereich: [0,1])
                * color scale based on the existing min./max. value
                * the color scale is only used from UsageStart on
                * i.e. values that would get a "smaller" color receive the color of UsageStart
NRCVs - NumeRiCal Values to be displayed
* pFIGNrcv: List of Sir3sID RegExps to be displayed (i.e. ['KNOT~PKON-Knoten~\S*~\S+~QM']) default: None
the 1st Match is used if a RegExp matches more than 1 Channel
further Examples for RegExps (and corresponding Texts):
* WBLZ~WärmeblnzGes~\S*~\S+~WES (Generation)
* WBLZ~WärmeblnzGes~\S*~\S+~WVB (Load)
* WBLZ~WärmeblnzGes~\S*~\S+~WVERL (Loss)
WBLZ~[\S ]+~\S*~\S+~\S+: Example for a RegExp matching all Channels with OBJTYPE WBLZ
* pFIGNrcvTxt: corresponding (same length required!) List of Texts (i.e. ['Kontrolle DH']) default: None
* pFIGNrcvFmt (i.e. '{:12s}: {:8.2f} {:6s}')
* Text (from pFIGNrcvTxt)
* Value
* UNIT (determined from Channel-Data)
* pFIGNrcvPercFmt (i.e. ' {:6.1f}%')
* ValueInRefPercent
* if refValue==0: 100%
* pFIGNrcvXStart (.5 default)
* pFIGNrcvYStart (.5 default)
Category - User Heat Balances to be displayed
* pFWVBGCategory: List of Heat Balances to be displayed (i.e. ['BLNZ1u5u7']) default: None
* pFWVBGCategoryUnit: Unit of all these Balances (default: '[kW]'])
* pFWVBGCategoryXStart (.1 default)
* pFWVBGCategoryYStart (.9 default)
* pFWVBGCategoryCatFmt (i.e. '{:12s}: {:6.1f} {:4s}')
* Category NAME
* Category Load
* pFWVBGCategoryUnit
* pFWVBGCategoryPercFmt (i.e. ' {:6.1f}%')
* Last Ist/Soll
* pFWVBGCategory3cFmt (i.e. ' {:5d}/{:5d}/{:5d}')
* NOfTops
* NOfMids
* NOfBots
VICs - VeryImportantCustomers whose Values to be displayed
* pVICsDf: DataFrame with VeryImportantCustomers (Text & Specification)
columns expected:
* Kundenname (i.e. 'VIC1') - Text
* Knotenname (i.e. 'V-K007') - Specification by Supply-Node
i.e.: pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']})
* pVICsPercFmt (i.e. '{:12s}: {:6.1f}%')
* Kundenname
* Load in Percent to Reference
* pVICsFmt (i.e. '{:12s}: {:6.1f} {:6s}')
* Kundenname
* Load
* pFWVBGCategoryUnit
* pVICsXStart (.5 default)
* pVICsYStart (.1 default)
Figure:
* pltTitle: title [not suptitle] (default: 'pltNetFigAx')
* figFrameon: figure frame (background): displayed or invisible (default: True)
* figEdgecolor: edge color of the Figure rectangle (default: 'black')
* figFacecolor: face color of the Figure rectangle (default: 'white')
Returns:
pFWVB
* columns changed (compared to vFWVB):
                * pFWVBAttribute (due to e.g. pFWVBAttributeApplyFunction and .astype(float))
            * columns added (compared to vFWVB):
                * Measure (in % of Ref when pFWVBMeasureInRefPerc=True)
                * MeasureRef (value of Measure in the reference state)
                * MeasureOrig (value of Measure)
                * MCategory: str (categorization of Measure using the FixedLimitHigh/Low values):
* TopText or
* MidText or
* BotText
* GCategory: list (non-empty only if req. GCategories are a subset of the available Categories and object belongs to a req. Category)
* VIC (filled with Kundenname from pVICsDf)
* rows (compared to vFWVB):
                * pFWVB contains the same objects as vFWVB
                * but: the plotted objects may be only a subset (due to e.g. pFWVBFilterFunction)
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
keysDefined=['CBAnchorHorizontal', 'CBAnchorVertical', 'CBAspect', 'CBFraction', 'CBHpad', 'CBLabelPad'
,'CBLe3cBotVPad', 'CBLe3cMidVPad', 'CBLe3cSySize', 'CBLe3cSyType', 'CBLe3cTopVPad'
,'CBShrink', 'CBTicklabelsHPad'
,'figEdgecolor', 'figFacecolor', 'figFrameon'
,'pFIGNrcv','pFIGNrcvFmt', 'pFIGNrcvPercFmt','pFIGNrcvTxt', 'pFIGNrcvXStart', 'pFIGNrcvYStart'
,'pFWVBFilterFunction'
,'pFWVBAttribute'
,'pFWVBAttributeApplyFunction','pFWVBAttributeApplyFunctionNaNValue'
,'pFWVBAttributeAsc'
,'pFWVBAttributeRefSize'
,'pFWVBGCategory', 'pFWVBGCategoryUnit','pFWVBGCategory3cFmt','pFWVBGCategoryCatFmt', 'pFWVBGCategoryPercFmt', 'pFWVBGCategoryXStart', 'pFWVBGCategoryYStart'
,'pFWVBMeasure', 'pFWVBMeasure3Classes', 'pFWVBMeasureAlpha', 'pFWVBMeasureCBFixedLimitHigh', 'pFWVBMeasureCBFixedLimitLow', 'pFWVBMeasureCBFixedLimits', 'pFWVBMeasureClip', 'pFWVBMeasureColorMap', 'pFWVBMeasureInRefPerc'
,'pMCatBotAlpha', 'pMCatBotClip', 'pMCatBotColor', 'pMCatBotText', 'pMCatMidAlpha', 'pMCatMidClip', 'pMCatMidColorMap', 'pMCatMidText', 'pMCatTopAlpha', 'pMCatTopClip', 'pMCatTopColor', 'pMCatTopText'
,'pROHRFilterFunction'
,'pROHRAttribute'
,'pROHRAttributeApplyFunction','pROHRAttributeApplyFunctionNaNValue'
,'pROHRAttributeAsc', 'pROHRAttributeColorMap', 'pROHRAttributeColorMapUsageStart', 'pROHRAttributeLs', 'pROHRAttributeRefSize','pROHRAttributeSizeMin'
,'pROHRMeasure','pROHRMeasureApplyFunction'
,'pROHRMeasureColorMap', 'pROHRMeasureColorMapUsageStart', 'pROHRMeasureMarker', 'pROHRMeasureRefSize','pROHRMeasureSizeMin'
,'pVICsDf','pVICsPercFmt','pVICsFmt','pVICsXStart', 'pVICsYStart'
,'pltTitle'
,'quantil_pFWVBAttributeHigh', 'quantil_pFWVBAttributeLow'
,'quantil_pROHRAttributeHigh', 'quantil_pROHRAttributeLow'
,'timeDeltaToRef', 'timeDeltaToT']
keys=sorted(kwds.keys())
for key in keys:
if key in keysDefined:
value=kwds[key]
logger.debug("{0:s}kwd {1:s}: {2:s}".format(logStr,key,str(value)))
else:
logger.warning("{0:s}kwd {1:s} NOT defined!".format(logStr,key))
del kwds[key]
# TIMEs
if 'timeDeltaToRef' not in keys:
kwds['timeDeltaToRef']= | pd.to_timedelta('0 seconds') | pandas.to_timedelta |
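The row above ends where pltNetDHUS starts filling in missing keyword defaults with pd.to_timedelta. Below is a minimal, self-contained sketch of that validate-then-default pattern; apply_defaults and the key names are illustrative stand-ins, not part of the original class.
import logging
import pandas as pd
logger = logging.getLogger(__name__)
def apply_defaults(kwds, defined_keys, defaults):
    # drop keyword arguments that are not in the whitelist, then fill in defaults
    for key in sorted(kwds):
        if key in defined_keys:
            logger.debug("kwd %s: %s", key, kwds[key])
        else:
            logger.warning("kwd %s NOT defined!", key)
            del kwds[key]
    for key, value in defaults.items():
        kwds.setdefault(key, value)
    return kwds
kwds = apply_defaults(
    {'timeDeltaToT': pd.to_timedelta('300 seconds'), 'bogus': 1},
    defined_keys={'timeDeltaToRef', 'timeDeltaToT'},
    defaults={'timeDeltaToRef': pd.to_timedelta('0 seconds')},
)
print(kwds)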
import warnings
import numpy as np
from pandas import Categorical, DataFrame, Series
from .pandas_vb_common import tm
class Construction:
params = ["str", "string"]
param_names = ["dtype"]
def setup(self, dtype):
self.series_arr = tm.rands_array(nchars=10, size=10 ** 5)
self.frame_arr = self.series_arr.reshape((50_000, 2)).copy()
# GH37371. Testing construction of string series/frames from ExtensionArrays
self.series_cat_arr = Categorical(self.series_arr)
self.frame_cat_arr = Categorical(self.frame_arr)
def time_series_construction(self, dtype):
Series(self.series_arr, dtype=dtype)
def peakmem_series_construction(self, dtype):
Series(self.series_arr, dtype=dtype)
def time_frame_construction(self, dtype):
DataFrame(self.frame_arr, dtype=dtype)
def peakmem_frame_construction(self, dtype):
DataFrame(self.frame_arr, dtype=dtype)
def time_cat_series_construction(self, dtype):
Series(self.series_cat_arr, dtype=dtype)
def peakmem_cat_series_construction(self, dtype):
Series(self.series_cat_arr, dtype=dtype)
def time_cat_frame_construction(self, dtype):
DataFrame(self.frame_cat_arr, dtype=dtype)
def peakmem_cat_frame_construction(self, dtype):
DataFrame(self.frame_cat_arr, dtype=dtype)
class Methods:
def setup(self):
self.s = Series(tm.makeStringIndex(10 ** 5))
def time_center(self):
self.s.str.center(100)
def time_count(self):
self.s.str.count("A")
def time_endswith(self):
self.s.str.endswith("A")
def time_extract(self):
with warnings.catch_warnings(record=True):
self.s.str.extract("(\\w*)A(\\w*)")
def time_findall(self):
self.s.str.findall("[A-Z]+")
def time_find(self):
self.s.str.find("[A-Z]+")
def time_rfind(self):
self.s.str.rfind("[A-Z]+")
def time_get(self):
self.s.str.get(0)
def time_len(self):
self.s.str.len()
def time_join(self):
self.s.str.join(" ")
def time_match(self):
self.s.str.match("A")
def time_normalize(self):
self.s.str.normalize("NFC")
def time_pad(self):
self.s.str.pad(100, side="both")
def time_partition(self):
self.s.str.partition("A")
def time_rpartition(self):
self.s.str.rpartition("A")
def time_replace(self):
self.s.str.replace("A", "\x01\x01")
def time_translate(self):
self.s.str.translate({"A": "\x01\x01"})
def time_slice(self):
self.s.str.slice(5, 15, 2)
def time_startswith(self):
self.s.str.startswith("A")
def time_strip(self):
self.s.str.strip("A")
def time_rstrip(self):
self.s.str.rstrip("A")
def time_lstrip(self):
self.s.str.lstrip("A")
def time_title(self):
self.s.str.title()
def time_upper(self):
self.s.str.upper()
def time_lower(self):
self.s.str.lower()
def time_wrap(self):
self.s.str.wrap(10)
def time_zfill(self):
self.s.str.zfill(10)
class Repeat:
params = ["int", "array"]
param_names = ["repeats"]
def setup(self, repeats):
N = 10 ** 5
self.s = Series(tm.makeStringIndex(N))
repeat = {"int": 1, "array": np.random.randint(1, 3, N)}
self.values = repeat[repeats]
def time_repeat(self, repeats):
self.s.str.repeat(self.values)
class Cat:
params = ([0, 3], [None, ","], [None, "-"], [0.0, 0.001, 0.15])
param_names = ["other_cols", "sep", "na_rep", "na_frac"]
def setup(self, other_cols, sep, na_rep, na_frac):
N = 10 ** 5
mask_gen = lambda: np.random.choice([True, False], N, p=[1 - na_frac, na_frac])
self.s = Series(tm.makeStringIndex(N)).where(mask_gen())
if other_cols == 0:
# str.cat self-concatenates only for others=None
self.others = None
else:
self.others = DataFrame(
{i: tm.makeStringIndex(N).where(mask_gen()) for i in range(other_cols)}
)
def time_cat(self, other_cols, sep, na_rep, na_frac):
# before the concatenation (one caller + other_cols columns), the total
# expected fraction of rows containing any NaN is:
# reduce(lambda t, _: t + (1 - t) * na_frac, range(other_cols + 1), 0)
# for other_cols=3 and na_frac=0.15, this works out to ~48%
self.s.str.cat(others=self.others, sep=sep, na_rep=na_rep)
class Contains:
params = [True, False]
param_names = ["regex"]
def setup(self, regex):
self.s = Series(tm.makeStringIndex(10 ** 5))
def time_contains(self, regex):
self.s.str.contains("A", regex=regex)
class Split:
params = [True, False]
param_names = ["expand"]
def setup(self, expand):
self.s = Series(tm.makeStringIndex(10 ** 5)).str.join("--")
def time_split(self, expand):
self.s.str.split("--", expand=expand)
def time_rsplit(self, expand):
self.s.str.rsplit("--", expand=expand)
class Dummies:
def setup(self):
self.s = Series(tm.makeStringIndex(10 ** 5)).str.join("|")
def time_get_dummies(self):
self.s.str.get_dummies("|")
class Encode:
def setup(self):
self.ser = Series(tm.makeUnicodeIndex())
def time_encode_decode(self):
self.ser.str.encode("utf-8").str.decode("utf-8")
class Slice:
def setup(self):
self.s = | Series(["abcdefg", np.nan] * 500000) | pandas.Series |
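The classes above follow the asv benchmark convention: asv discovers each class, calls setup() and times the time_*/peakmem_* methods. A rough, hand-run sketch of that pattern, using a small hard-coded Series instead of tm.makeStringIndex (a pandas-internal helper):
import timeit
from pandas import Series
class StrStartswithDemo:
    def setup(self):
        # far fewer strings than the 10**5 used above, so this runs instantly
        self.s = Series(["abcdefg"] * 10_000)
    def time_startswith(self):
        self.s.str.startswith("A")
demo = StrStartswithDemo()
demo.setup()
print(timeit.timeit(demo.time_startswith, number=10))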
import numpy as np
import time
import math
import pandas as pd
import gym
import gym.spaces
from gym.utils import seeding
from collections import deque
from utils import convert_datetime, get_commisions, resample_ohlc
from datetime import datetime, timedelta
from enum import Enum
from utils.influxdb.connection import HistoricalConnection
import settings
import warnings
import pytz
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from utils import random_date
from gym_trading.render import BitcoinTradingGraph
warnings.simplefilter(action='ignore', category=FutureWarning)
class Actions(Enum):
Buy = 0
Sell = 1
Nothing = 2
Close = 3
Moveup_Stoplimit = 4
Movedown_Stoplimit = 5
class MarketPosition(Enum):
Long = 0
Short = 1
Flat = 2
class State:
def __init__(self, symbol, start_date, end_date, connection: HistoricalConnection, info_st, info_lt=None, enable_tick_by_tick_step=False, transposed=True):
assert isinstance(symbol, str)
assert isinstance(start_date, str)
assert isinstance(end_date, str)
self.symbol = symbol
self.start_date = convert_datetime(start_date)
self.end_date = convert_datetime(end_date)
# Convert start_date and end_date to timezone
tz_new_york = pytz.timezone(settings.DEFAULT_TIME_ZONE)
tz_utc = pytz.timezone('UTC')
print("{} ****** {}".format(start_date, end_date))
self.start_date = tz_new_york.localize(self.start_date).astimezone(tz_utc)
self.end_date = tz_new_york.localize(self.end_date).astimezone(tz_utc)
self.transposed = transposed
self.info_st = info_st
self.info_lt = info_lt
long_term_elements = 0
if info_lt is not None:
long_term_elements = len(info_lt)
self.info = deque(maxlen=long_term_elements + 1)
# Add Short Term Info
self.info.append(self.info_st)
# Add Long Term Info
if info_lt is not None:
for i in info_lt:
self.info.append(i)
# Tick by Tick Timeserie Enable
self.enable_tick_by_tick_step = enable_tick_by_tick_step
self.switch_tick_by_tick_step = False
# InfluxDB Connection
self.connection = connection
# The market have a screenshot with the essential information for the agent
self.state = deque(maxlen=len(self.info))
self.columns = ['open', 'high', 'low', 'close', 'volume', 'cons']
self.columns_state = ['open', 'high', 'low', 'close', 'volume']
# BarsArray
self.bars_array = deque(maxlen=len(self.info))
self._bars_array = {}
self.iterator = deque(maxlen=len(self.info))
self.current_bars = deque(maxlen=len(self.info))
for i, info in enumerate(self.info):
self.current_bars.append(info['backperiods'])
# Validate if resample is needed
in_list = False
for ts in settings.INFLUXDB_DEFAULT_SERIES_PER_INSTRUMENT:
if ts['type'] == info['type'] and ts['frame'] == info['frame']:
in_list = True
print("Find {} {}".format(ts['type'], ts['frame ']))
type_query = info['type']
frame_query = info['frame']
offset_query = 0
# ++++++++++++++++++++++++ InfluxDB get data ++++++++++++++++++++++
# Get the start consecutive start and end
offset_query = -self.current_bars[-1]
get_cons = self.connection.get_first_cons(measurement='ohlc', start=self.start_date.strftime("%Y-%m-%d %H:%M:%S"),
end=self.end_date.strftime("%Y-%m-%d %H:%M:%S"), symbol=symbol,
type=type_query, frame=frame_query)
cons_values = list(get_cons)[0]
start_cons = cons_values['first']
end_cons = cons_values['last']
# Validate if exist enough backperiods
if start_cons - info['backperiods'] < 0:
assert IndexError("No se puede manejar indices negativos.")
# Get the data series with the start_cons and end_cons
data = self.connection.get_data(start_cons=start_cons, end_cons=end_cons, symbol=symbol, type=type_query, frame=frame_query, offset=offset_query)
self.bars_array.append(data)
# El cons de la fila con fecha de inicio de la simulacion
id_start_date = self.bars_array[-1].loc[start_date:]['cons'][0]
# Truncar bars_array con los datos necesarios solamente, de forma que la fecha de inicio corresponda al numero de backperiods utilizando el iloc
mask2 = (self.bars_array[-1]['cons'] > (id_start_date - self.current_bars[-1]))
self.bars_array[-1] = self.bars_array[-1].loc[mask2, self.columns_state]
# Get State --- Option 1
self.state.append(self.bars_array[-1].iloc[:self.current_bars[-1], :][self.columns_state])
self.iterator.append(0)
# Used for syncronize timeframes
base_bars = [{'type': 'Min', 'frame': 1}]
for i in base_bars:
# +++++++++++++++++++++++++++ InfluxDB Get Data ++++++++++++++++++++++++++++++++
# Get the start consecutive start and end
cons_values = list(self.connection.get_first_cons(measurement='ohlc', start=self.start_date.strftime("%Y-%m-%d %H:%M:%S"), end=self.end_date.strftime("%Y-%m-%d %H:%M:%S"), symbol=symbol, type=i['type'], frame=i['frame']))[0]
start_cons = cons_values['first']
end_cons = cons_values['last']
self._bars_array[i['type']] = self.connection.get_data(start_cons=start_cons, end_cons=end_cons, symbol=symbol, type=i['type'], frame=i['frame'])
# Market variables
self.market_position = MarketPosition.Flat
self.account_balance = settings.ACCOUNT_MONEY
self.max_account_balance = self.account_balance
self.current_price = 0.0
self.current_date = 0.0
self.price_entry = 0.0
self.stoploss = 0.0
self.stoploss_ticks = 12
self.stoploss_position = False
self.risk_init = 0
# Current Trade
self.current_trade = None
# Trades taken by the agent
self.data = {
'date_entry': [],
'date_exit': [],
'symbol': [],
'market_position': [],
'contracts': [],
'price_entry': [],
'price_exit': [],
'commisions': [],
'gross_profit': [],
'profit': []
}
# Valor del portafolio
self.net_worths = [self.account_balance]
self.trades = | pd.DataFrame(data=self.data) | pandas.DataFrame |
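The environment keeps its trade ledger as a dict of parallel lists and materializes a DataFrame from it. A tiny stand-alone sketch of that bookkeeping pattern, using only a subset of the original columns and made-up values:
import pandas as pd
data = {'date_entry': [], 'date_exit': [], 'symbol': [], 'profit': []}
def record_trade(date_entry, date_exit, symbol, profit):
    # append one value per column so the lists stay aligned row-wise
    data['date_entry'].append(date_entry)
    data['date_exit'].append(date_exit)
    data['symbol'].append(symbol)
    data['profit'].append(profit)
record_trade('2021-01-04 09:30', '2021-01-04 11:00', 'ES', 125.0)
record_trade('2021-01-05 10:15', '2021-01-05 10:45', 'ES', -50.0)
trades = pd.DataFrame(data=data)
print(trades)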
import os
import random
import time
import pandas as pd
from pinyinlib import Pinyin
# reset the random seed
random.seed(time.time())
# read the original table
df = pd.read_table(os.path.dirname(__file__)+'/input.txt',sep='!',encoding='gbk',low_memory=False)
# generate the serial number list
serial = []
for i in range(0, df.shape[0]):
    serial.append(str(i+1).zfill(6))
dfo1 = pd.DataFrame({'序号': serial})
# generate the account name list
name = df.loc[:, '客户信息平台姓名'].values.tolist()
dfo2 = pd.DataFrame({'户名': name})
# generate the ID type list
dfo3 = pd.DataFrame({'证件类型': ([10]*df.shape[0])})
# generate the ID number list
id_number = list(map(str, df.loc[:, '客户信息平台证件号码'].values.tolist()))
dfo4 = pd.DataFrame({'证件号码': id_number})
# 生成英文名
english_name = []
for i in name:
english_name.append(Pinyin().get_pinyin_name(i, '', '', 'upper'))
dfo5 = pd.DataFrame({'英文名': english_name})
# 生成性别列表
gender = []
for i in id_number:
# 如果是身份证号码,则生成性别
if(len(i)==18):
# 男:1,女:2,未知:0
if(int(i[-2]) % 2 == 0):
gender.append(2)
else:
gender.append(1)
else:
gender.append(0)
dfo6 = pd.DataFrame({'性别': gender})
# 生成国籍/地区列表
dfo7 = pd.DataFrame({'国籍/地区': (['CN']*df.shape[0])})
# 生成固定电话列表,注意生成的固定电话不带区号键“-”
landline = []
for i in list(map(str, df.loc[:, '固话'].values.tolist())):
# 固定电话自带的情况
if(i != 'nan'):
# 将可能的区号键和小数去掉
i = i.replace('-', '').replace('.0', '')
# 如果长度为七,则插入区号0728,如果长度少一位或者多一位,则随机生成数字自动补全,分为带区号和不带区号两种情况,随机补全的电话尾部有“~”号
if(len(i) == 7):
landline.append('0728'+i)
elif(len(i) == 6 or len(i) == 10):
landline.append(i+str(random.randint(0, 9))+'~')
elif(len(i) == 8 or len(i) == 12):
landline.append(i[0:-1])
else:
landline.append(i)
# 固定电话非自带的情况
else:
# 随机生成,经查证,目前只有城镇5开头和乡镇4开头的区分,故第一位只生成4或5,第二位生成非0,其余五位随机生成,随机生成的电话尾部有“~”号
landline.append('0728'+str(random.randint(4, 5)) +
str(random.randint(1, 9))+str(random.randint(0, 99999)).zfill(5)+'~')
dfo8 = pd.DataFrame({'固定电话': landline})
# 生成手机号列表
phone_number=[]
for i in list(map(str, df.loc[:, '手机号'].values.tolist())):
# 手机号自带的情况
if(i!='nan'):
phone_number.append(i)
# 手机号不自带的情况
else:
phone_number.append('')
dfo9 = pd.DataFrame({'手机号': phone_number})
# 生成通讯地址列表
dfo10 = pd.DataFrame({'通讯地址': df.loc[:, '联系地址'].values.tolist()})
# 生成职业列表
career_list = [
'49900~',
'50100~',
'50101~',
'50102~',
'50199~',
'50200~',
'50201~',
'50202~',
'50203~',
'50204~',
'50299~',
'50300~',
'50301~',
'50302~',
'50303~',
'50399~',
'50400~',
'50401~',
'50402~',
'50403~',
'50499~',
'50500~',
'50501~',
'50502~',
'50503~',
'50504~',
'50505~',
'50506~',
'50599~',
'59900~',
'60100~',
'60101~'
]
career = []
for i in range(0, df.shape[0]):
# 从职业列表中随机生成职业代码,随机生成出来的号码尾部有“~”号
career.append(random.sample(career_list, 1)[0])
dfo11 = pd.DataFrame({'职业': career})
# 生成是否居住满一年列表
lived_full_year = []
for i in id_number:
# 如果是身份证号码,则生成居住是否满一年
if(len(i)==18):
if(i[0:6] == '429006' or i[0:6] == '422428'):
lived_full_year.append('1')
else:
lived_full_year.append('0')
# 否则,生成“1”
else:
lived_full_year.append('1')
dfo12 = pd.DataFrame({'是否居住满一年': lived_full_year})
# 生成发证机关所在地代码列表
locate_code = []
for i in id_number:
# 如果是身份证号码,则生成发证机关所在地代码
if(len(i)==18):
if(i[0:6] == '422428'):
locate_code.append('429006')
else:
locate_code.append(i[0:6])
# 否则,生成“429006”
else:
locate_code.append('429006')
dfo13 = pd.DataFrame({'发证机关所在地代码': locate_code})
# 生成证件有效截止日期列表
birthdate = []
for i in id_number:
# 如果是身份证号码,则生成出生日期
if(len(i)==18):
birthdate.append(int(i[6:14]))
else:
birthdate.append(0)
turncate_date = []
n = -1
for i in list(map(str, df.loc[:, '证件到期日'].values.tolist())):
n = n+1
# 证件有效截止日期自带的情况
if(i != 'nan'):
i = i.replace('.0', '').replace(
'年', '').replace('月', '').replace('日', '')
# 没有过期
if(int(i) >= 20200610):
turncate_date.append(i)
else:
# 生日为空或90后,截止日期往后延10年,判断闰年情况
if(birthdate[n]==0 or birthdate[n] >= 19900610):
if(i[4:8] == '0229'):
years = int(i[0:4])+10
if (not((years % 4 == 0 and years % 100 != 0) or (years % 400 == 0))):
turncate_date.append(str(years)+'0301')
continue
turncate_date.append(str(int(i)+100000))
# 72-90年,截止日期往后延20年,判断闰年情况
elif(birthdate[n] > 19720610 and birthdate[n] < 19900610):
if(i[4:8] == '0229'):
years = int(i[0:4])+20
if (not((years % 4 == 0 and years % 100 != 0) or (years % 400 == 0))):
turncate_date.append(str(years)+'0301')
continue
turncate_date.append(str(int(i)+200000))
# 72年前,截止日期设为永久
else:
turncate_date.append('20991231')
# 证件有效截止日期非自带的情况
else:
# 生日为空,则来自户口本信息,留空
if(birthdate[n]==0):
turncate_date.append('')
# 未满16岁的截止日期改为16岁生日当天,判断闰年情况
elif(birthdate[n] > 20041231):
if(str(birthdate[n])[4:8] == '0229'):
years = int(str(birthdate[n])[0:4])+16
if (not((years % 4 == 0 and years % 100 != 0) or (years % 400 == 0))):
turncate_date.append(str(years)+'0301')
continue
turncate_date.append(str(birthdate[n]+160000))
# 已满16岁但是是72年前的,随机生成2021年到2038年的到期日期,随机生成的日期尾部有“~”号
elif(birthdate[n] > 19730610 and birthdate[n] <= 20041231):
random_time = random.randint(time.mktime((
2021, 1, 1, 0, 0, 0, 0, 0, 0)), time.mktime((2038, 12, 31, 23, 59, 59, 0, 0, 0)))
turncate_date.append(time.strftime(
'%Y%m%d', time.localtime(random_time))+'~')
else:
turncate_date.append('20991231')
dfo14 = | pd.DataFrame({'证件有效截止日期': turncate_date}) | pandas.DataFrame |
"""Helpers for building Dash applications."""
import csv
import json
import sqlite3
import time
from contextlib import ContextDecorator
from datetime import datetime
from pathlib import Path
import pandas as pd
from cerberus import Validator
# ----------------------------------------------------------------------------------------------------------------------
# For Working with Data
def enable_verbose_pandas(max_columns=None, max_rows=None, max_seq_items=None):
"""Update global pandas configuration for printed dataframes.
Args:
max_columns: the number of max columns. Default is None (to show all)
max_rows: the number of max rows. Default is None (to show all)
max_seq_items: the number of max sequence items. Default is None (to show all) # TODO: what does this set?
"""
# Enable all columns to be displayed at once (or tweak to set a new limit)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
| pd.set_option('display.max_colwidth', max_columns) | pandas.set_option |
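enable_verbose_pandas above changes the display options globally with pd.set_option. A hedged alternative sketch: pandas also offers a scoped option_context manager that restores the previous settings when the block exits, which can be preferable in library code.
import numpy as np
import pandas as pd
df = pd.DataFrame(np.random.randn(3, 30))
with pd.option_context('display.max_columns', None, 'display.width', None):
    # inside the block all 30 columns are printed; outside, the defaults apply again
    print(df)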
# 1.题出问题
# 什么样的人在泰坦尼克号中更容易存活?
# 2.理解数据
# 2.1 采集数据
# https://www.kaggle.com/c/titanic
# 2.2 导入数据
# 忽略警告提示
import warnings
warnings.filterwarnings('ignore')
# 导入处理数据包
import numpy as np
import pandas as pd
# 导入数据
# 训练数据集
train = pd.read_csv("./train.csv")
# 测试数据集
test = | pd.read_csv("./test.csv") | pandas.read_csv |
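A hedged sketch of the first checks that typically follow these two reads (class balance in train, missing values in the combined frame). The tiny frames below only mimic a couple of the real Kaggle Titanic columns so the snippet runs without the CSV files.
import pandas as pd
train_demo = pd.DataFrame({'Survived': [0, 1, 1, 0],
                           'Sex': ['male', 'female', 'female', 'male'],
                           'Age': [22.0, 38.0, None, 35.0]})
test_demo = pd.DataFrame({'Sex': ['male', 'female'], 'Age': [34.0, None]})
print(train_demo.shape, test_demo.shape)
print(train_demo['Survived'].value_counts(normalize=True))   # survival rate
full = pd.concat([train_demo, test_demo], ignore_index=True, sort=False)
print(full.isnull().sum().sort_values(ascending=False))      # missing values per column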
# -*- coding: utf-8 -*-
#
# Written by <NAME>, https://github.com/MatthieuSarkis
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from datetime import datetime
import gym
import itertools
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from pathlib import Path
import pickle
import seaborn as sns
sns.set_theme()
from sklearn.preprocessing import StandardScaler
from typing import List, Union
def create_directory_tree(mode: str,
experimental: bool,
checkpoint_directory: str):
date: str = datetime.now().strftime("%Y.%m.%d.%H.%M.%S")
if experimental:
checkpoint_directory = os.path.join("saved_outputs", "experimental")
else:
checkpoint_directory = os.path.join("saved_outputs", date) if mode=='train' else checkpoint_directory
# Create various subdirectories
checkpoint_directory_networks = os.path.join(checkpoint_directory, "networks")
checkpoint_directory_logs = os.path.join(checkpoint_directory, "logs")
checkpoint_directory_plots = os.path.join(checkpoint_directory, "plots")
Path(checkpoint_directory_networks).mkdir(parents=True, exist_ok=True)
Path(checkpoint_directory_logs).mkdir(parents=True, exist_ok=True)
Path(checkpoint_directory_plots).mkdir(parents=True, exist_ok=True)
# write the checkpoint directory name in a file for quick access when testing
if mode == 'train':
with open(os.path.join(checkpoint_directory, "checkpoint_directory.txt"), "w") as f:
f.write(checkpoint_directory)
return checkpoint_directory
def plot_reward(x: List[int],
rewards: np.ndarray,
figure_file: str,
mode: str,
bins: int = 20,
) -> None:
"""Helper function to plot the reward history in train mode and the reward probability distribution in test mode.
Args:
x (np.array): a linspace, horizontal axis of the plot
rewards (np.array): reward history
figure_file (str): filepath of the figure file
mode (str): train or test to decide what type of plot to generate
bins (int): number of bins for the histogram, by default set to the square root of the number of samples
Returns:
no value
"""
running_average = np.zeros(len(rewards))
for i in range(len(running_average)):
running_average[i] = np.mean(rewards[max(0, i-50): i+1])
if mode == 'train':
plt.plot(x, rewards, linestyle='-', color='blue', label='reward')
plt.plot(x, running_average, linestyle='--', color='green', label='running average 50')
plt.legend()
plt.title('Reward as a function of the epoch/episode')
elif mode == 'test':
plt.hist(rewards, bins=bins)
plt.title('Reward distribution')
plt.savefig(figure_file)
def plot_portfolio_value(x: List[int],
values: np.ndarray,
figure_file: str,
) -> None:
plt.plot(x, values.T, linestyle='-', linewidth=0.5)
plt.xlim((0, len(x)))
plt.title('Portfolio value')
plt.savefig(figure_file)
def instanciate_scaler(env: gym.Env,
mode: str,
checkpoint_directory: str) -> StandardScaler:
"""Instanciate and either fit or load parameter depending on mode.
In train mode, the agent behaves randomly in order to store typical observations in the environment.
The random agent trades for 10 episodes to have a more accurate scaler.
Args:
env (gym.Env): trading environment
mode (mode): train or test
Returns:
trained sklearn standard scaler
"""
scaler = StandardScaler()
if mode == 'train':
observations = []
for _ in range(10):
observation = env.reset()
observations.append(observation)
done = False
while not done:
action = env.action_space.sample()
observation_, _, done, _ = env.step(action)
observations.append(observation_)
scaler.fit(observations)
with open(os.path.join(checkpoint_directory, 'networks', 'scaler.pkl'), 'wb') as f:
pickle.dump(scaler, f)
if mode == 'test':
with open(os.path.join(checkpoint_directory, 'networks', 'scaler.pkl'), 'rb') as f:
scaler = pickle.load(f)
return scaler
def prepare_initial_portfolio(initial_portfolio: Union[int, float, str],
tickers: List[str]) -> dict:
"""Prepare the initial portfolio to give to the environment constructor.
Args:
initial_portfolio (int or float or string): if numerical then initial cash in bank assuming no shares are owned initially, otherwise
path to a json file prescribing the initial cah in bank as well as the number of owned shares of each asset.
tickers (List[str]): list of asset names
Returns:
dictionary giving the structure of the initial portfolio
"""
print('>>>>> Reading the provided initial portfolio <<<<<')
if isinstance(initial_portfolio, int) or isinstance(initial_portfolio, float):
initial_portfolio_returned = {key: 0 for key in tickers}
initial_portfolio_returned["Bank_account"] = initial_portfolio
else:
with open(initial_portfolio, "r") as file:
initial_portfolio = json.load(file)
initial_portfolio_returned = {key: 0 for key in tickers}
initial_portfolio_returned["Bank_account"] = initial_portfolio["Bank_account"]
for key in initial_portfolio_returned.keys():
if key in initial_portfolio.keys():
initial_portfolio_returned[key] = initial_portfolio[key]
return initial_portfolio_returned
def append_corr_matrix(df: pd.DataFrame,
window: int,
) -> pd.DataFrame:
"""Append the sliding correlation matrix of a multidimensional time series.
timewise flattens it and extracts just the upper triangular part (since it is symmetric),
then appends it to the initial time series.
Args:
df (pd.DataFrame): the multidimensional time series whose sliding correlation matrix is computed
window (int): size of the sliding window used to compute the correlation matrix
Returns:
the input time series with the sliding correlation matrix appended
"""
print('>>>>> Appending the correlation matrix <<<<<')
columns = ['{}/{}'.format(m, n) for (m, n) in itertools.combinations_with_replacement(df.columns, r=2)]
corr = df.rolling(window).cov()
corr_flattened = | pd.DataFrame(index=columns) | pandas.DataFrame |
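A compact, self-contained sketch of the idea described in append_corr_matrix: compute a rolling pairwise correlation, keep the upper-triangular pairs, flatten them into columns and append them to the original frame. The column names, window size and random data are illustrative.
import itertools
import numpy as np
import pandas as pd
df_demo = pd.DataFrame(np.random.randn(100, 3), columns=['AAA', 'BBB', 'CCC'])
window = 20
pairs = list(itertools.combinations_with_replacement(df_demo.columns, r=2))
corr = df_demo.rolling(window).corr()          # MultiIndex (timestamp, column) x column
flat = pd.DataFrame(
    {'{}/{}'.format(m, n): corr.xs(m, level=1)[n] for m, n in pairs},
    index=df_demo.index,
)
appended = pd.concat([df_demo, flat], axis=1)
print(appended.tail(3))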
import pdb
import glob
import copy
import os
import pickle
import joblib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats
import sklearn.feature_selection
from random import choices
class FeatureColumn:
def __init__(self, category, field, preprocessors, args=None, cost=None):
self.category = category
self.field = field
self.preprocessors = preprocessors
self.args = args
self.data = None
self.cost = cost
class NHANES:
def __init__(self, db_path=None, columns=None):
self.db_path = db_path
self.columns = columns # Depricated
self.dataset = None # Depricated
self.column_data = None
self.column_info = None
self.df_features = None
self.df_targets = None
self.costs = None
def process(self):
df = None
cache = {}
# collect relevant data
df = []
for fe_col in self.columns:
sheet = fe_col.category
field = fe_col.field
data_files = glob.glob(self.db_path+sheet+'/*.XPT')
df_col = []
for dfile in data_files:
print(80*' ', end='\r')
print('\rProcessing: ' + dfile.split('/')[-1], end='')
# read the file
if dfile in cache:
df_tmp = cache[dfile]
else:
df_tmp = pd.read_sas(dfile)
cache[dfile] = df_tmp
# skip of there is no SEQN
if 'SEQN' not in df_tmp.columns:
continue
#df_tmp.set_index('SEQN')
# skip if there is nothing interseting there
sel_cols = set(df_tmp.columns).intersection([field])
if not sel_cols:
continue
else:
df_tmp = df_tmp[['SEQN'] + list(sel_cols)]
df_tmp.set_index('SEQN', inplace=True)
df_col.append(df_tmp)
try:
df_col = pd.concat(df_col)
except:
#raise Error('Failed to process' + field)
                raise Exception('Failed to process ' + field)
df.append(df_col)
df = pd.concat(df, axis=1)
#df = pd.merge(df, df_sel, how='outer')
# do preprocessing steps
df_proc = []#[df['SEQN']]
for fe_col in self.columns:
field = fe_col.field
fe_col.data = df[field].copy()
# do preprocessing
if fe_col.preprocessors is not None:
prepr_col = df[field]
for x in range(len(fe_col.preprocessors)):
prepr_col = fe_col.preprocessors[x](prepr_col, fe_col.args[x])
else:
prepr_col = df[field]
# handle the 1 to many
if (len(prepr_col.shape) > 1):
fe_col.cost = [fe_col.cost] * prepr_col.shape[1]
else:
fe_col.cost = [fe_col.cost]
df_proc.append(prepr_col)
self.dataset = pd.concat(df_proc, axis=1)
return self.dataset
# Preprocessing functions
def preproc_onehot(df_col, args=None):
return pd.get_dummies(df_col, prefix=df_col.name, prefix_sep='#')
def preproc_map_mode(df_col, args=None):
if args is None:
args={'cutoff':np.inf}
df_col[df_col > args['cutoff']] = np.nan
df_col[pd.isna(df_col)] = df_col.mode()
return df_col
def preproc_map_median(df_col, args=None):
if args is None:
args={'cutoff':np.inf}
df_col[df_col > args['cutoff']] = np.nan
df_col[pd.isna(df_col)] = df_col.median()
return df_col
def preproc_real(df_col, args=None):
if args is None:
args={'cutoff':np.inf}
# other answers as nan
df_col[df_col > args['cutoff']] = np.nan
# nan replaced by mean
df_col[pd.isna(df_col)] = df_col.mean()
# statistical normalization
df_col = (df_col-df_col.mean()) / df_col.std()
return df_col
def preproc_real_mapped(df_col, args):
if args is None or not 'mappedValue' in args:
raise Exception("Missing required argument 'mappedValue'!")
if not 'cutoff' in args:
args['cutoff'] = np.inf
if not 'normalize' in args:
args['normalize'] = True
# other answers as nan
df_col[df_col > args['cutoff']] = np.nan
# nan replaced by mappedValue
df_col[pd.isna(df_col)] = args['mappedValue']
# statistical normalization
if args['normalize'] == True:
df_col = (df_col-df_col.mean()) / df_col.std()
return df_col
def preproc_real_transform(df_col, args):
if args is None or not 'toTransform' in args:
raise Exception("Missing required argument 'toTransform'!")
if not 'cutoff' in args:
args['cutoff'] = np.inf
if not 'useOldMean' in args:
args['useOldMean'] = True
if not 'normalize' in args:
args['normalize'] = True
# other answers as nan
df_col[df_col > args['cutoff']] = np.nan
old_mean = df_col.mean()
# replace certain values with other values
for x in args['toTransform']:
df_col[df_col == x['selectValue']] = x['mappedValue']
if 'mappedValue' in args:
# nan replaced by mappedValue
df_col[ | pd.isna(df_col) | pandas.isna |
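The preproc_* helpers above share one pattern: map out-of-range answer codes to NaN, impute the missing values, then standardize. A toy demonstration of that pattern on a hand-made Series (the cutoff and the 7777 "refused"-style code are made up):
import numpy as np
import pandas as pd
col = pd.Series([1.0, 2.0, 7777.0, np.nan, 3.0])
cutoff = 100.0
col[col > cutoff] = np.nan            # treat out-of-range codes as missing
col[pd.isna(col)] = col.mean()        # impute with the mean of the valid answers
col = (col - col.mean()) / col.std()  # statistical normalization
print(col)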
'''
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
"""
Los productos que salen del reporte diario son:
3
4
5
7
8
9
10
11
12
13
14
17
20
23
24
26
27
30
36
"""
import pandas as pd
from utils import *
from shutil import copyfile
from os import listdir
from os.path import isfile, join
from datetime import datetime
import numpy as np
def prod4(fte, producto):
print('Generando producto 4')
now = datetime.now()
today = now.strftime("%Y-%m-%d")
output = producto + today + '-CasosConfirmados-totalRegional.csv'
df = pd.read_csv(fte, quotechar='"', sep=',', thousands=r'.', decimal=",")
df.rename(columns={'Unnamed: 0': 'Region'}, inplace=True)
if 'Unnamed: 7' in df.columns:
df.drop(columns=['Unnamed: 7'], inplace=True)
df_obj = df.select_dtypes(['object'])
df[df_obj.columns] = df_obj.apply(lambda x: x.str.strip())
regionName(df)
df.at[16, 'Region'] = 'Total'
    # Textract recognizes 0 as the letter o
df.replace({'O': 0}, inplace=True)
numeric_columns = [x for x in df.columns if x != 'Region']
for i in numeric_columns:
df[i] = df[i].astype(str)
#df[i] = df[i].replace({r'\.': ''}, regex=True)
df[i] = df[i].replace({r'\,': '.'}, regex=True)
df.to_csv(output, index=False)
def prod5(fte, producto):
print('Generando producto 5')
    # we need national-level series by date:
    # Casos nuevos con sintomas
    # Casos totales
    # Casos recuperados  # no longer reported
    # Fallecidos
    # Casos activos
    # Casos nuevos sin sintomas
now = datetime.now()
timestamp = now.strftime("%Y-%m-%d")
timestamp_dia_primero = now.strftime("%d-%m-%Y")
a = pd.read_csv(fte + 'CasosConfirmados.csv')
a['Fecha'] = timestamp
a = a[a['Region'] == 'Total']
print(a.to_string())
    # the columns are:
# Casos totales acumulados Casos nuevos totales Casos nuevos con sintomas Casos nuevos sin sintomas* Fallecidos totales % Total Fecha
a.rename(columns={'Casos totales acumulados': 'Casos totales',
'Casos nuevos totales': 'Casos nuevos totales',
'Casos nuevos con sintomas': 'Casos nuevos con sintomas',
'Casos nuevos sin sintomas*': 'Casos nuevos sin sintomas',
'Fallecidos totales': 'Fallecidos'}, inplace=True)
    # Active cases are missing: prod 5 now lives in the daily report, this needs to be migrated there
casos_confirmados_totales = | pd.read_csv('../input/ReporteDiario/CasosConfirmadosTotales.csv') | pandas.read_csv |
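A minimal sketch of the step prod5 performs right after loading: keep only the 'Total' row, relabel the columns and stamp the date. The two-row frame below is made up; only the column names follow the report format used above.
import pandas as pd
a_demo = pd.DataFrame({
    'Region': ['Metropolitana', 'Total'],
    'Casos totales acumulados': [100000, 250000],
    'Fallecidos totales': [2000, 5000],
})
a_demo = a_demo[a_demo['Region'] == 'Total'].rename(columns={
    'Casos totales acumulados': 'Casos totales',
    'Fallecidos totales': 'Fallecidos',
})
a_demo['Fecha'] = pd.Timestamp.today().strftime('%Y-%m-%d')
print(a_demo)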
from datetime import datetime
import utils as utils
import coinbase_api as cb
import algorithms as alg
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import sys
import warnings
class BackTest:
def __init__(self, df):
self.df = df
self.index = int(0.85 * len(df))
def calculate_sma(self, coin_id, n_days):
sma = 0
j = 0
while (j <= n_days * 24 and (self.index + j) < len(self.df)):
sma += float(self.df.close[self.index + j])
j += 24
return sma / n_days
def get_current_price(self, coin_id):
price = float(self.df.close[self.index])
self.index -= 1
return price
def obtain_crypto_dataset(ticker):
data = []
i = 4
while(i < 5000):
start_days_prior = i
num_days = 4
data += cb.getHistoricalData(ticker, start_days_prior, num_days)
if (len(data) <= 1):
utils.writelog(f"Error obtaining historical data. response = {data}", True)
print(i)
break
i += 4
df = pd.DataFrame(data, columns=[ "time", "low", "high", "open", "close", "volume" ])
df.to_csv(f"../datasets/{ticker}.csv", mode='a')
return df
def plot_test_results(ticker, df, buy_and_sell_points):
plt.locator_params(axis="x", nbins=4)
plt.locator_params(axis="y", nbins=4)
plt.xlabel("Time")
plt.ylabel("Price in Dollars")
plt.plot(df.time, df.close)
plt.title(f"{ticker} price over time.")
warnings.filterwarnings("ignore")
plt.axes().set_xticklabels([utils.epoch_time_to_human(x) for x in plt.axes().get_xticks()])
plt.axes().set_yticklabels(['$' + str(y) for y in plt.axes().get_yticks()])
for point in buy_and_sell_points:
(action, (time, price)) = point
plt.annotate(action, (time, price))
plt.savefig(f"{ticker}-test-results.png", figsize=(10, 10), dpi=100)
def main():
if (len(sys.argv) != 2):
print(f"Error with arguments. \nUsage:\n{sys.argv[0]} <ticker>")
sys.exit()
ticker = sys.argv[1]
try:
df = | pd.read_csv(f"../datasets/{ticker}.csv") | pandas.read_csv |
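calculate_sma above walks backwards through hourly rows 24 at a time to build a daily simple moving average. With the DataFrame already in memory, the same quantity can be sketched directly with pandas; the synthetic candles below just reuse the 'close' column name from the cached dataset.
import numpy as np
import pandas as pd
n = 24 * 30  # 30 days of hourly candles
candles = pd.DataFrame({'time': np.arange(n),
                        'close': 100 + np.cumsum(np.random.randn(n))})
daily_close = candles['close'].iloc[::24]   # one close per day
sma_10d = daily_close.rolling(10).mean()    # 10-day simple moving average
print(sma_10d.dropna().tail())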
#!/usr/bin/env python
# -*- coding=utf-8 -*-
import pickle
import pandas as pd
from pandas import *
import numpy as np
from nltk import word_tokenize
from nltk.util import ngrams
import collections
from collections import Counter
import os
import regex as re
from make_ngrams import compute_ngrams
import math
from collections import defaultdict
from processing_functions import load_list, load_speakerlist, process_excel, remove_diacritic, compute_tfidf, normalize_dicts, write_to_excel, convert_keys_to_string, compute_difference, cosine_similarity
from scipy import spatial
global num_speeches
doc_freq = pickle.load(open("bigram_doc_freq.pickle", "rb"))
# This is the function that reads in the Excel files and calls the necessary functions to compute the distances
# It then writes those distance dictionaries to Excel
def distance_analysis():
gir_tfidf = process_excel('girondins_tfidf_allbigrams.xlsx')
mont_tfidf = process_excel("montagnards_tfidf_allbigrams.xlsx")
gir_dict = convert_keys_to_string(gir_tfidf)
mont_dict = convert_keys_to_string(mont_tfidf)
gir_mont_diff = compute_difference(gir_dict, mont_dict)
by_month = pd.read_excel("By_Month.xlsx")
by_date = pd.read_excel("By_Date.xlsx")
by_speaker = pd.read_excel("By_Speaker_Convention.xlsx")
"""by_month = create_tfidf_vectors(by_month)
by_month_dist = compute_distances(by_month, 'month', gir_dict, mont_dict, gir_mont_diff)
write_to_excel(by_month_dist, 'by_month_distances.xlsx')
by_date = create_tfidf_vectors(by_date)
by_period = aggregate_by_period(by_date)
by_period_dist = compute_distances(by_period, 'period', gir_dict, mont_dict, gir_mont_diff)
write_to_excel(by_period_dist, "by_period_distances.xlsx")
by_date_dist = compute_distances(by_date, 'date', gir_dict, mont_dict, gir_mont_diff)
write_to_excel(by_date_dist, 'by_date_distances.xlsx')"""
by_speaker = create_tfidf_vectors(by_speaker)
by_speaker_dist = compute_distances(by_speaker, 'speaker', gir_dict, mont_dict, gir_mont_diff)
write_to_excel(by_speaker_dist, 'by_speaker_distances.xlsx')
# Aggregates the data based on three periods - before the convention, during the convention, and after the convention
def aggregate_by_period(dataframe):
before_convention = Counter()
convention = Counter()
after_convention = Counter()
for i, time in enumerate(dataframe['Full Date']):
# Convert time to a string to do the string equality analysis to determine which period the row belongs to
time = str(time)
if (time >= "1792-6-10") and (time <= "1792-8-10"):
before_convention = before_convention + dataframe['ngrams'].iloc[i]
if (time >= "1792-9-20") and (time < "1793-6-2"):
convention = convention + dataframe['ngrams'].iloc[i]
if (time >= "1793-6-2") and (time <= "1793-8-2"):
after_convention = after_convention + dataframe['ngrams'].iloc[i]
before_convention_tfidf = compute_tfidf(before_convention, num_speeches, doc_freq)
convention_tfidf = compute_tfidf(convention, num_speeches, doc_freq)
after_convention_tfidf = compute_tfidf(after_convention, num_speeches, doc_freq)
before_convention_df = pd.DataFrame.from_dict(before_convention_tfidf, orient = "index")
convention_df = pd.DataFrame.from_dict(convention_tfidf, orient = "index")
after_convention_df = pd.DataFrame.from_dict(after_convention_tfidf, orient = "index")
#period_df = pd.DataFrame([before_convention, convention, after_convention])
#write_to_excel(period_df, 'periods.xlsx')
period_df = [before_convention_tfidf, convention_tfidf, after_convention_tfidf]
return period_df
# Creates two new columns in each dataframe - ngram Counter objects and tfidf dictionaries
# These columsn are used for aggregation and cosine similarity computation
def create_tfidf_vectors(dataframe):
speeches = dataframe['concat_speeches'].tolist()
ngrams = []
for unit in speeches:
ngrams.append(compute_ngrams(unit, 2))
ngrams_to_add = pd.Series(ngrams)
dataframe['ngrams'] = ngrams_to_add.values
tfidf = []
for element in ngrams:
tfidf.append(compute_tfidf(element, num_speeches, doc_freq))
tfidf_to_add = pd.Series(tfidf)
dataframe['tfidf'] = tfidf_to_add.values
return dataframe
# This function computes the cosine similarity and distances between the given dataframe and the three points of analysis
# It assumes that the dataframe contains a tfidf column
def compute_distances(dataframe, period, gir_dict, mont_dict, gir_mont_diff):
period_vector = []
if period == 'month':
period_vector = dataframe['Year-Month'].tolist()
period_vector = pd.Series(period_vector)
tfidf_scores = dataframe['tfidf'].tolist()
elif period == 'date':
period_vector = dataframe['Date'].tolist()
period_vector = pd.Series(period_vector)
tfidf_scores = dataframe['tfidf'].tolist()
elif period == 'speaker':
period_vector = dataframe['Speaker'].tolist()
period_vector = | pd.Series(period_vector) | pandas.Series |
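compute_distances ultimately compares tf-idf dictionaries with cosine similarity. A self-contained sketch of that comparison with two tiny hand-made bigram dictionaries (not real Girondins/Montagnards data), using the scipy.spatial import already present in this script:
from scipy import spatial
gir_demo = {'liberte egalite': 0.9, 'salut public': 0.1}
mont_demo = {'salut public': 0.8, 'comite revolutionnaire': 0.6}
vocab = sorted(set(gir_demo) | set(mont_demo))
v1 = [gir_demo.get(k, 0.0) for k in vocab]   # align both dicts on a shared vocabulary
v2 = [mont_demo.get(k, 0.0) for k in vocab]
similarity = 1 - spatial.distance.cosine(v1, v2)
print(round(similarity, 3))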
# generate dataset with spice parameters and exported file of ads
import numpy as np
import pandas as pd
spice_data = './spice_data/spice_data.csv'
# ads_export = './spice_data/ads_export_mass_0-10.csv'
ads_export = './spice_data/ads_export_mass_0-10_f=2.3-2.6.csv'
spice_pd = pd.read_csv(spice_data)
ads_pd = | pd.read_csv(ads_export) | pandas.read_csv |
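The script presumably combines the SPICE parameters with the ADS export into one dataset; the join key and column names are not shown above, so the sketch below is a hedged guess that assumes both files share a 'run_id' column.
import pandas as pd
spice_demo = pd.DataFrame({'run_id': [1, 2], 'L_nH': [2.2, 2.7]})
ads_demo = pd.DataFrame({'run_id': [1, 2], 'S21_dB': [-1.3, -1.1]})
dataset = spice_demo.merge(ads_demo, on='run_id', how='inner')  # inner join on the shared key
print(dataset)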
from re import X
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.sparse import dia
from sklearn.model_selection import train_test_split
from pandas.plotting import scatter_matrix
from scipy.stats import f_oneway
from statsmodels.stats.multicomp import pairwise_tukeyhsd
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from scipy.stats import iqr
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn import linear_model
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.ensemble import RandomForestRegressor
# read data, using read_csv function from pandas module
df = pd.read_csv("dataset/diamonds.csv")
# display the first 5 rows for the dataframe
print(df.head())
print(df.describe())
# return a description of data, the description containt an information about data:
# count, mean, std, min value, 25%,50%, 75%, max value
print(df.info())
# display data information
# # Replace x,y,z:
df1 = df
df["cubic_volume"] = df.x * df.y * df.z
# create a diamond size series, by Multiplying The three dimensions of a diamond
df.cubic_volume.replace(0, np.median(df.cubic_volume), inplace=True)
# replace zeros values from cubic_volume series with the median
df = df.drop("Unnamed: 0", axis=1)
# delete ID "unnamed" attribute, because we don't need this
df = df.drop(['x', 'y', 'z'], axis=1)
# delete x,y,z series
print(df.head())
# # Delete Outliers:
# outliers data records that differ dramatically from all others.
# directly effect the machine learning model
# we use IQR to detect outliers
df_without_outliers = df
for i in ['price', "carat", 'depth', 'table', 'cubic_volume']:
q75, q25 = np.percentile(df[i], [75, 25])
iqr = q75 - q25
high_outlier = q75 + 1.5 * iqr
low_outlier = q25 - 1.5 * iqr
df_without_outliers = df_without_outliers[
(df_without_outliers[i] < high_outlier) & (df_without_outliers[i] > low_outlier)]
final_df = df_without_outliers
print('outliers ratio:', (len(df) - len(final_df)) / len(df))
print("number of outliers", len(df) - len(final_df))
# # Data Visualization
#
# Data With Outliers:
# his diagram
df.hist(figsize=(20, 14), bins=50)
plt.show()
fig, ax = plt.subplots(1, 4, figsize=(20, 6))
# Create a figure and a subplots, with size of figure 20X6
fig.subplots_adjust(wspace=0.25)
j = 0
for i in ['carat', 'depth', 'table', 'cubic_volume']:
sns.regplot(data=df, x=i, y='price', color="#003f5c", line_kws={'color': '#ffa600'}, ax=ax[j])
# method to plot data and a linear regression model fit.
j = j + 1
fig.show()
Numerical_attr = ["carat", "depth", "color", "x", "y", "z"]
scatter_matrix(df1[Numerical_attr], figsize=(12, 12))
# draw a matrix of scatter plots, with figure size of 12X12
plt.show()
# Without the (x,y,z)
fig, ax = plt.subplots(1, 4, figsize=(20, 6))
# Create a figure and a subplots, with size of figure 20X6
fig.subplots_adjust(wspace=0.25)
j = 0
for i in ['carat', 'depth', 'table', 'cubic_volume']:
sns.regplot(data=df, x=i, y='price', color="#003f5c", line_kws={'color': '#ffa600'}, ax=ax[j])
# method to plot data and a linear regression model fit.
j = j + 1
fig.show()
fig, ax = plt.subplots(1, 4, figsize=(20, 6))
# Create a figure and a subplots, with size of figure 20X6, n row is 1 an n column is 4
# A kernel density estimate (KDE) plot is a method for visualizing the distribution of observations in a dataset,
sns.kdeplot(data=df, x="carat", color="#003f5c", fill=True, edgecolor='red', alpha=1, ax=ax[0])
sns.kdeplot(data=df, x="depth", color="#003f5c", fill=True, edgecolor='red', alpha=1, ax=ax[1])
sns.kdeplot(data=df, x="table", color="#003f5c", fill=True, edgecolor='red', alpha=1, ax=ax[2])
sns.kdeplot(data=df, x="cubic_volume", color="#003f5c", fill=True, edgecolor='red', alpha=1, ax=ax[3])
fig.show()
# display the figures
fig = plt.figure(figsize=(12, 8))
# create a new figure, with size 12X8
sns.heatmap(df.corr(), linewidths=2, annot=True, cmap="Blues")
# heatmap is plot rectangular data as a color-encoded matrix.
# compute pairwise correlation of columns, excluding NA/null values.
# correlation is a term that is a measure of the strength of a linear relationship between two quantitative variables
# positive correlation, negative correlation,neutral correlation
plt.title("Correlation", size=15, weight='bold')
# set title for figure
fig, ax = plt.subplots(1, 2, figsize=(15, 4))
# Create a figure and a subplots, with size of figure 15X4
sns.boxplot(data=df, x="carat", color="#003f5c", ax=ax[0])
# A box plot shows the distribution of quantitative data in a way that facilitates comparisons between variables
# or across levels of a categorical variable
sns.boxplot(data=df, x="depth", color="#003f5c", ax=ax[1])
fig, ax = plt.subplots(1, 2, figsize=(15, 4))
# Create a figure and a subplots, with size of figure 15X4, n row is 1 an n column is 2
sns.boxplot(data=df, x="table", color="#003f5c", ax=ax[0])
sns.boxplot(data=df, x="cubic_volume", color="#003f5c", ax=ax[1])
fig, ax = plt.subplots(1, 2, figsize=(15, 4))
sns.boxplot(data=df, x="price", color="#003f5c", ax=ax[1])
fig.show()
# Data Without Outliers:
fig, ax = plt.subplots(1, 2, figsize=(15, 4))
sns.boxplot(data=df_without_outliers, x="carat", color="#003f5c", ax=ax[0])
sns.boxplot(data=df_without_outliers, x="depth", color="#003f5c", ax=ax[1])
fig, ax = plt.subplots(1, 2, figsize=(15, 4))
sns.boxplot(data=df_without_outliers, x="table", color="#003f5c", ax=ax[0])
sns.boxplot(data=df_without_outliers, x="cubic_volume", color="#003f5c", ax=ax[1])
fig, ax = plt.subplots(1, 2, figsize=(15, 4))
sns.boxplot(data=df_without_outliers, x="price", color="#003f5c", ax=ax[1])
fig.show()
fig, ax = plt.subplots(1, 4, figsize=(20, 6))
# Create a figure and a subplots, with size of figure 20X6, n row is 1 an n column is 4
# A kernel density estimate (KDE) plot is a method for visualizing the distribution of observations in a dataset,
sns.kdeplot(data=df_without_outliers, x="carat", color="#003f5c", fill=True, edgecolor='red', alpha=1, ax=ax[0])
sns.kdeplot(data=df_without_outliers, x="depth", color="#003f5c", fill=True, edgecolor='red', alpha=1, ax=ax[1])
sns.kdeplot(data=df_without_outliers, x="table", color="#003f5c", fill=True, edgecolor='red', alpha=1, ax=ax[2])
sns.kdeplot(data=df_without_outliers, x="cubic_volume", color="#003f5c", fill=True, edgecolor='red', alpha=1, ax=ax[3])
fig.show()
# display the figures
# # Train and Test Sets:
train_set, test_set = train_test_split(final_df, test_size=0.2, random_state=1)
# split the data into random train and test subsets randomly
diamonds_labels = train_set["price"].copy()
# creating data label
test_diamonds_labels = test_set["price"].copy()
diamonds = train_set.drop("price", axis=1)
# delete the price data, because we need the model to predict the price, so we need to delete this
diamonds_testset = test_set.drop("price", axis=1)
diamonds_num = diamonds.drop(["cut", "color", "clarity"], axis=1)
diamonds_num.head()
# display the first 5 rows for the object based on position.
# # Color ordinal encoding:
diamond_color_ascending = ['J', 'I', 'H', 'G', 'F', 'E', 'D']
# color is an cateigoral data, so we need to encode this as numarical data
ordinal_encoder = OrdinalEncoder(categories=[diamond_color_ascending])
# OrdinalEncoder: encode categorical features as an integer array.
diamond_color_encoded = ordinal_encoder.fit_transform(diamonds[['color']])
# fit_transform joins two steps, (fit)calculates the parameters, (transform)apply the transformation to any particular
diamond_color_encoded_df = | pd.DataFrame(diamond_color_encoded) | pandas.DataFrame |
import numpy as np
import pytest
from pandas import Series
import pandas._testing as tm
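# Consistency checks for rolling-window reductions: each test below recomputes a rolling
# statistic a second way (via .apply with a raw function, or via a textbook identity such
# as var(x) == std(x)**2) and asserts both paths agree on the shared
# consistency_data / rolling_consistency_cases fixtures.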
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
def test_rolling_apply_consistency_sum(
consistency_data, rolling_consistency_cases, center, f
):
x, is_constant, no_nans = consistency_data
window, min_periods = rolling_consistency_cases
if f is np.nansum and min_periods == 0:
pass
elif f is np.sum and not no_nans:
pass
else:
rolling_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).sum()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var(
consistency_data, rolling_consistency_cases, center, ddof
):
x, is_constant, no_nans = consistency_data
window, min_periods = rolling_consistency_cases
var_x = x.rolling(window=window, min_periods=min_periods, center=center).var(
ddof=ddof
)
assert not (var_x < 0).any().any()
if ddof == 0:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x = x.rolling(window=window, min_periods=min_periods, center=center).mean()
mean_x2 = (
(x * x)
.rolling(window=window, min_periods=min_periods, center=center)
.mean()
)
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var_constant(
consistency_data, rolling_consistency_cases, center, ddof
):
x, is_constant, no_nans = consistency_data
window, min_periods = rolling_consistency_cases
if is_constant:
count_x = x.rolling(
window=window, min_periods=min_periods, center=center
).count()
var_x = x.rolling(window=window, min_periods=min_periods, center=center).var(
ddof=ddof
)
# check that variance of constant series is identically 0
assert not (var_x > 0).any().any()
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.0
if ddof == 1:
expected[count_x < 2] = np.nan
tm.assert_equal(var_x, expected)
@pytest.mark.parametrize("ddof", [0, 1])
def test_rolling_consistency_var_std_cov(
consistency_data, rolling_consistency_cases, center, ddof
):
x, is_constant, no_nans = consistency_data
window, min_periods = rolling_consistency_cases
var_x = x.rolling(window=window, min_periods=min_periods, center=center).var(
ddof=ddof
)
assert not (var_x < 0).any().any()
std_x = x.rolling(window=window, min_periods=min_periods, center=center).std(
ddof=ddof
)
assert not (std_x < 0).any().any()
# check that var(x) == std(x)^2
tm.assert_equal(var_x, std_x * std_x)
cov_x_x = x.rolling(window=window, min_periods=min_periods, center=center).cov(
x, ddof=ddof
)
assert not (cov_x_x < 0).any().any()
# check that var(x) == cov(x, x)
tm.assert_equal(var_x, cov_x_x)
@pytest.mark.parametrize("ddof", [0, 1])
def test_rolling_consistency_series_cov_corr(
consistency_data, rolling_consistency_cases, center, ddof
):
x, is_constant, no_nans = consistency_data
window, min_periods = rolling_consistency_cases
if isinstance(x, Series):
var_x_plus_y = (
(x + x)
.rolling(window=window, min_periods=min_periods, center=center)
.var(ddof=ddof)
)
var_x = x.rolling(window=window, min_periods=min_periods, center=center).var(
ddof=ddof
)
var_y = x.rolling(window=window, min_periods=min_periods, center=center).var(
ddof=ddof
)
cov_x_y = x.rolling(window=window, min_periods=min_periods, center=center).cov(
x, ddof=ddof
)
# check that cov(x, y) == (var(x+y) - var(x) -
# var(y)) / 2
tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) *
# std(y))
corr_x_y = x.rolling(
window=window, min_periods=min_periods, center=center
).corr(x)
std_x = x.rolling(window=window, min_periods=min_periods, center=center).std(
ddof=ddof
)
std_y = x.rolling(window=window, min_periods=min_periods, center=center).std(
ddof=ddof
)
| tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y)) | pandas._testing.assert_equal |
from collections import namedtuple
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pandas.testing as tm
import pytest
from dask.array.utils import assert_eq as assert_eq_ar
from dask.dataframe.utils import assert_eq as assert_eq_df
from dask_ml.datasets import make_classification
from dask_ml.utils import (
_num_samples,
assert_estimator_equal,
check_array,
check_chunks,
check_consistent_length,
check_matching_blocks,
check_random_state,
handle_zeros_in_scale,
slice_columns,
)
df = dd.from_pandas(pd.DataFrame(5 * [range(42)]).T, npartitions=5)
s = dd.from_pandas(pd.Series([0, 1, 2, 3, 0]), npartitions=5)
a = da.from_array(np.array([0, 1, 2, 3, 0]), chunks=3)
X, y = make_classification(chunks=(2, 20))
Foo = namedtuple("Foo", "a_ b_ c_ d_")
Bar = namedtuple("Bar", "a_ b_ d_ e_")
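# Module-level fixtures reused by the tests below: df/s/a are small dask collections,
# X/y come from dask_ml's make_classification, and Foo/Bar are namedtuples standing in
# for fitted estimators (sklearn-style trailing-underscore attributes) when exercising
# assert_estimator_equal.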
def test_slice_columns():
columns = [2, 3]
df2 = slice_columns(df, columns)
X2 = slice_columns(X, columns)
assert list(df2.columns) == columns
assert_eq_df(df[columns].compute(), df2.compute())
assert_eq_ar(X.compute(), X2.compute())
def test_handle_zeros_in_scale():
s2 = handle_zeros_in_scale(s)
a2 = handle_zeros_in_scale(a)
assert list(s2.compute()) == [1, 1, 2, 3, 1]
assert list(a2.compute()) == [1, 1, 2, 3, 1]
x = np.array([1, 2, 3, 0], dtype="f8")
expected = np.array([1, 2, 3, 1], dtype="f8")
result = handle_zeros_in_scale(x)
np.testing.assert_array_equal(result, expected)
x = pd.Series(x)
expected = pd.Series(expected)
result = handle_zeros_in_scale(x)
tm.assert_series_equal(result, expected)
x = da.from_array(x.values, chunks=2)
expected = expected.values
result = handle_zeros_in_scale(x)
assert_eq_ar(result, expected)
x = dd.from_dask_array(x)
expected = pd.Series(expected)
result = handle_zeros_in_scale(x)
assert_eq_df(result, expected)
def test_assert_estimator_passes():
l = Foo(1, 2, 3, 4)
r = Foo(1, 2, 3, 4)
assert_estimator_equal(l, r) # it works!
def test_assert_estimator_different_attributes():
l = Foo(1, 2, 3, 4)
r = Bar(1, 2, 3, 4)
with pytest.raises(AssertionError):
assert_estimator_equal(l, r)
def test_assert_estimator_different_scalers():
l = Foo(1, 2, 3, 4)
r = Foo(1, 2, 3, 3)
with pytest.raises(AssertionError):
assert_estimator_equal(l, r)
@pytest.mark.parametrize(
"a", [np.array([1, 2]), da.from_array(np.array([1, 2]), chunks=1)]
)
def test_assert_estimator_different_arrays(a):
l = Foo(1, 2, 3, a)
r = Foo(1, 2, 3, np.array([1, 0]))
with pytest.raises(AssertionError):
assert_estimator_equal(l, r)
@pytest.mark.parametrize(
"a",
[
pd.DataFrame({"A": [1, 2]}),
dd.from_pandas(pd.DataFrame({"A": [1, 2]}), npartitions=2),
],
)
def test_assert_estimator_different_dataframes(a):
l = Foo(1, 2, 3, a)
r = Foo(1, 2, 3, pd.DataFrame({"A": [0, 1]}))
with pytest.raises(AssertionError):
assert_estimator_equal(l, r)
def test_check_random_state():
for rs in [None, 0]:
result = check_random_state(rs)
assert isinstance(result, da.random.RandomState)
rs = da.random.RandomState(0)
result = check_random_state(rs)
assert result is rs
with pytest.raises(TypeError):
check_random_state(np.random.RandomState(0))
@pytest.mark.parametrize("chunks", [None, 4, (2000, 4), [2000, 4]])
def test_get_chunks(chunks):
from unittest import mock
with mock.patch("dask_ml.utils.cpu_count", return_value=4):
result = check_chunks(n_samples=8000, n_features=4, chunks=chunks)
expected = (2000, 4)
assert result == expected
@pytest.mark.parametrize("chunks", [None, 8])
def test_get_chunks_min(chunks):
result = check_chunks(n_samples=8, n_features=4, chunks=chunks)
expected = (100, 4)
assert result == expected
def test_get_chunks_raises():
with pytest.raises(AssertionError):
check_chunks(1, 1, chunks=(1, 2, 3))
with pytest.raises(AssertionError):
check_chunks(1, 1, chunks=[1, 2, 3])
with pytest.raises(ValueError):
check_chunks(1, 1, chunks=object())
def test_check_array_raises():
X = da.random.uniform(size=(10, 5), chunks=2)
with pytest.raises(TypeError) as m:
check_array(X)
assert m.match("Chunking is only allowed on the first axis.")
@pytest.mark.parametrize(
"data",
[
np.random.uniform(size=10),
da.random.uniform(size=10, chunks=5),
da.random.uniform(size=(10, 4), chunks=5),
dd.from_pandas(pd.DataFrame({"A": range(10)}), npartitions=2),
dd.from_pandas(pd.Series(range(10)), npartitions=2),
],
)
def test_num_samples(data):
assert _num_samples(data) == 10
def test_check_array_1d():
arr = da.random.uniform(size=(10,), chunks=5)
check_array(arr, ensure_2d=False)
@pytest.mark.parametrize(
"arrays",
[
[],
[da.random.uniform(size=10, chunks=5)],
[da.random.uniform(size=10, chunks=5), da.random.uniform(size=10, chunks=5)],
[
dd.from_pandas(pd.Series([1, 2, 3]), 2),
dd.from_pandas(pd.Series([1, 2, 3]), 2),
],
[
dd.from_pandas(pd.Series([1, 2, 3]), 2),
dd.from_pandas( | pd.DataFrame({"A": [1, 2, 3]}) | pandas.DataFrame |
import pandas as pd
from io import StringIO
def build_report(data, asc=None):
"""Return list of drivers results:
[
{'driver': '<NAME>',
'car': 'FERRARI',
'start': Timestamp('2018-05-24 12:02:58.917000'),
'end': Timestamp('2018-05-24 12:04:03.332000'),
'time': Timedelta('0 days 00:01:04.415000'),
'result': '1:04.415'
'position': 1},
{'driver': '<NAME>',
'car': 'MERCEDES',
'start': Timestamp('2018-05-24 12:00:00'),
'end': Timestamp('2018-05-24 12:01:12.434000'),
'time': Timedelta('0 days 00:01:12.434000'),
'result': '1:12.434'
'position': 2},]"""
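    # Expected input (inferred from the read_csv calls below): data is a dict of raw text
    # blocks keyed by 'abb', 'start' and 'end', each line underscore-separated, e.g.
    # (hypothetical) "ABC_Some Driver_FERRARI" for 'abb' and "ABC_2018-05-24_12:02:58.917"
    # for 'start'/'end'.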
# Reading data
abb_df = pd.read_csv(
StringIO(data['abb']), sep='_', header=None,
names=['abbreviation', 'driver', 'car'])
abb_df.set_index('abbreviation', inplace=True)
start_df = pd.read_csv(
StringIO(data['start']), sep='_', header=None,
names=['abbreviation', 'data_start', 'time_start'])
start_df.set_index('abbreviation', inplace=True)
end_df = pd.read_csv(
StringIO(data['end']), sep='_', header=None,
names=['abbreviation', 'data_end', 'time_end'])
end_df.set_index('abbreviation', inplace=True)
# Columns filling
main_df = abb_df.join(start_df).join(end_df)
main_df['start'] = main_df["data_start"] + ' ' + main_df["time_start"]
    main_df['start'] = pd.to_datetime(main_df['start'], errors='coerce')
    main_df['end'] = main_df["data_end"] + ' ' + main_df["time_end"]
    main_df['end'] = pd.to_datetime(main_df['end'], errors='coerce')
main_df['time'] = main_df['end'] - main_df['start']
main_df.loc[main_df['start'] >= main_df['end'], 'time'] = float('nan')
main_df['result'] = main_df['time'].astype(str).str.replace('0 days 00:0', '').str.slice(stop=-3)
main_df.loc[ | pd.isna(main_df['time']) | pandas.isna |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 20 16:00:06 2018
@author: nmei
"""
if __name__ == "__main__":
import os
import pandas as pd
import numpy as np
import utils
import seaborn as sns
sns.set_style('whitegrid')
sns.set_context('poster')
from matplotlib import pyplot as plt
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
from itertools import combinations
import pymc3 as pm
import theano.tensor as tt
# define result saving directory
dir_saving = 'consective_transitions'
if not os.path.exists(dir_saving):
os.makedirs(dir_saving)
############################################################################################
df1 = pd.read_csv('e1.csv').iloc[:,1:]
df = df1.copy()
# select the columns that I need
df = df[['blocks.thisN',
'trials.thisN',
'key_resp_2.keys',
'resp.corr',
'resp_mrating.keys',
'participant',]]
# rename the columns
df.columns = ['blocks',
'trials',
'awareness',
'correctness',
'confidence',
'participant',]
# use success, awareness, and confidence as features
np.random.seed(12345)
# use judgement features
feature_names = [
'correctness',
'awareness',
'confidence',
]
target_name = 'awareness'
experiment = 'e1'
    # for some of the variables, we need to rescale them to a preferable range such as 0-1
name_for_scale = ['awareness']
# ['ah', 'av', 'bj', 'cm', 'db', 'ddb', 'fcm', 'kf', 'kk', 'ml', 'qa','sk', 'yv']
# get one of the participants' data
transition_matrix,sub = [], []
transition_count = []
for participant,df_sub in df.groupby('participant'):
awareness = df_sub['awareness'].values - 1
with pm.Model() as model:
a = pm.Beta('a', 0.5, 0.5)
yl = pm.Bernoulli('yl', a, observed=awareness)
trace = pm.sample(1000,
step=pm.SMC(),
random_seed=42)
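        # a ~ Beta(0.5, 0.5) is a Jeffreys-type prior on the per-trial probability of
        # awareness; each recoded 0/1 response is modelled as Bernoulli(a), and the
        # posterior is drawn with the SMC sampler above.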
# for 1-back
temp = pd.crosstab(pd.Series(df_sub['awareness'].values[1:],name='N'),
pd.Series(df_sub['awareness'].values[:-1],name='N-1'),
normalize=1)
transition_matrix.append(temp.get_values().flatten())
temp = pd.crosstab(pd.Series(df_sub['awareness'].values[1:],name='N'),
pd.Series(df_sub['awareness'].values[:-1],name='N-1'),)
transition_count.append(temp.get_values().flatten())
sub.append(participant)
df1_transition = pd.DataFrame(transition_matrix,columns=['unaware-unaware',
'aware-unaware',
'unaware-aware',
'aware-aware'])
df1_count = pd.DataFrame(transition_count,columns=['unaware-unaware',
'aware-unaware',
'unaware-aware',
'aware-aware'])
df1_transition['experiment'] = 1
df1_transition['sub'] = sub
df1_count['experiment'] = 1
df1_count['sub'] = sub
##############################################################################
df2 = pd.read_csv('e2.csv').iloc[:,1:]
df = df2.copy()
# select the columns that I need
df = df[['blocks.thisN',
'trials.thisN',
'key_resp_2.keys',
'resp.corr',
'resp_mrating.keys',
'participant',]]
# rename the columns
df.columns = ['blocks',
'trials',
'awareness',
'correctness',
'confidence',
'participant',]
# use success, awareness, and confidence as features
np.random.seed(12345)
# use judgement features
feature_names = [
'correctness',
'awareness',
'confidence',
]
target_name = 'awareness'
experiment = 'e2'
    # for some of the variables, we need to rescale them to a preferable range such as 0-1
name_for_scale = ['awareness']
transition_matrix,sub = [],[]
transition_count = []
for participant,df_sub in df.groupby('participant'):
df_sub = df_sub[df_sub['awareness'] != 3]
# for 1-back
temp = pd.crosstab(pd.Series(df_sub['awareness'].values[1:],name='N'),
pd.Series(df_sub['awareness'].values[:-1],name='N-1'),
normalize=1)
transition_matrix.append(temp.get_values().flatten())
temp = pd.crosstab( | pd.Series(df_sub['awareness'].values[1:],name='N') | pandas.Series |
import os
import tempfile
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import simulation as sim
from ...utils.testing import assert_frames_equal
def setup_function(func):
sim.clear_sim()
sim.enable_cache()
def teardown_function(func):
sim.clear_sim()
sim.enable_cache()
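# Each test runs against a freshly cleared simulation registry with caching enabled
# (see setup/teardown above); the df fixture below supplies the small 3-row frame that
# most tests register as a table.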
@pytest.fixture
def df():
return pd.DataFrame(
{'a': [1, 2, 3],
'b': [4, 5, 6]},
index=['x', 'y', 'z'])
def test_tables(df):
wrapped_df = sim.add_table('test_frame', df)
@sim.table()
def test_func(test_frame):
return test_frame.to_frame() / 2
assert set(sim.list_tables()) == {'test_frame', 'test_func'}
table = sim.get_table('test_frame')
assert table is wrapped_df
assert table.columns == ['a', 'b']
assert table.local_columns == ['a', 'b']
assert len(table) == 3
pdt.assert_index_equal(table.index, df.index)
pdt.assert_series_equal(table.get_column('a'), df.a)
pdt.assert_series_equal(table.a, df.a)
pdt.assert_series_equal(table['b'], df['b'])
table = sim._TABLES['test_func']
assert table.index is None
assert table.columns == []
    assert len(table) == 0
pdt.assert_frame_equal(table.to_frame(), df / 2)
pdt.assert_frame_equal(table.to_frame(columns=['a']), df[['a']] / 2)
pdt.assert_index_equal(table.index, df.index)
pdt.assert_series_equal(table.get_column('a'), df.a / 2)
pdt.assert_series_equal(table.a, df.a / 2)
pdt.assert_series_equal(table['b'], df['b'] / 2)
assert len(table) == 3
assert table.columns == ['a', 'b']
def test_table_func_cache(df):
sim.add_injectable('x', 2)
@sim.table(cache=True)
def table(variable='x'):
return df * variable
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 2)
sim.add_injectable('x', 3)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 2)
sim.get_table('table').clear_cached()
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
sim.add_injectable('x', 4)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
sim.clear_cache()
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 4)
sim.add_injectable('x', 5)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 4)
sim.add_table('table', table)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 5)
def test_table_func_cache_disabled(df):
sim.add_injectable('x', 2)
@sim.table('table', cache=True)
def asdf(x):
return df * x
sim.disable_cache()
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 2)
sim.add_injectable('x', 3)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
sim.enable_cache()
sim.add_injectable('x', 4)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
def test_table_copy(df):
sim.add_table('test_frame_copied', df, copy_col=True)
sim.add_table('test_frame_uncopied', df, copy_col=False)
sim.add_table('test_func_copied', lambda: df, copy_col=True)
sim.add_table('test_func_uncopied', lambda: df, copy_col=False)
@sim.table(copy_col=True)
def test_funcd_copied():
return df
@sim.table(copy_col=False)
def test_funcd_uncopied():
return df
@sim.table(copy_col=True)
def test_funcd_copied2(test_frame_copied):
# local returns original, but it is copied by copy_col.
return test_frame_copied.local
@sim.table(copy_col=True)
def test_funcd_copied3(test_frame_uncopied):
# local returns original, but it is copied by copy_col.
return test_frame_uncopied.local
@sim.table(copy_col=False)
def test_funcd_uncopied2(test_frame_copied):
# local returns original.
return test_frame_copied.local
@sim.table(copy_col=False)
def test_funcd_uncopied3(test_frame_uncopied):
# local returns original.
return test_frame_uncopied.local
sim.add_table('test_cache_copied', lambda: df, cache=True, copy_col=True)
sim.add_table(
'test_cache_uncopied', lambda: df, cache=True, copy_col=False)
@sim.table(cache=True, copy_col=True)
def test_cached_copied():
return df
@sim.table(cache=True, copy_col=False)
def test_cached_uncopied():
return df
# Create tables with computed columns.
sim.add_table('test_copied_columns', pd.DataFrame(index=df.index),
copy_col=True)
sim.add_table('test_uncopied_columns', pd.DataFrame(index=df.index),
copy_col=False)
for column_name in ['a', 'b']:
label = "test_frame_uncopied.{}".format(column_name)
func = lambda col=label: col
for table_name in ['test_copied_columns', 'test_uncopied_columns']:
sim.add_column(table_name, column_name, func)
for name in ['test_frame_uncopied', 'test_func_uncopied',
'test_funcd_uncopied', 'test_funcd_uncopied2',
'test_funcd_uncopied3', 'test_cache_uncopied',
'test_cached_uncopied', 'test_uncopied_columns',
'test_frame_copied', 'test_func_copied',
'test_funcd_copied', 'test_funcd_copied2',
'test_funcd_copied3', 'test_cache_copied',
'test_cached_copied', 'test_copied_columns']:
table = sim.get_table(name)
table2 = sim.get_table(name)
# to_frame will always return a copy.
pdt.assert_frame_equal(table.to_frame(), df)
assert table.to_frame() is not df
pdt.assert_frame_equal(table.to_frame(), table.to_frame())
assert table.to_frame() is not table.to_frame()
pdt.assert_series_equal(table.to_frame()['a'], df['a'])
assert table.to_frame()['a'] is not df['a']
pdt.assert_series_equal(table.to_frame()['a'],
table.to_frame()['a'])
assert table.to_frame()['a'] is not table.to_frame()['a']
if 'uncopied' in name:
pdt.assert_series_equal(table['a'], df['a'])
assert table['a'] is df['a']
pdt.assert_series_equal(table['a'], table2['a'])
assert table['a'] is table2['a']
else:
pdt.assert_series_equal(table['a'], df['a'])
assert table['a'] is not df['a']
pdt.assert_series_equal(table['a'], table2['a'])
assert table['a'] is not table2['a']
def test_columns_for_table():
sim.add_column(
'table1', 'col10', pd.Series([1, 2, 3], index=['a', 'b', 'c']))
sim.add_column(
'table2', 'col20', pd.Series([10, 11, 12], index=['x', 'y', 'z']))
@sim.column('table1')
def col11():
return pd.Series([4, 5, 6], index=['a', 'b', 'c'])
@sim.column('table2', 'col21')
def asdf():
return pd.Series([13, 14, 15], index=['x', 'y', 'z'])
t1_col_names = sim._list_columns_for_table('table1')
assert set(t1_col_names) == {'col10', 'col11'}
t2_col_names = sim._list_columns_for_table('table2')
assert set(t2_col_names) == {'col20', 'col21'}
t1_cols = sim._columns_for_table('table1')
assert 'col10' in t1_cols and 'col11' in t1_cols
t2_cols = sim._columns_for_table('table2')
assert 'col20' in t2_cols and 'col21' in t2_cols
def test_columns_and_tables(df):
sim.add_table('test_frame', df)
@sim.table()
def test_func(test_frame):
return test_frame.to_frame() / 2
sim.add_column('test_frame', 'c', pd.Series([7, 8, 9], index=df.index))
@sim.column('test_func', 'd')
def asdf(test_func):
return test_func.to_frame(columns=['b'])['b'] * 2
@sim.column('test_func')
def e(column='test_func.d'):
return column + 1
test_frame = sim.get_table('test_frame')
assert set(test_frame.columns) == set(['a', 'b', 'c'])
assert_frames_equal(
test_frame.to_frame(),
pd.DataFrame(
{'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]},
index=['x', 'y', 'z']))
assert_frames_equal(
test_frame.to_frame(columns=['a', 'c']),
pd.DataFrame(
{'a': [1, 2, 3],
'c': [7, 8, 9]},
index=['x', 'y', 'z']))
test_func_df = sim._TABLES['test_func']
assert set(test_func_df.columns) == set(['d', 'e'])
assert_frames_equal(
test_func_df.to_frame(),
pd.DataFrame(
{'a': [0.5, 1, 1.5],
'b': [2, 2.5, 3],
'c': [3.5, 4, 4.5],
'd': [4., 5., 6.],
'e': [5., 6., 7.]},
index=['x', 'y', 'z']))
assert_frames_equal(
test_func_df.to_frame(columns=['b', 'd']),
pd.DataFrame(
{'b': [2, 2.5, 3],
'd': [4., 5., 6.]},
index=['x', 'y', 'z']))
assert set(test_func_df.columns) == set(['a', 'b', 'c', 'd', 'e'])
assert set(sim.list_columns()) == {('test_frame', 'c'), ('test_func', 'd'),
('test_func', 'e')}
def test_column_cache(df):
sim.add_injectable('x', 2)
series = pd.Series([1, 2, 3], index=['x', 'y', 'z'])
key = ('table', 'col')
@sim.table()
def table():
return df
@sim.column(*key, cache=True)
def column(variable='x'):
return series * variable
c = lambda: sim._COLUMNS[key]
pdt.assert_series_equal(c()(), series * 2)
sim.add_injectable('x', 3)
pdt.assert_series_equal(c()(), series * 2)
c().clear_cached()
pdt.assert_series_equal(c()(), series * 3)
sim.add_injectable('x', 4)
pdt.assert_series_equal(c()(), series * 3)
sim.clear_cache()
pdt.assert_series_equal(c()(), series * 4)
sim.add_injectable('x', 5)
pdt.assert_series_equal(c()(), series * 4)
sim.get_table('table').clear_cached()
pdt.assert_series_equal(c()(), series * 5)
sim.add_injectable('x', 6)
pdt.assert_series_equal(c()(), series * 5)
sim.add_column(*key, column=column, cache=True)
pdt.assert_series_equal(c()(), series * 6)
def test_column_cache_disabled(df):
sim.add_injectable('x', 2)
series = pd.Series([1, 2, 3], index=['x', 'y', 'z'])
key = ('table', 'col')
@sim.table()
def table():
return df
@sim.column(*key, cache=True)
def column(x):
return series * x
c = lambda: sim._COLUMNS[key]
sim.disable_cache()
pdt.assert_series_equal(c()(), series * 2)
sim.add_injectable('x', 3)
pdt.assert_series_equal(c()(), series * 3)
sim.enable_cache()
sim.add_injectable('x', 4)
pdt.assert_series_equal(c()(), series * 3)
def test_update_col(df):
wrapped = sim.add_table('table', df)
wrapped.update_col('b', pd.Series([7, 8, 9], index=df.index))
pdt.assert_series_equal(wrapped['b'], pd.Series([7, 8, 9], index=df.index))
wrapped.update_col_from_series('a', pd.Series([]))
pdt.assert_series_equal(wrapped['a'], df['a'])
wrapped.update_col_from_series('a', pd.Series([99], index=['y']))
pdt.assert_series_equal(
wrapped['a'], pd.Series([1, 99, 3], index=df.index))
def test_models(df):
sim.add_table('test_table', df)
df2 = df / 2
sim.add_table('test_table2', df2)
@sim.model()
def test_model(test_table, test_column='test_table2.b'):
tt = test_table.to_frame()
test_table['a'] = tt['a'] + tt['b']
pdt.assert_series_equal(test_column, df2['b'])
with pytest.raises(KeyError):
sim.get_model('asdf')
model = sim.get_model('test_model')
assert model._tables_used() == set(['test_table', 'test_table2'])
model()
table = sim.get_table('test_table')
pdt.assert_frame_equal(
table.to_frame(),
pd.DataFrame(
{'a': [5, 7, 9],
'b': [4, 5, 6]},
index=['x', 'y', 'z']))
assert sim.list_models() == ['test_model']
def test_model_run(df):
sim.add_table('test_table', df)
@sim.table()
def table_func(test_table):
tt = test_table.to_frame()
tt['c'] = [7, 8, 9]
return tt
@sim.column('table_func')
def new_col(test_table, table_func):
tt = test_table.to_frame()
tf = table_func.to_frame(columns=['c'])
return tt['a'] + tt['b'] + tf['c']
@sim.model()
def test_model1(year, test_table, table_func):
tf = table_func.to_frame(columns=['new_col'])
test_table[year] = tf['new_col'] + year
@sim.model('test_model2')
def asdf(table='test_table'):
tt = table.to_frame()
table['a'] = tt['a'] ** 2
sim.run(models=['test_model1', 'test_model2'], years=[2000, 3000])
test_table = sim.get_table('test_table')
assert_frames_equal(
test_table.to_frame(),
pd.DataFrame(
{'a': [1, 16, 81],
'b': [4, 5, 6],
2000: [2012, 2015, 2018],
3000: [3012, 3017, 3024]},
index=['x', 'y', 'z']))
m = sim.get_model('test_model1')
assert set(m._tables_used()) == {'test_table', 'table_func'}
def test_get_broadcasts():
sim.broadcast('a', 'b')
sim.broadcast('b', 'c')
sim.broadcast('z', 'b')
sim.broadcast('f', 'g')
with pytest.raises(ValueError):
sim._get_broadcasts(['a', 'b', 'g'])
assert set(sim._get_broadcasts(['a', 'b', 'c', 'z']).keys()) == \
{('a', 'b'), ('b', 'c'), ('z', 'b')}
assert set(sim._get_broadcasts(['a', 'b', 'z']).keys()) == \
{('a', 'b'), ('z', 'b')}
assert set(sim._get_broadcasts(['a', 'b', 'c']).keys()) == \
{('a', 'b'), ('b', 'c')}
assert set(sim.list_broadcasts()) == \
{('a', 'b'), ('b', 'c'), ('z', 'b'), ('f', 'g')}
def test_collect_variables(df):
sim.add_table('df', df)
@sim.table()
def df_func():
return df
@sim.column('df')
def zzz():
return df['a'] / 2
sim.add_injectable('answer', 42)
@sim.injectable()
def injected():
return 'injected'
@sim.table('source table', cache=True)
def source():
return df
with pytest.raises(KeyError):
sim._collect_variables(['asdf'])
with pytest.raises(KeyError):
sim._collect_variables(names=['df'], expressions=['asdf'])
names = ['df', 'df_func', 'answer', 'injected', 'source_label', 'df_a']
expressions = ['source table', 'df.a']
things = sim._collect_variables(names, expressions)
assert set(things.keys()) == set(names)
assert isinstance(things['source_label'], sim.DataFrameWrapper)
pdt.assert_frame_equal(things['source_label'].to_frame(), df)
assert isinstance(things['df_a'], pd.Series)
pdt.assert_series_equal(things['df_a'], df['a'])
def test_collect_variables_expression_only(df):
@sim.table()
def table():
return df
vars = sim._collect_variables(['a'], ['table.a'])
pdt.assert_series_equal(vars['a'], df.a)
def test_injectables():
sim.add_injectable('answer', 42)
@sim.injectable()
def func1(answer):
return answer * 2
@sim.injectable('func2', autocall=False)
def asdf(variable='x'):
return variable / 2
@sim.injectable()
def func3(func2):
return func2(4)
@sim.injectable()
def func4(func='func1'):
return func / 2
assert sim._INJECTABLES['answer'] == 42
assert sim._INJECTABLES['func1']() == 42 * 2
assert sim._INJECTABLES['func2'](4) == 2
assert sim._INJECTABLES['func3']() == 2
assert sim._INJECTABLES['func4']() == 42
assert sim.get_injectable('answer') == 42
assert sim.get_injectable('func1') == 42 * 2
assert sim.get_injectable('func2')(4) == 2
assert sim.get_injectable('func3') == 2
assert sim.get_injectable('func4') == 42
with pytest.raises(KeyError):
sim.get_injectable('asdf')
assert set(sim.list_injectables()) == \
{'answer', 'func1', 'func2', 'func3', 'func4'}
def test_injectables_combined(df):
@sim.injectable()
def column():
return pd.Series(['a', 'b', 'c'], index=df.index)
@sim.table()
def table():
return df
@sim.model()
def model(table, column):
df = table.to_frame()
df['new'] = column
sim.add_table('table', df)
sim.run(models=['model'])
table_wr = sim.get_table('table').to_frame()
pdt.assert_frame_equal(table_wr[['a', 'b']], df)
pdt.assert_series_equal(table_wr['new'], column())
def test_injectables_cache():
x = 2
@sim.injectable(autocall=True, cache=True)
def inj():
return x * x
i = lambda: sim._INJECTABLES['inj']
assert i()() == 4
x = 3
assert i()() == 4
i().clear_cached()
assert i()() == 9
x = 4
assert i()() == 9
sim.clear_cache()
assert i()() == 16
x = 5
assert i()() == 16
sim.add_injectable('inj', inj, autocall=True, cache=True)
assert i()() == 25
def test_injectables_cache_disabled():
x = 2
@sim.injectable(autocall=True, cache=True)
def inj():
return x * x
i = lambda: sim._INJECTABLES['inj']
sim.disable_cache()
assert i()() == 4
x = 3
assert i()() == 9
sim.enable_cache()
assert i()() == 9
x = 4
assert i()() == 9
sim.disable_cache()
assert i()() == 16
def test_clear_cache_all(df):
@sim.table(cache=True)
def table():
return df
@sim.column('table', cache=True)
def z(table):
return df.a
@sim.injectable(cache=True)
def x():
return 'x'
sim.eval_variable('table.z')
sim.eval_variable('x')
assert sim._TABLE_CACHE.keys() == ['table']
assert sim._COLUMN_CACHE.keys() == [('table', 'z')]
assert sim._INJECTABLE_CACHE.keys() == ['x']
sim.clear_cache()
assert sim._TABLE_CACHE == {}
assert sim._COLUMN_CACHE == {}
assert sim._INJECTABLE_CACHE == {}
def test_clear_cache_scopes(df):
@sim.table(cache=True, cache_scope='forever')
def table():
return df
@sim.column('table', cache=True, cache_scope='iteration')
def z(table):
return df.a
@sim.injectable(cache=True, cache_scope='step')
def x():
return 'x'
sim.eval_variable('table.z')
sim.eval_variable('x')
assert sim._TABLE_CACHE.keys() == ['table']
assert sim._COLUMN_CACHE.keys() == [('table', 'z')]
assert sim._INJECTABLE_CACHE.keys() == ['x']
sim.clear_cache(scope='step')
assert sim._TABLE_CACHE.keys() == ['table']
assert sim._COLUMN_CACHE.keys() == [('table', 'z')]
assert sim._INJECTABLE_CACHE == {}
sim.clear_cache(scope='iteration')
assert sim._TABLE_CACHE.keys() == ['table']
assert sim._COLUMN_CACHE == {}
assert sim._INJECTABLE_CACHE == {}
sim.clear_cache(scope='forever')
assert sim._TABLE_CACHE == {}
assert sim._COLUMN_CACHE == {}
assert sim._INJECTABLE_CACHE == {}
def test_cache_scope(df):
sim.add_injectable('x', 11)
sim.add_injectable('y', 22)
sim.add_injectable('z', 33)
sim.add_injectable('iterations', 1)
@sim.injectable(cache=True, cache_scope='forever')
def a(x):
return x
@sim.injectable(cache=True, cache_scope='iteration')
def b(y):
return y
@sim.injectable(cache=True, cache_scope='step')
def c(z):
return z
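    # expected semantics exercised below: 'forever'-scoped a is computed once and never
    # refreshed, 'iteration'-scoped b is refreshed between years, and 'step'-scoped c is
    # refreshed after every model step; m1/m2 assert this via the injected values.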
@sim.model()
def m1(year, a, b, c):
sim.add_injectable('x', year + a)
sim.add_injectable('y', year + b)
sim.add_injectable('z', year + c)
assert a == 11
@sim.model()
def m2(year, a, b, c, iterations):
assert a == 11
if year == 1000:
assert b == 22
assert c == 1033
elif year == 2000:
assert b == 1022
assert c == 3033
sim.add_injectable('iterations', iterations + 1)
sim.run(['m1', 'm2'], years=[1000, 2000])
def test_table_func_local_cols(df):
@sim.table()
def table():
return df
sim.add_column('table', 'new', pd.Series(['a', 'b', 'c'], index=df.index))
assert sim.get_table('table').local_columns == ['a', 'b']
def test_is_table(df):
sim.add_table('table', df)
assert sim._is_table('table') is True
assert sim._is_table('asdf') is False
@pytest.fixture
def store_name(request):
fname = tempfile.NamedTemporaryFile(suffix='.h5').name
def fin():
if os.path.isfile(fname):
os.remove(fname)
request.addfinalizer(fin)
return fname
def test_write_tables(df, store_name):
sim.add_table('table', df)
@sim.model()
def model(table):
pass
sim.write_tables(store_name, ['model'], None)
with pd.get_store(store_name, mode='r') as store:
assert 'table' in store
pdt.assert_frame_equal(store['table'], df)
sim.write_tables(store_name, ['model'], 1969)
with pd.get_store(store_name, mode='r') as store:
assert '1969/table' in store
pdt.assert_frame_equal(store['1969/table'], df)
def test_run_and_write_tables(df, store_name):
sim.add_table('table', df)
year_key = lambda y: '{}'.format(y)
series_year = lambda y: pd.Series([y] * 3, index=df.index)
@sim.model()
def model(year, table):
table[year_key(year)] = series_year(year)
sim.run(['model'], years=range(11), data_out=store_name, out_interval=3)
with | pd.get_store(store_name, mode='r') | pandas.get_store |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
| `@purpose`: Download raw data from apache, Box, or REDCap servers.
| `@date`: Created on Sat May 1 15:12:38 2019
| `@author`: <NAME>
| `@email`: <EMAIL>
| `@url`: https://semeon.io/d/imhr
"""
# available functions
__all__ = ['Download']
# required =
__required__ = ['openpyxl', 'requests', 'pandas']
# core
from pdb import set_trace as breakpoint
# local libraries
from .. import settings
# check that required core modules are available
try:
# core
import os
from datetime import datetime
from pathlib import Path
except ImportError as e:
pkg = e.name
x = {'psychopy':'psychopy','pandas':'pandas'}
pkg = x[pkg] if pkg in x else pkg
raise Exception("No module named '%s'. Please install from PyPI before continuing."%(pkg),'red')
class Download():
"""Download raw data from apache, Box, or REDCap servers."""
@classmethod
def __init__(self, isLibrary=False):
"""Download raw data from apache, Box, or REDCap servers.
Parameters
----------
isLibrary : :obj:`bool`
Check if required libraries are available.
"""
#check libraries
if isLibrary:
settings.library(__required__)
@classmethod
def REDCap(cls, path, token, url, content, payload=None, **kwargs):
"""Download data from an Research Electronic Data Capture (REDCap) server.
Parameters
----------
path : :obj:`str`
Path to save data.
For example::
>>> path = '/Users/mdl-admin/Desktop/r33/redcap'
token : :obj:`str`
The API token specific to your REDCap project and username. This is usually found on the Applications > API page.
For example::
>>> token = '<PASSWORD>823032SFDMR24395298'
url : :obj:`str`
The URL of the REDCap project.
For example::
>>> url = 'https://redcap.prc.utexas.edu/redcap/api/'.
content : :obj:`str` {report, file, raw, arm, instrument, returnFormat, metadata, project, surveyLink, user, participantList}
Type of export. Examples include exporting a report (`report`), file (`file`), or project info (`project`).
payload_ : :obj:`dict` or :obj:`None`, optional
Manually submit parameters for exporting or importing data. Can be entered within the function for convenience.
**kwargs : :obj:`str` or :obj:`None`, optional
Additional properties, relevent for specific content types. Here's a list of available properties:
.. list-table::
:class: kwargs
:widths: 25 50
:header-rows: 1
* - Property
- Description
* - **report_id** : :obj:`str`
- (report, record) The report ID number provided next to the report name on the report list page.
* - **cformat** : :obj:`str` {csv, json, xml, odm}
- Format to return data, either csv, json, xml, or odm. Default is json.
* - **ctype** : :obj:`str`
- Shape of data. Default is flat.
* - **rawOrLabel**: :obj:`str` {raw, label}
            - (report, record) Export the raw coded values or labels for the options of multiple choice fields.
* - **rawOrLabelHeaders**: :obj:`str`
            - (report, record) Export the variable/field names (raw) or the field labels (label).
* - **exportCheckboxLabel**: :obj:`str`
- Specifies the format of checkbox field values specifically when exporting the data as labels (i.e., when rawOrLabel=label).
* - **returnFormat** : :obj:`str`
- Format to return errors. Default is `json`.
Returns
-------
log : :obj:`pandas.DataFrame` or :obj:`None`
Pandas dataframe of each download request.
content : :obj:`pandas.DataFrame` or :obj:`None`
Pandas dataframe of all data downloaded.
start, end : :obj:`str`
Timestamp (ISO format) and name of most recent (`end`) and first (`start`) file created in folder.
now : :obj:`str`
Current timestamp in ISO format.
"""
import openpyxl, requests
import pandas as pd
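        # Hypothetical usage per the docstring above (token and report_id are placeholders):
        #   log, report_df, start, end, now = Download.REDCap(
        #       path='/Users/mdl-admin/Desktop/r33/redcap', token='ABC123',
        #       url='https://redcap.prc.utexas.edu/redcap/api/',
        #       content='report', report_id='1234')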
#----constants, lists to prepare
ldate = [] #list of dates
file_num = 0 #file counter
# bool
log = None #log of events
start = None #most recent file
end = None #most recent file
#----path
# log
name = Path(path).name
log_path = os.path.abspath(os.path.dirname(path + "/../"))
# destination
path = Path(path)
#----kwargs
# general
cformat = kwargs['cformat'] if "cformat" in kwargs else 'json'
ctype = kwargs['ctype'] if "ctype" in kwargs else 'flat'
rawOrLabel = kwargs['rawOrLabel'] if "rawOrLabel" in kwargs else 'raw'
rawOrLabelHeaders = kwargs['rawOrLabelHeaders'] if "rawOrLabelHeaders" in kwargs else 'raw'
exportCheckboxLabel = kwargs['exportCheckboxLabel'] if "exportCheckboxLabel" in kwargs else 'false'
returnFormat = kwargs['returnFormat'] if "returnFormat" in kwargs else 'json'
# report
report_id = kwargs['report_id'] if "report_id" in kwargs else 'None'
# participantList
instrument = kwargs['instrument'] if "instrument" in kwargs else 'cssrs'
event = kwargs['event'] if "event" in kwargs else 'online_eligibility_arm_1'
#----start
settings.console('connecting to REDCap', 'blue')
#----make sure local path exists
settings.console('local folder: %s'%(Path(path)), 'blue')
if not os.path.exists(Path(path)):
settings.console('creating local folder: %s'%(Path(path)), 'blue')
os.makedirs(Path(path))
#----start download
# prepare
if payload == None:
# report
if content == 'report':
payload = {
'token': token,
'content': content,
'format': cformat,
'type': ctype,
'returnFormat': returnFormat,
'report_id': report_id,
'rawOrLabel': rawOrLabel,
'rawOrLabelHeaders': rawOrLabelHeaders,
'exportCheckboxLabel': exportCheckboxLabel
}
elif content == 'participantList':
payload = {
'token': token,
'content': content,
'format': cformat,
'returnFormat': returnFormat,
'instrument': instrument,
'event': event,
}
elif content == 'metadata':
payload = {
'token': token,
'content': content,
'format': cformat,
'returnFormat': returnFormat,
}
elif content == 'project':
payload = {
'token': token,
'content': content,
'format': cformat,
'returnFormat': returnFormat,
}
else:
raise Exception('Not finished. Content `%s` unavailable.'%(content))
# get data
response = requests.post(url, data=payload)
json = response.json()
# if error
if 'error' in json:
raise Exception('Export failed. Reason: %s.'%(json['error']))
# convert to dataframe
df = pd.DataFrame(json)
# save data
path_= os.path.dirname(path) + "/REDCap/%s.xlsx"%(content)
df.to_excel(path_, index=False)
# if list isnt empty get start and end dates
if ldate:
## start
start = min(map(lambda x: [x[0],x[1]], ldate))
settings.console('oldest file: %s'%(start[0]), 'blue')
## end
end = max(map(lambda x: [x[0],x[1]], ldate))
settings.console('newest file: %s'%(end[0]), 'blue')
#----log
# if new files
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
#!!! add values to go into dataframe----
row = [df.shape[0],token,content,now]
# log file path
fpath = "%s/%s.xlsx"%(log_path, name)
# if log exists, update
if os.path.exists(fpath):
settings.console("Log updated @: %s"%(fpath), 'blue')
#load file
wb = openpyxl.load_workbook(fpath)
# Select First Worksheet
ws = wb.worksheets[0]
# add data
ws.append(row)
# update
wb.save(fpath)
# import log
log = | pd.ExcelFile(fpath) | pandas.ExcelFile |
import pytest
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map
from pandas.errors import OutOfBoundsDatetime
from pandas import Period, Timestamp, offsets
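# Naming convention used in the TestFreqConversion class below: ival_<FROM>_to_<TO>_start / _end
# hold the expected Period when converting with how='S' (period start) or how='E' (period end).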
class TestFreqConversion:
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize("freq", ["A", "Q", "M", "W", "B", "D"])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period("0001-01-01", freq=freq)
tup1 = (per.year, per.hour, per.day)
prev = per - 1
assert prev.ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period("0001-01-01", "D") + 6
per2 = Period("0001-01-01", "D") - 6
week1 = per1.asfreq("W")
week2 = per2.asfreq("W")
assert week1 != week2
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
def test_to_timestamp_out_of_bounds(self):
# GH#19643, used to incorrectly give Timestamp in 1754
per = Period("0001-01-01", freq="B")
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq="A", year=2007)
result1 = val.asfreq("5t")
result2 = val.asfreq("t")
expected = Period("2007-12-31 23:59", freq="t")
assert result1.ordinal == expected.ordinal
assert result1.freqstr == "5T"
assert result2.ordinal == expected.ordinal
assert result2.freqstr == "T"
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq="A", year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
ival_A_to_M_start = Period(freq="M", year=2007, month=1)
ival_A_to_M_end = Period(freq="M", year=2007, month=12)
ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_A_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_A_to_T_end = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_A_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_A_to_S_end = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1)
assert ival_A.asfreq("Q", "S") == ival_A_to_Q_start
assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end
assert ival_A.asfreq("M", "s") == ival_A_to_M_start
assert ival_A.asfreq("M", "E") == ival_A_to_M_end
assert ival_A.asfreq("W", "S") == ival_A_to_W_start
assert ival_A.asfreq("W", "E") == ival_A_to_W_end
assert ival_A.asfreq("B", "S") == ival_A_to_B_start
assert ival_A.asfreq("B", "E") == ival_A_to_B_end
assert ival_A.asfreq("D", "S") == ival_A_to_D_start
assert ival_A.asfreq("D", "E") == ival_A_to_D_end
assert ival_A.asfreq("H", "S") == ival_A_to_H_start
assert ival_A.asfreq("H", "E") == ival_A_to_H_end
assert ival_A.asfreq("min", "S") == ival_A_to_T_start
assert ival_A.asfreq("min", "E") == ival_A_to_T_end
assert ival_A.asfreq("T", "S") == ival_A_to_T_start
assert ival_A.asfreq("T", "E") == ival_A_to_T_end
assert ival_A.asfreq("S", "S") == ival_A_to_S_start
assert ival_A.asfreq("S", "E") == ival_A_to_S_end
assert ival_AJAN.asfreq("D", "S") == ival_AJAN_to_D_start
assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end
assert ival_AJUN.asfreq("D", "S") == ival_AJUN_to_D_start
assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end
assert ival_ANOV.asfreq("D", "S") == ival_ANOV_to_D_start
assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
assert ival_A.asfreq("A") == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq="Q", year=2007, quarter=1)
ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq="A", year=2007)
ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_Q_to_T_end = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_Q_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_Q_to_S_end = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
assert ival_Q.asfreq("A") == ival_Q_to_A
assert ival_Q_end_of_year.asfreq("A") == ival_Q_to_A
assert ival_Q.asfreq("M", "S") == ival_Q_to_M_start
assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
assert ival_Q.asfreq("W", "S") == ival_Q_to_W_start
assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end
assert ival_Q.asfreq("B", "S") == ival_Q_to_B_start
assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end
assert ival_Q.asfreq("D", "S") == ival_Q_to_D_start
assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end
assert ival_Q.asfreq("H", "S") == ival_Q_to_H_start
assert ival_Q.asfreq("H", "E") == ival_Q_to_H_end
assert ival_Q.asfreq("Min", "S") == ival_Q_to_T_start
assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end
assert ival_Q.asfreq("S", "S") == ival_Q_to_S_start
assert ival_Q.asfreq("S", "E") == ival_Q_to_S_end
assert ival_QEJAN.asfreq("D", "S") == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq("D", "S") == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end
assert ival_Q.asfreq("Q") == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq="M", year=2007, month=1)
ival_M_end_of_year = Period(freq="M", year=2007, month=12)
ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
ival_M_to_A = Period(freq="A", year=2007)
ival_M_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_M_to_T_end = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_M_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_M_to_S_end = Period(
freq="S", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
assert ival_M.asfreq("A") == ival_M_to_A
assert ival_M_end_of_year.asfreq("A") == ival_M_to_A
assert ival_M.asfreq("Q") == ival_M_to_Q
assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q
assert ival_M.asfreq("W", "S") == ival_M_to_W_start
assert ival_M.asfreq("W", "E") == ival_M_to_W_end
assert ival_M.asfreq("B", "S") == ival_M_to_B_start
assert ival_M.asfreq("B", "E") == ival_M_to_B_end
assert ival_M.asfreq("D", "S") == ival_M_to_D_start
assert ival_M.asfreq("D", "E") == ival_M_to_D_end
assert ival_M.asfreq("H", "S") == ival_M_to_H_start
assert ival_M.asfreq("H", "E") == ival_M_to_H_end
assert ival_M.asfreq("Min", "S") == ival_M_to_T_start
assert ival_M.asfreq("Min", "E") == ival_M_to_T_end
assert ival_M.asfreq("S", "S") == ival_M_to_S_start
assert ival_M.asfreq("S", "E") == ival_M_to_S_end
assert ival_M.asfreq("M") == ival_M
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq="W", year=2007, month=1, day=1)
ival_WSUN = Period(freq="W", year=2007, month=1, day=7)
ival_WSAT = Period(freq="W-SAT", year=2007, month=1, day=6)
ival_WFRI = Period(freq="W-FRI", year=2007, month=1, day=5)
ival_WTHU = Period(freq="W-THU", year=2007, month=1, day=4)
ival_WWED = Period(freq="W-WED", year=2007, month=1, day=3)
ival_WTUE = Period(freq="W-TUE", year=2007, month=1, day=2)
ival_WMON = Period(freq="W-MON", year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq="D", year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq="D", year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq="D", year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq="D", year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq="D", year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq="D", year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq="D", year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq="D", year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq="D", year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq="D", year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq="D", year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq="D", year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq="W", year=2007, month=1, day=31)
ival_W_to_A = Period(freq="A", year=2007)
ival_W_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_W_to_M = Period(freq="M", year=2007, month=1)
if Period(freq="D", year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq="A", year=2007)
else:
ival_W_to_A_end_of_year = Period(freq="A", year=2008)
if Period(freq="D", year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=2)
if Period(freq="D", year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=2)
ival_W_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq="B", year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq="H", year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_W_to_T_end = Period(
freq="Min", year=2007, month=1, day=7, hour=23, minute=59
)
ival_W_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_W_to_S_end = Period(
freq="S", year=2007, month=1, day=7, hour=23, minute=59, second=59
)
assert ival_W.asfreq("A") == ival_W_to_A
assert ival_W_end_of_year.asfreq("A") == ival_W_to_A_end_of_year
assert ival_W.asfreq("Q") == ival_W_to_Q
assert ival_W_end_of_quarter.asfreq("Q") == ival_W_to_Q_end_of_quarter
assert ival_W.asfreq("M") == ival_W_to_M
assert ival_W_end_of_month.asfreq("M") == ival_W_to_M_end_of_month
assert ival_W.asfreq("B", "S") == ival_W_to_B_start
assert ival_W.asfreq("B", "E") == ival_W_to_B_end
assert ival_W.asfreq("D", "S") == ival_W_to_D_start
assert ival_W.asfreq("D", "E") == ival_W_to_D_end
assert ival_WSUN.asfreq("D", "S") == ival_WSUN_to_D_start
assert ival_WSUN.asfreq("D", "E") == ival_WSUN_to_D_end
assert ival_WSAT.asfreq("D", "S") == ival_WSAT_to_D_start
assert ival_WSAT.asfreq("D", "E") == ival_WSAT_to_D_end
assert ival_WFRI.asfreq("D", "S") == ival_WFRI_to_D_start
assert ival_WFRI.asfreq("D", "E") == ival_WFRI_to_D_end
assert ival_WTHU.asfreq("D", "S") == ival_WTHU_to_D_start
assert ival_WTHU.asfreq("D", "E") == ival_WTHU_to_D_end
assert ival_WWED.asfreq("D", "S") == ival_WWED_to_D_start
assert ival_WWED.asfreq("D", "E") == ival_WWED_to_D_end
assert ival_WTUE.asfreq("D", "S") == ival_WTUE_to_D_start
assert ival_WTUE.asfreq("D", "E") == ival_WTUE_to_D_end
assert ival_WMON.asfreq("D", "S") == ival_WMON_to_D_start
assert ival_WMON.asfreq("D", "E") == ival_WMON_to_D_end
assert ival_W.asfreq("H", "S") == ival_W_to_H_start
assert ival_W.asfreq("H", "E") == ival_W_to_H_end
assert ival_W.asfreq("Min", "S") == ival_W_to_T_start
assert ival_W.asfreq("Min", "E") == ival_W_to_T_end
assert ival_W.asfreq("S", "S") == ival_W_to_S_start
assert ival_W.asfreq("S", "E") == ival_W_to_S_end
assert ival_W.asfreq("W") == ival_W
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
ival_W.asfreq("WK")
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
| Period(freq="WK", year=2007, month=1, day=1) | pandas.Period |
__author__ = 'jonathan'
import os
import numpy as np
import xgboost as xgb
import pandas as pd
os.system("ls ../input")
train = pd.read_csv('../input/train.csv')
train = train.drop('id', axis=1)
y = train['target']
y = y.map(lambda s: s[6:])
y = y.map(lambda s: int(s)-1)
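# the raw target is a string such as 'Class_3'; the two maps above strip the 'Class_'
# prefix and shift to 0-based integer labels (0-8), matching num_class=9 below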
train = train.drop('target', axis=1)
x = train
dtrain = xgb.DMatrix(x.as_matrix(), label=y.tolist())
test = pd.read_csv('../input/test.csv')
test = test.drop('id', axis=1)
dtest = xgb.DMatrix(test.as_matrix())
# print(pd.unique(y.values.ravel()))
params = {'max_depth': 6,
'objective': 'multi:softprob',
'eval_metric': 'mlogloss',
'num_class': 9,
'nthread': 8}
# cv = xgb.cv(params, dtrain, 50, 3)
# watchlist = [(dtest, 'eval'), (dtrain, 'train')]
bst = xgb.train(params, dtrain, 50)
# bst.dump_model('dump.raw.txt', 'featmap.txt')
pred = bst.predict(dtest)
# np.savetxt('result.csv', pred, delimiter=',')
df = | pd.DataFrame(pred) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
class FixedWidthVariables(object):
"""Represents a set of variables in a fixed width file."""
def __init__(self, variables, index_base=0):
"""Initializes.
variables: DataFrame
index_base: are the indices 0 or 1 based?
Attributes:
colspecs: list of (start, end) index tuples
names: list of string variable names
"""
self.variables = variables
# note: by default, subtract 1 from colspecs
self.colspecs = variables[['start', 'end']] - index_base
        # convert colspecs to a list of (start, end) integer pairs
self.colspecs = self.colspecs.astype(np.int).values.tolist()
self.names = variables['name']
def read_fixed_width(self, filename, **options):
"""Reads a fixed width ASCII file.
filename: string filename
returns: DataFrame
"""
df = pd.read_fwf(filename,
colspecs=self.colspecs,
names=self.names,
**options)
return df
def read_stata_dict(dct_file, **options):
"""Reads a Stata dictionary file.
dct_file: string filename
options: dict of options passed to open()
returns: FixedWidthVariables object
"""
type_map = dict(byte=int, int=int, long=int, float=float,
double=float, numeric=float)
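    # A variable line in a Stata .dct file looks roughly like (hypothetical example):
    #   _column(1)  str12  caseid  %12s  "RESPONDENT ID"
    # i.e. start column, storage type, variable name, format string and quoted description,
    # which is what the regex and split below pick apart.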
var_info = []
with open(dct_file, **options) as f:
for line in f:
match = re.search( r'_column\(([^)]*)\)', line)
if not match:
continue
start = int(match.group(1))
t = line.split()
vtype, name, fstring = t[1:4]
name = name.lower()
if vtype.startswith('str'):
vtype = str
else:
vtype = type_map[vtype]
long_desc = ' '.join(t[4:]).strip('"')
var_info.append((start, vtype, name, fstring, long_desc))
columns = ['start', 'type', 'name', 'fstring', 'desc']
variables = pd.DataFrame(var_info, columns=columns)
# fill in the end column by shifting the start column
variables['end'] = variables.start.shift(-1)
variables.loc[len(variables)-1, 'end'] = 0
dct = FixedWidthVariables(variables, index_base=1)
return dct
def read_stata(dct_name, dat_name, **options):
"""Reads Stata files from the given directory.
dirname: string
returns: DataFrame
"""
dct = read_stata_dict(dct_name)
df = dct.read_fixed_width(dat_name, **options)
return df
def sample_rows(df, nrows, replace=False):
"""Choose a sample of rows from a DataFrame.
df: DataFrame
nrows: number of rows
replace: whether to sample with replacement
    returns: DataFrame
"""
indices = np.random.choice(df.index, nrows, replace=replace)
sample = df.loc[indices]
return sample
def resample_rows(df):
"""Resamples rows from a DataFrame.
df: DataFrame
returns: DataFrame
"""
return sample_rows(df, len(df), replace=True)
def resample_rows_weighted(df, column='finalwgt'):
"""Resamples a DataFrame using probabilities proportional to given column.
df: DataFrame
column: string column name to use as weights
returns: DataFrame
"""
weights = df[column].copy()
weights /= sum(weights)
indices = np.random.choice(df.index, len(df), replace=True, p=weights)
sample = df.loc[indices]
return sample
def resample_by_year(df, column='wtssall'):
"""Resample rows within each year.
df: DataFrame
column: string name of weight variable
returns DataFrame
"""
grouped = df.groupby('year')
samples = [resample_rows_weighted(group, column)
for _, group in grouped]
sample = pd.concat(samples, ignore_index=True)
return sample
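# Hedged usage sketch (the GSS-style file and the 'wtssall' weight column are
# assumptions mirroring the defaults above, not part of this module):
#
#     gss = pd.read_hdf('gss.hdf5', 'gss')
#     boot = resample_rows(gss)                          # plain bootstrap
#     weighted = resample_rows_weighted(gss, 'wtssall')  # weight-proportional
#     by_year = resample_by_year(gss, 'wtssall')         # resample within years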
# -*- coding: utf-8 -*-
# !/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implements trend based forecasters."""
__author__ = ["<NAME>", "mloning", "aiwalter"]
__all__ = ["TrendForecaster", "PolynomialTrendForecaster", "STLForecaster"]
import numpy as np
import pandas as pd
from sklearn.base import clone
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from statsmodels.tsa.seasonal import STL as _STL
from sktime.forecasting.base import BaseForecaster
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.naive import NaiveForecaster
from sktime.utils.datetime import _get_duration
class TrendForecaster(BaseForecaster):
"""Trend based forecasts of time series data.
Default settings train a linear regression model.
Parameters
----------
regressor : estimator object, default = None
Define the regression model type. If not set, will default to
sklearn.linear_model.LinearRegression
Examples
--------
>>> from sktime.datasets import load_airline
>>> from sktime.forecasting.trend import TrendForecaster
>>> y = load_airline()
>>> forecaster = TrendForecaster()
>>> forecaster.fit(y)
TrendForecaster(...)
>>> y_pred = forecaster.predict(fh=[1,2,3])
"""
_tags = {
"ignores-exogeneous-X": True,
"requires-fh-in-fit": False,
"handles-missing-data": False,
}
def __init__(self, regressor=None):
# for default regressor, set fit_intercept=True
self.regressor = regressor
super(TrendForecaster, self).__init__()
def _fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series with which to fit the forecaster.
X : pd.DataFrame, default=None
Exogenous variables are ignored
fh : int, list or np.array, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
Returns
-------
self : returns an instance of self.
"""
self.regressor_ = self.regressor or LinearRegression(fit_intercept=True)
# create a clone of self.regressor
self.regressor_ = clone(self.regressor_)
# transform data
X = y.index.astype("int").to_numpy().reshape(-1, 1)
# fit regressor
self.regressor_.fit(X, y)
return self
def _predict(self, fh=None, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""Make forecasts for the given forecast horizon.
Parameters
----------
fh : int, list or np.array
The forecast horizon with the steps ahead to predict
X : pd.DataFrame, default=None
Exogenous variables (ignored)
return_pred_int : bool, default=False
Return the prediction intervals for the forecast.
alpha : float or list, default=0.95
If alpha is iterable, multiple intervals will be calculated.
Returns
-------
y_pred : pd.Series
Point predictions for the forecast
y_pred_int : pd.DataFrame
Prediction intervals for the forecast
"""
# use relative fh as time index to predict
fh = self.fh.to_absolute_int(self._y.index[0], self.cutoff)
X_pred = fh.to_numpy().reshape(-1, 1)
y_pred = self.regressor_.predict(X_pred)
return pd.Series(y_pred, index=self.fh.to_absolute(self.cutoff))
class PolynomialTrendForecaster(BaseForecaster):
"""Forecast time series data with a polynomial trend.
Default settings train a linear regression model with a 1st degree
polynomial transformation of the feature.
Parameters
----------
regressor : estimator object, default = None
Define the regression model type. If not set, will default to
sklearn.linear_model.LinearRegression
degree : int, default = 1
Degree of polynomial function
with_intercept : bool, default=True
If true, then include a feature in which all polynomial powers are
zero. (i.e. a column of ones, acts as an intercept term in a linear
model)
Examples
--------
>>> from sktime.datasets import load_airline
>>> from sktime.forecasting.trend import PolynomialTrendForecaster
>>> y = load_airline()
>>> forecaster = PolynomialTrendForecaster(degree=1)
>>> forecaster.fit(y)
PolynomialTrendForecaster(...)
>>> y_pred = forecaster.predict(fh=[1,2,3])
"""
_tags = {
"ignores-exogeneous-X": True,
"requires-fh-in-fit": False,
"handles-missing-data": False,
}
def __init__(self, regressor=None, degree=1, with_intercept=True):
self.regressor = regressor
self.degree = degree
self.with_intercept = with_intercept
self.regressor_ = self.regressor
super(PolynomialTrendForecaster, self).__init__()
def _fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series with which to fit the forecaster.
X : pd.DataFrame, default=None
Exogenous variables are ignored
fh : int, list or np.array, default=None
The forecaster's horizon with the steps ahead to predict.
Returns
-------
self : returns an instance of self.
"""
# for default regressor, set fit_intercept=False as we generate a
# dummy variable in polynomial features
if self.regressor is None:
regressor = LinearRegression(fit_intercept=False)
else:
regressor = self.regressor
# make pipeline with polynomial features
self.regressor_ = make_pipeline(
PolynomialFeatures(degree=self.degree, include_bias=self.with_intercept),
regressor,
)
# transform data
n_timepoints = _get_duration(self._y.index, coerce_to_int=True) + 1
X = np.arange(n_timepoints).reshape(-1, 1)
# fit regressor
self.regressor_.fit(X, y)
return self
def _predict(self, fh=None, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""Make forecasts for the given forecast horizon.
Parameters
----------
fh : int, list or np.array
The forecast horizon with the steps ahead to predict
X : pd.DataFrame, default=None
Exogenous variables (ignored)
return_pred_int : bool, default=False
Return the prediction intervals for the forecast.
alpha : float or list, default=0.95
If alpha is iterable, multiple intervals will be calculated.
Returns
-------
y_pred : pd.Series
Point predictions for the forecast
y_pred_int : pd.DataFrame
Prediction intervals for the forecast
"""
# use relative fh as time index to predict
fh = self.fh.to_absolute_int(self._y.index[0], self.cutoff)
X_pred = fh.to_numpy().reshape(-1, 1)
y_pred = self.regressor_.predict(X_pred)
return pd.Series(y_pred, index=self.fh.to_absolute(self.cutoff))
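# Hedged usage sketch (not part of the sktime source): `regressor` accepts any
# sklearn-style estimator and is wrapped together with PolynomialFeatures in
# _fit above; Ridge and degree=2 below are illustrative choices only.
#
#     from sklearn.linear_model import Ridge
#     from sktime.datasets import load_airline
#     y = load_airline()
#     forecaster = PolynomialTrendForecaster(regressor=Ridge(fit_intercept=False), degree=2)
#     forecaster.fit(y)
#     y_pred = forecaster.predict(fh=[1, 2, 3])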
class STLForecaster(BaseForecaster):
"""Implements STLForecaster based on statsmodels.tsa.seasonal.STL implementation.
The STLForecaster uses STL to decompose the given
series y into three components: trend, season and residuals [1]_. Then,
the forecaster_trend, forecaster_seasonal and forecaster_resid are fitted
on the components individually to forecast them also individually. The
final forecast is then the sum of the three component forecasts. The STL
decomposition is done by means of using the package statsmodels [2]_.
Parameters
----------
sp : int, optional
Length of the seasonal period for STL, by default 2.
It's also the default sp for the forecasters
(forecaster_seasonal, forecaster_resid) that are None. The
default forecaster_trend does not get sp as trend is independent
of seasonality.
seasonal : int, optional
Length of the seasonal smoother. Must be an odd integer, and should
normally be >= 7 (default).
trend : {int, None}, optional
Length of the trend smoother. Must be an odd integer. If not provided
uses the smallest odd integer greater than
1.5 * period / (1 - 1.5 / seasonal), following the suggestion in
the original implementation.
low_pass : {int, None}, optional
Length of the low-pass filter. Must be an odd integer >=3. If not
provided, uses the smallest odd integer > period.
seasonal_deg : int, optional
Degree of seasonal LOESS. 0 (constant) or 1 (constant and trend).
trend_deg : int, optional
Degree of trend LOESS. 0 (constant) or 1 (constant and trend).
low_pass_deg : int, optional
Degree of low pass LOESS. 0 (constant) or 1 (constant and trend).
robust : bool, optional
Flag indicating whether to use a weighted version that is robust to
some forms of outliers.
seasonal_jump : int, optional
Positive integer determining the linear interpolation step. If larger
than 1, the LOESS is used every seasonal_jump points and linear
interpolation is between fitted points. Higher values reduce
estimation time.
trend_jump : int, optional
Positive integer determining the linear interpolation step. If larger
than 1, the LOESS is used every trend_jump points and values between
the two are linearly interpolated. Higher values reduce estimation
time.
low_pass_jump : int, optional
Positive integer determining the linear interpolation step. If larger
than 1, the LOESS is used every low_pass_jump points and values between
the two are linearly interpolated. Higher values reduce estimation
time.
inner_iter: int, optional
Number of iterations to perform in the inner loop. If not provided uses 2 if
robust is True, or 5 if not. This param goes into STL.fit() from statsmodels.
outer_iter: int, optional
Number of iterations to perform in the outer loop. If not provided uses 15 if
robust is True, or 0 if not. This param goes into STL.fit() from statsmodels.
forecaster_trend : sktime forecaster, optional
Forecaster to be fitted on trend_ component of the
STL, by default None. If None, then
a NaiveForecaster(strategy="drift") is used.
forecaster_seasonal : sktime forecaster, optional
Forecaster to be fitted on seasonal_ component of the
STL, by default None. If None, then
a NaiveForecaster(strategy="last") is used.
forecaster_resid : sktime forecaster, optional
Forecaster to be fitted on resid_ component of the
STL, by default None. If None, then
a NaiveForecaster(strategy="mean") is used.
Attributes
----------
trend_ : pd.Series
Trend component.
seasonal_ : pd.Series
Seasonal component.
resid_ : pd.Series
Residuals component.
forecaster_trend_ : sktime forecaster
Fitted trend forecaster.
forecaster_seasonal_ : sktime forecaster
Fitted seasonal forecaster.
forecaster_resid_ : sktime forecaster
Fitted residual forecaster.
Examples
--------
>>> from sktime.datasets import load_airline
>>> from sktime.forecasting.trend import STLForecaster
>>> y = load_airline()
>>> forecaster = STLForecaster(sp=12)
>>> forecaster.fit(y)
STLForecaster(...)
>>> y_pred = forecaster.predict(fh=[1,2,3])
See Also
--------
Deseasonalizer
Detrender
References
----------
.. [1] <NAME>, <NAME>, <NAME>, and <NAME> (1990)
STL: A Seasonal-Trend Decomposition Procedure Based on LOESS.
Journal of Official Statistics, 6, 3-73.
.. [2] https://www.statsmodels.org/dev/generated/statsmodels.tsa.seasonal.STL.html
"""
_tags = {
"scitype:y": "univariate", # which y are fine? univariate/multivariate/both
"ignores-exogeneous-X": False, # does estimator ignore the exogeneous X?
"handles-missing-data": False, # can estimator handle missing data?
"y_inner_mtype": "pd.Series", # which types do _fit, _predict, assume for y?
"X_inner_mtype": "pd.DataFrame", # which types do _fit, _predict, assume for X?
"requires-fh-in-fit": False, # is forecasting horizon already required in fit?
}
def __init__(
self,
sp=2,
seasonal=7,
trend=None,
low_pass=None,
seasonal_deg=1,
trend_deg=1,
low_pass_deg=1,
robust=False,
seasonal_jump=1,
trend_jump=1,
low_pass_jump=1,
inner_iter=None,
outer_iter=None,
forecaster_trend=None,
forecaster_seasonal=None,
forecaster_resid=None,
):
self.sp = sp
self.seasonal = seasonal
self.trend = trend
self.low_pass = low_pass
self.seasonal_deg = seasonal_deg
self.trend_deg = trend_deg
self.low_pass_deg = low_pass_deg
self.robust = robust
self.seasonal_jump = seasonal_jump
self.trend_jump = trend_jump
self.low_pass_jump = low_pass_jump
self.inner_iter = inner_iter
self.outer_iter = outer_iter
self.forecaster_trend = forecaster_trend
self.forecaster_seasonal = forecaster_seasonal
self.forecaster_resid = forecaster_resid
super(STLForecaster, self).__init__()
def _fit(self, y, X=None, fh=None):
"""Fit forecaster to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list, np.array or ForecastingHorizon, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Returns
-------
self : returns an instance of self.
"""
self._stl = _STL(
y.values,
period=self.sp,
seasonal=self.seasonal,
trend=self.trend,
low_pass=self.low_pass,
seasonal_deg=self.seasonal_deg,
trend_deg=self.trend_deg,
low_pass_deg=self.low_pass_deg,
robust=self.robust,
seasonal_jump=self.seasonal_jump,
trend_jump=self.trend_jump,
low_pass_jump=self.low_pass_jump,
).fit(inner_iter=self.inner_iter, outer_iter=self.outer_iter)
self.seasonal_ = pd.Series(self._stl.seasonal, index=y.index)
#!/usr/bin/env python
# coding: utf-8
# # 06__process_mpranalyze_data
#
# In this notebook, I process the results that come out of running MPRAnalyze on each of the models, call significantly differentially active sequences, and merge the results into one master dataframe.
# In[1]:
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import sys
from scipy.stats import spearmanr
# import utils
sys.path.append("../../../utils")
from plotting_utils import *
from classify_utils import *
get_ipython().run_line_magic('matplotlib', 'inline')
# %config InlineBackend.figure_format = 'svg'
# mpl.rcParams['figure.autolayout'] = False
# In[2]:
sns.set(**PAPER_PRESET)
fontsize = PAPER_FONTSIZE
# In[3]:
np.random.seed(2019)
# In[4]:
QUANT_ALPHA = 0.05
# ## functions
# ## variables
# In[5]:
data_dir = "../../../data/02__mpra/02__activs"
alpha_f = "%s/alpha_per_elem.quantification.txt" % data_dir
human_vals_f = "%s/human_TSS_vals.both_tiles.txt" % data_dir
mouse_vals_f= "%s/mouse_TSS_vals.both_tiles.txt" % data_dir
# In[6]:
native_f = "%s/native_results.txt" % data_dir
HUES64_cis_f = "%s/HUES64_cis_results.txt" % data_dir
mESC_cis_f = "%s/mESC_cis_results.txt" % data_dir
human_trans_f = "%s/human_trans_results.txt" % data_dir
mouse_trans_f = "%s/mouse_trans_results.txt" % data_dir
cis_trans_int_f = "%s/cis_trans_interaction_results.txt" % data_dir
# In[7]:
tss_map_f = "../../../data/01__design/01__mpra_list/mpra_tss.with_ids.RECLASSIFIED_WITH_MAX.txt"
# ## 1. import data
# In[8]:
alpha = pd.read_table(alpha_f, sep="\t").reset_index()
alpha.head()
# In[9]:
len(alpha)
# In[10]:
human_vals = pd.read_table(human_vals_f)
mouse_vals = pd.read_table(mouse_vals_f)
human_vals.head()
# In[11]:
native = pd.read_table(native_f).reset_index()
native.columns = ["index", "stat_native", "pval_native", "fdr_native", "df.test_native", "df.dna_native",
"df.rna.full_native", "df.rna.red_native", "logFC_native"]
native["index"] = native.apply(fix_ctrl_id, axis=1)
native.sample(5)
# In[12]:
HUES64_cis = pd.read_table(HUES64_cis_f)
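# Hedged sketch (the notebook is truncated here; this is not original code): the
# introduction says significantly differentially active sequences are called at
# QUANT_ALPHA, so one minimal way to flag them from the MPRAnalyze FDR values
# would be, for the native comparison:
#
#     native["sig_native"] = native["fdr_native"] < QUANT_ALPHA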
import pandas as pd
df = pd.read_csv(r'/Users/student/Dropbox/PhD/2019 Fall/Hydrologic Modeling/Onion_Creek_Full_Time.csv', index_col=0)
"""analysis.py: module for manifolds analysis."""
__author__ = "<NAME>, <NAME>, <NAME> and <NAME>"
__copyright__ = "Copyright (c) 2020, 2021, <NAME>, <NAME>, <NAME> and <NAME>"
__credits__ = ["Department of Chemical Engineering, University of Utah, Salt Lake City, Utah, USA", "Universite Libre de Bruxelles, Aero-Thermo-Mechanics Laboratory, Brussels, Belgium"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = ["<NAME>", "<NAME>"]
__email__ = ["<EMAIL>", "<EMAIL>", "<EMAIL>"]
__status__ = "Production"
import numpy as np
import copy as cp
import multiprocessing as multiproc
from PCAfold import KReg
from scipy.spatial import KDTree
from scipy.optimize import minimize
from scipy.signal import find_peaks
import matplotlib.pyplot as plt
import random as rnd
from scipy.interpolate import CubicSpline
from PCAfold.styles import *
from PCAfold import preprocess
from PCAfold import reduction
from termcolor import colored
from matplotlib.colors import ListedColormap
import time
################################################################################
#
# Manifold assessment
#
################################################################################
class VarianceData:
"""
A class for storing helpful quantities in analyzing dimensionality of manifolds through normalized variance measures.
This class will be returned by ``compute_normalized_variance``.
:param bandwidth_values:
the array of bandwidth values (Gaussian filter widths) used in computing the normalized variance for each variable
:param normalized_variance:
dictionary of the normalized variance computed at each of the bandwidth values for each variable
:param global_variance:
dictionary of the global variance for each variable
:param bandwidth_10pct_rise:
dictionary of the bandwidth value corresponding to a 10% rise in the normalized variance for each variable
:param variable_names:
list of the variable names
:param normalized_variance_limit:
dictionary of the normalized variance computed as the bandwidth approaches zero (numerically at :math:`10^{-16}`) for each variable
"""
def __init__(self, bandwidth_values, norm_var, global_var, bandwidth_10pct_rise, keys, norm_var_limit):
self._bandwidth_values = bandwidth_values.copy()
self._normalized_variance = norm_var.copy()
self._global_variance = global_var.copy()
self._bandwidth_10pct_rise = bandwidth_10pct_rise.copy()
self._variable_names = keys.copy()
self._normalized_variance_limit = norm_var_limit.copy()
@property
def bandwidth_values(self):
"""return the bandwidth values (Gaussian filter widths) used in computing the normalized variance for each variable"""
return self._bandwidth_values.copy()
@property
def normalized_variance(self):
"""return a dictionary of the normalized variance computed at each of the bandwidth values for each variable"""
return self._normalized_variance.copy()
@property
def global_variance(self):
"""return a dictionary of the global variance for each variable"""
return self._global_variance.copy()
@property
def bandwidth_10pct_rise(self):
"""return a dictionary of the bandwidth value corresponding to a 10% rise in the normalized variance for each variable"""
return self._bandwidth_10pct_rise.copy()
@property
def variable_names(self):
"""return a list of the variable names"""
return self._variable_names.copy()
@property
def normalized_variance_limit(self):
"""return a dictionary of the normalized variance computed as the
bandwidth approaches zero (numerically at 1.e-16) for each variable"""
return self._normalized_variance_limit.copy()
# ------------------------------------------------------------------------------
def compute_normalized_variance(indepvars, depvars, depvar_names, npts_bandwidth=25, min_bandwidth=None,
max_bandwidth=None, bandwidth_values=None, scale_unit_box=True, n_threads=None):
"""
Compute a normalized variance (and related quantities) for analyzing manifold dimensionality.
The normalized variance is computed as
.. math::
\\mathcal{N}(\\sigma) = \\frac{\\sum_{i=1}^n (y_i - \\mathcal{K}(\\hat{x}_i; \\sigma))^2}{\\sum_{i=1}^n (y_i - \\bar{y} )^2}
where :math:`\\bar{y}` is the average quantity over the whole manifold and :math:`\\mathcal{K}(\\hat{x}_i; \\sigma)` is the
weighted average quantity calculated using kernel regression with a Gaussian kernel of bandwidth :math:`\\sigma` centered
around the :math:`i^{th}` observation. :math:`n` is the number of observations.
:math:`\\mathcal{N}(\\sigma)` is computed for each bandwidth in an array of bandwidth values.
By default, the ``indepvars`` (:math:`x`) are centered and scaled to reside inside a unit box (resulting in :math:`\\hat{x}`) so that the bandwidths have the
same meaning in each dimension. Therefore, the bandwidth and its involved calculations are applied in the normalized
independent variable space. This may be turned off by setting ``scale_unit_box`` to False.
The bandwidth values may be specified directly through ``bandwidth_values`` or default values will be calculated as a
logspace from ``min_bandwidth`` to ``max_bandwidth`` with ``npts_bandwidth`` number of values. If left unspecified,
``min_bandwidth`` and ``max_bandwidth`` will be calculated as the minimum and maximum nonzero distance between points, respectively.
More information can be found in :cite:`Armstrong2021`.
**Example:**
.. code:: python
from PCAfold import PCA, compute_normalized_variance
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,5)
# Perform PCA to obtain the low-dimensional manifold:
pca_X = PCA(X, n_components=2)
principal_components = pca_X.transform(X)
# Compute normalized variance quantities:
variance_data = compute_normalized_variance(principal_components, X, depvar_names=['A', 'B', 'C', 'D', 'E'], bandwidth_values=np.logspace(-3, 1, 20), scale_unit_box=True)
# Access bandwidth values:
variance_data.bandwidth_values
# Access normalized variance values:
variance_data.normalized_variance
# Access normalized variance values for a specific variable:
variance_data.normalized_variance['B']
:param indepvars:
``numpy.ndarray`` specifying the independent variable values. It should be of size ``(n_observations,n_independent_variables)``.
:param depvars:
``numpy.ndarray`` specifying the dependent variable values. It should be of size ``(n_observations,n_dependent_variables)``.
:param depvar_names:
``list`` of ``str`` corresponding to the names of the dependent variables (for saving values in a dictionary)
:param npts_bandwidth:
(optional, default 25) number of points to build a logspace of bandwidth values
:param min_bandwidth:
(optional, default to minimum nonzero interpoint distance) minimum bandwidth
:param max_bandwidth:
(optional, default to estimated maximum interpoint distance) maximum bandwidth
:param bandwidth_values:
(optional) array of bandwidth values, i.e. filter widths for a Gaussian filter, to loop over
:param scale_unit_box:
(optional, default True) center/scale the independent variables between [0,1] for computing a normalized variance so the bandwidth values have the same meaning in each dimension
:param n_threads:
(optional, default None) number of threads to run this computation. If None, default behavior of multiprocessing.Pool is used, which is to use all available cores on the current system.
:return:
- **variance_data** - an object of the ``VarianceData`` class.
"""
assert indepvars.ndim == 2, "independent variable array must be 2D: n_observations x n_variables."
assert depvars.ndim == 2, "dependent variable array must be 2D: n_observations x n_variables."
assert (indepvars.shape[0] == depvars.shape[
0]), "The number of observations for dependent and independent variables must match."
assert (len(depvar_names) == depvars.shape[
1]), "The provided keys do not match the shape of the dependent variables yi."
if scale_unit_box:
xi = (indepvars - np.min(indepvars, axis=0)) / (np.max(indepvars, axis=0) - np.min(indepvars, axis=0))
else:
xi = indepvars.copy()
yi = depvars.copy()
if bandwidth_values is None:
if min_bandwidth is None:
tree = KDTree(xi)
nearest_neighbor_dist = tree.query(xi, k=2)[0][:, 1]
min_bandwidth = np.min(nearest_neighbor_dist[nearest_neighbor_dist > 1.e-16])
if max_bandwidth is None:
max_bandwidth = np.linalg.norm(np.max(xi, axis=0) - np.min(xi, axis=0)) * 10.
bandwidth_values = np.logspace(np.log10(min_bandwidth), np.log10(max_bandwidth), npts_bandwidth)
else:
if not isinstance(bandwidth_values, np.ndarray):
raise ValueError("bandwidth_values must be an array.")
lvar = np.zeros((bandwidth_values.size, yi.shape[1]))
kregmod = KReg(xi, yi) # class for kernel regression evaluations
# define a list of argments for kregmod_predict
fcnArgs = [(xi, bandwidth_values[si]) for si in range(bandwidth_values.size) ]
pool = multiproc.Pool(processes=n_threads)
kregmodResults = pool.starmap( kregmod.predict, fcnArgs)
pool.close()
pool.join()
for si in range(bandwidth_values.size):
lvar[si, :] = np.linalg.norm(yi - kregmodResults[si], axis=0) ** 2
# saving the local variance for each yi...
local_var = dict({key: lvar[:, idx] for idx, key in enumerate(depvar_names)})
# saving the global variance for each yi...
global_var = dict(
{key: np.linalg.norm(yi[:, idx] - np.mean(yi[:, idx])) ** 2 for idx, key in enumerate(depvar_names)})
# saving the values of the bandwidth where the normalized variance increases by 10%...
bandwidth_10pct_rise = dict()
for key in depvar_names:
bandwidth_idx = np.argwhere(local_var[key] / global_var[key] >= 0.1)
if len(bandwidth_idx) == 0.:
bandwidth_10pct_rise[key] = None
else:
bandwidth_10pct_rise[key] = bandwidth_values[bandwidth_idx[0]][0]
norm_local_var = dict({key: local_var[key] / global_var[key] for key in depvar_names})
# computing normalized variance as bandwidth approaches zero to check for non-uniqueness
lvar_limit = kregmod.predict(xi, 1.e-16)
nlvar_limit = np.linalg.norm(yi - lvar_limit, axis=0) ** 2
normvar_limit = dict({key: nlvar_limit[idx] for idx, key in enumerate(depvar_names)})
solution_data = VarianceData(bandwidth_values, norm_local_var, global_var, bandwidth_10pct_rise, depvar_names, normvar_limit)
return solution_data
# ------------------------------------------------------------------------------
def normalized_variance_derivative(variance_data):
"""
Compute a scaled normalized variance derivative on a logarithmic scale, :math:`\\hat{\\mathcal{D}}(\\sigma)`, from
.. math::
\\mathcal{D}(\\sigma) = \\frac{\\mathrm{d}\\mathcal{N}(\\sigma)}{\\mathrm{d}\\log_{10}(\\sigma)} + \lim_{\\sigma \\to 0} \\mathcal{N}(\\sigma)
and
.. math::
\\hat{\\mathcal{D}}(\\sigma) = \\frac{\\mathcal{D}(\\sigma)}{\\max(\\mathcal{D}(\\sigma))}
This value relays how fast the variance is changing as the bandwidth changes and captures non-uniqueness from
nonzero values of :math:`\lim_{\\sigma \\to 0} \\mathcal{N}(\\sigma)`. The derivative is approximated
with central finite differencing and the limit is approximated by :math:`\\mathcal{N}(\\sigma=10^{-16})` using the
``normalized_variance_limit`` attribute of the ``VarianceData`` object.
More information can be found in :cite:`Armstrong2021`.
**Example:**
.. code:: python
from PCAfold import PCA, compute_normalized_variance, normalized_variance_derivative
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,5)
# Perform PCA to obtain the low-dimensional manifold:
pca_X = PCA(X, n_components=2)
principal_components = pca_X.transform(X)
# Compute normalized variance quantities:
variance_data = compute_normalized_variance(principal_components, X, depvar_names=['A', 'B', 'C', 'D', 'E'], bandwidth_values=np.logspace(-3, 1, 20), scale_unit_box=True)
# Compute normalized variance derivative:
(derivative, bandwidth_values, max_derivative) = normalized_variance_derivative(variance_data)
# Access normalized variance derivative values for a specific variable:
derivative['B']
:param variance_data:
a ``VarianceData`` class returned from ``compute_normalized_variance``
:return:
- **derivative_dict** - a dictionary of :math:`\\hat{\\mathcal{D}}(\\sigma)` for each variable in the provided ``VarianceData`` object
- **x** - the :math:`\\sigma` values where :math:`\\hat{\\mathcal{D}}(\\sigma)` was computed
- **max_derivatives_dicts** - a dictionary of :math:`\\max(\\mathcal{D}(\\sigma))` values for each variable in the provided ``VarianceData`` object.
"""
x_plus = variance_data.bandwidth_values[2:]
x_minus = variance_data.bandwidth_values[:-2]
x = variance_data.bandwidth_values[1:-1]
derivative_dict = {}
max_derivatives_dict = {}
for key in variance_data.variable_names:
y_plus = variance_data.normalized_variance[key][2:]
y_minus = variance_data.normalized_variance[key][:-2]
derivative = (y_plus-y_minus)/(np.log10(x_plus)-np.log10(x_minus)) + variance_data.normalized_variance_limit[key]
scaled_derivative = derivative/np.max(derivative)
derivative_dict[key] = scaled_derivative
max_derivatives_dict[key] = np.max(derivative)
return derivative_dict, x, max_derivatives_dict
# ------------------------------------------------------------------------------
def find_local_maxima(dependent_values, independent_values, logscaling=True, threshold=1.e-2, show_plot=False):
"""
Finds and returns locations and values of local maxima in a dependent variable given a set of observations.
The functional form of the dependent variable is approximated with a cubic spline for smoother approximations to local maxima.
:param dependent_values:
observations of a single dependent variable such as :math:`\\hat{\\mathcal{D}}` from ``normalized_variance_derivative`` (for a single variable).
:param independent_values:
observations of a single independent variable such as :math:`\\sigma` returned by ``normalized_variance_derivative``
:param logscaling:
(optional, default True) this logarithmically scales ``independent_values`` before finding local maxima. This is needed for scaling :math:`\\sigma` appropriately before finding peaks in :math:`\\hat{\\mathcal{D}}`.
:param threshold:
(optional, default :math:`10^{-2}`) local maxima found below this threshold will be ignored.
:param show_plot:
(optional, default False) when True, a plot of the ``dependent_values`` over ``independent_values`` (logarithmically scaled if ``logscaling`` is True) with the local maxima highlighted will be shown.
:return:
- the locations of local maxima in ``dependent_values``
- the local maxima values
"""
if logscaling:
independent_values = np.log10(independent_values.copy())
zero_indices = []
upslope = True
npts = independent_values.size
for i in range(1, npts):
if upslope and dependent_values[i] - dependent_values[i - 1] <= 0:
if dependent_values[i] > threshold:
zero_indices.append(i - 1)
upslope = False
if not upslope and dependent_values[i] - dependent_values[i - 1] >= 0:
upslope = True
zero_locations = []
zero_Dvalues = []
for idx in zero_indices:
if idx < 1:
indices = [idx, idx + 1, idx + 2, idx + 3]
elif idx < 2:
indices = [idx - 1, idx, idx + 1, idx + 2]
elif idx > npts - 1:
indices = [idx - 3, idx - 2, idx - 1, idx]
else:
indices = [idx - 2, idx - 1, idx, idx + 1]
Dspl = CubicSpline(independent_values[indices], dependent_values[indices])
sigma_max = minimize(lambda s: -Dspl(s), independent_values[idx])
zero_locations.append(sigma_max.x[0])
zero_Dvalues.append(Dspl(sigma_max.x[0]))
if show_plot:
plt.plot(independent_values, dependent_values, 'k-')
plt.plot(zero_locations, zero_Dvalues, 'r*')
plt.xlim([np.min(independent_values),np.max(independent_values)])
plt.ylim([0., 1.05])
plt.grid()
if logscaling:
plt.xlabel('log$_{10}$(independent variable)')
else:
plt.xlabel('independent variable')
plt.ylabel('dependent variable')
plt.show()
if logscaling:
zero_locations = 10. ** np.array(zero_locations)
return np.array(zero_locations, dtype=float), np.array(zero_Dvalues, dtype=float)
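# Hedged usage sketch (not part of the original module; the dummy data mirrors the
# docstring examples above and is illustrative only): find_local_maxima is typically
# applied to the normalized variance derivative of a single dependent variable.
#
#     from PCAfold import PCA
#     X = np.random.rand(100, 5)
#     principal_components = PCA(X, n_components=2).transform(X)
#     variance_data = compute_normalized_variance(principal_components, X,
#                                                 depvar_names=['A', 'B', 'C', 'D', 'E'],
#                                                 bandwidth_values=np.logspace(-3, 1, 25))
#     derivative, sigma, _ = normalized_variance_derivative(variance_data)
#     peak_locations, peak_values = find_local_maxima(derivative['A'], sigma, logscaling=True)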
# ------------------------------------------------------------------------------
def random_sampling_normalized_variance(sampling_percentages, indepvars, depvars, depvar_names,
n_sample_iterations=1, verbose=True, npts_bandwidth=25, min_bandwidth=None,
max_bandwidth=None, bandwidth_values=None, scale_unit_box=True, n_threads=None):
"""
Compute the normalized variance derivatives :math:`\\hat{\\mathcal{D}}(\\sigma)` for random samples of the provided
data specified using ``sampling_percentages``. These will be averaged over ``n_sample_iterations`` iterations. Analyzing
the shift in peaks of :math:`\\hat{\\mathcal{D}}(\\sigma)` due to sampling can distinguish between characteristic
features and non-uniqueness due to a transformation/reduction of manifold coordinates. True features should not show
significant sensitivity to sampling while non-uniqueness/folds in the manifold will.
More information can be found in :cite:`Armstrong2021`.
:param sampling_percentages:
list or 1D array of fractions (between 0 and 1) of the provided data to sample for computing the normalized variance
:param indepvars:
independent variable values (size: n_observations x n_independent variables)
:param depvars:
dependent variable values (size: n_observations x n_dependent variables)
:param depvar_names:
list of strings corresponding to the names of the dependent variables (for saving values in a dictionary)
:param n_sample_iterations:
(optional, default 1) how many iterations for each ``sampling_percentages`` to average the normalized variance derivative over
:param verbose:
(optional, default True) when True, progress statements are printed
:param npts_bandwidth:
(optional, default 25) number of points to build a logspace of bandwidth values
:param min_bandwidth:
(optional, default to minimum nonzero interpoint distance) minimum bandwidth
:param max_bandwidth:
(optional, default to estimated maximum interpoint distance) maximum bandwidth
:param bandwidth_values:
(optional) array of bandwidth values, i.e. filter widths for a Gaussian filter, to loop over
:param scale_unit_box:
(optional, default True) center/scale the independent variables between [0,1] for computing a normalized variance so the bandwidth values have the same meaning in each dimension
:param n_threads:
(optional, default None) number of threads to run this computation. If None, default behavior of multiprocessing.Pool is used, which is to use all available cores on the current system.
:return:
- a dictionary of the normalized variance derivative (:math:`\\hat{\\mathcal{D}}(\\sigma)`) for each sampling percentage in ``sampling_percentages`` averaged over ``n_sample_iterations`` iterations
- the :math:`\\sigma` values used for computing :math:`\\hat{\\mathcal{D}}(\\sigma)`
- a dictionary of the ``VarianceData`` objects for each sampling percentage and iteration in ``sampling_percentages`` and ``n_sample_iterations``
"""
assert indepvars.ndim == 2, "independent variable array must be 2D: n_observations x n_variables."
assert depvars.ndim == 2, "dependent variable array must be 2D: n_observations x n_variables."
if isinstance(sampling_percentages, list):
for p in sampling_percentages:
assert p > 0., "sampling percentages must be between 0 and 1"
assert p <= 1., "sampling percentages must be between 0 and 1"
elif isinstance(sampling_percentages, np.ndarray):
assert sampling_percentages.ndim ==1, "sampling_percentages must be given as a list or 1D array"
for p in sampling_percentages:
assert p > 0., "sampling percentages must be between 0 and 1"
assert p <= 1., "sampling percentages must be between 0 and 1"
else:
raise ValueError("sampling_percentages must be given as a list or 1D array.")
normvar_data = {}
avg_der_data = {}
for p in sampling_percentages:
if verbose:
print('sampling', p * 100., '% of the data')
nv_data = {}
avg_der = {}
for it in range(n_sample_iterations):
if verbose:
print(' iteration', it + 1, 'of', n_sample_iterations)
rnd.seed(it)
idxsample = rnd.sample(list(np.arange(0, indepvars.shape[0])), int(p * indepvars.shape[0]))
nv_data[it] = compute_normalized_variance(indepvars[idxsample, :], depvars[idxsample, :], depvar_names,
npts_bandwidth=npts_bandwidth, min_bandwidth=min_bandwidth,
max_bandwidth=max_bandwidth, bandwidth_values=bandwidth_values,
scale_unit_box=scale_unit_box, n_threads=n_threads)
der, xder, _ = normalized_variance_derivative(nv_data[it])
for key in der.keys():
if it == 0:
avg_der[key] = der[key] / float(n_sample_iterations)  # np.float was removed from NumPy; builtin float works
else:
avg_der[key] += der[key] / float(n_sample_iterations)
avg_der_data[p] = avg_der
normvar_data[p] = nv_data
return avg_der_data, xder, normvar_data
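# Hedged usage sketch (not part of the original module; the sampling fractions and
# dummy data are illustrative only, with principal_components and X set up as in
# the sketch after find_local_maxima above):
#
#     avg_der_data, xder, normvar_data = random_sampling_normalized_variance(
#         [0.5, 1.0], principal_components, X,
#         depvar_names=['A', 'B', 'C', 'D', 'E'],
#         n_sample_iterations=3, bandwidth_values=np.logspace(-3, 1, 25))
#     # Peaks that shift noticeably between the 50% and 100% samples point to
#     # non-uniqueness rather than a true feature of the manifold.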
# ------------------------------------------------------------------------------
def average_knn_distance(indepvars, n_neighbors=10, verbose=False):
"""
Computes, for each observation, the average Euclidean distance to its :math:`k` nearest
neighbors on a manifold defined by the independent variables.
**Example:**
.. code:: python
from PCAfold import PCA, average_knn_distance
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,20)
# Instantiate PCA class object:
pca_X = PCA(X, scaling='none', n_components=2, use_eigendec=True, nocenter=False)
# Calculate the principal components:
principal_components = pca_X.transform(X)
# Compute average distances on a manifold defined by the PCs:
average_distances = average_knn_distance(principal_components, n_neighbors=10, verbose=True)
With ``verbose=True``, minimum, maximum and average distance will be printed:
.. code-block:: text
Minimum distance: 0.1388300829487847
Maximum distance: 0.4689587542132183
Average distance: 0.20824964953425693
Median distance: 0.18333873029179215
.. note::
This function requires the ``scikit-learn`` module. You can install it through:
``pip install scikit-learn``
:param indepvars:
``numpy.ndarray`` specifying the independent variable values. It should be of size ``(n_observations,n_independent_variables)``.
:param n_neighbors: (optional)
``int`` specifying the number of nearest neighbors, :math:`k`.
:param verbose: (optional)
``bool`` for printing verbose details.
:return:
- **average_distances** - ``numpy.ndarray`` specifying the vector of average distances for every observation in a data set to its :math:`k` nearest neighbors. It has size ``(n_observations,)``.
"""
if not isinstance(indepvars, np.ndarray):
raise ValueError("Parameter `indepvars` has to be of type `numpy.ndarray`.")
try:
(n_observations, n_independent_variables) = np.shape(indepvars)
except:
raise ValueError("Parameter `indepvars` has to have size `(n_observations,n_independent_variables)`.")
if not isinstance(n_neighbors, int):
raise ValueError("Parameter `n_neighbors` has to be of type int.")
if n_neighbors < 2:
raise ValueError("Parameter `n_neighbors` cannot be smaller than 2.")
if not isinstance(verbose, bool):
raise ValueError("Parameter `verbose` has to be a boolean.")
try:
from sklearn.neighbors import NearestNeighbors
except:
raise ValueError("Nearest neighbors search requires the `sklearn` module: `pip install scikit-learn`.")
(n_observations, n_independent_variables) = np.shape(indepvars)
knn_model = NearestNeighbors(n_neighbors=n_neighbors+1)
knn_model.fit(indepvars)
average_distances = np.zeros((n_observations,))
for query_point in range(0,n_observations):
(distances_neigh, idx_neigh) = knn_model.kneighbors(indepvars[query_point,:][None,:], n_neighbors=n_neighbors+1, return_distance=True)
query_point_idx = np.where(idx_neigh.ravel()==query_point)
distances_neigh = np.delete(distances_neigh.ravel(), np.s_[query_point_idx])
idx_neigh = np.delete(idx_neigh.ravel(), np.s_[query_point_idx])
average_distances[query_point] = np.mean(distances_neigh)
if verbose:
print('Minimum distance:\t' + str(np.min(average_distances)))
print('Maximum distance:\t' + str(np.max(average_distances)))
print('Average distance:\t' + str(np.mean(average_distances)))
print('Median distance:\t' + str(np.median(average_distances)))
return average_distances
# ------------------------------------------------------------------------------
def cost_function_normalized_variance_derivative(variance_data, penalty_function=None, norm=None, integrate_to_peak=False):
"""
Defines a cost function for manifold topology optimization based on the areas, or weighted (penalized) areas, under
the normalized variance derivatives curves, :math:`\\hat{\\mathcal{D}}(\\sigma)`, for the selected :math:`n_{dep}` dependent variables.
An individual area, :math:`A_i`, for the :math:`i^{th}` dependent variable, is computed by directly integrating the function :math:`\\hat{\\mathcal{D}}_i(\\sigma)``
in the :math:`\\log_{10}` space of bandwidths :math:`\\sigma`. Integration is performed using the composite trapezoid rule.
When ``integrate_to_peak=False``, the bounds of integration go from the minimum bandwidth, :math:`\\sigma_{min, i}`,
to the maximum bandwidth, :math:`\\sigma_{max, i}`:
.. math::
A_i = \\int_{\\sigma_{min, i}}^{\\sigma_{max, i}} \\hat{\\mathcal{D}}_i(\\sigma) d \\log_{10} \\sigma
.. image:: ../images/cost-function-D-hat.svg
:width: 600
:align: center
When ``integrate_to_peak=True``, the bounds of integration go from the minimum bandwidth, :math:`\\sigma_{min, i}`,
to the bandwidth for which the rightmost peak happens in :math:`\\hat{\\mathcal{D}}_i(\\sigma)``, :math:`\\sigma_{peak, i}`:
.. math::
A_i = \\int_{\\sigma_{min, i}}^{\\sigma_{peak, i}} \\hat{\\mathcal{D}}_i(\\sigma) d \\log_{10} \\sigma
.. image:: ../images/cost-function-D-hat-to-peak.svg
:width: 600
:align: center
In addition, each individual area, :math:`A_i`, can be weighted. Three weighting options are available:
- If ``penalty_function='peak'``, :math:`A_i` is weighted by the inverse of the rightmost peak location:
.. math::
A_i = \\frac{1}{\\sigma_{peak, i}} \\cdot \\int \\hat{\\mathcal{D}}_i(\\sigma) d(\\log_{10} \\sigma)
- If ``penalty_function='sigma'``, :math:`A_i` is weighted continuously by the bandwidth:
.. math::
A_i = \\int \\frac{\\hat{\\mathcal{D}}_i(\\sigma)}{\\sigma} d(\\log_{10} \\sigma)
This type of weighting *strongly* penalizes the area happening at lower bandwidth values:
.. image:: ../images/cost-function-sigma-penalty.svg
:width: 600
:align: center
- If ``penalty_function='log-sigma-over-peak'``, :math:`A_i` is weighted continuously by the :math:`\\log_{10}` -transformed bandwidth\
and takes into account information about the rightmost peak location:
.. math::
A_i = \\int \\Big( \\big| \\log_{10} \\big( \\frac{\\sigma}{\\sigma_{peak, i}} \\big) \\big| + \\frac{1}{||\\sigma_{peak, i}||_{0-1}} \\Big) \\cdot \\hat{\\mathcal{D}}_i(\\sigma) d(\\log_{10} \\sigma)
where :math:`||\\sigma_{peak, i}||_{0-1}` is the rightmost peak location expressed in a normalized 0-1 range. The normalization is performed so that :math:`||\\sigma_{min, i}||_{0-1} = 0.0` and :math:`||\\sigma_{max, i}||_{0-1} = 1.0`.
This type of weighting creates a more gentle penalty for the area happening further from the rightmost peak location:
.. image:: ../images/cost-function-log-sigma-over-peak-penalty.svg
:width: 600
:align: center
If ``norm=None``, a list of costs for all dependent variables is returned.
Otherwise, the final cost, :math:`\\mathcal{L}`, can be computed from all :math:`A_i` in a few ways,
where :math:`n_{dep}` is the number of dependent variables stored in the ``variance_data`` object:
- If ``norm='average'``, :math:`\\mathcal{L} = \\frac{1}{n_{dep}} \\sum_{i = 1}^{n_{dep}} A_i`.
- If ``norm='cumulative'``, :math:`\\mathcal{L} = \\sum_{i = 1}^{n_{dep}} A_i`.
- If ``norm='max'``, :math:`\\mathcal{L} = \\text{max} (A_i)`.
- If ``norm='median'``, :math:`\\mathcal{L} = \\text{median} (A_i)`.
- If ``norm='min'``, :math:`\\mathcal{L} = \\text{min} (A_i)`.
**Example:**
.. code:: python
from PCAfold import PCA, compute_normalized_variance, cost_function_normalized_variance_derivative
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,10)
# Specify variables names
variable_names = ['X_' + str(i) for i in range(0,10)]
# Perform PCA to obtain the low-dimensional manifold:
pca_X = PCA(X, n_components=2)
principal_components = pca_X.transform(X)
# Specify the bandwidth values:
bandwidth_values = np.logspace(-4, 2, 50)
# Compute normalized variance quantities:
variance_data = compute_normalized_variance(principal_components,
X,
depvar_names=variable_names,
bandwidth_values=bandwidth_values)
# Compute the cost for the current manifold:
cost = cost_function_normalized_variance_derivative(variance_data,
penalty_function='peak',
norm='max',
integrate_to_peak=True)
:param variance_data:
an object of ``VarianceData`` class.
:param penalty_function: (optional)
``str`` specifying the weighting (penalty) applied to each area.
Set ``penalty_function='peak'`` to weight each area by the rightmost peak location, :math:`\\sigma_{peak, i}`, for the :math:`i^{th}` dependent variable.
Set ``penalty_function='sigma'`` to weight each area continuously by the bandwidth.
Set ``penalty_function='log-sigma-over-peak'`` to weight each area continuously by the :math:`\\log_{10}` -transformed bandwidth, normalized by the right most peak location, :math:`\\sigma_{peak, i}`.
If ``penalty_function=None``, the area is not weighted.
:param norm: (optional)
``str`` specifying the norm to apply for all areas :math:`A_i`. ``norm='average'`` uses an arithmetic average, ``norm='max'`` uses the :math:`L_{\\infty}` norm,
``norm='median'`` uses a median area, ``norm='cumulative'`` uses a cumulative area and ``norm='min'`` uses a minimum area. If ``norm=None``, a list of costs for all depedent variables is returned.
:param integrate_to_peak: (optional)
``bool`` specifying whether an individual area for the :math:`i^{th}` dependent variable should be computed only up the the rightmost peak location.
:return:
- **cost** - ``float`` specifying the normalized cost, :math:`\\mathcal{L}`, or, if ``norm=None``, a list of costs, :math:`A_i`, for each dependent variable.
"""
__penalty_functions = ['peak', 'sigma', 'log-sigma-over-peak']
__norms = ['average', 'cumulative', 'max', 'median', 'min']
if penalty_function is not None:
if not isinstance(penalty_function, str):
raise ValueError("Parameter `penalty_function` has to be of type `str`.")
if penalty_function not in __penalty_functions:
raise ValueError("Parameter `penalty_function` has to be one of the following: 'peak', 'sigma', 'log-sigma-over-peak'.")
if norm is not None:
if not isinstance(norm, str):
raise ValueError("Parameter `norm` has to be of type `str`.")
if norm not in __norms:
raise ValueError("Parameter `norm` has to be one of the following: 'average', 'cumulative', 'max', 'median', 'min'.")
if not isinstance(integrate_to_peak, bool):
raise ValueError("Parameter `integrate_to_peak` has to be of type `bool`.")
derivative, sigma, _ = normalized_variance_derivative(variance_data)
costs = []
for variable in variance_data.variable_names:
idx_peaks, _ = find_peaks(derivative[variable], height=0)
idx_rightmost_peak = idx_peaks[-1]
rightmost_peak_location = sigma[idx_rightmost_peak]
(indices_to_the_left_of_peak, ) = np.where(sigma<=rightmost_peak_location)
if integrate_to_peak:
if penalty_function is None:
cost = np.trapz(derivative[variable][indices_to_the_left_of_peak], np.log10(sigma[indices_to_the_left_of_peak]))
costs.append(cost)
elif penalty_function == 'peak':
cost = 1. / (rightmost_peak_location) * np.trapz(derivative[variable][indices_to_the_left_of_peak], np.log10(sigma[indices_to_the_left_of_peak]))
costs.append(cost)
elif penalty_function == 'sigma':
penalty_sigma = 1./sigma[indices_to_the_left_of_peak]
cost = np.trapz(derivative[variable][indices_to_the_left_of_peak]*penalty_sigma, np.log10(sigma[indices_to_the_left_of_peak]))
costs.append(cost)
elif penalty_function == 'log-sigma-over-peak':
normalized_sigma, _, _ = preprocess.center_scale(np.log10(sigma[:,None]), scaling='0to1')
addition = normalized_sigma[idx_rightmost_peak][0]
penalty_log_sigma_peak = abs(np.log10(sigma[indices_to_the_left_of_peak]/rightmost_peak_location)) + 1./addition
cost = np.trapz(derivative[variable][indices_to_the_left_of_peak]*penalty_log_sigma_peak, np.log10(sigma[indices_to_the_left_of_peak]))
costs.append(cost)
else:
if penalty_function is None:
cost = np.trapz(derivative[variable], np.log10(sigma))
costs.append(cost)
elif penalty_function == 'peak':
cost = 1. / (rightmost_peak_location) * np.trapz(derivative[variable], np.log10(sigma))
costs.append(cost)
elif penalty_function == 'sigma':
penalty_sigma = 1./sigma
cost = np.trapz(derivative[variable]*penalty_sigma, np.log10(sigma))
costs.append(cost)
elif penalty_function == 'log-sigma-over-peak':
normalized_sigma, _, _ = preprocess.center_scale(np.log10(sigma[:,None]), scaling='0to1')
addition = normalized_sigma[idx_rightmost_peak][0]
penalty_log_sigma_peak = abs(np.log10(sigma/rightmost_peak_location)) + 1./addition
cost = np.trapz(derivative[variable]*penalty_log_sigma_peak, np.log10(sigma))
costs.append(cost)
if norm is None:
return costs
else:
if norm == 'max':
# Take L-infinity norm over all costs:
normalized_cost = np.max(costs)
elif norm == 'average':
# Take the arithmetic average norm over all costs:
normalized_cost = np.mean(costs)
elif norm == 'min':
# Take the minimum norm over all costs:
normalized_cost = np.min(costs)
elif norm == 'median':
# Take the median norm over all costs:
normalized_cost = np.median(costs)
elif norm == 'cumulative':
# Take the cumulative sum over all costs:
normalized_cost = np.sum(costs)
return normalized_cost
# ------------------------------------------------------------------------------
def manifold_informed_feature_selection(X, X_source, variable_names, scaling, bandwidth_values, target_variables=None, add_transformed_source=True, target_manifold_dimensionality=3, bootstrap_variables=None, penalty_function=None, norm='max', integrate_to_peak=False, verbose=False):
"""
Manifold-informed feature selection algorithm based on forward feature addition. The goal of the algorithm is to
select a meaningful subset of the original variables such that
undesired behaviors on a PCA-derived manifold of a given dimensionality are minimized.
The algorithm uses the cost function, :math:`\\mathcal{L}`, based on minimizing the area under the normalized variance derivatives curves, :math:`\\hat{\\mathcal{D}}(\\sigma)`,
for the selected :math:`n_{dep}` dependent variables (as per ``cost_function_normalized_variance_derivative`` function).
The algorithm can be bootstrapped in two ways:
- Automatic bootstrap when ``bootstrap_variables=None``: the first best variable is selected automatically as the one that gives the lowest cost.
- User-defined bootstrap when ``bootstrap_variables`` is set to a user-defined list of the bootstrap variables.
The algorithm iterates, adding a new variable that exhibits the lowest cost at each iteration.
The original variables in a data set get ordered according to their effect
on the manifold topology. Assuming that the original data set is composed of :math:`Q` variables,
the first output is a list of indices of the ordered
original variables, :math:`\\mathbf{X} = [X_1, X_2, \\dots, X_Q]`. The second output is a list of indices of the selected
subset of the original variables, :math:`\\mathbf{X}_S = [X_1, X_2, \\dots, X_n]`, that correspond to the minimum cost, :math:`\\mathcal{L}`.
.. note::
The algorithm can be very expensive (for large data sets) due to multiple computations of the normalized variance derivative.
Try running it on multiple cores or on a sampled data set.
In case the algorithm breaks when not being able to determine the peak
location, try increasing the range in the ``bandwidth_values`` parameter.
**Example:**
.. code:: python
from PCAfold import manifold_informed_feature_selection
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,10)
X_source = np.random.rand(100,10)
# Define original variables to add to the optimization:
target_variables = X[:,0:3]
# Specify variables names
variable_names = ['X_' + str(i) for i in range(0,10)]
# Specify the bandwidth values to compute the optimization on:
bandwidth_values = np.logspace(-4, 2, 50)
# Run the subset selection algorithm:
(ordered, selected, costs) = manifold_informed_feature_selection(X,
X_source,
variable_names,
scaling='auto',
bandwidth_values=bandwidth_values,
target_variables=target_variables,
add_transformed_source=True,
target_manifold_dimensionality=2,
bootstrap_variables=None,
penalty_function='peak',
norm='max',
integrate_to_peak=True,
verbose=True)
:param X:
``numpy.ndarray`` specifying the original data set, :math:`\\mathbf{X}`. It should be of size ``(n_observations,n_variables)``.
:param X_source:
``numpy.ndarray`` specifying the source terms, :math:`\\mathbf{S_X}`, corresponding to the state-space
variables in :math:`\\mathbf{X}`. This parameter is applicable to data sets
representing reactive flows. More information can be found in :cite:`Sutherland2009`. It should be of size ``(n_observations,n_variables)``.
:param variable_names:
``list`` of ``str`` specifying variables names.
:param scaling: (optional)
``str`` specifying the scaling methodology. It can be one of the following:
``'none'``, ``''``, ``'auto'``, ``'std'``, ``'pareto'``, ``'vast'``, ``'range'``, ``'0to1'``,
``'-1to1'``, ``'level'``, ``'max'``, ``'poisson'``, ``'vast_2'``, ``'vast_3'``, ``'vast_4'``.
:param bandwidth_values:
``numpy.ndarray`` specifying the bandwidth values, :math:`\\sigma`, for :math:`\\hat{\\mathcal{D}}(\\sigma)` computation.
:param target_variables: (optional)
``numpy.ndarray`` specifying the dependent variables that should be used in :math:`\\hat{\\mathcal{D}}(\\sigma)` computation. It should be of size ``(n_observations,n_target_variables)``.
:param add_transformed_source: (optional)
``bool`` specifying if the PCA-transformed source terms of the state-space variables should be added in :math:`\\hat{\\mathcal{D}}(\\sigma)` computation, alongside the user-defined dependent variables.
:param target_manifold_dimensionality: (optional)
``int`` specifying the target dimensionality of the PCA manifold.
:param bootstrap_variables: (optional)
``list`` specifying the user-selected variables to bootstrap the algorithm with. If set to ``None``, automatic bootstrapping is performed.
:param penalty_function: (optional)
``str`` specifying the weighting applied to each area.
Set ``penalty_function='peak'`` to weight each area by the rightmost peak location, :math:`\\sigma_{peak, i}`, for the :math:`i^{th}` dependent variable.
Set ``penalty_function='sigma'`` to weight each area continuously by the bandwidth.
Set ``penalty_function='log-sigma-over-peak'`` to weight each area continuously by the :math:`\\log_{10}` -transformed bandwidth, normalized by the right most peak location, :math:`\\sigma_{peak, i}`.
If ``penalty_function=None``, the area is not weighted.
:param norm: (optional)
``str`` specifying the norm to apply for all areas :math:`A_i`. ``norm='average'`` uses an arithmetic average, ``norm='max'`` uses the :math:`L_{\\infty}` norm,
``norm='median'`` uses a median area, ``norm='cumulative'`` uses a cumulative area and ``norm='min'`` uses a minimum area.
:param integrate_to_peak: (optional)
``bool`` specifying whether an individual area for the :math:`i^{th}` dependent variable should be computed only up the the rightmost peak location.
:param verbose: (optional)
``bool`` for printing verbose details.
:return:
- **ordered_variables** - ``list`` specifying the indices of the ordered variables.
- **selected_variables** - ``list`` specifying the indices of the selected variables that correspond to the minimum cost :math:`\\mathcal{L}`.
- **costs** - ``list`` specifying the costs, :math:`\\mathcal{L}`, from each iteration.
"""
__penalty_functions = ['peak', 'sigma', 'log-sigma-over-peak']
__norms = ['average', 'cumulative', 'max', 'median', 'min']
if not isinstance(X, np.ndarray):
raise ValueError("Parameter `X` has to be of type `numpy.ndarray`.")
try:
(n_observations, n_variables) = np.shape(X)
except:
raise ValueError("Parameter `X` has to have shape `(n_observations,n_variables)`.")
if not isinstance(X_source, np.ndarray):
raise ValueError("Parameter `X_source` has to be of type `numpy.ndarray`.")
try:
(n_observations_source, n_variables_source) = np.shape(X_source)
except:
raise ValueError("Parameter `X_source` has to have shape `(n_observations,n_variables)`.")
if n_variables_source != n_variables:
raise ValueError("Parameter `X_source` has different number of variables than `X`.")
if n_observations_source != n_observations:
raise ValueError("Parameter `X_source` has different number of observations than `X`.")
if not isinstance(variable_names, list):
raise ValueError("Parameter `variable_names` has to be of type `list`.")
if len(variable_names) != n_variables:
raise ValueError("Parameter `variable_names` has different number of variables than `X`.")
if not isinstance(scaling, str):
raise ValueError("Parameter `scaling` has to be of type `str`.")
if not isinstance(bandwidth_values, np.ndarray):
raise ValueError("Parameter `bandwidth_values` has to be of type `numpy.ndarray`.")
if target_variables is not None:
if not isinstance(target_variables, np.ndarray):
raise ValueError("Parameter `target_variables` has to be of type `numpy.ndarray`.")
try:
(n_d_hat_observations, n_target_variables) = np.shape(target_variables)
target_variables_names = ['X' + str(i) for i in range(0,n_target_variables)]
except:
raise ValueError("Parameter `target_variables` has to have shape `(n_observations,n_target_variables)`.")
if n_d_hat_observations != n_observations_source:
raise ValueError("Parameter `target_variables` has different number of observations than `X_source`.")
if not isinstance(add_transformed_source, bool):
raise ValueError("Parameter `add_transformed_source` has to be of type `bool`.")
if target_variables is None:
if not add_transformed_source:
raise ValueError("Either `target_variables` has to be specified or `add_transformed_source` has to be set to True.")
if not isinstance(target_manifold_dimensionality, int):
raise ValueError("Parameter `target_manifold_dimensionality` has to be of type `int`.")
if bootstrap_variables is not None:
if not isinstance(bootstrap_variables, list):
raise ValueError("Parameter `bootstrap_variables` has to be of type `list`.")
if penalty_function is not None:
if not isinstance(penalty_function, str):
raise ValueError("Parameter `penalty_function` has to be of type `str`.")
if penalty_function not in __penalty_functions:
raise ValueError("Parameter `penalty_function` has to be one of the following: 'peak', 'sigma', 'log-sigma-over-peak'.")
if not isinstance(norm, str):
raise ValueError("Parameter `norm` has to be of type `str`.")
if norm not in __norms:
raise ValueError("Parameter `norm` has to be one of the following: 'average', 'cumulative', 'max', 'median', 'min'.")
if not isinstance(integrate_to_peak, bool):
raise ValueError("Parameter `integrate_to_peak` has to be of type `bool`.")
if not isinstance(verbose, bool):
raise ValueError("Parameter `verbose` has to be of type `bool`.")
variables_indices = [i for i in range(0,n_variables)]
costs = []
# Automatic bootstrapping: -------------------------------------------------
if bootstrap_variables is None:
if verbose: print('Automatic bootstrapping...\n')
bootstrap_cost_function = []
bootstrap_tic = time.perf_counter()
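# Evaluate the cost of a one-dimensional manifold built from each candidate variable on its own: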
for i_variable in variables_indices:
if verbose: print('\tCurrently checking variable:\t' + variable_names[i_variable])
PCs = X[:,[i_variable]]
PC_sources = X_source[:,[i_variable]]
if target_variables is None:
depvars = cp.deepcopy(PC_sources)
depvar_names = ['SZ1']
else:
if add_transformed_source:
depvars = np.hstack((PC_sources, target_variables))
depvar_names = ['SZ1'] + target_variables_names
else:
depvars = target_variables
depvar_names = target_variables_names
bootstrap_variance_data = compute_normalized_variance(PCs, depvars, depvar_names=depvar_names, bandwidth_values=bandwidth_values)
bootstrap_area = cost_function_normalized_variance_derivative(bootstrap_variance_data, penalty_function=penalty_function, norm=norm, integrate_to_peak=integrate_to_peak)
if verbose: print('\tCost:\t%.4f' % bootstrap_area)
bootstrap_cost_function.append(bootstrap_area)
# Find a single best variable to bootstrap with:
(best_bootstrap_variable_index, ) = np.where(np.array(bootstrap_cost_function)==np.min(bootstrap_cost_function))
best_bootstrap_variable_index = int(best_bootstrap_variable_index[0])
costs.append(np.min(bootstrap_cost_function))
bootstrap_variables = [best_bootstrap_variable_index]
if verbose: print('\n\tVariable ' + variable_names[best_bootstrap_variable_index] + ' will be used as bootstrap.\n\tCost:\t%.4f' % np.min(bootstrap_cost_function) + '\n')
bootstrap_toc = time.perf_counter()
if verbose: print(f'Bootstrapping time: {(bootstrap_toc - bootstrap_tic)/60:0.1f} minutes.' + '\n' + '-'*50)
# Use user-defined bootstrapping: -----------------------------------------
else:
# Manifold dimensionality needs a fix here!
if verbose: print('User-defined bootstrapping...\n')
bootstrap_cost_function = []
bootstrap_tic = time.perf_counter()
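# The bootstrap manifold dimensionality cannot exceed the number of user-specified bootstrap variables: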
if len(bootstrap_variables) < target_manifold_dimensionality:
n_components = len(bootstrap_variables)
else:
n_components = cp.deepcopy(target_manifold_dimensionality)
if verbose: print('\tUser-defined bootstrapping will be performed for a ' + str(n_components) + '-dimensional manifold.')
bootstrap_pca = reduction.PCA(X[:,bootstrap_variables], scaling=scaling, n_components=n_components)
PCs = bootstrap_pca.transform(X[:,bootstrap_variables])
PC_sources = bootstrap_pca.transform(X_source[:,bootstrap_variables], nocenter=True)
if target_variables is None:
depvars = cp.deepcopy(PC_sources)
depvar_names = ['SZ' + str(i) for i in range(0,n_components)]
else:
if add_transformed_source:
depvars = np.hstack((PC_sources, target_variables))
depvar_names = ['SZ' + str(i) for i in range(0,n_components)] + target_variables_names
else:
depvars = target_variables
depvar_names = target_variables_names
bootstrap_variance_data = compute_normalized_variance(PCs, depvars, depvar_names=depvar_names, bandwidth_values=bandwidth_values)
bootstrap_area = cost_function_normalized_variance_derivative(bootstrap_variance_data, penalty_function=penalty_function, norm=norm, integrate_to_peak=integrate_to_peak)
bootstrap_cost_function.append(bootstrap_area)
costs.append(bootstrap_area)
if verbose: print('\n\tVariable(s) ' + ', '.join([variable_names[i] for i in bootstrap_variables]) + ' will be used as bootstrap\n\tCost:\t%.4f' % np.min(bootstrap_area) + '\n')
bootstrap_toc = time.perf_counter()
if verbose: print(f'Bootstrapping time: {(bootstrap_toc - bootstrap_tic)/60:0.1f} minutes.' + '\n' + '-'*50)
# Iterate the algorithm starting from the bootstrap selection: -------------
if verbose: print('Optimizing...\n')
total_tic = time.perf_counter()
ordered_variables = [i for i in bootstrap_variables]
remaining_variables_list = [i for i in range(0,n_variables) if i not in bootstrap_variables]
previous_area = np.min(bootstrap_cost_function)
loop_counter = 0
while len(remaining_variables_list) > 0:
iteration_tic = time.perf_counter()
loop_counter += 1
if verbose:
print('Iteration No.' + str(loop_counter))
print('Currently adding variables from the following list: ')
print([variable_names[i] for i in remaining_variables_list])
current_cost_function = []
for i_variable in remaining_variables_list:
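# Grow the manifold dimensionality with the size of the current subset, up to the target dimensionality: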
if len(ordered_variables) < target_manifold_dimensionality:
n_components = len(ordered_variables) + 1
else:
n_components = cp.deepcopy(target_manifold_dimensionality)
if verbose: print('\tCurrently added variable: ' + variable_names[i_variable])
current_variables_list = ordered_variables + [i_variable]
pca = reduction.PCA(X[:,current_variables_list], scaling=scaling, n_components=n_components)
PCs = pca.transform(X[:,current_variables_list])
PC_sources = pca.transform(X_source[:,current_variables_list], nocenter=True)
if target_variables is None:
depvars = cp.deepcopy(PC_sources)
depvar_names = ['SZ' + str(i) for i in range(0,n_components)]
else:
if add_transformed_source:
depvars = np.hstack((PC_sources, target_variables))
depvar_names = ['SZ' + str(i) for i in range(0,n_components)] + target_variables_names
else:
depvars = target_variables
depvar_names = target_variables_names
current_variance_data = compute_normalized_variance(PCs, depvars, depvar_names=depvar_names, bandwidth_values=bandwidth_values)
current_derivative, current_sigma, _ = normalized_variance_derivative(current_variance_data)
current_area = cost_function_normalized_variance_derivative(current_variance_data, penalty_function=penalty_function, norm=norm, integrate_to_peak=integrate_to_peak)
if verbose: print('\tCost:\t%.4f' % current_area)
current_cost_function.append(current_area)
if current_area <= previous_area:
if verbose: print(colored('\tSAME OR BETTER', 'green'))
else:
if verbose: print(colored('\tWORSE', 'red'))
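# Select the variable whose addition gives the lowest cost; ties are resolved by taking the first minimum: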
min_area = np.min(current_cost_function)
(best_variable_index, ) = np.where(np.array(current_cost_function)==min_area)
try:
best_variable_index = int(best_variable_index)
except:
best_variable_index = int(best_variable_index[0])
if verbose: print('\n\tVariable ' + variable_names[remaining_variables_list[best_variable_index]] + ' is added.\n\tCost:\t%.4f' % min_area + '\n')
ordered_variables.append(remaining_variables_list[best_variable_index])
remaining_variables_list = [i for i in range(0,n_variables) if i not in ordered_variables]
if min_area <= previous_area:
previous_area = min_area
costs.append(min_area)
iteration_toc = time.perf_counter()
if verbose: print(f'\tIteration time: {(iteration_toc - iteration_tic)/60:0.1f} minutes.' + '\n' + '-'*50)
# Compute the optimal subset where the cost is minimized: ------------------
(min_cost_function_index, ) = np.where(costs==np.min(costs))
try:
min_cost_function_index = int(min_cost_function_index)
except:
min_cost_function_index = int(min_cost_function_index[0])
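# Make sure that at least `target_manifold_dimensionality` variables end up in the selected subset: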
if min_cost_function_index+1 < target_manifold_dimensionality:
selected_variables = list(np.array(ordered_variables)[0:target_manifold_dimensionality])
else:
selected_variables = list(np.array(ordered_variables)[0:min_cost_function_index+1])
if verbose:
print('Ordered variables:')
print(', '.join([variable_names[i] for i in ordered_variables]))
print(ordered_variables)
print('Final cost: %.4f' % min_area)
print('\nSelected variables:')
print(', '.join([variable_names[i] for i in selected_variables]))
print(selected_variables)
print('Lowest cost: %.4f' % previous_area)
total_toc = time.perf_counter()
if verbose: print(f'\nOptimization time: {(total_toc - total_tic)/60:0.1f} minutes.' + '\n' + '-'*50)
return ordered_variables, selected_variables, costs
# ------------------------------------------------------------------------------
def manifold_informed_backward_elimination(X, X_source, variable_names, scaling, bandwidth_values, target_variables=None, add_transformed_source=True, source_space=None, target_manifold_dimensionality=3, penalty_function=None, norm='max', integrate_to_peak=False, verbose=False):
"""
Manifold-informed feature selection algorithm based on backward elimination. The goal of the algorithm is to
select a meaningful subset of the original variables such that
undesired behaviors on a PCA-derived manifold of a given dimensionality are minimized.
The algorithm uses the cost function, :math:`\\mathcal{L}`, based on minimizing the area under the normalized variance derivatives curves, :math:`\\hat{\\mathcal{D}}(\\sigma)`,
for the selected :math:`n_{dep}` dependent variables (as per ``cost_function_normalized_variance_derivative`` function).
At each iteration, the algorithm removes the variable whose elimination decreases the cost the most.
The original variables in a data set get ordered according to their effect
on the manifold topology. Assuming that the original data set is composed of :math:`Q` variables,
the first output is a list of indices of the ordered
original variables, :math:`\\mathbf{X} = [X_1, X_2, \\dots, X_Q]`. The second output is a list of indices of the selected
subset of the original variables, :math:`\\mathbf{X}_S = [X_1, X_2, \\dots, X_n]`, that correspond to the minimum cost, :math:`\\mathcal{L}`.
.. note::
The algorithm can be very expensive (for large data sets) due to multiple computations of the normalized variance derivative.
Try running it on multiple cores or on a sampled data set.
If the algorithm fails because the peak location cannot be determined,
try increasing the range of the ``bandwidth_values`` parameter.
**Example:**
.. code:: python
from PCAfold import manifold_informed_backward_elimination
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,10)
X_source = np.random.rand(100,10)
# Define original variables to add to the optimization:
target_variables = X[:,0:3]
# Specify variables names
variable_names = ['X_' + str(i) for i in range(0,10)]
# Specify the bandwidth values to compute the optimization on:
bandwidth_values = np.logspace(-4, 2, 50)
# Run the subset selection algorithm:
(ordered, selected, costs) = manifold_informed_backward_elimination(X,
X_source,
variable_names,
scaling='auto',
bandwidth_values=bandwidth_values,
target_variables=target_variables,
add_transformed_source=True,
target_manifold_dimensionality=2,
penalty_function='peak',
norm='max',
integrate_to_peak=True,
verbose=True)
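# The returned indices can be mapped back to variable names, for example:
selected_names = [variable_names[i] for i in selected]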
:param X:
``numpy.ndarray`` specifying the original data set, :math:`\\mathbf{X}`. It should be of size ``(n_observations,n_variables)``.
:param X_source:
``numpy.ndarray`` specifying the source terms, :math:`\\mathbf{S_X}`, corresponding to the state-space
variables in :math:`\\mathbf{X}`. This parameter is applicable to data sets
representing reactive flows. More information can be found in :cite:`Sutherland2009`. It should be of size ``(n_observations,n_variables)``.
:param variable_names:
``list`` of ``str`` specifying variable names. Order of names in the ``variable_names`` list should match the order of variables (columns) in ``X``.
:param scaling: (optional)
``str`` specifying the scaling methodology. It can be one of the following:
``'none'``, ``''``, ``'auto'``, ``'std'``, ``'pareto'``, ``'vast'``, ``'range'``, ``'0to1'``,
``'-1to1'``, ``'level'``, ``'max'``, ``'poisson'``, ``'vast_2'``, ``'vast_3'``, ``'vast_4'``.
:param bandwidth_values:
``numpy.ndarray`` specifying the bandwidth values, :math:`\\sigma`, for :math:`\\hat{\\mathcal{D}}(\\sigma)` computation.
:param target_variables: (optional)
``numpy.ndarray`` specifying the dependent variables that should be used in :math:`\\hat{\\mathcal{D}}(\\sigma)` computation. It should be of size ``(n_observations,n_target_variables)``.
:param add_transformed_source: (optional)
``bool`` specifying if the PCA-transformed source terms of the state-space variables should be added in :math:`\\hat{\\mathcal{D}}(\\sigma)` computation, alongside the user-defined dependent variables.
:param source_space: (optional)
``str`` specifying the space to which the PC source terms should be transformed before computing the cost. It can be one of the following: ``symlog``, ``continuous-symlog``, ``original-and-symlog``, ``original-and-continuous-symlog``. If set to ``None``, PC source terms are kept in their original PCA-space.
:param target_manifold_dimensionality: (optional)
``int`` specifying the target dimensionality of the PCA manifold.
:param penalty_function: (optional)
``str`` specifying the weighting applied to each area.
Set ``penalty_function='peak'`` to weight each area by the rightmost peak location, :math:`\\sigma_{peak, i}`, for the :math:`i^{th}` dependent variable.
Set ``penalty_function='sigma'`` to weight each area continuously by the bandwidth.
Set ``penalty_function='log-sigma-over-peak'`` to weight each area continuously by the :math:`\\log_{10}` -transformed bandwidth, normalized by the rightmost peak location, :math:`\\sigma_{peak, i}`.
If ``penalty_function=None``, the area is not weighted.
:param norm: (optional)
``str`` specifying the norm to apply for all areas :math:`A_i`. ``norm='average'`` uses an arithmetic average, ``norm='max'`` uses the :math:`L_{\\infty}` norm,
``norm='median'`` uses a median area, ``norm='cumulative'`` uses a cumulative area and ``norm='min'`` uses a minimum area.
:param integrate_to_peak: (optional)
``bool`` specifying whether an individual area for the :math:`i^{th}` dependent variable should be computed only up to the rightmost peak location.
:param verbose: (optional)
``bool`` for printing verbose details.
:return:
- **ordered_variables** - ``list`` specifying the indices of the ordered variables.
- **selected_variables** - ``list`` specifying the indices of the selected variables that correspond to the minimum cost :math:`\\mathcal{L}`.
- **optimized_cost** - ``float`` specifying the cost corresponding to the optimized subset.
- **costs** - ``list`` specifying the costs, :math:`\\mathcal{L}`, from each iteration.
"""
__penalty_functions = ['peak', 'sigma', 'log-sigma-over-peak']
__norms = ['average', 'cumulative', 'max', 'median', 'min']
__source_spaces = ['symlog', 'continuous-symlog', 'original-and-symlog', 'original-and-continuous-symlog']
if not isinstance(X, np.ndarray):
raise ValueError("Parameter `X` has to be of type `numpy.ndarray`.")
try:
(n_observations, n_variables) = np.shape(X)
except:
raise ValueError("Parameter `X` has to have shape `(n_observations,n_variables)`.")
if not isinstance(X_source, np.ndarray):
raise ValueError("Parameter `X_source` has to be of type `numpy.ndarray`.")
try:
(n_observations_source, n_variables_source) = np.shape(X_source)
except:
raise ValueError("Parameter `X_source` has to have shape `(n_observations,n_variables)`.")
if n_variables_source != n_variables:
raise ValueError("Parameter `X_source` has different number of variables than `X`.")
# TODO: In the future, we might want to allow different number of observations, there is no reason why they should be equal.
if n_observations_source != n_observations:
raise ValueError("Parameter `X_source` has different number of observations than `X`.")
if not isinstance(variable_names, list):
raise ValueError("Parameter `variable_names` has to be of type `list`.")
if len(variable_names) != n_variables:
raise ValueError("Parameter `variable_names` has different number of variables than `X`.")
if not isinstance(scaling, str):
raise ValueError("Parameter `scaling` has to be of type `str`.")
if not isinstance(bandwidth_values, np.ndarray):
raise ValueError("Parameter `bandwidth_values` has to be of type `numpy.ndarray`.")
if target_variables is not None:
if not isinstance(target_variables, np.ndarray):
raise ValueError("Parameter `target_variables` has to be of type `numpy.ndarray`.")
try:
(n_d_hat_observations, n_target_variables) = np.shape(target_variables)
target_variables_names = ['X' + str(i) for i in range(0,n_target_variables)]
except:
raise ValueError("Parameter `target_variables` has to have shape `(n_observations,n_target_variables)`.")
if n_d_hat_observations != n_observations_source:
raise ValueError("Parameter `target_variables` has different number of observations than `X_source`.")
if not isinstance(add_transformed_source, bool):
raise ValueError("Parameter `add_transformed_source` has to be of type `bool`.")
if target_variables is None:
if not add_transformed_source:
raise ValueError("Either `target_variables` has to be specified or `add_transformed_source` has to be set to True.")
if source_space is not None:
if not isinstance(source_space, str):
raise ValueError("Parameter `source_space` has to be of type `str`.")
if source_space.lower() not in __source_spaces:
raise ValueError("Parameter `source_space` has to be one of the following: symlog`, `continuous-symlog`.")
if not isinstance(target_manifold_dimensionality, int):
raise ValueError("Parameter `target_manifold_dimensionality` has to be of type `int`.")
if penalty_function is not None:
if not isinstance(penalty_function, str):
raise ValueError("Parameter `penalty_function` has to be of type `str`.")
if penalty_function not in __penalty_functions:
raise ValueError("Parameter `penalty_function` has to be one of the following: 'peak', 'sigma', 'log-sigma-over-peak'.")
if not isinstance(norm, str):
raise ValueError("Parameter `norm` has to be of type `str`.")
if norm not in __norms:
raise ValueError("Parameter `norm` has to be one of the following: 'average', 'cumulative', 'max', 'median', 'min'.")
if not isinstance(integrate_to_peak, bool):
raise ValueError("Parameter `integrate_to_peak` has to be of type `bool`.")
if not isinstance(verbose, bool):
raise ValueError("Parameter `verbose` has to be of type `bool`.")
costs = []
if verbose: print('Optimizing...\n')
if verbose:
if add_transformed_source:
if source_space is not None:
print('PC source terms will be assessed in the ' + source_space + ' space.\n')
total_tic = time.perf_counter()
remaining_variables_list = [i for i in range(0,n_variables)]
ordered_variables = []
loop_counter = 0
# Iterate the algorithm: ---------------------------------------------------
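# Keep eliminating variables until only `target_manifold_dimensionality` variables remain: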
while len(remaining_variables_list) > target_manifold_dimensionality:
iteration_tic = time.perf_counter()
loop_counter += 1
if verbose:
print('Iteration No.' + str(loop_counter))
print('Currently eliminating variable from the following list: ')
print([variable_names[i] for i in remaining_variables_list])
current_cost_function = []
for i_variable in remaining_variables_list:
if verbose: print('\tCurrently eliminated variable: ' + variable_names[i_variable])
# Consider a subset with all variables but the currently eliminated one:
current_variables_list = [i for i in remaining_variables_list if i != i_variable]
if verbose:
print('\tRunning PCA for a subset:')
print('\t' + ', '.join([variable_names[i] for i in current_variables_list]))
pca = reduction.PCA(X[:,current_variables_list], scaling=scaling, n_components=target_manifold_dimensionality)
PCs = pca.transform(X[:,current_variables_list])
(PCs, _, _) = preprocess.center_scale(PCs, '-1to1')
if add_transformed_source:
PC_sources = pca.transform(X_source[:,current_variables_list], nocenter=True)
if source_space is not None:
if source_space == 'original-and-symlog':
transformed_PC_sources = preprocess.log_transform(PC_sources, method='symlog', threshold=1.e-4)
elif source_space == 'original-and-continuous-symlog':
transformed_PC_sources = preprocess.log_transform(PC_sources, method='continuous-symlog', threshold=1.e-4)
else:
transformed_PC_sources = preprocess.log_transform(PC_sources, method=source_space, threshold=1.e-4)
if target_variables is None:
if source_space == 'original-and-symlog' or source_space == 'original-and-continuous-symlog':
depvars = np.hstack((PC_sources, transformed_PC_sources))
depvar_names = ['SZ' + str(i) for i in range(0,target_manifold_dimensionality)] + ['symlog-SZ' + str(i) for i in range(0,target_manifold_dimensionality)]
elif source_space == 'symlog' or source_space == 'continuous-symlog':
depvars = cp.deepcopy(transformed_PC_sources)
depvar_names = ['symlog-SZ' + str(i) for i in range(0,target_manifold_dimensionality)]
else:
depvars = cp.deepcopy(PC_sources)
depvar_names = ['SZ' + str(i) for i in range(0,target_manifold_dimensionality)]
else:
if add_transformed_source:
if source_space == 'original-and-symlog' or source_space == 'original-and-continuous-symlog':
depvars = np.hstack((PC_sources, transformed_PC_sources, target_variables))
depvar_names = ['SZ' + str(i) for i in range(0,target_manifold_dimensionality)] + ['symlog-SZ' + str(i) for i in range(0,target_manifold_dimensionality)] + target_variables_names
elif source_space == 'symlog' or source_space == 'continuous-symlog':
depvars = np.hstack((transformed_PC_sources, target_variables))
depvar_names = ['symlog-SZ' + str(i) for i in range(0,target_manifold_dimensionality)] + target_variables_names
else:
depvars = np.hstack((PC_sources, target_variables))
depvar_names = ['SZ' + str(i) for i in range(0,target_manifold_dimensionality)] + target_variables_names
else:
depvars = cp.deepcopy(target_variables)
depvar_names = cp.deepcopy(target_variables_names)
current_variance_data = compute_normalized_variance(PCs, depvars, depvar_names=depvar_names, scale_unit_box = False, bandwidth_values=bandwidth_values)
current_area = cost_function_normalized_variance_derivative(current_variance_data, penalty_function=penalty_function, norm=norm, integrate_to_peak=integrate_to_peak)
if verbose: print('\tCost:\t%.4f' % current_area)
current_cost_function.append(current_area)
# Starting from the second iteration, we can make a comparison with the previous iteration's results:
if loop_counter > 1:
if current_area <= previous_area:
if verbose: print(colored('\tSAME OR BETTER', 'green'))
else:
if verbose: print(colored('\tWORSE', 'red'))
min_area = np.min(current_cost_function)
costs.append(min_area)
# Search for the variable whose removal will decrease the cost the most:
(worst_variable_index, ) = np.where(np.array(current_cost_function)==min_area)
# This handles cases where there are multiple minima with the same minimum cost value:
try:
worst_variable_index = int(worst_variable_index)
except:
worst_variable_index = int(worst_variable_index[0])
if verbose: print('\n\tVariable ' + variable_names[remaining_variables_list[worst_variable_index]] + ' is removed.\n\tCost:\t%.4f' % min_area + '\n')
# Append removed variable in the ascending order, this list is later flipped to have variables ordered from most to least important:
ordered_variables.append(remaining_variables_list[worst_variable_index])
# Create a new list of variables to loop over at the next iteration:
remaining_variables_list = [i for i in range(0,n_variables) if i not in ordered_variables]
if loop_counter > 1:
if min_area <= previous_area:
previous_area = min_area
else:
previous_area = min_area
iteration_toc = time.perf_counter()
if verbose: print(f'\tIteration time: {(iteration_toc - iteration_tic)/60:0.1f} minutes.' + '\n' + '-'*50)
# Compute the optimal subset where the overall cost from all iterations is minimized: ------------------
# One last time remove the worst variable:
del current_cost_function[worst_variable_index]
for i in remaining_variables_list:
ordered_variables.append(i)
for i in range(0,len(remaining_variables_list)):
costs.append(current_cost_function[i])
# Invert lists to have variables ordered from most to least important:
ordered_variables = ordered_variables[::-1]
costs = costs[::-1]
(min_cost_function_index, ) = np.where(costs==np.min(costs))
# This handles cases where there are multiple minima with the same minimum cost value:
try:
min_cost_function_index = int(min_cost_function_index)
except:
min_cost_function_index = int(min_cost_function_index[0])
selected_variables = list(np.array(ordered_variables)[0:min_cost_function_index])
optimized_cost = costs[min_cost_function_index]
if verbose:
print('Ordered variables:')
print(', '.join([variable_names[i] for i in ordered_variables]))
print(ordered_variables)
print('Final cost: %.4f' % min_area)
print('\nSelected variables:')
print(', '.join([variable_names[i] for i in selected_variables]))
print(selected_variables)
print('Lowest cost: %.4f' % optimized_cost)
total_toc = time.perf_counter()
if verbose: print(f'\nOptimization time: {(total_toc - total_tic)/60:0.1f} minutes.' + '\n' + '-'*50)
return ordered_variables, selected_variables, optimized_cost, costs
################################################################################
#
# Regression assessment
#
################################################################################
class RegressionAssessment:
"""
Wrapper class for storing all regression assessment metrics for a given
regression solution given by the observed dependent variables, :math:`\\pmb{\\phi}_o`,
and the predicted dependent variables, :math:`\\pmb{\\phi}_p`.
**Example:**
.. code:: python
from PCAfold import PCA, RegressionAssessment
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,3)
# Instantiate PCA class object:
pca_X = PCA(X, scaling='auto', n_components=2)
# Approximate the data set:
X_rec = pca_X.reconstruct(pca_X.transform(X))
# Instantiate RegressionAssessment class object:
regression_metrics = RegressionAssessment(X, X_rec)
# Access mean absolute error values:
MAE = regression_metrics.mean_absolute_error
In addition, all stratified regression metrics can be computed on a single variable:
.. code:: python
from PCAfold import variable_bins
# Generate bins:
(idx, bins_borders) = variable_bins(X[:,0], k=5, verbose=False)
# Instantiate RegressionAssessment class object:
stratified_regression_metrics = RegressionAssessment(X[:,0], X_rec[:,0], idx=idx)
# Access stratified mean absolute error values:
stratified_MAE = stratified_regression_metrics.stratified_mean_absolute_error
:param observed:
``numpy.ndarray`` specifying the observed values of dependent variables, :math:`\\pmb{\\phi}_o`. It should be of size ``(n_observations,)`` or ``(n_observations,n_variables)``.
:param predicted:
``numpy.ndarray`` specifying the predicted values of dependent variables, :math:`\\pmb{\\phi}_p`. It should be of size ``(n_observations,)`` or ``(n_observations,n_variables)``.
:param idx:
``numpy.ndarray`` of cluster classifications. It should be of size ``(n_observations,)`` or ``(n_observations,1)``.
:param variable_names: (optional)
``list`` of ``str`` specifying variable names.
:param use_global_mean: (optional)
``bool`` specifying if global mean of the observed variable should be used as a reference in :math:`R^2` calculation.
:param norm:
``str`` specifying the normalization, :math:`d_{norm}`, for NRMSE computation. It can be one of the following: ``std``, ``range``, ``root_square_mean``, ``root_square_range``, ``root_square_std``, ``abs_mean``.
:param use_global_norm: (optional)
``bool`` specifying if global norm of the observed variable should be used in NRMSE calculation.
:param tolerance:
``float`` specifying the tolerance for GDE computation.
**Attributes:**
- **coefficient_of_determination** - (read only) ``numpy.ndarray`` specifying the coefficient of determination, :math:`R^2`, values. It has size ``(1,n_variables)``.
- **mean_absolute_error** - (read only) ``numpy.ndarray`` specifying the mean absolute error (MAE) values. It has size ``(1,n_variables)``.
- **mean_squared_error** - (read only) ``numpy.ndarray`` specifying the mean squared error (MSE) values. It has size ``(1,n_variables)``.
- **root_mean_squared_error** - (read only) ``numpy.ndarray`` specifying the root mean squared error (RMSE) values. It has size ``(1,n_variables)``.
- **normalized_root_mean_squared_error** - (read only) ``numpy.ndarray`` specifying the normalized root mean squared error (NRMSE) values. It has size ``(1,n_variables)``.
- **good_direction_estimate** - (read only) ``float`` specifying the good direction estimate (GDE) value, treating the entire :math:`\\pmb{\\phi}_o` and :math:`\\pmb{\\phi}_p` as vectors. Note that if a single dependent variable is passed, GDE cannot be computed and is set to ``NaN``.
If ``idx`` has been specified:
- **stratified_coefficient_of_determination** - (read only) ``numpy.ndarray`` specifying the coefficient of determination, :math:`R^2`, values. It has size ``(1,n_variables)``.
- **stratified_mean_absolute_error** - (read only) ``numpy.ndarray`` specifying the mean absolute error (MAE) values. It has size ``(1,n_variables)``.
- **stratified_mean_squared_error** - (read only) ``numpy.ndarray`` specifying the mean squared error (MSE) values. It has size ``(1,n_variables)``.
- **stratified_root_mean_squared_error** - (read only) ``numpy.ndarray`` specifying the root mean squared error (RMSE) values. It has size ``(1,n_variables)``.
- **stratified_normalized_root_mean_squared_error** - (read only) ``numpy.ndarray`` specifying the normalized root mean squared error (NRMSE) values. It has size ``(1,n_variables)``.
"""
def __init__(self, observed, predicted, idx=None, variable_names=None, use_global_mean=False, norm='std', use_global_norm=False, tolerance=0.05):
if not isinstance(observed, np.ndarray):
raise ValueError("Parameter `observed` has to be of type `numpy.ndarray`.")
try:
(n_observed,) = np.shape(observed)
n_var_observed = 1
observed = observed[:,None]
except:
(n_observed, n_var_observed) = np.shape(observed)
if not isinstance(predicted, np.ndarray):
raise ValueError("Parameter `predicted` has to be of type `numpy.ndarray`.")
try:
(n_predicted,) = np.shape(predicted)
n_var_predicted = 1
predicted = predicted[:,None]
except:
(n_predicted, n_var_predicted) = np.shape(predicted)
if n_observed != n_predicted:
raise ValueError("Parameter `observed` has different number of elements than `predicted`.")
if n_var_observed != n_var_predicted:
raise ValueError("Parameter `observed` has different number of elements than `predicted`.")
self.__n_variables = n_var_observed
if idx is not None:
if isinstance(idx, np.ndarray):
if not all(isinstance(i, np.integer) for i in idx.ravel()):
raise ValueError("Parameter `idx` can only contain integers.")
else:
raise ValueError("Parameter `idx` has to be of type `numpy.ndarray`.")
try:
(n_observations_idx, ) = np.shape(idx)
n_idx = 1
except:
(n_observations_idx, n_idx) = np.shape(idx)
if n_idx != 1:
raise ValueError("Parameter `idx` has to have size `(n_observations,)` or `(n_observations,1)`.")
if n_observations_idx != n_observed:
raise ValueError('Vector of cluster classifications `idx` has different number of observations than the original data set `X`.')
if n_var_observed != 1:
raise ValueError('Stratified regression metrics can only be computed on a single vector.')
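# Pre-compute the number of clusters, their populations, and the per-cluster observed min/max used by the stratified metrics: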
self.__n_clusters = len(np.unique(idx))
self.__cluster_populations = preprocess.get_populations(idx)
self.__cluster_min = []
self.__cluster_max = []
for i in range(0,self.__n_clusters):
(cluster_indices, ) = np.where(idx==i)
self.__cluster_min.append(np.min(observed[cluster_indices,:]))
self.__cluster_max.append(np.max(observed[cluster_indices,:]))
if not isinstance(use_global_mean, bool):
raise ValueError("Parameter `use_global_mean` has to be a boolean.")
if variable_names is not None:
if not isinstance(variable_names, list):
raise ValueError("Parameter `variable_names` has to be of type `list`.")
else:
if self.__n_variables != len(variable_names):
raise ValueError("Parameter `variable_names` has different number of variables than `observed` and `predicted`.")
else:
variable_names = []
for i in range(0,self.__n_variables):
variable_names.append('X' + str(i+1))
self.__variable_names = variable_names
self.__coefficient_of_determination_matrix = np.ones((1,self.__n_variables))
self.__mean_absolute_error_matrix = np.ones((1,self.__n_variables))
self.__mean_squared_error_matrix = np.ones((1,self.__n_variables))
self.__root_mean_squared_error_matrix = np.ones((1,self.__n_variables))
self.__normalized_root_mean_squared_error_matrix = np.ones((1,self.__n_variables))
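# GDE is only defined when more than one dependent variable is available; for a single variable it is set to NaN: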
if n_var_observed > 1:
_, self.__good_direction_estimate_value = good_direction_estimate(observed, predicted, tolerance=tolerance)
self.__good_direction_estimate_matrix = self.__good_direction_estimate_value * np.ones((1,self.__n_variables))
else:
self.__good_direction_estimate_value = np.NAN
self.__good_direction_estimate_matrix = self.__good_direction_estimate_value * np.ones((1,self.__n_variables))
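# Populate the per-variable (non-stratified) regression metrics: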
for i in range(0,self.__n_variables):
self.__coefficient_of_determination_matrix[0,i] = coefficient_of_determination(observed[:,i], predicted[:,i])
self.__mean_absolute_error_matrix[0,i] = mean_absolute_error(observed[:,i], predicted[:,i])
self.__mean_squared_error_matrix[0,i] = mean_squared_error(observed[:,i], predicted[:,i])
self.__root_mean_squared_error_matrix[0,i] = root_mean_squared_error(observed[:,i], predicted[:,i])
self.__normalized_root_mean_squared_error_matrix[0,i] = normalized_root_mean_squared_error(observed[:,i], predicted[:,i], norm=norm)
if idx is not None:
self.__stratified_coefficient_of_determination = stratified_coefficient_of_determination(observed, predicted, idx=idx, use_global_mean=use_global_mean)
self.__stratified_mean_absolute_error = stratified_mean_absolute_error(observed, predicted, idx=idx)
self.__stratified_mean_squared_error = stratified_mean_squared_error(observed, predicted, idx=idx)
self.__stratified_root_mean_squared_error = stratified_root_mean_squared_error(observed, predicted, idx=idx)
self.__stratified_normalized_root_mean_squared_error = stratified_normalized_root_mean_squared_error(observed, predicted, idx=idx, norm=norm, use_global_norm=use_global_norm)
else:
self.__stratified_coefficient_of_determination = None
self.__stratified_mean_absolute_error = None
self.__stratified_mean_squared_error = None
self.__stratified_root_mean_squared_error = None
self.__stratified_normalized_root_mean_squared_error = None
@property
def coefficient_of_determination(self):
return self.__coefficient_of_determination_matrix
@property
def mean_absolute_error(self):
return self.__mean_absolute_error_matrix
@property
def mean_squared_error(self):
return self.__mean_squared_error_matrix
@property
def root_mean_squared_error(self):
return self.__root_mean_squared_error_matrix
@property
def normalized_root_mean_squared_error(self):
return self.__normalized_root_mean_squared_error_matrix
@property
def good_direction_estimate(self):
return self.__good_direction_estimate_value
@property
def stratified_coefficient_of_determination(self):
return self.__stratified_coefficient_of_determination
@property
def stratified_mean_absolute_error(self):
return self.__stratified_mean_absolute_error
@property
def stratified_mean_squared_error(self):
return self.__stratified_mean_squared_error
@property
def stratified_root_mean_squared_error(self):
return self.__stratified_root_mean_squared_error
@property
def stratified_normalized_root_mean_squared_error(self):
return self.__stratified_normalized_root_mean_squared_error
# ------------------------------------------------------------------------------
def print_metrics(self, table_format=['raw'], float_format='.4f', metrics=None, comparison=None):
"""
Prints regression assessment metrics as raw text, in ``tex`` format and/or as ``pandas.DataFrame``.
**Example:**
.. code:: python
from PCAfold import PCA, RegressionAssessment
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,3)
# Instantiate PCA class object:
pca_X = PCA(X, scaling='auto', n_components=2)
# Approximate the data set:
X_rec = pca_X.reconstruct(pca_X.transform(X))
# Instantiate RegressionAssessment class object:
regression_metrics = RegressionAssessment(X, X_rec)
# Print regression metrics:
regression_metrics.print_metrics(table_format=['raw', 'tex', 'pandas'],
float_format='.4f',
metrics=['R2', 'NRMSE', 'GDE'])
.. note::
Adding ``'raw'`` to the ``table_format`` list will result in printing:
.. code-block:: text
-------------------------
X1
R2: 0.9900
NRMSE: 0.0999
GDE: 70.0000
-------------------------
X2
R2: 0.6126
NRMSE: 0.6224
GDE: 70.0000
-------------------------
X3
R2: 0.6368
NRMSE: 0.6026
GDE: 70.0000
Adding ``'tex'`` to the ``table_format`` list will result in printing:
.. code-block:: text
\\begin{table}[h!]
\\begin{center}
\\begin{tabular}{llll} \\toprule
& \\textit{X1} & \\textit{X2} & \\textit{X3} \\\\ \\midrule
R2 & 0.9900 & 0.6126 & 0.6368 \\\\
NRMSE & 0.0999 & 0.6224 & 0.6026 \\\\
GDE & 70.0000 & 70.0000 & 70.0000 \\\\
\\end{tabular}
\\caption{}\\label{}
\\end{center}
\\end{table}
Adding ``'pandas'`` to the ``table_format`` list (works well in Jupyter notebooks) will result in printing:
.. image:: ../images/generate-pandas-table.png
:width: 300
:align: center
Additionally, the current object of ``RegressionAssessment`` class can be compared with another object:
.. code:: python
from PCAfold import PCA, RegressionAssessment
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,3)
Y = np.random.rand(100,3)
# Instantiate PCA class object:
pca_X = PCA(X, scaling='auto', n_components=2)
pca_Y = PCA(Y, scaling='auto', n_components=2)
# Approximate the data set:
X_rec = pca_X.reconstruct(pca_X.transform(X))
Y_rec = pca_Y.reconstruct(pca_Y.transform(Y))
# Instantiate RegressionAssessment class object:
regression_metrics_X = RegressionAssessment(X, X_rec)
regression_metrics_Y = RegressionAssessment(Y, Y_rec)
# Print regression metrics:
regression_metrics_X.print_metrics(table_format=['raw', 'pandas'],
float_format='.4f',
metrics=['R2', 'NRMSE', 'GDE'],
comparison=regression_metrics_Y)
.. note::
Adding ``'raw'`` to the ``table_format`` list will result in printing:
.. code-block:: text
-------------------------
X1
R2: 0.9133 BETTER
NRMSE: 0.2944 BETTER
GDE: 67.0000 WORSE
-------------------------
X2
R2: 0.5969 WORSE
NRMSE: 0.6349 WORSE
GDE: 67.0000 WORSE
-------------------------
X3
R2: 0.6175 WORSE
NRMSE: 0.6185 WORSE
GDE: 67.0000 WORSE
Adding ``'pandas'`` to the ``table_format`` list (works well in Jupyter notebooks) will result in printing:
.. image:: ../images/generate-pandas-table-comparison.png
:width: 300
:align: center
:param table_format: (optional)
``list`` of ``str`` specifying the format(s) in which the table should be printed.
Strings can only be ``'raw'``, ``'tex'`` and/or ``'pandas'``.
:param float_format: (optional)
``str`` specifying the display format for the numerical entries inside the
table. By default it is set to ``'.4f'``.
:param metrics: (optional)
``list`` of ``str`` specifying which metrics should be printed. Strings can only be ``'R2'``, ``'MAE'``, ``'MSE'``, ``'RMSE'``, ``'NRMSE'``, ``'GDE'``.
If metrics is set to ``None``, all available metrics will be printed.
:param comparison: (optional)
object of ``RegressionAssessment`` class specifying the metrics that should be compared with the current regression metrics.
"""
__table_formats = ['raw', 'tex', 'pandas']
__metrics_names = ['R2', 'MAE', 'MSE', 'RMSE', 'NRMSE', 'GDE']
__metrics_dict = {'R2': self.__coefficient_of_determination_matrix,
'MAE': self.__mean_absolute_error_matrix,
'MSE': self.__mean_squared_error_matrix,
'RMSE': self.__root_mean_squared_error_matrix,
'NRMSE': self.__normalized_root_mean_squared_error_matrix,
'GDE': self.__good_direction_estimate_matrix}
if comparison is not None:
__comparison_metrics_dict = {'R2': comparison.coefficient_of_determination,
'MAE': comparison.mean_absolute_error,
'MSE': comparison.mean_squared_error,
'RMSE': comparison.root_mean_squared_error,
'NRMSE': comparison.normalized_root_mean_squared_error,
'GDE': comparison.good_direction_estimate * np.ones_like(comparison.coefficient_of_determination)}
if not isinstance(table_format, list):
raise ValueError("Parameter `table_format` has to be of type `list`.")
for item in table_format:
if item not in __table_formats:
raise ValueError("Parameter `table_format` can only contain 'raw', 'tex' and/or 'pandas'.")
if not isinstance(float_format, str):
raise ValueError("Parameter `float_format` has to be of type `str`.")
if metrics is not None:
if not isinstance(metrics, list):
raise ValueError("Parameter `metrics` has to be of type `list`.")
for item in metrics:
if item not in __metrics_names:
raise ValueError("Parameter `metrics` can only be: 'R2', 'MAE', 'MSE', 'RMSE', 'NRMSE', 'GDE'.")
else:
metrics = __metrics_names
if comparison is None:
for item in set(table_format):
if item=='raw':
for i in range(0,self.__n_variables):
print('-'*25 + '\n' + self.__variable_names[i])
metrics_to_print = []
for metric in metrics:
metrics_to_print.append(__metrics_dict[metric][0,i])
for j in range(0,len(metrics)):
print(metrics[j] + ':\t' + ('%' + float_format) % metrics_to_print[j])
if item=='tex':
import pandas as pd
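# Stack the requested metric rows below a dummy row of zeros, which is stripped off before building the table: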
metrics_to_print = np.zeros_like(self.__coefficient_of_determination_matrix)
for metric in metrics:
metrics_to_print = np.vstack((metrics_to_print, __metrics_dict[metric]))
metrics_to_print = metrics_to_print[1::,:]
metrics_table = | pd.DataFrame(metrics_to_print, columns=self.__variable_names, index=metrics) | pandas.DataFrame |
import json
import numpy as np
import pytest
from pandas import DataFrame, Index, json_normalize
import pandas._testing as tm
from pandas.io.json._normalize import nested_to_record
@pytest.fixture
def deep_nested():
# deeply nested data
return [
{
"country": "USA",
"states": [
{
"name": "California",
"cities": [
{"name": "San Francisco", "pop": 12345},
{"name": "Los Angeles", "pop": 12346},
],
},
{
"name": "Ohio",
"cities": [
{"name": "Columbus", "pop": 1234},
{"name": "Cleveland", "pop": 1236},
],
},
],
},
{
"country": "Germany",
"states": [
{"name": "Bayern", "cities": [{"name": "Munich", "pop": 12347}]},
{
"name": "Nordrhein-Westfalen",
"cities": [
{"name": "Duesseldorf", "pop": 1238},
{"name": "Koeln", "pop": 1239},
],
},
],
},
]
@pytest.fixture
def state_data():
return [
{
"counties": [
{"name": "Dade", "population": 12345},
{"name": "Broward", "population": 40000},
{"name": "<NAME>", "population": 60000},
],
"info": {"governor": "<NAME>"},
"shortname": "FL",
"state": "Florida",
},
{
"counties": [
{"name": "Summit", "population": 1234},
{"name": "Cuyahoga", "population": 1337},
],
"info": {"governor": "<NAME>"},
"shortname": "OH",
"state": "Ohio",
},
]
@pytest.fixture
def author_missing_data():
return [
{"info": None},
{
"info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"},
"author_name": {"first": "Jane", "last_name": "Doe"},
},
]
@pytest.fixture
def missing_metadata():
return [
{
"name": "Alice",
"addresses": [
{
"number": 9562,
"street": "Morris St.",
"city": "Massillon",
"state": "OH",
"zip": 44646,
}
],
},
{
"addresses": [
{
"number": 8449,
"street": "Spring St.",
"city": "Elizabethton",
"state": "TN",
"zip": 37643,
}
]
},
]
@pytest.fixture
def max_level_test_input_data():
"""
input data to test json_normalize with max_level param
"""
return [
{
"CreatedBy": {"Name": "User001"},
"Lookup": {
"TextField": "Some text",
"UserField": {"Id": "ID001", "Name": "Name001"},
},
"Image": {"a": "b"},
}
]
class TestJSONNormalize:
def test_simple_records(self):
recs = [
{"a": 1, "b": 2, "c": 3},
{"a": 4, "b": 5, "c": 6},
{"a": 7, "b": 8, "c": 9},
{"a": 10, "b": 11, "c": 12},
]
result = json_normalize(recs)
expected = DataFrame(recs)
tm.assert_frame_equal(result, expected)
def test_simple_normalize(self, state_data):
result = json_normalize(state_data[0], "counties")
expected = DataFrame(state_data[0]["counties"])
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, "counties")
expected = []
for rec in state_data:
expected.extend(rec["counties"])
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, "counties", meta="state")
expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_empty_array(self):
result = json_normalize([])
expected = DataFrame()
tm.assert_frame_equal(result, expected)
def test_simple_normalize_with_separator(self, deep_nested):
# GH 14883
result = json_normalize({"A": {"A": 1, "B": 2}})
expected = DataFrame([[1, 2]], columns=["A.A", "A.B"])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize({"A": {"A": 1, "B": 2}}, sep="_")
expected = DataFrame([[1, 2]], columns=["A_A", "A_B"])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize({"A": {"A": 1, "B": 2}}, sep="\u03c3")
expected = DataFrame([[1, 2]], columns=["A\u03c3A", "A\u03c3B"])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize(
deep_nested,
["states", "cities"],
meta=["country", ["states", "name"]],
sep="_",
)
expected = Index(["name", "pop", "country", "states_name"]).sort_values()
assert result.columns.sort_values().equals(expected)
def test_value_array_record_prefix(self):
# GH 21536
result = json_normalize({"A": [1, 2]}, "A", record_prefix="Prefix.")
expected = DataFrame([[1], [2]], columns=["Prefix.0"])
tm.assert_frame_equal(result, expected)
def test_nested_object_record_path(self):
# GH 22706
data = {
"state": "Florida",
"info": {
"governor": "<NAME>",
"counties": [
{"name": "Dade", "population": 12345},
{"name": "Broward", "population": 40000},
{"name": "<NAME>", "population": 60000},
],
},
}
result = json_normalize(data, record_path=["info", "counties"])
expected = DataFrame(
[["Dade", 12345], ["Broward", 40000], ["<NAME>", 60000]],
columns=["name", "population"],
)
tm.assert_frame_equal(result, expected)
def test_more_deeply_nested(self, deep_nested):
result = json_normalize(
deep_nested, ["states", "cities"], meta=["country", ["states", "name"]]
)
ex_data = {
"country": ["USA"] * 4 + ["Germany"] * 3,
"states.name": [
"California",
"California",
"Ohio",
"Ohio",
"Bayern",
"Nordrhein-Westfalen",
"Nordrhein-Westfalen",
],
"name": [
"<NAME>",
"Los Angeles",
"Columbus",
"Cleveland",
"Munich",
"Duesseldorf",
"Koeln",
],
"pop": [12345, 12346, 1234, 1236, 12347, 1238, 1239],
}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_shallow_nested(self):
data = [
{
"state": "Florida",
"shortname": "FL",
"info": {"governor": "<NAME>"},
"counties": [
{"name": "Dade", "population": 12345},
{"name": "Broward", "population": 40000},
{"name": "<NAME>", "population": 60000},
],
},
{
"state": "Ohio",
"shortname": "OH",
"info": {"governor": "<NAME>"},
"counties": [
{"name": "Summit", "population": 1234},
{"name": "Cuyahoga", "population": 1337},
],
},
]
result = json_normalize(
data, "counties", ["state", "shortname", ["info", "governor"]]
)
ex_data = {
"name": ["Dade", "Broward", "<NAME>", "Summit", "Cuyahoga"],
"state": ["Florida"] * 3 + ["Ohio"] * 2,
"shortname": ["FL", "FL", "FL", "OH", "OH"],
"info.governor": ["<NAME>"] * 3 + ["<NAME>"] * 2,
"population": [12345, 40000, 60000, 1234, 1337],
}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_meta_name_conflict(self):
data = [
{
"foo": "hello",
"bar": "there",
"data": [
{"foo": "something", "bar": "else"},
{"foo": "something2", "bar": "else2"},
],
}
]
msg = r"Conflicting metadata name (foo|bar), need distinguishing prefix"
with pytest.raises(ValueError, match=msg):
json_normalize(data, "data", meta=["foo", "bar"])
result = json_normalize(data, "data", meta=["foo", "bar"], meta_prefix="meta")
for val in ["metafoo", "metabar", "foo", "bar"]:
assert val in result
def test_meta_parameter_not_modified(self):
# GH 18610
data = [
{
"foo": "hello",
"bar": "there",
"data": [
{"foo": "something", "bar": "else"},
{"foo": "something2", "bar": "else2"},
],
}
]
COLUMNS = ["foo", "bar"]
result = json_normalize(data, "data", meta=COLUMNS, meta_prefix="meta")
assert COLUMNS == ["foo", "bar"]
for val in ["metafoo", "metabar", "foo", "bar"]:
assert val in result
def test_record_prefix(self, state_data):
result = json_normalize(state_data[0], "counties")
expected = DataFrame(state_data[0]["counties"])
tm.assert_frame_equal(result, expected)
result = json_normalize(
state_data, "counties", meta="state", record_prefix="county_"
)
expected = []
for rec in state_data:
expected.extend(rec["counties"])
expected = DataFrame(expected)
expected = expected.rename(columns=lambda x: "county_" + x)
expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_non_ascii_key(self):
testjson = (
b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},'
+ b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]'
).decode("utf8")
testdata = {
b"\xc3\x9cnic\xc3\xb8de".decode("utf8"): [0, 1],
"sub.A": [1, 3],
"sub.B": [2, 4],
}
expected = DataFrame(testdata)
result = json_normalize(json.loads(testjson))
tm.assert_frame_equal(result, expected)
def test_missing_field(self, author_missing_data):
# GH20030:
result = json_normalize(author_missing_data)
ex_data = [
{
"info": np.nan,
"info.created_at": np.nan,
"info.last_updated": np.nan,
"author_name.first": np.nan,
"author_name.last_name": np.nan,
},
{
"info": None,
"info.created_at": "11/08/1993",
"info.last_updated": "26/05/2012",
"author_name.first": "Jane",
"author_name.last_name": "Doe",
},
]
expected = DataFrame(ex_data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"max_level,expected",
[
(
0,
[
{
"TextField": "Some text",
"UserField": {"Id": "ID001", "Name": "Name001"},
"CreatedBy": {"Name": "User001"},
"Image": {"a": "b"},
},
{
"TextField": "Some text",
"UserField": {"Id": "ID001", "Name": "Name001"},
"CreatedBy": {"Name": "User001"},
"Image": {"a": "b"},
},
],
),
(
1,
[
{
"TextField": "Some text",
"UserField.Id": "ID001",
"UserField.Name": "Name001",
"CreatedBy": {"Name": "User001"},
"Image": {"a": "b"},
},
{
"TextField": "Some text",
"UserField.Id": "ID001",
"UserField.Name": "Name001",
"CreatedBy": {"Name": "User001"},
"Image": {"a": "b"},
},
],
),
],
)
def test_max_level_with_records_path(self, max_level, expected):
# GH23843: Enhanced JSON normalize
test_input = [
{
"CreatedBy": {"Name": "User001"},
"Lookup": [
{
"TextField": "Some text",
"UserField": {"Id": "ID001", "Name": "Name001"},
},
{
"TextField": "Some text",
"UserField": {"Id": "ID001", "Name": "Name001"},
},
],
"Image": {"a": "b"},
"tags": [
{"foo": "something", "bar": "else"},
{"foo": "something2", "bar": "else2"},
],
}
]
result = json_normalize(
test_input,
record_path=["Lookup"],
meta=[["CreatedBy"], ["Image"]],
max_level=max_level,
)
expected_df = DataFrame(data=expected, columns=result.columns.values)
tm.assert_equal(expected_df, result)
def test_nested_flattening_consistent(self):
# see gh-21537
df1 = json_normalize([{"A": {"B": 1}}])
df2 = json_normalize({"dummy": [{"A": {"B": 1}}]}, "dummy")
# They should be the same.
tm.assert_frame_equal(df1, df2)
def test_nonetype_record_path(self, nulls_fixture):
# see gh-30148
# should not raise TypeError
result = json_normalize(
[
{"state": "Texas", "info": nulls_fixture},
{"state": "Florida", "info": [{"i": 2}]},
],
record_path=["info"],
)
expected = DataFrame({"i": 2}, index=[0])
tm.assert_equal(result, expected)
def test_non_iterable_record_path_errors(self):
# see gh-30148
test_input = {"state": "Texas", "info": 1}
test_path = "info"
msg = (
f"{test_input} has non iterable value 1 for path {test_path}. "
"Must be iterable or null."
)
with pytest.raises(TypeError, match=msg):
json_normalize([test_input], record_path=[test_path])
def test_meta_non_iterable(self):
# GH 31507
data = """[{"id": 99, "data": [{"one": 1, "two": 2}]}]"""
result = json_normalize(json.loads(data), record_path=["data"], meta=["id"])
expected = DataFrame(
{"one": [1], "two": [2], "id": np.array([99], dtype=object)}
)
tm.assert_frame_equal(result, expected)
class TestNestedToRecord:
def test_flat_stays_flat(self):
recs = [dict(flat1=1, flat2=2), dict(flat1=3, flat2=4)]
result = nested_to_record(recs)
expected = recs
assert result == expected
def test_one_level_deep_flattens(self):
data = dict(flat1=1, dict1=dict(c=1, d=2))
result = nested_to_record(data)
expected = {"dict1.c": 1, "dict1.d": 2, "flat1": 1}
assert result == expected
def test_nested_flattens(self):
data = dict(flat1=1, dict1=dict(c=1, d=2), nested=dict(e=dict(c=1, d=2), d=2))
result = nested_to_record(data)
expected = {
"dict1.c": 1,
"dict1.d": 2,
"flat1": 1,
"nested.d": 2,
"nested.e.c": 1,
"nested.e.d": 2,
}
assert result == expected
def test_json_normalize_errors(self, missing_metadata):
# GH14583:
# If meta keys are not always present a new option to set
# errors='ignore' has been implemented
msg = "Try running with errors='ignore' as key 'name' is not always present"
with pytest.raises(KeyError, match=msg):
json_normalize(
data=missing_metadata,
record_path="addresses",
meta="name",
errors="raise",
)
def test_missing_meta(self, missing_metadata):
# GH25468
# If metadata is nullable with errors set to ignore, the null values
# should be numpy.nan values
result = json_normalize(
data=missing_metadata, record_path="addresses", meta="name", errors="ignore"
)
ex_data = [
[9562, "Morris St.", "Massillon", "OH", 44646, "Alice"],
[8449, "Spring St.", "Elizabethton", "TN", 37643, np.nan],
]
columns = ["city", "number", "state", "street", "zip", "name"]
columns = ["number", "street", "city", "state", "zip", "name"]
expected = | DataFrame(ex_data, columns=columns) | pandas.DataFrame |
"""
Collection of functions used for the stitching.
IMPORTANT:
The identification of the organization of the fovs in the composite image
can be simplified if the (0,0) coords of the stage/camera are set
to the same position for all machines used in the analysis.
In our case we started running experiments with the coords not adjusted,
so the position of (0,0) is different for each machine used to
generate the data.
"""
from typing import *
import logging
import shutil
import copy
import itertools
import math
import pickle
import zarr
import sys
import operator
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from itertools import groupby
from pathlib import Path
from sklearn.neighbors import NearestNeighbors
import sklearn.linear_model as linmod
from skimage.feature import register_translation
from skimage import measure
from scipy.optimize import minimize
from pynndescent import NNDescent
from pysmFISH.logger_utils import selected_logger
from pysmFISH.fovs_registration import create_fake_image
from pysmFISH.data_models import Dataset
from pysmFISH import io
class organize_square_tiles():
"""Class designed to determine the tile organization and identify the coords of the
overlapping regions between the tiles.
IMPORTANT: The normalize_coords method should be adjusted according to the
setup of the microscope.
"""
def __init__(self, experiment_fpath:str,dataset: pd.DataFrame,
metadata:Dict,round_num:int):
"""Class initialization
Args:
experiment_fpath (str): Path to the experiment to process
dataset (pd.DataFrame): Properties of the images of the experiment
metadata (Dict): Metadata describing the experiment
round_num (int): Reference acquisition round number
"""
self.logger = selected_logger()
self.experiment_fpath = Path(experiment_fpath)
self.dataset = dataset
self.metadata = metadata
self.round_num = round_num
self.experiment_name = self.metadata['experiment_name']
self.stitching_channel = self.metadata['stitching_channel']
self.overlapping_percentage = int(self.metadata['overlapping_percentage']) / 100
self.pixel_size = self.metadata['pixel_microns']
self.img_width = self.metadata['img_width']
self.img_height = self.metadata['img_height']
logging.getLogger('matplotlib.font_manager').disabled = True
if self.img_width == self.img_height:
self.img_size = self.img_width
else:
self.logger.error(f'the images to stitch are not square')
sys.exit(f'the images to stitch are not square')
def extract_microscope_coords(self):
"""Method to extract images coords in the stage reference
system"""
selected = self.dataset.loc[self.dataset.round_num == self.round_num,
['round_num','fov_num','fov_acquisition_coords_x','fov_acquisition_coords_y']]
selected.drop_duplicates(subset=['fov_num'],inplace=True)
selected.sort_values(by='fov_num', ascending=True, inplace=True)
self.x_coords = selected.loc[:,'fov_acquisition_coords_x'].to_numpy()
self.y_coords = selected.loc[:,'fov_acquisition_coords_y'].to_numpy()
def normalize_coords(self):
"""
Normalize the coords according to how the stage/camera are set.
This function must be modified according to the stage/camera setup.
ROBOFISH1 has stage with x increasing left-> right and y top->bottom
------> (x)
|
|
V (y)
ROBOFISH2 has stage with x increasing right-> left and y top->bottom
(x) <------
|
|
V (y)
ROBOFISH3 has stage with x increasing left-> right and y bottom->top
^ (y)
|
|
------> (x)
Axis modifications steps:
(1) The reference system will be first converted to image style:
------> (x)
|
|
V (y)
        This step will cause a change in the position of the reference corner
        for each fov. After image acquisition the reference corner is top-left;
        however, after converting the axis direction to image-style the reference
        corner will change position:
ROBOFISH1: top-left --> top-left
ROBOFISH2: top-left --> top-right
ROBOFISH3: top-left --> bottom-left
(2) The coords will be translated to (0,0)
(3) then to matrix (python) notation
------> (columns)
|
|
V (rows)
"""
# port the coords to image type coords
if self.metadata['machine'] == 'ROBOFISH2':
self.x_coords = - self.x_coords
self.reference_corner_fov_position = 'top-right'
elif self.metadata['machine'] == 'ROBOFISH3':
self.x_coords = - self.x_coords
self.y_coords = - self.y_coords
self.reference_corner_fov_position = 'bottom-left'
elif self.metadata['machine'] == 'ROBOFISH1':
self.reference_corner_fov_position = 'top-left'
elif self.metadata['machine'] == 'NOT_DEFINED':
self.logger.error(f'Need to define the specs for stitching NOT_DEFINED machine')
sys.exit(f'Need to define the specs for stitching NOT_DEFINED machine')
else:
self.logger.error(f'define the right machine used to collected the data')
sys.exit(f'define the right machine used to collected the data')
# shift the coords to reference point (0,0)
# consider that we get the top-right corner of the image as well
y_min = np.amin(self.y_coords)
x_min = np.amin(self.x_coords)
x_max = np.amax(self.x_coords)
y_max = np.amax(self.y_coords)
# Put the coords to zero
if x_min >=0 :
self.x_coords = self.x_coords - x_min
else:
self.x_coords = self.x_coords + np.abs(x_min)
if y_min>0:
self.y_coords = self.y_coords - y_min
else:
self.y_coords = self.y_coords + np.abs(y_min)
# if x_max >=0 :
# self.x_coords = self.x_coords - x_min
# else:
# self.x_coords = self.x_coords + np.abs(x_min)
# if y_max>0:
# self.y_coords = self.y_coords - y_min
# else:
# self.y_coords = self.y_coords + np.abs(y_min)
# change the coords from x,y to r,c
adjusted_coords = np.zeros([self.x_coords.shape[0],2])
adjusted_coords[:,0] = self.y_coords
adjusted_coords[:,1] = self.x_coords
# move coords to pxl space
self.tile_corners_coords_pxl = adjusted_coords / self.pixel_size
# def save_graph_original_coords(self):
# to correct because I already converted the coords to image
# # Turn interactive plotting off
# saving_fpath = self.experiment_fpath / 'output_figures' / 'microscope_space_tiles_organization.png'
# plt.ioff()
# # Create image type axes
# labels = [str(nr) for nr in np.arange(self.x_coords.shape[0])]
# fig = plt.figure(figsize=(20,10))
# plt.plot(self.x_coords,self.y_coords,'or')
# for label, x, y in zip(labels, self.x_coords,self.y_coords):
# plt.annotate(
# label,
# xy=(x,y), xytext=(-2, 2),
# textcoords='offset points', ha='center', va='bottom',fontsize=12)
# plt.tight_layout()
# plt.savefig(saving_fpath)
def save_graph_image_space_coords(self):
"""Method used to save the organization of the tiles
"""
# Turn interactive plotting off
saving_fpath = self.experiment_fpath / 'output_figures' / 'image_space_tiles_organization.png'
plt.ioff()
# Create image type axes
labels = [str(nr) for nr in np.arange(self.tile_corners_coords_pxl.shape[0])]
fig = plt.figure(figsize=(20,10))
plt.gca().invert_yaxis()
plt.plot(self.tile_corners_coords_pxl[:,1],self.tile_corners_coords_pxl[:,0],'or')
for label, x, y in zip(labels, self.tile_corners_coords_pxl[:,1],self.tile_corners_coords_pxl[:,0]):
plt.annotate(
label,
xy=(x,y), xytext=(-2, 2),
textcoords='offset points', ha='center', va='bottom',fontsize=12)
plt.tight_layout()
plt.savefig(saving_fpath)
def identify_adjacent_tiles(self):
"""Method that use Nearest neighbors to identify the beighbouring tiles
"""
shift_percent_tolerance = 0.05
searching_radius = self.img_size - (self.img_size*self.overlapping_percentage) + (self.img_size*shift_percent_tolerance)
nn = NearestNeighbors(n_neighbors=5,radius=searching_radius, metric='euclidean')
nn.fit(self.tile_corners_coords_pxl)
self.dists, self.indices = nn.kneighbors(self.tile_corners_coords_pxl, return_distance=True)
def determine_overlapping_regions(self):
"""Method used to calculate the coords of the overlapping regions between the tiles.
"""
# remember that overlapping region can be an empty dictionary
self.overlapping_regions = {}
self.overlapping_order ={}
for idx in np.arange(self.indices.shape[0]):
self.overlapping_regions[idx] = {}
self.overlapping_order[idx] = {}
for idx in np.arange(self.indices.shape[0]):
# Determine the indices that identify the correct adjacent
processing_indices = self.indices[idx,:]
processing_dists = self.dists[idx,:]
ref_tile = processing_indices[0]
self.overlapping_regions[ref_tile] = {}
self.overlapping_order[ref_tile] = {}
trimmed_indices = processing_indices[1:]
trimmed_dists = processing_dists[1:]
idx_adj = np.where(trimmed_dists < self.img_size)
adj_tiles_id = trimmed_indices[idx_adj]
adj_cpls = [(ref_tile, adj_tile) for adj_tile in adj_tiles_id]
# remove pairs that are already selected
only_new_cpls = [cpl for cpl in adj_cpls if (cpl[1],cpl[0]) not in self.overlapping_regions[cpl[1]].keys()]
# only_new_cpls = [cpl for cpl in adj_cpls]
if self.reference_corner_fov_position == 'top-left':
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords
r_br = tile2_r_coords + self.img_height
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords
r_br = tile1_r_coords + self.img_height
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords
c_br = tile2_c_coords + self.img_width
col_order = ('right','left')
else:
c_tl = tile2_c_coords
c_br = tile1_c_coords + self.img_width
col_order = ('left','right')
self.overlapping_regions[ref_tile][cpl] = [r_tl, r_br, c_tl, c_br]
self.overlapping_order[ref_tile][cpl] = {'row_order':row_order,'column_order':col_order}
elif self.reference_corner_fov_position == 'top-right':
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords
r_br = tile2_r_coords + self.img_height
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords
r_br = tile1_r_coords + self.img_height
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords - self.img_width
c_br = tile2_c_coords
col_order = ('right','left')
else:
c_tl = tile2_c_coords - self.img_width
c_br = tile1_c_coords
col_order = ('left','right')
self.overlapping_regions[ref_tile][cpl] = [r_tl, r_br, c_tl, c_br]
self.overlapping_order[ref_tile][cpl] = {'row_order':row_order,'column_order':col_order}
elif self.reference_corner_fov_position == 'bottom-left':
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords - self.img_height
r_br = tile2_r_coords
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords - self.img_height
r_br = tile1_r_coords
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords
c_br = tile2_c_coords + self.img_width
col_order = ('right','left')
else:
c_tl = tile2_c_coords
c_br = tile1_c_coords + self.img_width
col_order = ('left','right')
self.overlapping_regions[ref_tile][cpl] = [r_tl, r_br, c_tl, c_br]
self.overlapping_order[ref_tile][cpl] = {'row_order':row_order,'column_order':col_order}
def run_tiles_organization(self):
"""Method used to run all the methods
"""
self.extract_microscope_coords()
# self.save_graph_original_coords()
self.normalize_coords()
self.save_graph_image_space_coords()
self.identify_adjacent_tiles()
self.determine_overlapping_regions()
fname = self.experiment_fpath / 'results' / 'microscope_tile_corners_coords_pxl.npy'
np.save(fname,self.tile_corners_coords_pxl)
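# Illustrative usage sketch (not part of the processing pipeline). It shows how
# the tile organization is typically computed for a reference round; the
# experiment path, dataset and metadata are placeholders supplied by the caller,
# and the output folders ('output_figures', 'results') are assumed to exist.
def _example_tiles_organization(experiment_fpath: str, dataset: pd.DataFrame,
                                metadata: Dict, round_num: int = 1):
    tiles_org = organize_square_tiles(experiment_fpath, dataset, metadata, round_num)
    tiles_org.run_tiles_organization()
    # Pixel coords of the tile reference corners and the overlapping regions
    # between adjacent tiles, keyed by (reference_tile, adjacent_tile).
    return tiles_org.tile_corners_coords_pxl, tiles_org.overlapping_regions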
class organize_square_tiles_old_room():
"""
Class used to identify the orgabnization of the tiles before the
reorganization of the Robofish room of April 2021 when Robofish3
was assembled.
"""
def __init__(self, experiment_fpath:str,dataset, metadata:Dict,round_num:int):
"""
round_num = int
reference channel
"""
self.logger = selected_logger()
self.experiment_fpath = Path(experiment_fpath)
self.dataset = dataset
self.metadata = metadata
self.round_num = round_num
self.experiment_name = self.metadata['experiment_name']
self.stitching_channel = self.metadata['stitching_channel']
self.overlapping_percentage = int(self.metadata['overlapping_percentage']) / 100
self.pixel_size = self.metadata['pixel_microns']
self.img_width = self.metadata['img_width']
self.img_height = self.metadata['img_height']
logging.getLogger('matplotlib.font_manager').disabled = True
if self.img_width == self.img_height:
self.img_size = self.img_width
else:
self.logger.error(f'the images to stitch are not square')
sys.exit(f'the images to stitch are not square')
def extract_microscope_coords(self):
selected = self.dataset.loc[self.dataset.round_num == self.round_num,
['round_num','fov_num','fov_acquisition_coords_x','fov_acquisition_coords_y']]
selected.drop_duplicates(subset=['fov_num'],inplace=True)
selected.sort_values(by='fov_num', ascending=True, inplace=True)
self.x_coords = selected.loc[:,'fov_acquisition_coords_x'].to_numpy()
self.y_coords = selected.loc[:,'fov_acquisition_coords_y'].to_numpy()
def normalize_coords(self):
if self.metadata['machine'] == 'ROBOFISH2':
# RobofishII has stage with reference point
# in the center (0,0)
# consider that we get the top-right corner of the image as well
self.reference_corner_fov_position = 'old-room-robofish2' # Not sure (i don't remember)
# consider that we get the top-right corner of the image as well
y_min = np.amin(self.y_coords)
x_min = np.amin(self.x_coords)
x_max = np.amax(self.x_coords)
y_max = np.amax(self.y_coords)
# Put the coords to zero
if x_max >=0 :
self.x_coords = self.x_coords - x_min
else:
self.x_coords = self.x_coords + np.abs(x_min)
if y_max>0:
self.y_coords = self.y_coords - y_min
else:
self.y_coords = self.y_coords + np.abs(y_min)
# flip y_axis
self.y_coords = self.y_coords - self.y_coords.max()
self.y_coords = - self.y_coords
# change the coords from x,y to r,c
adjusted_coords = np.zeros([self.x_coords.shape[0],2])
adjusted_coords[:,0] = self.y_coords
adjusted_coords[:,1] = self.x_coords
elif self.metadata['machine'] == 'ROBOFISH1':
# The current system has stage ref coords top-left
self.reference_corner_fov_position = 'top-left'
# Normalize to (0,0) still BOTTOM-RIGHT
y_min = np.amin(self.y_coords)
x_min = np.amin(self.x_coords)
self.x_coords = self.x_coords - x_min
self.y_coords = self.y_coords - y_min
# flip axis to move (0,0) on TOP-LEF
self.x_coords = self.x_coords - self.x_coords.max()
self.x_coords = - self.x_coords
self.y_coords = self.y_coords - self.y_coords.max()
self.y_coords = - self.y_coords
# change the coords from x,y to r,c
adjusted_coords = np.zeros([self.x_coords.shape[0],2])
adjusted_coords[:,0] = self.y_coords
adjusted_coords[:,1] = self.x_coords
elif self.metadata['machine'] == 'NOT_DEFINED':
self.logger.error(f'Need to define the specs for stitching NOT_DEFINED machine')
sys.exit(f'Need to define the specs for stitching NOT_DEFINED machine')
else:
self.logger.error(f'define the right machine used to collected the data')
sys.exit(f'define the right machine used to collected the data')
self.tile_corners_coords_pxl = adjusted_coords / self.pixel_size
def save_graph_original_coords(self):
# Turn interactive plotting off
saving_fpath = self.experiment_fpath / 'output_figures' / 'microscope_space_tiles_organization.png'
plt.ioff()
# Create image type axes
labels = [str(nr) for nr in np.arange(self.x_coords.shape[0])]
fig = plt.figure(figsize=(20,10))
plt.plot(self.x_coords,self.y_coords,'or')
for label, x, y in zip(labels, self.x_coords,self.y_coords):
plt.annotate(
label,
xy=(x,y), xytext=(-2, 2),
textcoords='offset points', ha='center', va='bottom',fontsize=12)
plt.tight_layout()
plt.savefig(saving_fpath)
def save_graph_image_space_coords(self):
# Turn interactive plotting off
saving_fpath = self.experiment_fpath / 'output_figures' / 'image_space_tiles_organization.png'
plt.ioff()
# Create image type axes
labels = [str(nr) for nr in np.arange(self.tile_corners_coords_pxl.shape[0])]
fig = plt.figure(figsize=(20,10))
plt.gca().invert_yaxis()
plt.plot(self.tile_corners_coords_pxl[:,1],self.tile_corners_coords_pxl[:,0],'or')
for label, x, y in zip(labels, self.tile_corners_coords_pxl[:,1],self.tile_corners_coords_pxl[:,0]):
plt.annotate(
label,
xy=(x,y), xytext=(-2, 2),
textcoords='offset points', ha='center', va='bottom',fontsize=12)
plt.tight_layout()
plt.savefig(saving_fpath)
def identify_adjacent_tiles(self):
shift_percent_tolerance = 0.05
searching_radius = self.img_size - (self.img_size*self.overlapping_percentage) + (self.img_size*shift_percent_tolerance)
nn = NearestNeighbors(n_neighbors=5,radius=searching_radius, metric='euclidean')
nn.fit(self.tile_corners_coords_pxl)
self.dists, self.indices = nn.kneighbors(self.tile_corners_coords_pxl, return_distance=True)
def determine_overlapping_regions(self):
# remember that overlapping region can be an empty dictionary
self.overlapping_regions = {}
self.overlapping_order ={}
for idx in np.arange(self.indices.shape[0]):
self.overlapping_regions[idx] = {}
self.overlapping_order[idx] = {}
for idx in np.arange(self.indices.shape[0]):
# Determine the indices that identify the correct adjacent
processing_indices = self.indices[idx,:]
processing_dists = self.dists[idx,:]
ref_tile = processing_indices[0]
self.overlapping_regions[ref_tile] = {}
self.overlapping_order[ref_tile] = {}
trimmed_indices = processing_indices[1:]
trimmed_dists = processing_dists[1:]
idx_adj = np.where(trimmed_dists < self.img_size)
adj_tiles_id = trimmed_indices[idx_adj]
adj_cpls = [(ref_tile, adj_tile) for adj_tile in adj_tiles_id]
# remove pairs that are already selected
only_new_cpls = [cpl for cpl in adj_cpls if (cpl[1],cpl[0]) not in self.overlapping_regions[cpl[1]].keys()]
# only_new_cpls = [cpl for cpl in adj_cpls]
if self.metadata['machine'] == 'ROBOFISH2':
# If tile coords are top left
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords
r_br = tile2_r_coords + self.img_size
r_bl = tile2_c_coords + self.img_size
r_tr = tile1_c_coords
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords
r_br = tile1_r_coords + self.img_size
r_bl = tile1_r_coords + self.img_size
r_tr = tile2_r_coords
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords
c_br = tile2_c_coords + self.img_size
c_tr = tile2_c_coords + self.img_size
c_bl = tile1_c_coords
col_order = ('right','left')
else:
c_tl = tile2_c_coords
c_br = tile1_c_coords + self.img_size
c_bl = tile2_c_coords
c_tr = tile1_c_coords + self.img_size
col_order = ('left','right')
elif self.metadata['machine'] == 'ROBOFISH1':
# If tile coords are bottom right
for cpl in only_new_cpls:
tile1_r_coords = self.tile_corners_coords_pxl[cpl[0]][0]
tile2_r_coords = self.tile_corners_coords_pxl[cpl[1]][0]
tile1_c_coords = self.tile_corners_coords_pxl[cpl[0]][1]
tile2_c_coords = self.tile_corners_coords_pxl[cpl[1]][1]
if tile1_r_coords > tile2_r_coords:
r_tl = tile1_r_coords - self.img_size
r_br = tile2_r_coords
r_bl = tile2_c_coords
r_tr = tile1_c_coords - self.img_size
row_order = ('bottom','top')
else:
r_tl = tile2_r_coords - self.img_size
r_br = tile1_r_coords
r_bl = tile1_r_coords
r_tr = tile2_r_coords - self.img_size
row_order = ('top','bottom')
if tile1_c_coords > tile2_c_coords:
c_tl = tile1_c_coords - self.img_size
c_br = tile2_c_coords
c_tr = tile2_c_coords
c_bl = tile1_c_coords - self.img_size
col_order = ('right','left')
else:
c_tl = tile2_c_coords - self.img_size
c_br = tile1_c_coords
c_bl = tile2_c_coords - self.img_size
c_tr = tile1_c_coords
col_order = ('left','right')
else:
pass
self.overlapping_regions[ref_tile][cpl] = [r_tl, r_br, c_tl, c_br]
self.overlapping_order[ref_tile][cpl] = {'row_order':row_order,'column_order':col_order}
def run_tiles_organization(self):
self.extract_microscope_coords()
self.save_graph_original_coords()
self.normalize_coords()
self.save_graph_image_space_coords()
self.identify_adjacent_tiles()
self.determine_overlapping_regions()
fname = self.experiment_fpath / 'results' / 'microscope_tile_corners_coords_pxl.npy'
np.save(fname,self.tile_corners_coords_pxl)
def stitch_using_coords_general(decoded_df: pd.DataFrame, tile_corners_coords_pxl: np.ndarray,
reference_corner_fov_position: str, metadata: Dict, tag: str):
"""Function to create a stitched image using the fov coords
of the stage.
Args:
decoded_df (pd.DataFrame): Counts after decoding
tile_corners_coords_pxl (np.ndarray): Coords of the fovs according to the stage
reference_corner_fov_position (str): Position of the reference corner determine by
the organization stage/camera. In our setup can be:
- top-left
- top-right
- bottom_left
metadata (Dict): [description]
tag (str): [description]
Returns:
[type]: Decoded counts with coords of the dots adjusted to the stage
reference point
"""
logger = selected_logger()
was_file = 0
if not isinstance(decoded_df, pd.DataFrame):
was_file = 1
decoded_df_fpath = copy.deepcopy(decoded_df)
decoded_df = pd.read_parquet(decoded_df)
if decoded_df['r_px_registered'].empty:
decoded_df['r_px_'+tag] = np.nan
decoded_df['c_px_'+tag] = np.nan
else:
#fov = decoded_df.iloc[0]['fov_num']
fov = int(decoded_df.fov_num.unique()[0])
r_microscope_coords = tile_corners_coords_pxl[fov,0]
c_microscope_coords = tile_corners_coords_pxl[fov,1]
if reference_corner_fov_position == 'top-left':
decoded_df['r_px_'+tag] = r_microscope_coords + decoded_df['r_px_registered']
decoded_df['c_px_'+tag] = c_microscope_coords + decoded_df['c_px_registered']
elif reference_corner_fov_position == 'top-right':
decoded_df['r_px_'+tag] = r_microscope_coords + decoded_df['r_px_registered']
decoded_df['c_px_'+tag] = c_microscope_coords - (metadata['img_width'] - decoded_df['c_px_registered'])
elif reference_corner_fov_position == 'bottom-left':
decoded_df['r_px_'+tag] = r_microscope_coords + (metadata['img_height'] - decoded_df['r_px_registered'])
decoded_df['c_px_'+tag] = c_microscope_coords + decoded_df['c_px_registered']
elif reference_corner_fov_position == 'bottom-right':
decoded_df['r_px_'+tag] = r_microscope_coords + (metadata['img_height'] - decoded_df['r_px_registered'])
decoded_df['c_px_'+tag] = c_microscope_coords - (metadata['img_width'] - decoded_df['c_px_registered'])
elif reference_corner_fov_position == 'old-room-robofish2':
decoded_df['r_px_'+tag] = r_microscope_coords - decoded_df['r_px_registered']
decoded_df['c_px_'+tag] = c_microscope_coords - decoded_df['c_px_registered']
else:
logger.error(f"the referernce corner fov position name is wrong")
sys.exit(f"the referernce corner fov position name is wrong")
# decoded_df['r_px_'+tag] = r_microscope_coords - decoded_df['r_px_registered']
# decoded_df['c_px_'+tag] = c_microscope_coords - decoded_df['c_px_registered']
if was_file:
decoded_df.to_parquet(decoded_df_fpath,index=False)
else:
return decoded_df
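# Minimal worked example (illustrative only) of the corner-dependent transform
# applied above, assuming a hypothetical 2048 x 2048 fov whose stage corner sits
# at (r, c) = (5000, 7000) pixels and a dot registered at (100, 200):
#   top-left:    r = 5000 + 100 = 5100,  c = 7000 + 200 = 7200
#   top-right:   r = 5000 + 100 = 5100,  c = 7000 - (2048 - 200) = 5152
#   bottom-left: r = 5000 + (2048 - 100) = 6948,  c = 7000 + 200 = 7200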
def stitch_using_coords_general_segmented_objects(fov,obj_dict,tile_corners_coords_pxl,reference_corner_fov_position, metadata):
"""
Function used to stitch the segmented object used for defining the cells.
"""
r_microscope_coords = tile_corners_coords_pxl[fov,0]
c_microscope_coords = tile_corners_coords_pxl[fov,1]
if obj_dict:
if reference_corner_fov_position == 'top-left':
for el,coords_dict in obj_dict.items():
coords_dict['stitched_coords'] = np.vstack([r_microscope_coords + coords_dict['original_coords'][:,0],
c_microscope_coords + coords_dict['original_coords'][:,1]]).T
elif reference_corner_fov_position == 'top-right':
for el,coords_dict in obj_dict.items():
coords_dict['stitched_coords'] = np.vstack([r_microscope_coords + coords_dict['original_coords'][:,0],
c_microscope_coords - (metadata['img_width'] -coords_dict['original_coords'][:,1])]).T
        elif reference_corner_fov_position == 'bottom-left':
for el,coords_dict in obj_dict.items():
coords_dict['stitched_coords'] = np.vstack([r_microscope_coords + (metadata['img_height'] -coords_dict['original_coords'][:,0]),
c_microscope_coords + coords_dict['original_coords'][:,1]]).T
return obj_dict
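# Illustrative sketch (hypothetical values) of the obj_dict structure handled
# above: each segmented object is keyed as '<fov>-<label>', carries the pixel
# coords of its mask and gains 'stitched_coords' after the call.
#
#   obj_dict = {'12-3': {'original_coords': np.array([[10, 20], [11, 20]]),
#                        'stitched_coords': np.nan}}
#   obj_dict = stitch_using_coords_general_segmented_objects(
#       12, obj_dict, tile_corners_coords_pxl, 'top-left', metadata)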
def register_coords_obj(fov,segmentation_output_path,
stitching_parameters,
reference_corner_fov_position,
metadata):
"""Function used to register the coords of the segmented object to th
Args:
fov ([type]): [description]
segmentation_output_path ([type]): [description]
"""
segmented_output = pickle.load(open(segmentation_output_path / ('preprocessed_data_fov_' + str(fov) + '_mask.pkl'), 'rb'))
segmented_regions = measure.regionprops(segmented_output)
segmented_regions_dict = {}
for prop in segmented_regions:
segmented_regions_dict[str(fov)+'-'+str(prop.label)] = {}
segmented_regions_dict[str(fov)+'-'+str(prop.label)]['original_coords']=prop.coords
segmented_regions_dict[str(fov)+'-'+str(prop.label)]['stitched_coords']= np.nan
segmented_regions_dict = stitch_using_coords_general_segmented_objects(fov,segmented_regions_dict,
stitching_parameters,reference_corner_fov_position, metadata)
pickle.dump(segmented_regions_dict,open(segmentation_output_path / ('registered_objs_dict_fov_' + str(fov) + '.pkl'), 'wb'))
def get_all_dots_in_overlapping_regions(counts_df, chunk_coords, stitching_selected='microscope_stitched'):
    """Select the counts that fall inside the overlapping region defined by
    chunk_coords = [r_tl, r_br, c_tl, c_br], using the selected stitched coords."""
r_tag = 'r_px_' + stitching_selected
c_tag = 'c_px_' + stitching_selected
r_tl = chunk_coords[0]
r_br = chunk_coords[1]
c_tl = chunk_coords[2]
c_br = chunk_coords[3]
overlapping_ref_df = counts_df.loc[(counts_df[r_tag] > r_tl) & (counts_df[r_tag] < r_br)
& (counts_df[c_tag] > c_tl) & (counts_df[c_tag] < c_br),:]
return overlapping_ref_df
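# Example (illustrative): select the counts of one fov that fall inside the
# overlapping region shared with an adjacent tile, using the microscope-stitched
# coordinates computed earlier. The chunk coords come from the tile organization.
#
#   chunk_coords = tiles_org.overlapping_regions[ref_tile][(ref_tile, adj_tile)]
#   overlap_df = get_all_dots_in_overlapping_regions(counts_df, chunk_coords,
#                                   stitching_selected='microscope_stitched')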
# TODO adjust the registration with dots (triangulation)
def register_cpl(cpl, chunk_coords, experiment_fpath,
stitching_channel,
                 reference_round):
    """Register a couple of adjacent fovs by cross-correlating synthetic images
    built from the reference dots falling in their overlapping region."""
logger = selected_logger()
registration = {}
experiment_fpath = Path(experiment_fpath)
try:
counts1_fpath = list((experiment_fpath / 'results').glob('*decoded_fov_' + str(cpl[0]) + '.parquet'))[0]
except:
logger.error(f'count file missing for fov {cpl[0]}')
else:
try:
counts2_fpath = list((experiment_fpath / 'results').glob('*decoded_fov_' + str(cpl[1]) + '.parquet'))[0]
except:
logger.error(f'count file missing for fov {cpl[1]}')
else:
counts1_df = pd.read_parquet(counts1_fpath)
counts2_df = pd.read_parquet(counts2_fpath)
count1_grp = counts1_df.loc[(counts1_df.channel == stitching_channel) &
(counts1_df.round_num == reference_round),:]
count2_grp = counts2_df.loc[(counts2_df.channel == stitching_channel) &
(counts2_df.round_num == reference_round),:]
count1_grp = counts1_df.loc[counts1_df.channel == stitching_channel,:]
count2_grp = counts2_df.loc[counts2_df.channel == stitching_channel,:]
overlap_count1 = get_all_dots_in_overlapping_regions(count1_grp, chunk_coords,stitching_selected='microscope_stitched')
overlap_count2 = get_all_dots_in_overlapping_regions(count2_grp, chunk_coords,stitching_selected='microscope_stitched')
if overlap_count1.empty or overlap_count2.empty:
shift = np.array([1000,1000])
registration[cpl] = [shift, np.nan]
else:
# TODO
# Maybe add a selction step where if the number of beads is below X the beads based registration will be run or fft based
# registration if the number of beads is high enough
r_tl = chunk_coords[0]
c_tl = chunk_coords[2]
img_shape = np.array([np.abs(chunk_coords[1]-chunk_coords[0]),np.abs(chunk_coords[3]-chunk_coords[2])]).astype('int') + 1
norm_ref_coords = overlap_count1.loc[:,['r_px_microscope_stitched','c_px_microscope_stitched']].to_numpy() -[r_tl, c_tl]
norm_comp_coords = overlap_count2.loc[:,['r_px_microscope_stitched','c_px_microscope_stitched']].to_numpy() -[r_tl, c_tl]
img_ref = create_fake_image(img_shape, norm_ref_coords)
img_tran = create_fake_image(img_shape, norm_comp_coords)
shift, error, diffphase = register_translation(img_ref, img_tran)
registration[cpl] = [shift, error]
return registration
def register_cpl_fresh_nuclei(cpl: Tuple, chunk_coords: np.ndarray, order: dict,
metadata:dict, experiment_fpath:str):
"""Function to register orverlapping regions of nuclear staining for stitching
Args:
cpl (Tuple): overlapping tiles
chunk_coords (np.ndarray): coords of the overlapping region [r_tl,r_br,c_tl,c_br]
order (dict): description of the position of the tiles
metadata (dict): dictionary with the general experiment data
experiment_fpath (str): path to the experiment to process
Returns:
dict: registration output [shift, error]
"""
logger = selected_logger()
registration = {}
experiment_fpath = Path(experiment_fpath)
img_width = metadata['img_width']
img_height = metadata['img_height']
experiment_name = metadata['experiment_name']
error = 0
filtered_nuclei_fpath = experiment_fpath / 'fresh_tissue' / 'fresh_tissue_nuclei_preprocessed_img_data.zarr'
try:
st = zarr.DirectoryStore(filtered_nuclei_fpath)
root = zarr.group(store=st, overwrite=False)
except:
logger.error(f'cannot load the zarr files with filtered nuclei')
else:
try:
img1 = root[experiment_name + '_fresh_tissue_nuclei_fov_' + str(cpl[0])]['preprocessed_data_fov_'+str(cpl[0])][...]
except:
logger.error(f'image file cannot be loaded for nuclei of fov {cpl[0]}')
else:
try:
img2 = root[experiment_name + '_fresh_tissue_nuclei_fov_' + str(cpl[1])]['preprocessed_data_fov_'+str(cpl[1])][...]
except:
logger.error(f'image file cannot be loaded for nuclei of fov {cpl[1]}')
else:
img_shape = np.array([np.abs(chunk_coords[1]-chunk_coords[0]),np.abs(chunk_coords[3]-chunk_coords[2])]).astype('int')
if order == {'row_order': ('top', 'bottom'), 'column_order': ('right', 'left')}:
img1_slice = img1[(img_height-img_shape[0]):img_height,0:img_shape[1]]
img2_slice = img2[0:img_shape[0],(img_width-img_shape[1]):img_width]
elif order == {'row_order': ('top', 'bottom'), 'column_order': ('left', 'right')}:
img1_slice = img1[img_height-img_shape[0]:img_height,img_width-img_shape[1]:img_width]
img2_slice = img2[0:img_shape[0],0:img_shape[1]]
elif order == {'row_order': ('bottom', 'top'), 'column_order': ('left', 'right')}:
img1_slice = img1[0:img_shape[0],img_width-img_shape[1]:img_width]
img2_slice = img2[img_height-img_shape[0]:img_height,0:img_shape[1]]
elif order == {'row_order': ('bottom', 'top'), 'column_order': ('right', 'left')}:
img1_slice = img1[0:img_shape[0],0:img_shape[1]]
img2_slice = img2[img_height-img_shape[0]:img_height,img_width-img_shape[1]:img_width]
else:
logger.error(f'unknown fovs order')
error = 1
if error:
shift = np.array([1000,1000])
registration[cpl] = [shift, np.nan]
else:
shift, error, diffphase = register_translation(img1_slice, img2_slice)
registration[cpl] = [shift, error]
return registration
def stitching_graph(experiment_fpath, stitching_channel,tiles_org, metadata,
                    reference_round, client, nr_dim = 2):
    """Globally optimize the tile positions: register all overlapping couples of
    fovs, build a linear system relating the pairwise shifts to per-tile
    translations, solve it by least squares and re-stitch the decoded counts
    with the adjusted coords."""
logger = selected_logger()
unfolded_overlapping_regions_dict = {key:value for (k,v) in tiles_org.overlapping_regions.items() for (key,value) in v.items()}
futures = []
for cpl, chunk_coords in unfolded_overlapping_regions_dict.items():
future = client.submit(register_cpl,cpl, chunk_coords, experiment_fpath,stitching_channel,
reference_round)
futures.append(future)
all_registrations = client.gather(futures)
all_registrations = [reg for reg in all_registrations if reg ]
all_registrations_dict = {}
for output_dict in all_registrations:
all_registrations_dict.update(output_dict)
# Run registration only if there are not too many overlappig regions without
# dots
# counts_cpls_missing_overlapping_dots = 0
# cpls_missing_overlapping_dots = []
# for cpl, registration_output in all_registrations_dict.items():
# if np.isnan(registration_output[1]):
# cpls_missing_overlapping_dots.append(cpl)
# counts_cpls_missing_overlapping_dots += 1
# global_stitching_done = 0
# if len(cpls_missing_overlapping_dots) > 10:
# logger.error(f"Too many cpl of fovs without overlapping reference dots")
# pickle.dump([cpls_missing_overlapping_dots,counts_cpls_missing_overlapping_dots ],
# open(experiment_fpath / 'results' / 'fovs_without_overlapping_reference_dots_no_global_stitching.pkl','rb'))
# global_stitching_done = 0
# return tiles_org.tile_corners_coords_pxl, global_stitching_done
# else:
# global_stitching_done = 1
# logger.error(f"The number of cpls of fovs without overlapping reference dots is low, test global stitching")
# pickle.dump([cpls_missing_overlapping_dots,counts_cpls_missing_overlapping_dots ],
# open(experiment_fpath / 'results' / 'fovs_without_overlapping_reference_dots_yes_global_stitching.pkl','wb'))
overlapping_coords_reorganized = {}
for idx, cpl_dict in tiles_org.overlapping_regions.items():
overlapping_coords_reorganized.update(cpl_dict)
all_registrations_removed_large_shift = {k:v for (k,v) in all_registrations_dict.items() if np.all(np.abs(v[0]) < 20)}
cpls = all_registrations_removed_large_shift.keys()
# cpls = list(unfolded_overlapping_regions_dict.keys())
total_cpls = len(cpls)
nr_tiles = tiles_org.tile_corners_coords_pxl.shape[0]
weights_err1 = np.zeros((total_cpls * nr_dim))
weights_err2 = np.zeros((total_cpls * nr_dim))
P = np.zeros(total_cpls * nr_dim)
ZQ = np.zeros((total_cpls * nr_dim,nr_tiles * nr_dim))
weights_err = np.zeros((total_cpls * nr_dim))
for i, (a, b) in enumerate(cpls):
shift = all_registrations_removed_large_shift[(a,b)][0]
dr = shift[0]
dc = shift[1]
P[i * nr_dim] = dr
P[i * nr_dim +1 ] = dc
weights_err[i * nr_dim:i * nr_dim + nr_dim] = all_registrations_removed_large_shift[(a,b)][1]
for i, (a, b) in enumerate(cpls):
# Y row:
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a:nr_dim * a + 1] = -1
Z[nr_dim * b:nr_dim * b + 1] = 1
ZQ[i * nr_dim, :] = Z
# X row
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a + 1:nr_dim * a + 2] = -1
Z[nr_dim * b + 1:nr_dim * b + 2] = 1
ZQ[i * nr_dim + 1, :] = Z
lrg = linmod.LinearRegression(fit_intercept=False)
lrg.fit(ZQ,P)
global_translrg = lrg.coef_.reshape(nr_tiles, nr_dim)
gb = -1 * (-lrg.coef_.reshape((nr_tiles, nr_dim)) \
+ lrg.coef_.reshape((nr_tiles, nr_dim))[0:1, :])
global_shift = gb.astype(int)
adjusted_coords = tiles_org.tile_corners_coords_pxl + global_shift
# Determine shift of missing tiles
out_level = 1000
low = np.where(global_shift< -out_level)[0]
high = np.where(global_shift> out_level)[0]
low_high = np.hstack((low,high))
missing_tiles_id = np.unique(low_high)
missing_tiles_coords = tiles_org.tile_corners_coords_pxl[missing_tiles_id,:]
if missing_tiles_coords.shape[0] >0:
coords_cl = np.delete(tiles_org.tile_corners_coords_pxl, missing_tiles_id, 0)
ad_coords_cl = np.delete(adjusted_coords, missing_tiles_id, 0 )
tst = linmod.LinearRegression(fit_intercept=False)
tst.fit(coords_cl,ad_coords_cl)
corrected_missing = tst.predict(missing_tiles_coords)
for idx, tile_id in enumerate(missing_tiles_id):
adjusted_coords[tile_id] = corrected_missing[idx]
dec_fpath = (experiment_fpath / 'results').glob('*_decoded_fov*')
for fpath in dec_fpath:
global_stitched_decoded_df = stitch_using_coords_general(fpath,
adjusted_coords,
tiles_org.reference_corner_fov_position,
metadata,
'global_stitched')
if isinstance(global_stitched_decoded_df,pd.DataFrame):
global_stitched_decoded_df.to_parquet(fpath)
global_shift = tiles_org.tile_corners_coords_pxl - adjusted_coords
pickle.dump(global_shift,open(experiment_fpath / 'results'/ 'stitching_global_shift.pkl','wb'))
pickle.dump(adjusted_coords,open(experiment_fpath / 'results'/ 'global_stitched_coords.pkl','wb'))
return adjusted_coords
# return adjusted_coords, global_stitching_done
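# Minimal numeric sketch (illustrative only, not used by the pipeline) of the
# global-optimization step above: pairwise shifts between adjacent tiles are
# stacked into P, the design matrix ZQ encodes shift(a, b) = t[b] - t[a], and a
# least-squares fit recovers one translation per tile, referenced to tile 0.
def _example_global_shift_three_tiles():
    nr_dim = 2
    nr_tiles = 3
    cpls = [(0, 1), (1, 2)]
    pairwise_shifts = {(0, 1): np.array([2.0, -3.0]), (1, 2): np.array([1.0, 4.0])}
    P = np.zeros(len(cpls) * nr_dim)
    ZQ = np.zeros((len(cpls) * nr_dim, nr_tiles * nr_dim))
    for i, (a, b) in enumerate(cpls):
        P[i * nr_dim:i * nr_dim + nr_dim] = pairwise_shifts[(a, b)]
        ZQ[i * nr_dim, nr_dim * a] = -1
        ZQ[i * nr_dim, nr_dim * b] = 1
        ZQ[i * nr_dim + 1, nr_dim * a + 1] = -1
        ZQ[i * nr_dim + 1, nr_dim * b + 1] = 1
    lrg = linmod.LinearRegression(fit_intercept=False)
    lrg.fit(ZQ, P)
    t = lrg.coef_.reshape(nr_tiles, nr_dim)
    # Expected result: approximately [[0, 0], [2, -3], [3, 1]]
    return t - t[0:1, :]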
def stitching_graph_fresh_nuclei(experiment_fpath,tiles_org, metadata,
client, nr_dim = 2):
logger = selected_logger()
unfolded_overlapping_regions_dict = {key:value for (k,v) in tiles_org.overlapping_regions.items() for (key,value) in v.items()}
unfolded_overlapping_order_dict = {key:value for (k,v) in tiles_org.overlapping_order.items() for (key,value) in v.items()}
futures = []
for cpl, chunk_coords in unfolded_overlapping_regions_dict.items():
future = client.submit(register_cpl_fresh_nuclei,cpl, chunk_coords,
unfolded_overlapping_order_dict[cpl],
metadata,
experiment_fpath)
futures.append(future)
all_registrations = client.gather(futures)
all_registrations = [reg for reg in all_registrations if reg ]
all_registrations_dict = {}
for output_dict in all_registrations:
all_registrations_dict.update(output_dict)
overlapping_coords_reorganized = {}
for idx, cpl_dict in tiles_org.overlapping_regions.items():
overlapping_coords_reorganized.update(cpl_dict)
all_registrations_removed_large_shift = {k:v for (k,v) in all_registrations_dict.items() if np.all(np.abs(v[0]) < 20)}
cpls = all_registrations_removed_large_shift.keys()
# cpls = list(unfolded_overlapping_regions_dict.keys())
total_cpls = len(cpls)
nr_tiles = tiles_org.tile_corners_coords_pxl.shape[0]
weights_err1 = np.zeros((total_cpls * nr_dim))
weights_err2 = np.zeros((total_cpls * nr_dim))
P = np.zeros(total_cpls * nr_dim)
ZQ = np.zeros((total_cpls * nr_dim,nr_tiles * nr_dim))
weights_err = np.zeros((total_cpls * nr_dim))
for i, (a, b) in enumerate(cpls):
shift = all_registrations_removed_large_shift[(a,b)][0]
dr = shift[0]
dc = shift[1]
P[i * nr_dim] = dr
P[i * nr_dim +1 ] = dc
weights_err[i * nr_dim:i * nr_dim + nr_dim] = all_registrations_removed_large_shift[(a,b)][1]
for i, (a, b) in enumerate(cpls):
# Y row:
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a:nr_dim * a + 1] = -1
Z[nr_dim * b:nr_dim * b + 1] = 1
ZQ[i * nr_dim, :] = Z
# X row
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a + 1:nr_dim * a + 2] = -1
Z[nr_dim * b + 1:nr_dim * b + 2] = 1
ZQ[i * nr_dim + 1, :] = Z
lrg = linmod.LinearRegression(fit_intercept=False)
lrg.fit(ZQ,P)
global_translrg = lrg.coef_.reshape(nr_tiles, nr_dim)
gb = -1 * (-lrg.coef_.reshape((nr_tiles, nr_dim)) \
+ lrg.coef_.reshape((nr_tiles, nr_dim))[0:1, :])
global_shift = gb.astype(int)
adjusted_coords = tiles_org.tile_corners_coords_pxl + global_shift
# Determine shift of missing tiles
out_level = 1000
low = np.where(global_shift< -out_level)[0]
high = np.where(global_shift> out_level)[0]
low_high = np.hstack((low,high))
missing_tiles_id = np.unique(low_high)
missing_tiles_coords = tiles_org.tile_corners_coords_pxl[missing_tiles_id,:]
if missing_tiles_coords.shape[0] >0:
coords_cl = np.delete(tiles_org.tile_corners_coords_pxl, missing_tiles_id, 0)
ad_coords_cl = np.delete(adjusted_coords, missing_tiles_id, 0 )
tst = linmod.LinearRegression(fit_intercept=False)
tst.fit(coords_cl,ad_coords_cl)
corrected_missing = tst.predict(missing_tiles_coords)
for idx, tile_id in enumerate(missing_tiles_id):
adjusted_coords[tile_id] = corrected_missing[idx]
dec_fpath = (experiment_fpath / 'fresh_tissue'/ 'results').glob('*_decoded_fov*')
for fpath in dec_fpath:
global_stitched_decoded_df = stitch_using_coords_general(fpath,
adjusted_coords,
tiles_org.reference_corner_fov_position,
metadata,
'global_stitched_nuclei')
if isinstance(global_stitched_decoded_df,pd.DataFrame):
global_stitched_decoded_df.to_parquet(fpath)
global_shift = tiles_org.tile_corners_coords_pxl - adjusted_coords
pickle.dump(global_shift,open(experiment_fpath / 'fresh_tissue' / 'results'/ 'stitching_global_shift.pkl','wb'))
pickle.dump(adjusted_coords,open(experiment_fpath / 'fresh_tissue' / 'results'/ 'global_stitched_coords.pkl','wb'))
return adjusted_coords
# return adjusted_coords, global_stitching_done
def stitching_graph_serial_nuclei(experiment_fpath,tiles_org, metadata,
registration_reference_hybridization,
client, nr_dim = 2):
logger = selected_logger()
unfolded_overlapping_regions_dict = {key:value for (k,v) in tiles_org.overlapping_regions.items() for (key,value) in v.items()}
unfolded_overlapping_order_dict = {key:value for (k,v) in tiles_org.overlapping_order.items() for (key,value) in v.items()}
futures = []
for cpl, chunk_coords in unfolded_overlapping_regions_dict.items():
future = client.submit(register_cpl_fresh_nuclei,cpl, chunk_coords,
unfolded_overlapping_order_dict[cpl],
metadata,
experiment_fpath)
futures.append(future)
all_registrations = client.gather(futures)
all_registrations = [reg for reg in all_registrations if reg ]
all_registrations_dict = {}
for output_dict in all_registrations:
all_registrations_dict.update(output_dict)
overlapping_coords_reorganized = {}
for idx, cpl_dict in tiles_org.overlapping_regions.items():
overlapping_coords_reorganized.update(cpl_dict)
all_registrations_removed_large_shift = {k:v for (k,v) in all_registrations_dict.items() if np.all(np.abs(v[0]) < 20)}
cpls = all_registrations_removed_large_shift.keys()
# cpls = list(unfolded_overlapping_regions_dict.keys())
total_cpls = len(cpls)
nr_tiles = tiles_org.tile_corners_coords_pxl.shape[0]
weights_err1 = np.zeros((total_cpls * nr_dim))
weights_err2 = np.zeros((total_cpls * nr_dim))
P = np.zeros(total_cpls * nr_dim)
ZQ = np.zeros((total_cpls * nr_dim,nr_tiles * nr_dim))
weights_err = np.zeros((total_cpls * nr_dim))
for i, (a, b) in enumerate(cpls):
shift = all_registrations_removed_large_shift[(a,b)][0]
dr = shift[0]
dc = shift[1]
P[i * nr_dim] = dr
P[i * nr_dim +1 ] = dc
weights_err[i * nr_dim:i * nr_dim + nr_dim] = all_registrations_removed_large_shift[(a,b)][1]
for i, (a, b) in enumerate(cpls):
# Y row:
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a:nr_dim * a + 1] = -1
Z[nr_dim * b:nr_dim * b + 1] = 1
ZQ[i * nr_dim, :] = Z
# X row
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a + 1:nr_dim * a + 2] = -1
Z[nr_dim * b + 1:nr_dim * b + 2] = 1
ZQ[i * nr_dim + 1, :] = Z
lrg = linmod.LinearRegression(fit_intercept=False)
lrg.fit(ZQ,P)
global_translrg = lrg.coef_.reshape(nr_tiles, nr_dim)
gb = -1 * (-lrg.coef_.reshape((nr_tiles, nr_dim)) \
+ lrg.coef_.reshape((nr_tiles, nr_dim))[0:1, :])
global_shift = gb.astype(int)
adjusted_coords = tiles_org.tile_corners_coords_pxl + global_shift
# Determine shift of missing tiles
out_level = 1000
low = np.where(global_shift< -out_level)[0]
high = np.where(global_shift> out_level)[0]
low_high = np.hstack((low,high))
missing_tiles_id = np.unique(low_high)
missing_tiles_coords = tiles_org.tile_corners_coords_pxl[missing_tiles_id,:]
if missing_tiles_coords.shape[0] >0:
coords_cl = np.delete(tiles_org.tile_corners_coords_pxl, missing_tiles_id, 0)
ad_coords_cl = np.delete(adjusted_coords, missing_tiles_id, 0 )
tst = linmod.LinearRegression(fit_intercept=False)
tst.fit(coords_cl,ad_coords_cl)
corrected_missing = tst.predict(missing_tiles_coords)
for idx, tile_id in enumerate(missing_tiles_id):
adjusted_coords[tile_id] = corrected_missing[idx]
dec_fpath = (experiment_fpath / 'results').glob('*_decoded_fov*')
for fpath in dec_fpath:
global_stitched_decoded_df = stitch_using_coords_general(fpath,
adjusted_coords,
tiles_org.reference_corner_fov_position,
metadata,
'global_stitched_nuclei')
if isinstance(global_stitched_decoded_df,pd.DataFrame):
global_stitched_decoded_df.to_parquet(fpath)
global_shift = tiles_org.tile_corners_coords_pxl - adjusted_coords
pickle.dump(global_shift,open(experiment_fpath / 'results'/ 'stitching_global_shift.pkl','wb'))
pickle.dump(adjusted_coords,open(experiment_fpath / 'results'/ 'global_stitched_coords.pkl','wb'))
return adjusted_coords
# return adjusted_coords, global_stitching_done
def stitched_beads_on_nuclei_fresh_tissue(experiment_fpath:str,
client,
nuclei_tag:str='_ChannelCy3_Nuclei_',
beads_tag:str='_ChannelEuropium_Cy3_',
round_num:int = 1,
overlapping_percentage:int=5,
machine:str='ROBOFISH2'
):
"""Function tun run the stitching of the dots in the fresh images using
the nuclei images as reference
Args:
experiment_fpath (str): path of the experiment to process
client ([type]): dask client for parallel processing
nuclei_tag (str, optional): Tag to identify the nuclei dataset. Defaults to '_ChannelCy3_Nuclei_'.
beads_tag (str, optional): Tag to identify the beads dataset. Defaults to '_ChannelEuropium_Cy3_'.
        round_num (int, optional): Reference round; for the fresh tissue there is only one. Defaults to 1.
overlapping_percentage (int, optional): Overlapping between the different tiles. Defaults to 5.
machine (str, optional): machine running the experiment. Defaults to 'ROBOFISH2'.
"""
experiment_fpath = Path(experiment_fpath)
fresh_tissue_path = experiment_fpath / 'fresh_tissue'
beads_dataset_fpath = list(fresh_tissue_path.glob('*'+ beads_tag +'*.parquet'))[0]
nuclei_dataset_fpath = list(fresh_tissue_path.glob('*'+ nuclei_tag +'*.parquet'))[0]
# Collect and adjust beads dataset with missing values
beads_data = Dataset()
beads_data.load_dataset(beads_dataset_fpath)
beads_data.dataset['processing_type'] = 'undefined'
beads_data.dataset['overlapping_percentage'] = overlapping_percentage / 100
beads_data.dataset['machine'] = machine
metadata_beads = beads_data.collect_metadata(beads_data.dataset)
beads_org_tiles = organize_square_tiles(experiment_fpath,beads_data.dataset,metadata_beads,round_num)
beads_org_tiles.run_tiles_organization()
flist = list((fresh_tissue_path / 'results').glob('*decoded_fov*.parquet'))
# duplicate registered
for fpath in flist:
data = pd.read_parquet(fpath)
data['r_px_registered'] = data['r_px_original']
data['c_px_registered'] = data['c_px_original']
data['hamming_distance'] = 0
data['decoded_genes'] = 'beads'
data.to_parquet(fpath)
all_futures = []
for fpath in flist:
future = client.submit(stitch_using_coords_general, fpath,
beads_org_tiles.tile_corners_coords_pxl,
beads_org_tiles.reference_corner_fov_position,
metadata_beads,tag='microscope_stitched')
all_futures.append(future)
_ = client.gather(all_futures)
io.simple_output_plotting(fresh_tissue_path,
stitching_selected= 'microscope_stitched',
selected_Hdistance=0,
client = client,
input_file_tag = 'decoded_fov',
file_tag = 'stitched_microscope')
# Collect and adjust nuclei dataset with missing values
nuclei_data = Dataset()
nuclei_data.load_dataset(nuclei_dataset_fpath)
nuclei_data.dataset['processing_type'] = 'undefined'
nuclei_data.dataset['overlapping_percentage'] = overlapping_percentage / 100
nuclei_data.dataset['machine'] = machine
metadata_nuclei = nuclei_data.collect_metadata(nuclei_data.dataset)
nuclei_org_tiles = organize_square_tiles(experiment_fpath,nuclei_data.dataset,metadata_nuclei,round_num)
nuclei_org_tiles.run_tiles_organization()
adjusted_coords =stitching_graph_fresh_nuclei(experiment_fpath,nuclei_org_tiles, metadata_nuclei,
client, nr_dim = 2)
io.simple_output_plotting(fresh_tissue_path,
stitching_selected= 'global_stitched_nuclei',
selected_Hdistance=0,
client = client,
input_file_tag = 'decoded_fov',
file_tag = 'global_stitched_nuclei')
# REMOVE OVERLAPPING DOTS ACCORDING TO FOV (MUCH FASTER THAN BY GENE),
# ESPECIALLY FOR LARGE AREAS WITH A LOT OF COUNTS
def identify_duplicated_dots_NNDescend(ref_tiles_df: pd.DataFrame,comp_tiles_df: pd.DataFrame,
stitching_selected: str,same_dot_radius: int)-> list:
"""Function used to identify duplicated dots for a gene in the overlapping regions. This
version of the function uses the fast nearest neighbor coded in NNDescend
Args:
ref_tiles_df (pd.DataFrame): Counts of the reference tiles
comp_tiles_df (pd.DataFrame): Counts in the comparing tiles
stitching_selected (str): String that identify the coords of the pixels
according to the stitching used to process the data
same_dot_radius (int): searching radius used to define if two dots are
the same
Returns:
list: dot ids to remove
"""
r_tag = 'r_px_' + stitching_selected
c_tag = 'c_px_' + stitching_selected
overlapping_ref_coords = ref_tiles_df.loc[:, [r_tag,c_tag]].to_numpy()
overlapping_comp_coords = comp_tiles_df.loc[:, [r_tag,c_tag]].to_numpy()
dots_ids = comp_tiles_df.loc[:, ['dot_id']].to_numpy()
index = NNDescent(overlapping_ref_coords,metric='euclidean',n_neighbors=1)
indices, dists = index.query(overlapping_comp_coords,k=1)
idx_dists = np.where(dists < same_dot_radius)[0]
dots_id_to_remove = dots_ids[idx_dists]
dots_id_to_remove = list(dots_id_to_remove.reshape(dots_id_to_remove.shape[0],))
return dots_id_to_remove
def identify_duplicated_dots_sklearn(ref_tiles_df: pd.DataFrame,comp_tiles_df: pd.DataFrame,
stitching_selected: str,same_dot_radius: int)-> list:
"""Function used to identify duplicated dots for a gene in the overlapping regions. This
version of the function uses the fast nearest neighbor coded in NNDescend
Args:
ref_tiles_df (pd.DataFrame): Counts of the reference tiles
comp_tiles_df (pd.DataFrame): Counts in the comparing tiles
stitching_selected (str): String that identify the coords of the pixels
according to the stitching used to process the data
same_dot_radius (int): searching radius used to define if two dots are
the same
Returns:
list: dot ids to remove
"""
nn = NearestNeighbors(n_neighbors=1,radius=same_dot_radius, metric='euclidean',algorithm='kd_tree')
r_tag = 'r_px_' + stitching_selected
c_tag = 'c_px_' + stitching_selected
overlapping_ref_coords = ref_tiles_df.loc[:, [r_tag,c_tag]].to_numpy()
overlapping_comp_coords = comp_tiles_df.loc[:, [r_tag,c_tag]].to_numpy()
dots_ids = comp_tiles_df.loc[:, ['dot_id']].to_numpy()
nn.fit(overlapping_ref_coords)
dists, indices = nn.kneighbors(overlapping_comp_coords, return_distance=True)
idx_dists = np.where(dists <= same_dot_radius)[0]
dots_id_to_remove = dots_ids[idx_dists]
dots_id_to_remove = list(dots_id_to_remove.reshape(dots_id_to_remove.shape[0],))
return dots_id_to_remove
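# Example (illustrative sketch with hypothetical inputs): given the counts of a
# reference tile and of a comparing tile restricted to their overlapping region,
# collect the dot ids of the comparing tile that duplicate a reference dot
# within `same_dot_radius` pixels and drop them from the comparing counts.
#
#   dots_to_remove = identify_duplicated_dots_sklearn(
#       overlap_ref_df, overlap_comp_df,
#       stitching_selected='microscope_stitched', same_dot_radius=5)
#   cleaned_comp_df = comp_counts_df[~comp_counts_df['dot_id'].isin(dots_to_remove)]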
def remove_overlapping_dots_fov(cpl: Tuple[int,int], chunk_coords: np.ndarray,
experiment_fpath: str, stitching_selected:str,
hamming_distance: float, same_dot_radius: int)-> Dict[Tuple[int,int],List[str]]:
"""Function that identify the overlapping dots between two different tiles. The duplicated dots
for all genes are identified
Args:
cpl (Tuple[int,int]): Adjacent tiles to compare
chunk_coords (np.ndarray): Coords of the overlapping regions between the two tiles to compare
experiment_fpath (str): Path to the experiment to process
stitching_selected (str): String that identify the coords of the pixels
according to the stitching used to process the data
hamming_distance (float): Selected distance from the code
same_dot_radius (int): searching radius used to define if two dots are
the same
Returns:
Dict[Tuple[int,int],List[str]]: {cpl:all_dots_id_to_remove}
"""
logger = selected_logger()
all_dots_id_to_remove = []
experiment_fpath = Path(experiment_fpath)
try:
counts1_fpath = list((experiment_fpath / 'results').glob('*decoded*_fov_' + str(cpl[0]) + '.parquet'))[0]
except:
logger.error(f'count file missing for fov {cpl[0]}')
else:
try:
counts2_fpath = list((experiment_fpath / 'results').glob('*decoded*_fov_' + str(cpl[1]) + '.parquet'))[0]
except:
logger.error(f'count file missing for fov {cpl[1]}')
else:
counts1_df = pd.read_parquet(counts1_fpath)
counts2_df = | pd.read_parquet(counts2_fpath) | pandas.read_parquet |
import pandas as pd
import numpy as np
from typing import Union, List, Optional, Iterable
import h5py
#######################
## Loading metadata ##
#######################
def _load_samples_metadata(model):
    """Build the samples metadata DataFrame: one row per sample, indexed by sample
    name, with a `group` column plus any per-sample annotations stored in the model file."""
samples_metadata = pd.DataFrame(
[
[cell, group]
for group, cell_list in model.samples.items()
for cell in cell_list
],
columns=["sample", "group"],
)
if "samples_metadata" in model.model:
if len(list(model.model["samples_metadata"][model.groups[0]].keys())) > 0:
_samples_metadata = pd.concat(
[
pd.concat(
[
pd.Series(model.model["samples_metadata"][g][k])
for k in model.model["samples_metadata"][g].keys()
],
axis=1,
)
for g in model.groups
],
axis=0,
)
_samples_metadata.columns = list(
model.model["samples_metadata"][model.groups[0]].keys()
)
if "group" in _samples_metadata.columns:
del _samples_metadata["group"]
if "sample" in _samples_metadata.columns:
del _samples_metadata["sample"]
samples_metadata = pd.concat(
[
samples_metadata.reset_index(drop=True),
_samples_metadata.reset_index(drop=True),
],
axis=1,
)
# Decode objects as UTF-8 strings
for column in samples_metadata.columns:
if samples_metadata[column].dtype == "object":
try:
samples_metadata[column] = [
i.decode() for i in samples_metadata[column].values
]
except (UnicodeDecodeError, AttributeError):
pass
samples_metadata = samples_metadata.set_index("sample")
return samples_metadata
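# Illustrative usage sketch: `model` is expected to expose the trained model file
# as `model.model` (an open HDF5 handle) plus `samples` and `groups` mappings.
# The helper returns a DataFrame indexed by sample name, with a `group` column
# and any per-sample annotations stored in the file.
#
#   samples_metadata = _load_samples_metadata(model)
#   samples_metadata.head()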
def _load_features_metadata(model):
    """Build the features metadata DataFrame: one row per feature, with `feature`
    and `view` columns plus any per-feature annotations stored in the model file."""
features_metadata = pd.DataFrame(
[
[feature, view]
for view, feature_list in model.features.items()
for feature in feature_list
],
columns=["feature", "view"],
)
if "features_metadata" in model.model:
if len(list(model.model["features_metadata"][model.views[0]].keys())) > 0:
features_metadata_dict = {
m: pd.concat(
[
pd.Series(model.model["features_metadata"][m][k])
for k in model.model["features_metadata"][m].keys()
],
axis=1,
)
for m in model.views
}
for m in features_metadata_dict.keys():
features_metadata_dict[m].columns = list(
model.model["features_metadata"][m].keys()
)
_features_metadata = | pd.concat(features_metadata_dict, axis=0) | pandas.concat |
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
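# Illustrative sketch of how a validator factory such as validate_165 is used:
# it returns the rule definition together with a checking function that takes
# the dict of input tables and returns, per table, the indices of failing rows.
def _example_run_validator(dfs):
    error, check = validate_165()
    error_locations = check(dfs)
    return error, error_locations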
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
(
(oc2['DOB'] + pd.DateOffset(years=4) == start) # ???
| (oc2['DOB'] + pd.DateOffset(years=17) == start)
)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
& oc2['SDQ_REASON'].isna()
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_157():
error = ErrorDefinition(
code='157',
description="Child is aged 4 years or over at the beginning of the year or 16 years or under at the end of the "
"year and Strengths and Difficulties Questionnaire (SDQ) 1 has been recorded as the reason for no "
"Strengths and Difficulties Questionnaire (SDQ) score.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
ERRRR = (
oc2['CONTINUOUSLY_LOOKED_AFTER']
& (oc2['DOB'] + pd.DateOffset(years=4) <= start)
& (oc2['DOB'] + pd.DateOffset(years=16) >= endo)
& oc2['SDQ_SCORE'].isna()
& (oc2['SDQ_REASON'] == 'SDQ1')
)
return {'OC2': oc2[ERRRR].index.to_list()}
return error, _validate
def validate_357():
error = ErrorDefinition(
code='357',
description='If this is the first episode ever for this child, reason for new episode must be S. '
'Check whether there is an episode immediately preceding this one, which has been left out. '
'If not the reason for new episode code must be amended to S.',
affected_fields=['RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
eps = dfs['Episodes']
eps['DECOM'] = pd.to_datetime(eps['DECOM'], format='%d/%m/%Y', errors='coerce')
eps = eps.loc[eps['DECOM'].notnull()]
first_eps = eps.loc[eps.groupby('CHILD')['DECOM'].idxmin()]
errs = first_eps[first_eps['RNE'] != 'S'].index.to_list()
return {'Episodes': errs}
return error, _validate
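# Illustrative sketch (not part of the original rule set): a minimal, hand-built 'dfs'
# showing how one of the (error, _validate) pairs above can be exercised. The demo
# function name and the expected output noted inline are assumptions for illustration only.
def _demo_validate_357():
    error, _validate = validate_357()
    dfs = {'Episodes': pd.DataFrame({'CHILD': ['1', '1'],
                                     'DECOM': ['01/05/2020', '01/07/2020'],
                                     'RNE': ['T', 'P']})}
    # the child's first episode (01/05/2020) has RNE 'T' rather than 'S', so row 0 is flagged
    return _validate(dfs)  # expected: {'Episodes': [0]}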
def validate_117():
error = ErrorDefinition(
code='117',
description='Date of decision that a child should/should no longer be placed for adoption is beyond the current collection year or after the child ceased to be looked after.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_PLACED', 'DEC', 'REC', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placed_adoption = dfs['PlacedAdoption']
collection_end = dfs['metadata']['collection_end']
# datetime
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# Drop nans and continuing episodes
episodes = episodes.dropna(subset=['DECOM'])
episodes = episodes[episodes['REC'] != 'X1']
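            # keep only each child's most recent remaining episode (latest DECOM)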
episodes = episodes.loc[episodes.groupby('CHILD')['DECOM'].idxmax()]
# prepare to merge
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
p4a_cols = ['DATE_PLACED', 'DATE_PLACED_CEASED']
# latest episodes
merged = episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
mask = (
(merged['DATE_PLACED'] > collection_end)
| (merged['DATE_PLACED'] > merged['DEC'])
| (merged['DATE_PLACED_CEASED'] > collection_end)
| (merged['DATE_PLACED_CEASED'] > merged['DEC'])
)
# If provided <DATE_PLACED> and/or <DATE_PLACED_CEASED> must not be > <COLLECTION_END_DATE> or <DEC> of latest episode where <REC> not = 'X1'
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_118():
error = ErrorDefinition(
code='118',
description='Date of decision that a child should no longer be placed for adoption is before the current collection year or before the date the child started to be looked after.',
        affected_fields=['DATE_PLACED_CEASED', 'DECOM', 'LS']
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
code_list = ['V3', 'V4']
# datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
filter_by_ls = episodes[~(episodes['LS'].isin(code_list))]
earliest_episode_idxs = filter_by_ls.groupby('CHILD')['DECOM'].idxmin()
earliest_episodes = episodes[episodes.index.isin(earliest_episode_idxs)]
# prepare to merge
placed_adoption.reset_index(inplace=True)
earliest_episodes.reset_index(inplace=True)
# merge
merged = earliest_episodes.merge(placed_adoption, on='CHILD', how='left', suffixes=['_eps', '_pa'])
# drop rows where DATE_PLACED_CEASED is not provided
merged = merged.dropna(subset=['DATE_PLACED_CEASED'])
# If provided <DATE_PLACED_CEASED> must not be prior to <COLLECTION_START_DATE> or <DECOM> of the earliest episode with an <LS> not = 'V3' or 'V4'
mask = (merged['DATE_PLACED_CEASED'] < merged['DECOM']) | (merged['DATE_PLACED_CEASED'] < collection_start)
# error locations
pa_error_locs = merged.loc[mask, 'index_pa']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'PlacedAdoption': pa_error_locs.unique().tolist()}
return error, _validate
def validate_352():
error = ErrorDefinition(
code='352',
description='Child who started to be looked after was aged 18 or over.',
affected_fields=['DECOM', 'RNE'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB18'] = header['DOB'] + pd.DateOffset(years=18)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
care_start = episodes_merged['RNE'].str.upper().astype(str).isin(['S'])
started_over_18 = episodes_merged['DOB18'] <= episodes_merged['DECOM']
error_mask = care_start & started_over_18
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_209():
error = ErrorDefinition(
code='209',
description='Child looked after is of school age and should not have an unknown Unique Pupil Number (UPN) code of UN1.',
affected_fields=['UPN', 'DOB']
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
collection_start = dfs['metadata']['collection_start']
# convert to datetime
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
yr = collection_start.year - 1
reference_date = pd.to_datetime('31/08/' + str(yr), format='%d/%m/%Y', errors='coerce')
# If <DOB> >= 4 years prior to 31/08/YYYY then <UPN> should not be 'UN1' Note: YYYY in this instance refers to the year prior to the collection start (for collection year 2019-2020, it would be looking at the 31/08/2018).
mask = (reference_date >= (header['DOB'] + pd.offsets.DateOffset(years=4))) & (header['UPN'] == 'UN1')
# error locations
error_locs_header = header.index[mask]
return {'Header': error_locs_header.tolist()}
return error, _validate
def validate_198():
error = ErrorDefinition(
code='198',
description="Child has not been looked after continuously for at least 12 months at 31 March but a reason "
"for no Strengths and Difficulties (SDQ) score has been completed. ",
affected_fields=['SDQ_REASON'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_REASON'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_185():
error = ErrorDefinition(
code='185',
description="Child has not been looked after continuously for at least 12 months at " +
"31 March but a Strengths and Difficulties (SDQ) score has been completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
error_mask = oc2['SDQ_SCORE'].notna() & ~oc2['CONTINUOUSLY_LOOKED_AFTER']
error_locs = oc2.index[error_mask].to_list()
return {'OC2': error_locs}
return error, _validate
def validate_186():
error = ErrorDefinition(
code='186',
description="Children aged 4 or over at the start of the year and children aged under 17 at the " +
"end of the year and who have been looked after for at least 12 months continuously " +
"should have a Strengths and Difficulties (SDQ) score completed.",
affected_fields=['SDQ_SCORE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_start_str = dfs['metadata']['collection_start']
collection_end_str = dfs['metadata']['collection_end']
collection_start = pd.to_datetime(collection_start_str, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2 = add_CLA_column(dfs, 'OC2')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
oc2['17th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=17)
error_mask = (
(oc2['4th_bday'] <= collection_start)
& (oc2['17th_bday'] > collection_end)
& oc2['CONTINUOUSLY_LOOKED_AFTER']
& oc2['SDQ_SCORE'].isna()
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_187():
error = ErrorDefinition(
code='187',
description="Child cannot be looked after continuously for 12 months at " +
"31 March (OC2) and have any of adoption or care leavers returns completed.",
        affected_fields=['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR',  # AD1
                         'IN_TOUCH', 'ACTIV', 'ACCOM'],  # OC3
)
def _validate(dfs):
if (
'OC3' not in dfs
or 'AD1' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
ad1, oc3 = add_CLA_column(dfs, ['AD1', 'OC3'])
# OC3
should_be_blank = ['IN_TOUCH', 'ACTIV', 'ACCOM']
oc3_mask = oc3['CONTINUOUSLY_LOOKED_AFTER'] & oc3[should_be_blank].notna().any(axis=1)
oc3_error_locs = oc3[oc3_mask].index.to_list()
# AD1
should_be_blank = ['DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR', 'LS_ADOPTR']
ad1_mask = ad1['CONTINUOUSLY_LOOKED_AFTER'] & ad1[should_be_blank].notna().any(axis=1)
ad1_error_locs = ad1[ad1_mask].index.to_list()
return {'AD1': ad1_error_locs,
'OC3': oc3_error_locs}
return error, _validate
def validate_188():
error = ErrorDefinition(
code='188',
description="Child is aged under 4 years at the end of the year, "
"but a Strengths and Difficulties (SDQ) score or a reason "
"for no SDQ score has been completed. ",
affected_fields=['SDQ_SCORE', 'SDQ_REASON'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
oc2 = dfs['OC2']
collection_end_str = dfs['metadata']['collection_end']
collection_end = pd.to_datetime(collection_end_str, format='%d/%m/%Y', errors='coerce')
oc2['DOB_dt'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
oc2['4th_bday'] = oc2['DOB_dt'] + pd.DateOffset(years=4)
error_mask = (
(oc2['4th_bday'] > collection_end)
& oc2[['SDQ_SCORE', 'SDQ_REASON']].notna().any(axis=1)
)
oc2_errors = oc2.loc[error_mask].index.to_list()
return {'OC2': oc2_errors}
return error, _validate
def validate_190():
error = ErrorDefinition(
code='190',
description="Child has not been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been completed.",
        affected_fields=['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
                         'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],  # OC2
    )
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_blank = ['CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT',
'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
mask = ~oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_blank].notna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_191():
error = ErrorDefinition(
code='191',
description="Child has been looked after continuously for at least 12 months at 31 March but one or more "
"data items relating to children looked after for 12 months have been left blank.",
affected_fields=['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'], # OC2
)
def _validate(dfs):
if (
'OC2' not in dfs
or 'Episodes' not in dfs
):
return {}
# add 'CONTINUOUSLY_LOOKED_AFTER' column
oc2 = add_CLA_column(dfs, 'OC2')
should_be_present = ['IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE']
mask = oc2['CONTINUOUSLY_LOOKED_AFTER'] & oc2[should_be_present].isna().any(axis=1)
error_locs = oc2[mask].index.to_list()
return {'OC2': error_locs}
return error, _validate
def validate_607():
error = ErrorDefinition(
code='607',
description='Child ceased to be looked after in the year, but mother field has not been completed.',
affected_fields=['DEC', 'REC', 'MOTHER', 'LS', 'SEX']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
code_list = ['V3', 'V4']
            # convert to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# CEASED_TO_BE_LOOKED_AFTER = DEC is not null and REC is filled but not equal to X1
CEASED_TO_BE_LOOKED_AFTER = merged['DEC'].notna() & ((merged['REC'] != 'X1') & merged['REC'].notna())
# and <LS> not = ‘V3’ or ‘V4’
check_LS = ~(merged['LS'].isin(code_list))
            # and <DEC> is in <CURRENT_COLLECTION_YEAR>
check_DEC = (collection_start <= merged['DEC']) & (merged['DEC'] <= collection_end)
# Where <CEASED_TO_BE_LOOKED_AFTER> = ‘Y’, and <LS> not = ‘V3’ or ‘V4’ and <DEC> is in <CURRENT_COLLECTION_YEAR> and <SEX> = ‘2’ then <MOTHER> should be provided.
mask = CEASED_TO_BE_LOOKED_AFTER & check_LS & check_DEC & (merged['SEX'] == '2') & (merged['MOTHER'].isna())
header_error_locs = merged.loc[mask, 'index_er']
eps_error_locs = merged.loc[mask, 'index_eps']
return {'Episodes': eps_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_210():
error = ErrorDefinition(
code='210',
description='Children looked after for more than a week at 31 March should not have an unknown Unique Pupil Number (UPN) code of UN4.',
affected_fields=['UPN', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
collection_end = dfs['metadata']['collection_end']
# convert to datetime
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
yr = collection_end.year
            reference_date = pd.to_datetime('24/03/' + str(yr), format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
            # merge on CHILD rather than on UPN: this keeps the usual many-to-one (episodes-to-header)
            # join instead of a one-to-many merge on UPN
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # If <UPN> = 'UN4' then no episode <DECOM> must be >= 24/03/YYYY. Note: YYYY refers to the current collection year.
mask = (merged['UPN'] == 'UN4') & (merged['DECOM'] >= reference_date)
# error locations
error_locs_header = merged.loc[mask, 'index_er']
error_locs_eps = merged.loc[mask, 'index_eps']
return {'Episodes': error_locs_eps.tolist(), 'Header': error_locs_header.unique().tolist()}
return error, _validate
def validate_1010():
error = ErrorDefinition(
code='1010',
description='This child has no episodes loaded for current year even though there was an open episode of '
+ 'care at the end of the previous year, and care leaver data has been entered.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Episodes_last' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
oc3 = dfs['OC3']
            # convert DECOM to datetime, drop missing/invalid values, then sort by CHILD and DECOM
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last = episodes_last.dropna(subset=['DECOM']).sort_values(['CHILD', 'DECOM'], ascending=True)
# Keep only the final episode for each child (ie where the following row has a different CHILD value)
episodes_last = episodes_last[
episodes_last['CHILD'].shift(-1) != episodes_last['CHILD']
]
# Keep only the final episodes that were still open
episodes_last = episodes_last[episodes_last['DEC'].isna()]
# The remaining children ought to have episode data in the current year if they are in OC3
has_current_episodes = oc3['CHILD'].isin(episodes['CHILD'])
has_open_episode_last = oc3['CHILD'].isin(episodes_last['CHILD'])
error_mask = ~has_current_episodes & has_open_episode_last
validation_error_locations = oc3.index[error_mask]
return {'OC3': validation_error_locations.tolist()}
return error, _validate
def validate_525():
error = ErrorDefinition(
code='525',
description='A child for whom the decision to be placed for adoption has been reversed cannot be adopted during the year.',
affected_fields=['DATE_PLACED_CEASED', 'DATE_INT', 'DATE_MATCH', 'FOSTER_CARE', 'NB_ADOPTR', 'SEX_ADOPTR',
'LS_ADOPTR']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs or 'AD1' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
ad1 = dfs['AD1']
# prepare to merge
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = placed_adoption.merge(ad1, on='CHILD', how='left', suffixes=['_placed', '_ad1'])
# If <DATE_PLACED_CEASED> not Null, then <DATE_INT>; <DATE_MATCH>; <FOSTER_CARE>; <NB_ADOPTR>; <SEX_ADOPTR>; and <LS_ADOPTR> should not be provided
mask = merged['DATE_PLACED_CEASED'].notna() & (
merged['DATE_INT'].notna() | merged['DATE_MATCH'].notna() | merged['FOSTER_CARE'].notna() |
merged['NB_ADOPTR'].notna() | merged['SEX_ADOPTR'].notna() | merged['LS_ADOPTR'].notna())
# error locations
pa_error_locs = merged.loc[mask, 'index_placed']
ad_error_locs = merged.loc[mask, 'index_ad1']
# return result
return {'PlacedAdoption': pa_error_locs.tolist(), 'AD1': ad_error_locs.tolist()}
return error, _validate
def validate_335():
error = ErrorDefinition(
code='335',
description='The current foster value (0) suggests that child is not adopted by current foster carer, but last placement is A2, A3, or A5. Or the current foster value (1) suggests that child is adopted by current foster carer, but last placement is A1, A4 or A6.',
affected_fields=['PLACE', 'FOSTER_CARE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'AD1' not in dfs:
return {}
else:
episodes = dfs['Episodes']
ad1 = dfs['AD1']
# prepare to merge
episodes.reset_index(inplace=True)
ad1.reset_index(inplace=True)
merged = episodes.merge(ad1, on='CHILD', how='left', suffixes=['_eps', '_ad1'])
            # Where <PL> = 'A2', 'A3' or 'A5' and <REC> = 'E1', 'E11' or 'E12' then <FOSTER_CARE> should not be '0'; where <PL> = 'A1', 'A4' or 'A6' and <REC> = 'E1', 'E11' or 'E12' then <FOSTER_CARE> should not be '1'.
mask = (
merged['REC'].isin(['E1', 'E11', 'E12']) & (
(merged['PLACE'].isin(['A2', 'A3', 'A5']) & (merged['FOSTER_CARE'].astype(str) == '0'))
| (merged['PLACE'].isin(['A1', 'A4', 'A6']) & (merged['FOSTER_CARE'].astype(str) == '1'))
)
)
eps_error_locs = merged.loc[mask, 'index_eps']
ad1_error_locs = merged.loc[mask, 'index_ad1']
# use .unique since join is many to one
return {'Episodes': eps_error_locs.tolist(), 'AD1': ad1_error_locs.unique().tolist()}
return error, _validate
def validate_215():
error = ErrorDefinition(
code='215',
description='Child has care leaver information but one or more data items relating to children looked after for 12 months have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM', 'CONVICTED', 'HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK',
'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE', 'INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED']
)
def _validate(dfs):
if 'OC3' not in dfs or 'OC2' not in dfs:
return {}
else:
oc3 = dfs['OC3']
oc2 = dfs['OC2']
# prepare to merge
oc3.reset_index(inplace=True)
oc2.reset_index(inplace=True)
merged = oc3.merge(oc2, on='CHILD', how='left', suffixes=['_3', '_2'])
# If any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided then <CONVICTED>; <HEALTH_CHECK>; <IMMUNISATIONS>; <TEETH_CHECK>; <HEALTH_ASSESSMENT>; <SUBSTANCE MISUSE>; <INTERVENTION_RECEIVED>; <INTERVENTION_OFFERED>; should not be provided
mask = (merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna()) & (
merged['CONVICTED'].notna() | merged['HEALTH_CHECK'].notna() | merged['IMMUNISATIONS'].notna() |
merged['TEETH_CHECK'].notna() | merged['HEALTH_ASSESSMENT'].notna() | merged[
'SUBSTANCE_MISUSE'].notna() | merged['INTERVENTION_RECEIVED'].notna() | merged[
'INTERVENTION_OFFERED'].notna())
# error locations
oc3_error_locs = merged.loc[mask, 'index_3']
oc2_error_locs = merged.loc[mask, 'index_2']
return {'OC3': oc3_error_locs.tolist(), 'OC2': oc2_error_locs.tolist()}
return error, _validate
def validate_399():
error = ErrorDefinition(
code='399',
description='Mother field, review field or participation field are completed but '
+ 'child is looked after under legal status V3 or V4.',
affected_fields=['MOTHER', 'LS', 'REVIEW', 'REVIEW_CODE']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs or 'Reviews' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
reviews = dfs['Reviews']
code_list = ['V3', 'V4']
# prepare to merge
episodes['index_eps'] = episodes.index
header['index_hdr'] = header.index
reviews['index_revs'] = reviews.index
# merge
merged = (episodes.merge(header, on='CHILD', how='left')
.merge(reviews, on='CHILD', how='left'))
# If <LS> = 'V3' or 'V4' then <MOTHER>, <REVIEW> and <REVIEW_CODE> should not be provided
mask = merged['LS'].isin(code_list) & (
merged['MOTHER'].notna() | merged['REVIEW'].notna() | merged['REVIEW_CODE'].notna())
# Error locations
eps_errors = merged.loc[mask, 'index_eps']
header_errors = merged.loc[mask, 'index_hdr'].unique()
revs_errors = merged.loc[mask, 'index_revs'].unique()
return {'Episodes': eps_errors.tolist(),
'Header': header_errors.tolist(),
'Reviews': revs_errors.tolist()}
return error, _validate
def validate_189():
error = ErrorDefinition(
code='189',
description='Child is aged 17 years or over at the beginning of the year, but an Strengths and Difficulties '
+ '(SDQ) score or a reason for no Strengths and Difficulties (SDQ) score has been completed.',
affected_fields=['DOB', 'SDQ_SCORE', 'SDQ_REASON']
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
collection_start = dfs['metadata']['collection_start']
# datetime format allows appropriate comparison between dates
oc2['DOB'] = pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# If <DOB> >17 years prior to <COLLECTION_START_DATE> then <SDQ_SCORE> and <SDQ_REASON> should not be provided
mask = ((oc2['DOB'] + pd.offsets.DateOffset(years=17)) <= collection_start) & (
oc2['SDQ_REASON'].notna() | oc2['SDQ_SCORE'].notna())
            # That is, raise error if collection_start >= DOB + 17 years
oc_error_locs = oc2.index[mask]
return {'OC2': oc_error_locs.tolist()}
return error, _validate
def validate_226():
error = ErrorDefinition(
code='226',
description='Reason for placement change is not required.',
affected_fields=['REASON_PLACE_CHANGE', 'PLACE']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
code_list = ['T0', 'T1', 'T2', 'T3', 'T4']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
# create column to see previous REASON_PLACE_CHANGE
episodes = episodes.sort_values(['CHILD', 'DECOM'])
episodes['PREVIOUS_REASON'] = episodes.groupby('CHILD')['REASON_PLACE_CHANGE'].shift(1)
# If <PL> = 'T0'; 'T1'; 'T2'; 'T3' or 'T4' then <REASON_PLACE_CHANGE> should be null in current episode and current episode - 1
mask = episodes['PLACE'].isin(code_list) & (
episodes['REASON_PLACE_CHANGE'].notna() | episodes['PREVIOUS_REASON'].notna())
# error locations
error_locs = episodes.index[mask]
return {'Episodes': error_locs.tolist()}
return error, _validate
def validate_358():
error = ErrorDefinition(
code='358',
description='Child with this legal status should not be under 10.',
affected_fields=['DECOM', 'DOB', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['J1', 'J2', 'J3']
# convert dates to datetime format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
            # Where <LS> = 'J1', 'J2' or 'J3' then <DOB> must be at least 10 years prior to <DECOM>
            mask = merged['LS'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=10) > merged['DECOM'])
            # That is, raise error if DECOM < DOB + 10 years (the child was under 10 when the episode started)
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_407():
error = ErrorDefinition(
code='407',
description='Reason episode ceased is Special Guardianship Order, but child has reached age 18.',
affected_fields=['DEC', 'DOB', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'Header' not in dfs:
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
code_list = ['E45', 'E46', 'E47', 'E48']
# convert dates to datetime format
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er'])
# If <REC> = ‘E45’ or ‘E46’ or ‘E47’ or ‘E48’ then <DOB> must be < 18 years prior to <DEC>
mask = merged['REC'].isin(code_list) & (merged['DOB'] + pd.offsets.DateOffset(years=18) < merged['DEC'])
            # That is, raise error if DEC > DOB + 18 years
# error locations
header_error_locs = merged.loc[mask, 'index_er']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'Header': header_error_locs.unique().tolist()}
return error, _validate
def validate_1007():
error = ErrorDefinition(
code='1007',
description='Care leaver information is not required for 17- or 18-year olds who are still looked after.',
affected_fields=['DEC', 'REC', 'DOB', 'IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_end = dfs['metadata']['collection_end']
# convert dates to datetime format
oc3['DOB'] = pd.to_datetime(oc3['DOB'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
# prepare to merge
episodes.reset_index(inplace=True)
oc3.reset_index(inplace=True)
merged = episodes.merge(oc3, on='CHILD', how='left', suffixes=['_eps', '_oc3'])
            # If <DOB> is at least 17 but under 19 years prior to <COLLECTION_END_DATE> and the current episode <DEC> and/or <REC> are not provided, then <IN_TOUCH>, <ACTIV> and <ACCOM> should not be provided
check_age = (merged['DOB'] + pd.offsets.DateOffset(years=17) <= collection_end) & (
merged['DOB'] + pd.offsets.DateOffset(years=19) > collection_end)
# That is, check that 17<=age<19
check_dec_rec = merged['REC'].isna() | merged['DEC'].isna()
# if either DEC or REC are absent
mask = check_age & check_dec_rec & (
merged['IN_TOUCH'].notna() | merged['ACTIV'].notna() | merged['ACCOM'].notna())
# Then raise an error if either IN_TOUCH, ACTIV, or ACCOM have been provided too
# error locations
oc3_error_locs = merged.loc[mask, 'index_oc3']
episode_error_locs = merged.loc[mask, 'index_eps']
# one to many join implies use .unique on the 'one'
return {'Episodes': episode_error_locs.tolist(), 'OC3': oc3_error_locs.unique().tolist()}
return error, _validate
def validate_442():
error = ErrorDefinition(
code='442',
description='Unique Pupil Number (UPN) field is not completed.',
affected_fields=['UPN', 'LS']
)
def _validate(dfs):
if ('Episodes' not in dfs) or ('Header' not in dfs):
return {}
else:
episodes = dfs['Episodes']
header = dfs['Header']
episodes.reset_index(inplace=True)
header.reset_index(inplace=True)
code_list = ['V3', 'V4']
# merge left on episodes to get all children for which episodes have been recorded even if they do not exist on the header.
merged = episodes.merge(header, on=['CHILD'], how='left', suffixes=['_eps', '_er'])
# Where any episode present, with an <LS> not = 'V3' or 'V4' then <UPN> must be provided
mask = (~merged['LS'].isin(code_list)) & merged['UPN'].isna()
episode_error_locs = merged.loc[mask, 'index_eps']
header_error_locs = merged.loc[mask, 'index_er']
return {'Episodes': episode_error_locs.tolist(),
# Select unique values since many episodes are joined to one header
# and multiple errors will be raised for the same index.
'Header': header_error_locs.dropna().unique().tolist()}
return error, _validate
def validate_344():
error = ErrorDefinition(
code='344',
description='The record shows the young person has died or returned home to live with parent(s) or someone with parental responsibility for a continuous period of 6 months or more, but activity and/or accommodation on leaving care have been completed.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'DIED' or 'RHOM' then <ACTIV> and <ACCOM> should not be provided
mask = ((oc3['IN_TOUCH'] == 'DIED') | (oc3['IN_TOUCH'] == 'RHOM')) & (
oc3['ACTIV'].notna() | oc3['ACCOM'].notna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_345():
error = ErrorDefinition(
code='345',
        description='The data collection record shows the local authority is in touch with this young person, but activity and/or accommodation data items are blank.',
affected_fields=['IN_TOUCH', 'ACTIV', 'ACCOM']
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
# If <IN_TOUCH> = 'Yes' then <ACTIV> and <ACCOM> must be provided
mask = (oc3['IN_TOUCH'] == 'YES') & (oc3['ACTIV'].isna() | oc3['ACCOM'].isna())
error_locations = oc3.index[mask]
return {'OC3': error_locations.to_list()}
return error, _validate
def validate_384():
error = ErrorDefinition(
code='384',
description='A child receiving respite care cannot be in a long-term foster placement ',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# Where <LS> = 'V3' or 'V4' then <PL> must not be 'U1' or 'U4'
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
(episodes['PLACE'] == 'U1') | (episodes['PLACE'] == 'U4'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_390():
error = ErrorDefinition(
code='390',
description='Reason episode ceased is adopted but child has not been previously placed for adoption.',
affected_fields=['PLACE', 'REC']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# If <REC> = 'E11' or 'E12' then <PL> must be one of 'A3', 'A4', 'A5' or 'A6'
mask = ((episodes['REC'] == 'E11') | (episodes['REC'] == 'E12')) & ~(
(episodes['PLACE'] == 'A3') | (episodes['PLACE'] == 'A4') | (episodes['PLACE'] == 'A5') | (
episodes['PLACE'] == 'A6'))
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_378():
error = ErrorDefinition(
code='378',
description='A child who is placed with parent(s) cannot be looked after under a single period of accommodation under Section 20 of the Children Act 1989.',
affected_fields=['PLACE', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
# the & sign supercedes the ==, so brackets are necessary here
mask = (episodes['PLACE'] == 'P1') & (episodes['LS'] == 'V2')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_398():
error = ErrorDefinition(
code='398',
description='Distance field completed but child looked after under legal status V3 or V4.',
affected_fields=['LS', 'HOME_POST', 'PL_POST']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = ((episodes['LS'] == 'V3') | (episodes['LS'] == 'V4')) & (
episodes['HOME_POST'].notna() | episodes['PL_POST'].notna())
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_451():
error = ErrorDefinition(
code='451',
description='Child is still freed for adoption, but freeing orders could not be applied for since 30 December 2005.',
affected_fields=['DEC', 'REC', 'LS']
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = episodes['DEC'].isna() & episodes['REC'].isna() & (episodes['LS'] == 'D1')
error_locations = episodes.index[mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_519():
error = ErrorDefinition(
code='519',
description='Data entered on the legal status of adopters shows civil partnership couple, but data entered on genders of adopters does not show it as a couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
mask = (ad1['LS_ADOPTR'] == 'L2') & (
(ad1['SEX_ADOPTR'] != 'MM') & (ad1['SEX_ADOPTR'] != 'FF') & (ad1['SEX_ADOPTR'] != 'MF'))
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_520():
error = ErrorDefinition(
code='520',
description='Data entry on the legal status of adopters shows different gender married couple but data entry on genders of adopters shows it as a same gender couple.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR']
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
ad1 = dfs['AD1']
# check condition
mask = (ad1['LS_ADOPTR'] == 'L11') & (ad1['SEX_ADOPTR'] != 'MF')
error_locations = ad1.index[mask]
return {'AD1': error_locations.to_list()}
return error, _validate
def validate_522():
error = ErrorDefinition(
code='522',
description='Date of decision that the child should be placed for adoption must be on or before the date that a child should no longer be placed for adoption.',
affected_fields=['DATE_PLACED', 'DATE_PLACED_CEASED']
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
# Convert to datetimes
placed_adoption['DATE_PLACED_CEASED'] = pd.to_datetime(placed_adoption['DATE_PLACED_CEASED'],
format='%d/%m/%Y', errors='coerce')
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
            # error mask: the decision to place for adoption is dated after the decision that the
            # child should no longer be placed for adoption
            mask = placed_adoption['DATE_PLACED'] > placed_adoption['DATE_PLACED_CEASED']
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_563():
error = ErrorDefinition(
code='563',
description='The child should no longer be placed for adoption but the date of the decision that the child should be placed for adoption is blank',
affected_fields=['DATE_PLACED', 'REASON_PLACED_CEASED', 'DATE_PLACED_CEASED'],
)
def _validate(dfs):
if 'PlacedAdoption' not in dfs:
return {}
else:
placed_adoption = dfs['PlacedAdoption']
mask = placed_adoption['REASON_PLACED_CEASED'].notna() & placed_adoption['DATE_PLACED_CEASED'].notna() & \
placed_adoption['DATE_PLACED'].isna()
error_locations = placed_adoption.index[mask]
return {'PlacedAdoption': error_locations.to_list()}
return error, _validate
def validate_544():
error = ErrorDefinition(
code='544',
description="Any child who has conviction information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['CONVICTED', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
convict = oc2['CONVICTED'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = convict & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
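# Illustrative sketch (assumptions only): rule 544 flags a record where a conviction is
# recorded ('1') but one of the required health items is blank. The tiny frame below and
# the expected result are made up for illustration, not taken from real data.
def _demo_validate_544():
    error, _validate = validate_544()
    dfs = {'OC2': pd.DataFrame({'CONVICTED': ['1', None],
                                'IMMUNISATIONS': [None, '1'],
                                'TEETH_CHECK': ['1', '1'],
                                'HEALTH_ASSESSMENT': ['1', '1'],
                                'SUBSTANCE_MISUSE': ['1', '1']})}
    return _validate(dfs)  # expected: {'OC2': [0]} -- conviction recorded but immunisations blank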
def validate_634():
error = ErrorDefinition(
code='634',
description='There are entries for previous permanence options, but child has not started to be looked after from 1 April 2016 onwards.',
affected_fields=['LA_PERM', 'PREV_PERM', 'DATE_PERM', 'DECOM']
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PrevPerm' not in dfs:
return {}
else:
episodes = dfs['Episodes']
prevperm = dfs['PrevPerm']
collection_start = dfs['metadata']['collection_start']
# convert date field to appropriate format
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
# the maximum date has the highest possibility of satisfying the condition
episodes['LAST_DECOM'] = episodes.groupby('CHILD')['DECOM'].transform('max')
# prepare to merge
episodes.reset_index(inplace=True)
prevperm.reset_index(inplace=True)
merged = prevperm.merge(episodes, on='CHILD', how='left', suffixes=['_prev', '_eps'])
# If <PREV_PERM> or <LA_PERM> or <DATE_PERM> provided, then at least 1 episode must have a <DECOM> later than 01/04/2016
mask = (merged['PREV_PERM'].notna() | merged['DATE_PERM'].notna() | merged['LA_PERM'].notna()) & (
merged['LAST_DECOM'] < collection_start)
eps_error_locs = merged.loc[mask, 'index_eps']
prevperm_error_locs = merged.loc[mask, 'index_prev']
# return {'PrevPerm':prevperm_error_locs}
return {'Episodes': eps_error_locs.unique().tolist(), 'PrevPerm': prevperm_error_locs.unique().tolist()}
return error, _validate
def validate_158():
error = ErrorDefinition(
code='158',
description='If a child has been recorded as receiving an intervention for their substance misuse problem, then the additional item on whether an intervention was offered should be left blank.',
affected_fields=['INTERVENTION_RECEIVED', 'INTERVENTION_OFFERED'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
error_mask = oc2['INTERVENTION_RECEIVED'].astype(str).eq('1') & oc2['INTERVENTION_OFFERED'].notna()
error_locations = oc2.index[error_mask]
return {'OC2': error_locations.tolist()}
return error, _validate
def validate_133():
error = ErrorDefinition(
code='133',
        description="Data entry for accommodation after leaving care is invalid. If reporting on a child's accommodation after leaving care the data entry must be valid.",
affected_fields=['ACCOM'],
)
def _validate(dfs):
if 'OC3' not in dfs:
return {}
else:
oc3 = dfs['OC3']
valid_codes = ['B1', 'B2', 'C1', 'C2', 'D1', 'D2', 'E1', 'E2', 'G1', 'G2', 'H1', 'H2', 'K1', 'K2', 'R1',
'R2', 'S2', 'T1', 'T2', 'U1', 'U2', 'V1', 'V2', 'W1', 'W2', 'X2', 'Y1', 'Y2', 'Z1', 'Z2',
'0']
error_mask = ~oc3['ACCOM'].isna() & ~oc3['ACCOM'].isin(valid_codes)
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.tolist()}
return error, _validate
def validate_565():
error = ErrorDefinition(
code='565',
description='The date that the child started to be missing or away from placement without authorisation has been completed but whether the child was missing or away from placement without authorisation has not been completed.',
affected_fields=['MISSING', 'MIS_START']
)
def _validate(dfs):
if 'Missing' not in dfs:
return {}
else:
missing = dfs['Missing']
mask = missing['MIS_START'].notna() & missing['MISSING'].isna()
error_locations = missing.index[mask]
return {'Missing': error_locations.to_list()}
return error, _validate
def validate_433():
error = ErrorDefinition(
code='433',
description='The reason for new episode suggests that this is a continuation episode, but the episode does not start on the same day as the last episode finished.',
affected_fields=['RNE', 'DECOM'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['original_index'] = episodes.index
            # parse dates first so that episodes sort chronologically rather than as dd/mm/yyyy strings
            episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
            episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
            episodes.sort_values(['CHILD', 'DECOM', 'DEC'], inplace=True)
episodes[['PREVIOUS_DEC', 'PREVIOUS_CHILD']] = episodes[['DEC', 'CHILD']].shift(1)
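            # after sorting by CHILD and DECOM, shift(1) gives the immediately preceding row,
            # i.e. the previous episode's DEC and CHILD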
rne_is_ongoing = episodes['RNE'].str.upper().astype(str).isin(['P', 'L', 'T', 'U', 'B'])
date_mismatch = episodes['PREVIOUS_DEC'] != episodes['DECOM']
missing_date = episodes['PREVIOUS_DEC'].isna() | episodes['DECOM'].isna()
same_child = episodes['PREVIOUS_CHILD'] == episodes['CHILD']
error_mask = rne_is_ongoing & (date_mismatch | missing_date) & same_child
error_locations = episodes['original_index'].loc[error_mask].sort_values()
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_437():
error = ErrorDefinition(
code='437',
description='Reason episode ceased is child has died or is aged 18 or over but there are further episodes.',
affected_fields=['REC'],
)
# !# potential false negatives, as this only operates on the current year's data
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes.sort_values(['CHILD', 'DECOM'], inplace=True)
episodes[['NEXT_DECOM', 'NEXT_CHILD']] = episodes[['DECOM', 'CHILD']].shift(-1)
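            # shift(-1) gives the following row, i.e. the next episode's DECOM and CHILD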
# drop rows with missing DECOM as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
ceased_e2_e15 = episodes['REC'].str.upper().astype(str).isin(['E2', 'E15'])
has_later_episode = episodes['CHILD'] == episodes['NEXT_CHILD']
error_mask = ceased_e2_e15 & has_later_episode
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_547():
error = ErrorDefinition(
code='547',
description="Any child who has health promotion information completed must also have immunisation, teeth check, health assessment and substance misuse problem identified fields completed.",
affected_fields=['HEALTH_CHECK', 'IMMUNISATIONS', 'TEETH_CHECK', 'HEALTH_ASSESSMENT', 'SUBSTANCE_MISUSE'],
)
def _validate(dfs):
if 'OC2' not in dfs:
return {}
else:
oc2 = dfs['OC2']
healthck = oc2['HEALTH_CHECK'].astype(str) == '1'
immunisations = oc2['IMMUNISATIONS'].isna()
teeth_ck = oc2['TEETH_CHECK'].isna()
health_ass = oc2['HEALTH_ASSESSMENT'].isna()
sub_misuse = oc2['SUBSTANCE_MISUSE'].isna()
error_mask = healthck & (immunisations | teeth_ck | health_ass | sub_misuse)
validation_error_locations = oc2.index[error_mask]
return {'OC2': validation_error_locations.to_list()}
return error, _validate
def validate_635():
error = ErrorDefinition(
code='635',
description='There are entries for date of order and local authority code where previous permanence option was arranged but previous permanence code is Z1',
affected_fields=['LA_PERM', 'DATE_PERM', 'PREV_PERM']
)
def _validate(dfs):
if 'PrevPerm' not in dfs:
return {}
else:
prev_perm = dfs['PrevPerm']
            # raise an error if either LA_PERM or DATE_PERM is present but PREV_PERM is absent
mask = ((prev_perm['LA_PERM'].notna() | prev_perm['DATE_PERM'].notna()) & prev_perm['PREV_PERM'].isna())
error_locations = prev_perm.index[mask]
return {'PrevPerm': error_locations.to_list()}
return error, _validate
def validate_550():
error = ErrorDefinition(
code='550',
description='A placement provider code of PR0 can only be associated with placement P1.',
affected_fields=['PLACE', 'PLACE_PROVIDER'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
mask = (episodes['PLACE'] != 'P1') & episodes['PLACE_PROVIDER'].eq('PR0')
validation_error_locations = episodes.index[mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
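# Illustrative sketch (assumptions only): rule 550 only allows placement provider 'PR0'
# alongside placement 'P1'. The frame and expected output below are invented for illustration.
def _demo_validate_550():
    error, _validate = validate_550()
    dfs = {'Episodes': pd.DataFrame({'PLACE': ['P1', 'U1'],
                                     'PLACE_PROVIDER': ['PR0', 'PR0']})}
    return _validate(dfs)  # expected: {'Episodes': [1]} -- PR0 used with a non-P1 placement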
def validate_217():
error = ErrorDefinition(
code='217',
description='Children who are placed for adoption with current foster carers (placement types A3 or A5) must have a reason for new episode of S, T or U.',
affected_fields=['PLACE', 'DECOM', 'RNE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
max_decom_allowed = pd.to_datetime('01/04/2015', format='%d/%m/%Y', errors='coerce')
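            # the check only applies to episodes starting on or after 01/04/2015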
reason_new_ep = ['S', 'T', 'U']
place_codes = ['A3', 'A5']
mask = (episodes['PLACE'].isin(place_codes) & (episodes['DECOM'] >= max_decom_allowed)) & ~episodes[
'RNE'].isin(reason_new_ep)
validation_error_mask = mask
validation_error_locations = episodes.index[validation_error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_518():
error = ErrorDefinition(
code='518',
description='If reporting legal status of adopters is L4 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L4') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_517():
error = ErrorDefinition(
code='517',
description='If reporting legal status of adopters is L3 then the genders of adopters should be coded as MF. MF = the adopting couple are male and female.',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L3') & ~AD1['SEX_ADOPTR'].isin(['MF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_558():
error = ErrorDefinition(
code='558',
        description='If a child has been adopted, then the decision to place them for adoption has not been disrupted and the date of the decision that a child should no longer be placed for adoption should be left blank. If the REC code is either E11 or E12 then the DATE_PLACED_CEASED date should not be provided.',
affected_fields=['DATE_PLACED_CEASED', 'REC'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
rec_codes = ['E11', 'E12']
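            # episodes where the reason the episode ceased indicates the child was adopted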
placeEpisodes = episodes[episodes['REC'].isin(rec_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED_CEASED'].notna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_453():
error = ErrorDefinition(
code='453',
description='Contradiction between placement distance in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_DISTANCE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['PL_DISTANCE'] = pd.to_numeric(episodes['PL_DISTANCE'], errors='coerce')
episodes_last['PL_DISTANCE'] = pd.to_numeric(episodes_last['PL_DISTANCE'], errors='coerce')
# drop rows with missing DECOM before finding idxmin/max, as invalid/missing values can lead to errors
episodes = episodes.dropna(subset=['DECOM'])
episodes_last = episodes_last.dropna(subset=['DECOM'])
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
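            # a PL_DISTANCE difference of 0.2 or more is treated as a contradiction
            # (assumed tolerance to allow for rounding of the reported distances)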
different_pl_dist = abs(episodes_merged['PL_DISTANCE'] - episodes_merged['PL_DISTANCE_last']) >= 0.2
error_mask = in_both_years & same_rne & last_year_open & different_pl_dist
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_516():
error = ErrorDefinition(
code='516',
        description='The episode data submitted for this child does not show that he/she was with their former foster carer(s) during the year. If the code in the reason episode ceased is E45 or E46 the child must have a placement code of U1 to U6.',
affected_fields=['REC', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
else:
episodes = dfs['Episodes']
place_codes = ['U1', 'U2', 'U3', 'U4', 'U5', 'U6']
rec_codes = ['E45', 'E46']
error_mask = episodes['REC'].isin(rec_codes) & ~episodes['PLACE'].isin(place_codes)
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_511():
error = ErrorDefinition(
code='511',
description='If reporting that the number of person(s) adopting the looked after child is two adopters then the code should only be MM, FF or MF. MM = the adopting couple are both males; FF = the adopting couple are both females; MF = The adopting couple are male and female.',
affected_fields=['NB_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
mask = AD1['NB_ADOPTR'].astype(str).eq('2') & AD1['SEX_ADOPTR'].isin(['M1', 'F1'])
validation_error_mask = mask
validation_error_locations = AD1.index[validation_error_mask]
return {'AD1': validation_error_locations.tolist()}
return error, _validate
def validate_524():
error = ErrorDefinition(
code='524',
description='If reporting legal status of adopters is L12 then the genders of adopters should be coded as MM or FF. MM = the adopting couple are both males. FF = the adopting couple are both females',
affected_fields=['LS_ADOPTR', 'SEX_ADOPTR'],
)
def _validate(dfs):
if 'AD1' not in dfs:
return {}
else:
AD1 = dfs['AD1']
error_mask = AD1['LS_ADOPTR'].eq('L12') & ~AD1['SEX_ADOPTR'].isin(['MM', 'FF'])
error_locations = AD1.index[error_mask]
return {'AD1': error_locations.tolist()}
return error, _validate
def validate_441():
error = ErrorDefinition(
code='441',
description='Participation method indicates child was 4 years old or over at the time of the review, but the date of birth and review date indicates the child was under 4 years old.',
affected_fields=['DOB', 'REVIEW', 'REVIEW_CODE'],
)
def _validate(dfs):
if 'Reviews' not in dfs:
return {}
else:
reviews = dfs['Reviews']
reviews['DOB'] = pd.to_datetime(reviews['DOB'], format='%d/%m/%Y', errors='coerce')
reviews['REVIEW'] = pd.to_datetime(reviews['REVIEW'], format='%d/%m/%Y', errors='coerce')
reviews = reviews.dropna(subset=['REVIEW', 'DOB'])
mask = reviews['REVIEW_CODE'].isin(['PN1', 'PN2', 'PN3', 'PN4', 'PN5', 'PN6', 'PN7']) & (
reviews['REVIEW'] < reviews['DOB'] + pd.offsets.DateOffset(years=4))
validation_error_mask = mask
validation_error_locations = reviews.index[validation_error_mask]
return {'Reviews': validation_error_locations.tolist()}
return error, _validate
def validate_184():
error = ErrorDefinition(
code='184',
description='Date of decision that a child should be placed for adoption is before the child was born.',
        affected_fields=['DATE_PLACED',  # PlacedAdoption
                         'DOB'],  # Header
)
def _validate(dfs):
if 'Header' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
child_record = dfs['Header']
placed_for_adoption = dfs['PlacedAdoption']
all_data = (placed_for_adoption
.reset_index()
.merge(child_record, how='left', on='CHILD', suffixes=[None, '_P4A']))
all_data['DATE_PLACED'] = pd.to_datetime(all_data['DATE_PLACED'], format='%d/%m/%Y', errors='coerce')
all_data['DOB'] = pd.to_datetime(all_data['DOB'], format='%d/%m/%Y', errors='coerce')
mask = (all_data['DATE_PLACED'] >= all_data['DOB']) | all_data['DATE_PLACED'].isna()
validation_error = ~mask
validation_error_locations = all_data[validation_error]['index'].unique()
return {'PlacedAdoption': validation_error_locations.tolist()}
return error, _validate
def validate_612():
error = ErrorDefinition(
code='612',
description="Date of birth field has been completed but mother field indicates child is not a mother.",
affected_fields=['SEX', 'MOTHER', 'MC_DOB'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
else:
header = dfs['Header']
error_mask = (
((header['MOTHER'].astype(str) == '0') | header['MOTHER'].isna())
& (header['SEX'].astype(str) == '2')
& header['MC_DOB'].notna()
)
validation_error_locations = header.index[error_mask]
return {'Header': validation_error_locations.tolist()}
return error, _validate
def validate_552():
"""
This error checks that the first adoption episode is after the last decision !
If there are multiple of either there may be unexpected results !
"""
error = ErrorDefinition(
code="552",
description="Date of Decision to place a child for adoption should be on or prior to the date that the child was placed for adoption.",
# Field that defines date of decision to place a child for adoption is DATE_PLACED and the start of adoption is defined by DECOM with 'A' placement types.
affected_fields=['DATE_PLACED', 'DECOM'],
)
def _validate(dfs):
if ('PlacedAdoption' not in dfs) or ('Episodes' not in dfs):
return {}
else:
# get the required datasets
placed_adoption = dfs['PlacedAdoption']
episodes = dfs['Episodes']
# keep index values so that they stay the same when needed later on for error locations
placed_adoption.reset_index(inplace=True)
episodes.reset_index(inplace=True)
adoption_eps = episodes[episodes['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])].copy()
# find most recent adoption decision
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format='%d/%m/%Y',
errors='coerce')
# remove rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption['DATE_PLACED'].notna()]
placed_adoption_inds = placed_adoption.groupby('CHILD')['DATE_PLACED'].idxmax(skipna=True)
last_decision = placed_adoption.loc[placed_adoption_inds]
# first time child started adoption
adoption_eps["DECOM"] = pd.to_datetime(adoption_eps['DECOM'], format='%d/%m/%Y', errors='coerce')
adoption_eps = adoption_eps[adoption_eps['DECOM'].notna()]
adoption_eps_inds = adoption_eps.groupby('CHILD')['DECOM'].idxmin(skipna=True)
# full information of first adoption
first_adoption = adoption_eps.loc[adoption_eps_inds]
# date of decision and date of start of adoption (DECOM) have to be put in one table
merged = first_adoption.merge(last_decision, on=['CHILD'], how='left', suffixes=['_EP', '_PA'])
            # flag rows where the adoption placement started before the date of the decision to place
            decided_after_placed = merged["DECOM"] < merged["DATE_PLACED"]
# find the corresponding location of error values per file.
episode_error_locs = merged.loc[decided_after_placed, 'index_EP']
placedadoption_error_locs = merged.loc[decided_after_placed, 'index_PA']
return {"PlacedAdoption": placedadoption_error_locs.to_list(), "Episodes": episode_error_locs.to_list()}
return error, _validate
def validate_551():
error = ErrorDefinition(
code='551',
description='Child has been placed for adoption but there is no date of the decision that the child should be placed for adoption.',
affected_fields=['DATE_PLACED', 'PLACE'],
)
def _validate(dfs):
if 'Episodes' not in dfs or 'PlacedAdoption' not in dfs:
return {}
else:
episodes = dfs['Episodes']
placedAdoptions = dfs['PlacedAdoption']
episodes = episodes.reset_index()
place_codes = ['A3', 'A4', 'A5', 'A6']
placeEpisodes = episodes[episodes['PLACE'].isin(place_codes)]
merged = placeEpisodes.merge(placedAdoptions, how='left', on='CHILD').set_index('index')
episodes_with_errors = merged[merged['DATE_PLACED'].isna()]
error_mask = episodes.index.isin(episodes_with_errors.index)
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_557():
error = ErrorDefinition(
code='557',
description="Child for whom the decision was made that they should be placed for adoption has left care " +
"but was not adopted and information on the decision that they should no longer be placed for " +
"adoption items has not been completed.",
affected_fields=['DATE_PLACED_CEASED', 'REASON_PLACED_CEASED', # PlacedAdoption
'PLACE', 'LS', 'REC'], # Episodes
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'PlacedAdoption' not in dfs:
return {}
else:
eps = dfs['Episodes']
placed = dfs['PlacedAdoption']
eps = eps.reset_index()
placed = placed.reset_index()
child_placed = eps['PLACE'].isin(['A3', 'A4', 'A5', 'A6'])
order_granted = eps['LS'].isin(['D1', 'E1'])
not_adopted = ~eps['REC'].isin(['E11', 'E12']) & eps['REC'].notna()
placed['ceased_incomplete'] = (
placed['DATE_PLACED_CEASED'].isna() | placed['REASON_PLACED_CEASED'].isna()
)
eps = eps[(child_placed | order_granted) & not_adopted]
eps = eps.merge(placed, on='CHILD', how='left', suffixes=['_EP', '_PA'], indicator=True)
eps = eps[(eps['_merge'] == 'left_only') | eps['ceased_incomplete']]
EP_errors = eps['index_EP']
PA_errors = eps['index_PA'].dropna()
return {
'Episodes': EP_errors.to_list(),
'PlacedAdoption': PA_errors.to_list(),
}
return error, _validate
def validate_207():
error = ErrorDefinition(
code='207',
description='Mother status for the current year disagrees with the mother status already recorded for this child.',
affected_fields=['MOTHER'],
)
def _validate(dfs):
if 'Header' not in dfs or 'Header_last' not in dfs:
return {}
else:
header = dfs['Header']
header_last = dfs['Header_last']
header_merged = header.reset_index().merge(header_last, how='left', on=['CHILD'], suffixes=('', '_last'),
indicator=True).set_index('index')
in_both_years = header_merged['_merge'] == 'both'
mother_is_different = header_merged['MOTHER'].astype(str) != header_merged['MOTHER_last'].astype(str)
mother_was_true = header_merged['MOTHER_last'].astype(str) == '1'
error_mask = in_both_years & mother_is_different & mother_was_true
error_locations = header.index[error_mask]
return {'Header': error_locations.to_list()}
return error, _validate
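# Illustrative sketch only (not one of the validators): validate_207's year-on-year
# comparison rests on a left merge with suffixes and indicator=True. Column names
# follow the code above; the sample values are invented.
def _sketch_207_year_on_year_merge():
    header = pd.DataFrame({'CHILD': ['1', '2'], 'MOTHER': ['0', '1']})
    header_last = pd.DataFrame({'CHILD': ['1'], 'MOTHER': ['1']})
    merged = header.reset_index().merge(header_last, how='left', on='CHILD',
                                        suffixes=('', '_last'),
                                        indicator=True).set_index('index')
    in_both_years = merged['_merge'] == 'both'
    mother_is_different = merged['MOTHER'].astype(str) != merged['MOTHER_last'].astype(str)
    mother_was_true = merged['MOTHER_last'].astype(str) == '1'
    # only child '1' is flagged: present in both years with MOTHER changed from '1' to '0'
    return header.index[in_both_years & mother_is_different & mother_was_true].tolist()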
def validate_523():
error = ErrorDefinition(
code='523',
description="Date of decision that the child should be placed for adoption should be the same date as the decision that adoption is in the best interest (date should be placed).",
affected_fields=['DATE_PLACED', 'DATE_INT'],
)
def _validate(dfs):
if ("AD1" not in dfs) or ("PlacedAdoption" not in dfs):
return {}
else:
placed_adoption = dfs["PlacedAdoption"]
ad1 = dfs["AD1"]
# keep initial index values to be reused for locating errors later on.
placed_adoption.reset_index(inplace=True)
ad1.reset_index(inplace=True)
# convert to datetime to enable comparison
placed_adoption['DATE_PLACED'] = pd.to_datetime(placed_adoption['DATE_PLACED'], format="%d/%m/%Y",
errors='coerce')
ad1["DATE_INT"] = pd.to_datetime(ad1['DATE_INT'], format='%d/%m/%Y', errors='coerce')
# drop rows where either of the required values have not been filled.
placed_adoption = placed_adoption[placed_adoption["DATE_PLACED"].notna()]
ad1 = ad1[ad1["DATE_INT"].notna()]
# bring corresponding values together from both dataframes
            # note: the left frame is PlacedAdoption and the right frame is AD1, so the suffixes must follow that order
            merged_df = placed_adoption.merge(ad1, on=['CHILD'], how='inner', suffixes=["_PA", "_AD"])
# find error values
different_dates = merged_df['DATE_INT'] != merged_df['DATE_PLACED']
# map error locations to corresponding indices
pa_error_locations = merged_df.loc[different_dates, 'index_PA']
ad1_error_locations = merged_df.loc[different_dates, 'index_AD']
return {"PlacedAdoption": pa_error_locations.to_list(), "AD1": ad1_error_locations.to_list()}
return error, _validate
def validate_3001():
error = ErrorDefinition(
code='3001',
description='Where care leavers information is being returned for a young person around their 17th birthday, the accommodation cannot be with their former foster carer(s).',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
oc3 = dfs['OC3']
collection_start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
header['DOB17'] = header['DOB'] + pd.DateOffset(years=17)
oc3_merged = oc3.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
accom_foster = oc3_merged['ACCOM'].str.upper().astype(str).isin(['Z1', 'Z2'])
age_17_in_year = (oc3_merged['DOB17'] <= collection_end) & (oc3_merged['DOB17'] >= collection_start)
error_mask = accom_foster & age_17_in_year
error_locations = oc3.index[error_mask]
return {'OC3': error_locations.to_list()}
return error, _validate
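# Note on the age window used in validate_3001 (comments only): DOB17 is computed as
# DOB + pd.DateOffset(years=17), so a child born on 15/06/2004 has DOB17 equal to
# 15/06/2021 and counts as "17 during the collection year" whenever that date falls
# between collection_start and collection_end inclusive.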
def validate_389():
error = ErrorDefinition(
code='389',
description='Reason episode ceased is that child transferred to care of adult social care services, but child is aged under 16.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB16'] = header['DOB'] + pd.DateOffset(years=16)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_asc = episodes_merged['REC'].str.upper().astype(str).isin(['E7'])
ceased_over_16 = episodes_merged['DOB16'] <= episodes_merged['DEC']
error_mask = ceased_asc & ~ceased_over_16
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_387():
error = ErrorDefinition(
code='387',
description='Reason episode ceased is child moved into independent living arrangement, but the child is aged under 14.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
header['DOB14'] = header['DOB'] + pd.DateOffset(years=14)
episodes_merged = episodes.reset_index().merge(header, how='left', on=['CHILD'], suffixes=('', '_header'),
indicator=True).set_index('index')
ceased_indep = episodes_merged['REC'].str.upper().astype(str).isin(['E5', 'E6'])
ceased_over_14 = episodes_merged['DOB14'] <= episodes_merged['DEC']
dec_present = episodes_merged['DEC'].notna()
error_mask = ceased_indep & ~ceased_over_14 & dec_present
error_locations = episodes.index[error_mask]
return {'Episodes': error_locations.to_list()}
return error, _validate
def validate_452():
error = ErrorDefinition(
code='452',
description='Contradiction between local authority of placement code in the last episode of the previous year and in the first episode of the current year.',
affected_fields=['PL_LA'],
)
def _validate(dfs):
if 'Episodes' not in dfs:
return {}
if 'Episodes_last' not in dfs:
return {}
else:
episodes = dfs['Episodes']
episodes_last = dfs['Episodes_last']
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_last['DECOM'] = pd.to_datetime(episodes_last['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes_min = episodes.groupby('CHILD')['DECOM'].idxmin()
episodes_last_max = episodes_last.groupby('CHILD')['DECOM'].idxmax()
episodes = episodes[episodes.index.isin(episodes_min)]
episodes_last = episodes_last[episodes_last.index.isin(episodes_last_max)]
episodes_merged = episodes.reset_index().merge(episodes_last, how='left', on=['CHILD'],
suffixes=('', '_last'), indicator=True).set_index('index')
in_both_years = episodes_merged['_merge'] == 'both'
same_rne = episodes_merged['RNE'] == episodes_merged['RNE_last']
last_year_open = episodes_merged['DEC_last'].isna()
different_pl_la = episodes_merged['PL_LA'].astype(str) != episodes_merged['PL_LA_last'].astype(str)
error_mask = in_both_years & same_rne & last_year_open & different_pl_la
validation_error_locations = episodes.index[error_mask]
return {'Episodes': validation_error_locations.tolist()}
return error, _validate
def validate_386():
error = ErrorDefinition(
code='386',
description='Reason episode ceased is adopted but child has reached age 18.',
affected_fields=['REC'],
)
def _validate(dfs):
if 'Header' not in dfs:
return {}
if 'Episodes' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
header['DOB'] = pd.to_datetime(header['DOB'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = | pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce') | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 16:27:56 2019
@author: skoebric
"""
import pandas as pd
import numpy as np
import json
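# pd.ExcelFile opens the workbook once so individual sheets can later be read with
# xl.parse(sheet_name) or pd.read_excel(xl, sheet_name=...) without re-opening the
# file each time.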
xl = | pd.ExcelFile('/Users/skoebric/Documents/NREL-GitHub/dGen/new_mexican_tariffs.xlsx') | pandas.ExcelFile |
try:
from models import DNN_CV
# from sklego.metrics import p_percent_score
except:
from project2.api.models import DNN_CV
# from project2.sklego.metrics import p_percent_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.utils import resample
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
class Policy:
""" A policy for treatment/vaccination. """
def __init__(self, n_actions, action_set, plot_fairness=False,
num_top_features=10, seed=1, n_jobs=-1, fairness=False):
""" Initialise.
Args:
n_actions (int): the number of actions
action_set (list): the set of actions
"""
self.n_actions = n_actions
self.action_set = action_set
print("Initialising policy with ", n_actions, "actions")
print("A = {", action_set, "}")
self.vaccines = [f'Vaccine{i}' for i in range(1, n_actions + 1)]
self.symptoms = ['Covid-Recovered', 'Covid-Positive',
'No_Taste/Smell', 'Fever', 'Headache',
'Pneumonia', 'Stomach', 'Myocarditis',
'Blood-Clots']
self.comorbidities = ['Asthma', 'Obesity', 'Smoking', 'Diabetes',
'Heart-disease', 'Hypertension']
self.full_training_idxs = ['Age', 'Gender', 'Income'] + \
[f'g{i}' for i in range(1, 128 + 1)] + \
self.comorbidities + self.vaccines
self.vaccination_stage = 0
self.response_parameter = "Death"
self.search_pipeline = None
self.seed = seed
self.n_jobs = n_jobs
# from get_action step
self.not_vaccinated = None
self.saved_vaccinated = pd.DataFrame()
self.saved_dead_individuals = pd.DataFrame()
# random expected utility estimations
self.random_expected_utilities = []
# from observe step
self.observed_utilities = []
self.observed_expected_utilities = []
self.observed_expected_utility = None
# from get_utility step
self.ML_expected_utilities = []
self.ML_expected_utility = None
# step checker
self.was_observed = 0
# list containing the policy decision
self.policy_decisions = []
self.last_policy = None
# Fairness
self.plot_fairness = plot_fairness
self.fairness = fairness
# for sensitivity study
self.num_top_features = num_top_features
# saved values for plotting
self.le30_v1 = []
self.le30_v2 = []
self.le30_v3 = []
self.bt3060_v1 = []
self.bt3060_v2 = []
self.bt3060_v3 = []
self.geq60_v1 = []
self.geq60_v2 = []
self.geq60_v3 = []
self.female_v1 = []
self.female_v2 = []
self.female_v3 = []
self.male_v1 = []
self.male_v2 = []
self.male_v3 = []
self.bt0k10k_v1 = []
self.geq10k_v1 = []
self.bt0k10k_v2 = []
self.geq10k_v2 = []
self.bt0k10k_v3 = []
self.geq10k_v3 = []
self.pa_v1 = []
self.pa_v2 = []
self.pa_v3 = []
self.py_a_v1 = []
self.py_a_v2 = []
self.py_a_v3 = []
self.pav1_y = []
self.pav2_y = []
self.pav3_y = []
self.pa_z_bt3060_v1 = []
self.pa_z_geq60_v1 = []
self.pa_z_leq30_v1 = []
self.pa_z_bt3060_v2 = []
self.pa_z_geq60_v2 = []
self.pa_z_leq30_v2 = []
self.pa_z_bt3060_v3 = []
self.pa_z_geq60_v3 = []
self.pa_z_leq30_v3 = []
self.pa_z_female_v1 = []
self.pa_z_female_v2 = []
self.pa_z_female_v3 = []
self.pa_z_male_v1 = []
self.pa_z_male_v2 = []
self.pa_z_male_v3 = []
self.pa_z_bt0k10k_v1 = []
self.pa_z_geq10k_v1 = []
self.pa_z_bt0k10k_v2 = []
self.pa_z_geq10k_v2 = []
self.pa_z_bt0k10k_v3 = []
self.pa_z_geq10k_v3 = []
# TODO: in test for development
self.num_dead = []
self.num_vaccinated = []
self.save_df_not_vaccinated = []
self.save_df_vaccinated = []
def vstack_all(self):
all_not_vaccinated = []
all_vaccinated = []
all_ = []
for df1, df2 in zip(
self.save_df_not_vaccinated,
self.save_df_vaccinated
):
all_not_vaccinated.append(df1.values)
all_vaccinated.append(df2.values)
all_.append(np.vstack([df1.values, df2.values]))
all_not_vaccinated = np.vstack(all_not_vaccinated)
df1 = pd.DataFrame(
all_not_vaccinated,
columns=[self.symptoms+['Death']+self.full_training_idxs]
)
all_vaccinated = np.vstack(all_vaccinated)
df2 = pd.DataFrame(
all_vaccinated,
columns=[self.symptoms+['Death']+self.full_training_idxs]
)
all_ = np.vstack(all_)
df3 = pd.DataFrame(
all_,
columns=[self.symptoms + ['Death'] + self.full_training_idxs]
)
return df1, df2, df3
def fairness_study(self):
# #################### Creating df for fairness computations
ages = []
for a in self.not_vaccinated['Age']:
if a <= 30.:
ages.append('leq30')
elif (a > 30) and (a < 60):
ages.append('bt3060')
else:
ages.append('geq60')
incomes = []
for a in self.not_vaccinated['Income']:
if a < 10000:
incomes.append('bt0k10k')
else:
incomes.append('geq10k')
df = pd.DataFrame(ages, columns=['Age'])
df['Gender'] = self.not_vaccinated['Gender'].to_list()
df['Income'] = incomes
df['Vaccine1'] = self.not_vaccinated['Vaccine1'].to_list()
df['Vaccine2'] = self.not_vaccinated['Vaccine2'].to_list()
df['Vaccine3'] = self.not_vaccinated['Vaccine3'].to_list()
df['Death'] = self.not_vaccinated['Death'].to_list()
        # compute the fairness quantities for each vaccine; the crosstabs are
        # rebuilt inside the loop for each vaccine column
for v, pa, py_a, pa_y in zip(
self.vaccines,
[self.pa_v1, self.pa_v2, self.pa_v3],
[self.py_a_v1, self.py_a_v2, self.py_a_v3],
[self.pav1_y, self.pav2_y, self.pav3_y]
):
# P_\theta^\pi(a='Vaccines')
# for fairness computations
pa.append(
self.not_vaccinated[v].sum() /
self.not_vaccinated.shape[0]
)
crosstable = pd.crosstab(
index=df['Death'],
columns=df[v],
margins=True
)
# P_\theta^\pi(y='Death' | a='Vaccines')
# for fairness calibration computations
py_a.append(
crosstable[1.0][1] / crosstable[1.0][2]
)
crosstable = pd.crosstab(
index=df[v],
columns=df['Death'],
margins=True
)
# P_\theta^\pi(a='Vaccines' | y='Death')
# for fairness balance computations
pa_y.append(
crosstable[1.0][1] / crosstable[1.0][2]
)
# P_\theta^\pi(a='Vaccines' | z="Ages")
for v, a, l in zip(
self.vaccines,
[
[self.pa_z_bt3060_v1, self.pa_z_geq60_v1,
self.pa_z_leq30_v1],
[self.pa_z_bt3060_v2, self.pa_z_geq60_v2,
self.pa_z_leq30_v2],
[self.pa_z_bt3060_v3, self.pa_z_geq60_v3,
self.pa_z_leq30_v3],
],
[
['bt3060', 'geq60', 'leq30'],
['bt3060', 'geq60', 'leq30'],
['bt3060', 'geq60', 'leq30'],
]
):
crosstable = pd.crosstab(
index=df[v],
columns=df['Age'],
margins=True
)
a[0].append(crosstable[l[0]][1] / crosstable[l[0]][2])
a[1].append(crosstable[l[1]][1] / crosstable[l[1]][2])
a[2].append(crosstable[l[2]][1] / crosstable[l[2]][2])
# P_\theta^\pi(a='Vaccines' | z="Genders")
for v, a, l in zip(
self.vaccines,
[
[self.pa_z_female_v1, self.pa_z_male_v1],
[self.pa_z_female_v2, self.pa_z_male_v2],
[self.pa_z_female_v3, self.pa_z_male_v3],
],
[
[0.0, 1.0], # 0.0 = female, 1.0 = male
[0.0, 1.0],
[0.0, 1.0],
]
):
crosstable = pd.crosstab(
index=df[v],
columns=df['Gender'],
margins=True
)
a[0].append(crosstable[l[0]][1] / crosstable[l[0]][2])
a[1].append(crosstable[l[1]][1] / crosstable[l[1]][2])
# P_\theta^\pi(a='Vaccines' | z="Incomes")
for v, a, l in zip(
self.vaccines,
[
[self.pa_z_bt0k10k_v1, self.pa_z_geq10k_v1],
[self.pa_z_bt0k10k_v2, self.pa_z_geq10k_v2],
[self.pa_z_bt0k10k_v3, self.pa_z_geq10k_v3],
],
[
['bt0k10k', 'geq10k'],
['bt0k10k', 'geq10k'],
['bt0k10k', 'geq10k'],
]
):
crosstable = pd.crosstab(
index=df[v],
columns=df['Income'],
margins=True
)
a[0].append(crosstable[l[0]][1] / crosstable[l[0]][2])
a[1].append(crosstable[l[1]][1] / crosstable[l[1]][2])
def save_fairness_age(self, df):
le30 = df[df["Age"] <= 30]
bt3060 = df[(df["Age"] > 30) & (df["Age"] < 60)]
geq60 = df[df["Age"] >= 60]
self.le30_v1.append(le30.groupby("Vaccine1").size().iloc[1])
self.le30_v2.append(le30.groupby("Vaccine2").size().iloc[1])
self.le30_v3.append(le30.groupby("Vaccine3").size().iloc[1])
self.bt3060_v1.append(bt3060.groupby("Vaccine1").size().iloc[1])
self.bt3060_v2.append(bt3060.groupby("Vaccine2").size().iloc[1])
self.bt3060_v3.append(bt3060.groupby("Vaccine3").size().iloc[1])
self.geq60_v1.append(geq60.groupby("Vaccine1").size().iloc[1])
self.geq60_v2.append(geq60.groupby("Vaccine2").size().iloc[1])
self.geq60_v3.append(geq60.groupby("Vaccine3").size().iloc[1])
def save_fairness_gender(self, df):
self.female_v1.append(
df.groupby(["Gender", "Vaccine1"]).size().iloc[1])
self.female_v2.append(
df.groupby(["Gender", "Vaccine2"]).size().iloc[1])
self.female_v3.append(
df.groupby(["Gender", "Vaccine3"]).size().iloc[1])
self.male_v1.append(df.groupby(["Gender", "Vaccine1"]).size().iloc[3])
self.male_v2.append(df.groupby(["Gender", "Vaccine2"]).size().iloc[3])
self.male_v3.append(df.groupby(["Gender", "Vaccine3"]).size().iloc[3])
def save_fairness_income(self, df):
bt0k10k = df[df["Income"] < 10000]
geq10k = df[df["Income"] >= 10000]
self.bt0k10k_v1.append(bt0k10k.groupby("Vaccine1").size().iloc[1])
self.geq10k_v1.append(geq10k.groupby("Vaccine1").size().iloc[1])
self.bt0k10k_v2.append(bt0k10k.groupby("Vaccine2").size().iloc[1])
self.geq10k_v2.append(geq10k.groupby("Vaccine2").size().iloc[1])
self.bt0k10k_v3.append(bt0k10k.groupby("Vaccine3").size().iloc[1])
self.geq10k_v3.append(geq10k.groupby("Vaccine3").size().iloc[1])
def create_new_ml_pipeline(self):
"""Create a pipeline with a randomized search CV and random
forest classifier."""
steps = [
('rf', RandomForestClassifier(
random_state=self.seed,
n_jobs=self.n_jobs,
verbose=False
))
]
pipeline = Pipeline(steps)
param_dist = {
'rf__n_estimators': [25, 50, 100, 500, 1000],
'rf__criterion': ['gini', 'entropy'],
'rf__bootstrap': [True, False],
}
self.search_pipeline = RandomizedSearchCV(
estimator=pipeline,
param_distributions=param_dist,
n_iter=20,
scoring="accuracy",
refit=True,
cv=5,
random_state=self.seed
)
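    # Usage sketch (illustrative only; mirrors how get_utility() drives the search):
    #
    #     self.create_new_ml_pipeline()
    #     self.search_pipeline.fit(X_balanced[features], X_balanced['Death'])
    #     best_model = self.search_pipeline.best_estimator_
    #
    # RandomizedSearchCV draws n_iter=20 hyperparameter combinations from param_dist,
    # scores each with 5-fold cross-validation, and refits the best one on the full
    # training set because refit=True.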
def save_database_and_filter_outcomes(self, A, Y):
"""Store dead individual who will be object of our study, because
we want to understand why they come to die after have taken a vaccine
and been infected by the covid-19. Also, filter outcomes to
only return vaccinated individuals who got Covid-19 after
vaccination."""
idx = A.index.to_list()
Y.index = idx
X_ = self.not_vaccinated[self.not_vaccinated.index.isin(idx)]
X_ = X_.drop(
self.symptoms + ["Death"] + self.vaccines,
axis=1
) # dropping old features
# concatenating applied actions and observed outcomes.
filtered_outcomes = pd.concat([Y, X_, A], axis=1)
# saving the ones who survived.
self.saved_vaccinated = pd.concat(
[filtered_outcomes,
self.saved_vaccinated],
axis=0
)
# resetting indexes
self.saved_vaccinated.index = \
[i for i in range(len(self.saved_vaccinated))]
# filtering only covid-positive ones.
filtered_outcomes = \
filtered_outcomes[filtered_outcomes["Covid-Positive"] == 1]
# saving the ones who came to die.
self.saved_dead_individuals = pd.concat(
[filtered_outcomes[filtered_outcomes["Death"] == 1],
self.saved_dead_individuals],
axis=0
)
# resetting indexes
self.saved_dead_individuals.index = \
[i for i in range(len(self.saved_dead_individuals))]
self.num_vaccinated.append(self.saved_vaccinated.shape[0])
self.num_dead.append(self.saved_dead_individuals.shape[0])
return filtered_outcomes
def balance_data(self):
"""Balance target labels in order to properly train a
machine learning model."""
df_dead = self.saved_dead_individuals
df_dead = resample(
df_dead,
replace=True,
n_samples=(3 * len(df_dead)),
random_state=self.seed,
stratify=None
)
df_not_dead = self.saved_vaccinated[
self.saved_vaccinated[self.response_parameter] == 0.0]
df_not_dead = df_not_dead[df_not_dead["Covid-Positive"] == 1]
df_not_dead = df_not_dead.sample(
n=df_dead.shape[0],
replace=True, # try False
random_state=self.seed,
)
df_balanced = pd.concat([df_dead, df_not_dead])
return df_balanced
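    # Note on the balancing step (illustrative): resample() with replace=True
    # upsamples the deceased cases to three times their original count, and sample()
    # then draws the same number of Covid-positive survivors, so both classes enter
    # the fit with equal weight.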
def sensitivity_study(self, df, num_top_features):
"""Sensitivity study for the correlations between features
and 'Death'."""
real_base_corr = df.drop(self.symptoms, axis=1).corr()
if self.fairness:
real_base_corr = df.drop(
self.symptoms + ["Age", "Gender", "Income"],
axis=1
).corr()
real_base_neg_corr = \
real_base_corr[self.response_parameter].sort_values().head(50)
real_base_pos_corr = \
real_base_corr[self.response_parameter].sort_values().tail(50)
top_pos = \
real_base_pos_corr.index[(-1 - num_top_features):-1].to_list()
top_neg = \
real_base_neg_corr.index[:num_top_features].to_list()
relevant_features = top_neg + top_pos
# always include vaccines as feature
for v in self.vaccines:
if v not in relevant_features:
relevant_features += [v]
self.relevant_features = relevant_features
return relevant_features
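    # Sketch of the selection idea above (illustrative; `k` stands in for
    # num_top_features):
    #
    #     corr = df.drop(self.symptoms, axis=1).corr()['Death'].sort_values()
    #     top_neg = corr.head(50).index[:k].to_list()
    #     top_pos = corr.tail(50).index[(-1 - k):-1].to_list()   # drop 'Death' itself
    #
    # The vaccine indicator columns are always appended so the model can compare
    # the three treatments directly.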
# Observe the features, treatments and outcomes of one or more individuals
def observe(self, actions, outcomes):
"""Observe features, actions and outcomes.
Args:
features (t*|X| array)
actions (t*|A| array)
outcomes (t*|Y| array)
The function is used to adapt a model to the observed
outcomes, given the actions and features. I suggest you create
a model that estimates P(y | x,a) and fit it as appropriate.
If the model cannot be updated incrementally, you can save all
observed x,a,y triplets in a database and retrain whenever you
obtain new data.
Pseudocode:
self.data.append(features, actions, outcomes)
self.model.fit(data)
"""
A = actions
Y = outcomes
# accumulating and storing database of vaccinated and dead people
# and return filtered outcomes, i.e., the individuals that received
# the vaccines and got covid.
filtered_outcomes = self.save_database_and_filter_outcomes(A, Y)
# save baseline statistics in order to be compared with
# future utilities:
# P_hat(D|V1 or V2 or V3):
observed_P_D_given_v123 = \
sum(filtered_outcomes["Death"]) / len(self.not_vaccinated)
self.observed_utilities.append(observed_P_D_given_v123)
# E(Num of deaths | V1 or V2 or V3):
self.observed_expected_utility = \
len(self.not_vaccinated) * observed_P_D_given_v123
self.observed_expected_utilities.append(
self.observed_expected_utility)
# step check
self.was_observed = 1
# saving baseline for the first vaccination round
if self.last_policy == "Random":
self.random_expected_utilities.append(
self.observed_expected_utility)
print(f"Observed Expected Utility: "
f"{self.observed_expected_utility}")
# saving data for plotting - fairness part
self.fairness_study()
self.save_fairness_age(df=self.not_vaccinated)
self.save_fairness_gender(df=self.not_vaccinated)
self.save_fairness_income(df=self.not_vaccinated)
self.save_df_not_vaccinated.append(self.not_vaccinated)
def get_utility(self):
""" Obtain the empirical utility of the policy on a set of one or
more people.

        The method balances the accumulated data, fits the randomized-search
        pipeline and, for every not-yet-vaccinated individual, picks the vaccine
        with the highest predicted probability of survival.

        Returns:
            A (t*|A| DataFrame): recommended vaccine indicator per individual.
            ML_expected_utility (float): predicted number of deaths under the
                recommended actions.
"""
# balance target labels in order to properly fit the ML model.
df_balanced = self.balance_data()
# call a new pipeline to perform a randomized CV search.
self.create_new_ml_pipeline()
# perform a sensitivity study (correlation study) between
# features and 'Death' in order to pick up the most predictive
# top 10 negative and top 10 positive features, plus actions.
relevant_features = self.sensitivity_study(
df=df_balanced,
num_top_features=self.num_top_features
)
# fit the pipeline
self.search_pipeline.fit(
df_balanced[relevant_features],
df_balanced["Death"]
)
print(f'Best Cross-Validated mean score: '
f'{self.search_pipeline.best_score_}')
# pick the best fitted model
model = self.search_pipeline.best_estimator_
# predict probabilities for not vaccinated individuals
actions_true = np.ones(shape=(self.not_vaccinated.shape[0], 1))
actions_false = np.zeros(shape=(self.not_vaccinated.shape[0], 1))
steps = [
[actions_true, actions_false, actions_false],
[actions_false, actions_true, actions_false],
[actions_false, actions_false, actions_true],
]
self.saved_pred = []
self.saved_pob = []
for s in steps:
self.not_vaccinated['Vaccine1'] = s[0]
self.not_vaccinated['Vaccine2'] = s[1]
self.not_vaccinated['Vaccine3'] = s[2]
self.saved_pred.append(
model.predict(self.not_vaccinated[relevant_features]))
self.saved_pob.append(
model.predict_proba(self.not_vaccinated[relevant_features]))
self.not_vaccinated['Vaccine3'] = actions_false
A = np.zeros(shape=self.not_vaccinated.iloc[:, -3:].shape)
A = pd.DataFrame(A, columns=self.vaccines)
A.index = self.not_vaccinated.index
# pick the vaccine with higher probability of survival,
# and count the number of predicted death E(D|V1orV2orV3).
self.ML_expected_utility = 0
for indx, indv in enumerate(self.not_vaccinated.index):
decision = np.argmax(
np.array([
self.saved_pob[0][indx][0],
self.saved_pob[1][indx][0],
self.saved_pob[2][indx][0]
])
)
self.ML_expected_utility += self.saved_pred[decision][indx]
            # use .loc to avoid chained assignment when writing the chosen vaccine
            if decision == 0:
                A.loc[indv, "Vaccine1"] = 1
            elif decision == 1:
                A.loc[indv, "Vaccine2"] = 1
            elif decision == 2:
                A.loc[indv, "Vaccine3"] = 1
self.ML_expected_utilities.append(self.ML_expected_utility)
print(f"ML Expected Utility: {self.ML_expected_utility}")
return A, self.ML_expected_utility
def get_actions(self, features):
"""Get actions for one or more people.
Args:
features (t*|X| array)
Returns:
actions (t*|A| array)
Here you should take the action maximising expected utility
according to your model. This model can be arbitrary, but
should be adapted using the observe() method.
Pseudocode:
for action in appropriate_action_set:
p = self.model.get_probabilities(features, action)
u[action] = self.get_expected_utility(action, p)
return argmax(u)
You are expected to create whatever helper functions you need.
"""
# step check
if (self.was_observed == 1) or (self.vaccination_stage == 0):
self.was_observed = 0
else:
raise ValueError("ERROR: Observe step was not applied before "
"a new get_action.")
# vide docstrings in this function.
self.split_groups(features)
# For 1st vaccination round: random decisions.
# We vaccinate everybody, assuming that the vaccines are already
# approved by authorities, and secure for any type (age, gender)
# of people.
if self.vaccination_stage == 0:
            for indv in self.not_vaccinated.index:
                # .loc avoids chained-assignment writing to a copy
                self.not_vaccinated.loc[indv, np.random.choice(self.vaccines)] = 1
A = self.not_vaccinated.iloc[:, -3:]
self.vaccination_stage += 1
self.last_policy = "Random"
self.policy_decisions.append(self.last_policy)
return A
if self.vaccination_stage > 0:
# get actions based on our utility
A, ML_expected_utility = self.get_utility()
# security threshold to use our ML's action recommendations.
if ML_expected_utility <= \
np.mean(self.random_expected_utilities):
self.not_vaccinated["Vaccine1"] = A["Vaccine1"]
self.not_vaccinated["Vaccine2"] = A["Vaccine2"]
self.not_vaccinated["Vaccine3"] = A["Vaccine3"]
self.vaccination_stage += 1
self.last_policy = "ML"
self.policy_decisions.append(self.last_policy)
return self.not_vaccinated.iloc[:, -3:]
# if our ML model does not hit our security threshold,
# then apply random decisions.
else:
                for indv in self.not_vaccinated.index:
                    self.not_vaccinated.loc[indv, np.random.choice(self.vaccines)] = 1
A = self.not_vaccinated.iloc[:, -3:]
self.vaccination_stage += 1
self.last_policy = "Random"
self.policy_decisions.append(self.last_policy)
return A
def split_groups(self, X):
"""From a raw sample space, we split the data in 3 groups.
1st. the not vaccinated individuals that will be object to
our recommendations. 2nd. the vaccinated people that will be
object to study the influences of the vaccines. 3rd. the dead
individuals who we will have in order to adapt and fit a
machine learning, and understand why those individuals come
to die after vaccination."""
# ~Vaccine1 and ~Vaccine2 and ~Vaccine3
# goal: minimize the number of death between vaccinated people
not_vaccinated = X[X['Vaccine1'] == 0]
not_vaccinated = not_vaccinated[not_vaccinated['Vaccine2'] == 0]
self.not_vaccinated = not_vaccinated[not_vaccinated['Vaccine3'] == 0]
# Vaccine1 or Vaccine2 or Vaccine3
# goal: use this data to get a policy under an utility
vaccinated = X[X['Vaccine1'] == 1]
vaccinated = pd.concat([vaccinated, X[X['Vaccine2'] == 1]], axis=0)
vaccinated = | pd.concat([vaccinated, X[X['Vaccine3'] == 1]], axis=0) | pandas.concat |
# module for processing adjacency matrices in various ways
import pandas as pd
import numpy as np
import csv
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
import pymaid
from joblib import Parallel, delayed
class Adjacency_matrix():
def __init__(self, adj, input_counts, mat_type):
self.skids = list(adj.index)
self.pairs = Promat.get_pairs()
self.input_counts = input_counts
self.mat_type = mat_type # 'ad', 'aa', 'dd', 'da', 'summed'
self.adj = pd.DataFrame(adj, index = self.skids, columns = self.skids)
self.adj_fract = self.fraction_input_matrix()
self.adj_inter = self.interlaced_matrix()
self.adj_pairwise = self.average_pairwise_matrix()
def fraction_input_matrix(self):
adj_fract = self.adj.copy()
for column in adj_fract.columns:
if((self.mat_type=='aa') | (self.mat_type=='da')):
axon_input = self.input_counts.loc[column].axon_input
if(axon_input == 0):
adj_fract.loc[:, column] = 0
if(axon_input > 0):
adj_fract.loc[:, column] = adj_fract.loc[:, column]/axon_input
if((self.mat_type=='ad') | (self.mat_type=='dd')):
dendrite_input = self.input_counts.loc[column].dendrite_input
if(dendrite_input == 0):
adj_fract.loc[:, column] = 0
if(dendrite_input > 0):
adj_fract.loc[:, column] = adj_fract.loc[:, column]/dendrite_input
if((self.mat_type=='summed') | (self.mat_type not in ['aa', 'da', 'ad', 'dd']) ):
all_input = self.input_counts.loc[column].dendrite_input + self.input_counts.loc[column].axon_input
if(all_input == 0):
adj_fract.loc[:, column] = 0
if(all_input > 0):
adj_fract.loc[:, column] = adj_fract.loc[:, column]/all_input
return(adj_fract)
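    # Illustrative note: for an 'ad' (axon-to-dendrite) matrix each column j is
    # divided by neuron j's dendritic input count, so entry (i, j) becomes the
    # fraction of j's dendritic input that comes from i; columns whose input count
    # is zero are set to 0 to avoid division by zero.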
def interlaced_matrix(self, fract=True):
if(fract):
adj_mat = self.adj_fract.copy()
brain_pairs, brain_unpaired, brain_nonpaired = Promat.extract_pairs_from_list(adj_mat, self.pairs)
if(fract==False):
adj_mat = self.adj.copy()
brain_pairs, brain_unpaired, brain_nonpaired = Promat.extract_pairs_from_list(adj_mat, self.pairs)
# left_right interlaced order for brain matrix
brain_pair_order = []
for i in range(0, len(brain_pairs)):
brain_pair_order.append(brain_pairs.iloc[i].leftid)
brain_pair_order.append(brain_pairs.iloc[i].rightid)
order = brain_pair_order + list(brain_nonpaired.nonpaired)
interlaced_mat = adj_mat.loc[order, order]
index_df = pd.DataFrame([['pairs', Promat.get_paired_skids(skid, self.pairs)[0], skid] for skid in brain_pair_order] + [['nonpaired', skid, skid] for skid in list(brain_nonpaired.nonpaired)],
columns = ['pair_status', 'pair_id', 'skid'])
index = pd.MultiIndex.from_frame(index_df)
interlaced_mat.index = index
interlaced_mat.columns = index
return(interlaced_mat)
def average_pairwise_matrix(self):
adj = self.adj_inter.copy()
adj = adj.groupby('pair_id', axis = 'index').sum().groupby('pair_id', axis='columns').sum()
order = [x[1] for x in self.adj_inter.index]
# remove duplicates (in pair_ids)
order_unique = []
for x in order:
if (order_unique.count(x) == 0):
order_unique.append(x)
# order as before
adj = adj.loc[order_unique, order_unique]
# regenerate multiindex
index = [x[0:2] for x in self.adj_inter.index] # remove skid ids from index
# remove duplicates (in pair_ids)
index_unique = []
for x in index:
if (index_unique.count(x) == 0):
index_unique.append(x)
# add back appropriate multiindex
index_df = pd.DataFrame(index_unique, columns = ['pair_status', 'pair_id'])
index_df = pd.MultiIndex.from_frame(index_df)
adj.index = index_df
adj.columns = index_df
# convert to average (from sum) for paired neurons
adj.loc['pairs'] = adj.loc['pairs'].values/2
adj.loc['nonpaired', 'pairs'] = adj.loc['nonpaired', 'pairs'].values/2
return(adj)
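    # Illustrative note: grouping by 'pair_id' along both axes sums the left/right
    # rows and columns of each pair, and the final division by 2 turns those sums
    # back into per-pair averages; nonpaired-to-nonpaired entries are left unchanged
    # because nothing was summed for them.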
def downstream(self, source, threshold, exclude=[], by_group=False, exclude_unpaired = False):
adj = self.adj_pairwise
source_pair_id = np.unique([x[1] for x in self.adj_inter.loc[(slice(None), slice(None), source), :].index])
if(by_group):
bin_mat = adj.loc[(slice(None), source_pair_id), :].sum(axis=0) > threshold
bin_column = np.where(bin_mat)[0]
ds_neurons = bin_mat.index[bin_column]
ds_neurons_skids = []
for pair in ds_neurons:
if((pair[0] == 'pairs') & (pair[1] not in exclude)):
ds_neurons_skids.append(pair[1])
ds_neurons_skids.append(Promat.identify_pair(pair[1], self.pairs))
if((pair[0] == 'nonpaired') & (pair[1] not in exclude) & (exclude_unpaired==False)):
ds_neurons_skids.append(pair[1])
return(ds_neurons_skids)
if(by_group==False):
bin_mat = adj.loc[(slice(None), source_pair_id), :] > threshold
bin_column = np.where(bin_mat.sum(axis = 0) > 0)[0]
ds_neurons = bin_mat.columns[bin_column]
bin_row = np.where(bin_mat.sum(axis = 1) > 0)[0]
us_neurons = bin_mat.index[bin_row]
ds_neurons_skids = []
for pair in ds_neurons:
if((pair[0] == 'pairs') & (pair[1] not in exclude)):
ds_neurons_skids.append(pair[1])
ds_neurons_skids.append(Promat.identify_pair(pair[1], self.pairs))
if((pair[0] == 'nonpaired') & (pair[1] not in exclude) & (exclude_unpaired==False)):
ds_neurons_skids.append(pair[1])
source_skids = []
for pair in us_neurons:
if(pair[0] == 'pairs'):
source_skids.append(pair[1])
source_skids.append(Promat.identify_pair(pair[1], self.pairs))
if(pair[0] == 'nonpaired'):
source_skids.append(pair[1])
edges = []
for pair in us_neurons:
if(pair[0] == 'pairs'):
specific_ds = adj.loc[('pairs', pair[1]), bin_mat.loc[('pairs', pair[1]), :]].index
if(exclude_unpaired):
specific_ds_edges = [[pair[1], x[1]] for x in specific_ds if (x[0]=='pairs') & (x[1] not in exclude)]
if(exclude_unpaired==False):
specific_ds_edges = [[pair[1], x[1]] for x in specific_ds if (x[1] not in exclude)]
for edge in specific_ds_edges:
edges.append(edge)
if(pair[0] == 'nonpaired'):
specific_ds = adj.loc[('nonpaired', pair[1]), bin_mat.loc[('nonpaired', pair[1]), :]].index
if(exclude_unpaired):
specific_ds_edges = [[pair[1], x[1]] for x in specific_ds if (x[0]=='pairs') & (x[1] not in exclude)]
if(exclude_unpaired==False):
specific_ds_edges = [[pair[1], x[1]] for x in specific_ds if (x[1] not in exclude)]
for edge in specific_ds_edges:
edges.append(edge)
return(source_skids, ds_neurons_skids, edges)
def upstream(self, source, threshold, exclude = []):
adj = self.adj_pairwise
source_pair_id = np.unique([x[1] for x in self.adj_inter.loc[(slice(None), slice(None), source), :].index])
bin_mat = adj.loc[:, (slice(None), source_pair_id)] > threshold
bin_row = np.where(bin_mat.sum(axis = 1) > 0)[0]
us_neuron_pair_ids = bin_mat.index[bin_row]
us_neurons_skids = []
for pair in us_neuron_pair_ids:
if((pair[0] == 'pairs') & (pair[1] not in exclude)):
us_neurons_skids.append(pair[1])
us_neurons_skids.append(Promat.identify_pair(pair[1], self.pairs))
if((pair[0] == 'nonpaired') & (pair[1] not in exclude)):
us_neurons_skids.append(pair[1])
us_neuron_pair_ids = Promat.extract_pairs_from_list(us_neurons_skids, self.pairs)
us_neuron_pair_ids = list(us_neuron_pair_ids[0].leftid) + list(us_neuron_pair_ids[2].nonpaired)
edges = []
for pair in us_neuron_pair_ids:
specific_ds = bin_mat.loc[(slice(None), pair), bin_mat.loc[(slice(None), pair), :].values[0]].columns
specific_ds_edges = [[pair, x[1]] for x in specific_ds]
for edge in specific_ds_edges:
edges.append(edge)
return(us_neurons_skids, edges)
def downstream_multihop(self, source, threshold, min_members=0, hops=10, exclude=[], strict=False, allow_source_ds=False):
if(allow_source_ds==False):
_, ds, edges = self.downstream(source, threshold, exclude=(source + exclude))
if(allow_source_ds):
_, ds, edges = self.downstream(source, threshold, exclude=(exclude))
left = Promat.get_hemis('left')
right = Promat.get_hemis('right')
_, ds = self.edge_threshold(edges, threshold, direction='downstream', strict=strict, left=left, right=right)
if(allow_source_ds==False):
before = source + ds
if(allow_source_ds):
before = ds
layers = []
layers.append(ds)
for i in range(0,(hops-1)):
source = ds
_, ds, edges = self.downstream(source, threshold, exclude=before)
_, ds = self.edge_threshold(edges, threshold, direction='downstream', strict=strict, left=left, right=right)
if((len(ds)!=0) & (len(ds)>=min_members)):
layers.append(ds)
before = before + ds
return(layers)
def upstream_multihop(self, source, threshold, min_members=10, hops=10, exclude=[], strict=False, allow_source_us=False):
if(allow_source_us==False):
us, edges = self.upstream(source, threshold, exclude=(source + exclude))
if(allow_source_us):
us, edges = self.upstream(source, threshold, exclude=(exclude))
_, us = self.edge_threshold(edges, threshold, direction='upstream', strict=strict)
if(allow_source_us==False):
before = source + us
if(allow_source_us):
before = us
layers = []
layers.append(us)
for i in range(0,(hops-1)):
source = us
us, edges = self.upstream(source, threshold, exclude = before)
_, us = self.edge_threshold(edges, threshold, direction='upstream', strict=strict)
if((len(us)!=0) & (len(us)>=min_members)):
layers.append(us)
before = before + us
return(layers)
# checking additional threshold criteria after identifying neurons over summed threshold
# left and right are only necessary when nonpaired neurons are included
def edge_threshold(self, edges, threshold, direction, strict=False, include_nonpaired=True, left=[], right=[]):
adj = self.adj_inter.copy()
all_edges = []
for edge in edges:
            # print(edge)  # debug output disabled; it floods stdout for large edge lists
specific_edges = adj.loc[(slice(None), edge[0]), (slice(None), edge[1])]
us_pair_status = adj.loc[(slice(None), slice(None), edge[0]), :].index[0][0]
ds_pair_status = adj.loc[(slice(None), slice(None), edge[1]), :].index[0][0]
# note that edge weights in 'left', 'right' columns refer to %input onto the dendrite of the left or right hemisphere downstream neuron
# the 'type' column indicates whether the edge is contralateral/ipsilateral (this can allow one to determine whether the signal originated on the left or right side if that's important)
# note: the split_paired_edges() method takes the output of threshold_edge_list() and splits these paired edges so that it becomes more explicit which hemisphere the upstream neuron belongs to
# check for paired connections
if((us_pair_status == 'pairs') & (ds_pair_status == 'pairs')):
specific_edges = pd.DataFrame([[edge[0], edge[1], specific_edges.iloc[0,0], specific_edges.iloc[1,1], False, 'ipsilateral', 'paired','paired'],
[edge[0], edge[1], specific_edges.iloc[1,0], specific_edges.iloc[0,1], False, 'contralateral', 'paired','paired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
if(strict==True):
# is each edge weight over threshold?
for index in specific_edges.index:
if((specific_edges.loc[index].left>threshold) & (specific_edges.loc[index].right>threshold)):
specific_edges.loc[index, 'overthres'] = True
if(strict==False):
# is average edge weight over threshold
for index in specific_edges.index:
if(((specific_edges.loc[index].left + specific_edges.loc[index].right)/2) > threshold):
specific_edges.loc[index, 'overthres'] = True
# are both edges present?
for index in specific_edges.index:
if((specific_edges.loc[index].left==0) | (specific_edges.loc[index].right==0)):
specific_edges.loc[index, 'overthres'] = False
all_edges.append(specific_edges.values[0])
all_edges.append(specific_edges.values[1])
# check for edges to downstream nonpaired neurons
if((us_pair_status == 'pairs') & (ds_pair_status == 'nonpaired') & (include_nonpaired==True)):
if(edge[1] in left):
specific_edges = pd.DataFrame([[edge[0], edge[1], specific_edges.iloc[0].values[0], 0, False, 'ipsilateral', 'paired', 'nonpaired'],
[edge[0], edge[1], specific_edges.iloc[1].values[0], 0, False, 'contralateral', 'paired', 'nonpaired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
if(edge[1] in right):
specific_edges = pd.DataFrame([[edge[0], edge[1], 0, specific_edges.iloc[0].values[0], False, 'contralateral', 'paired', 'nonpaired'],
[edge[0], edge[1], 0, specific_edges.iloc[1].values[0], False, 'ipsilateral', 'paired', 'nonpaired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
# is edge over threshold?
# don't check both because one will be missing
# strict==True/False doesn't apply for the same reason
for index in specific_edges.index:
if(((specific_edges.loc[index].left + specific_edges.loc[index].right)>threshold)):
specific_edges.loc[index, 'overthres'] = True
all_edges.append(specific_edges.values[0])
all_edges.append(specific_edges.values[1])
# check for edges from upstream nonpaired neurons
if((us_pair_status == 'nonpaired') & (ds_pair_status == 'pairs') & (include_nonpaired==True)):
if(edge[0] in left):
specific_edges = pd.DataFrame([[edge[0], edge[1], specific_edges.iloc[0, 0], 0, False, 'ipsilateral', 'nonpaired', 'paired'],
[edge[0], edge[1], 0, specific_edges.iloc[0, 1], False, 'contralateral', 'nonpaired', 'paired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
if(edge[0] in right):
specific_edges = pd.DataFrame([[edge[0], edge[1], specific_edges.iloc[0, 0], 0, False, 'contralateral', 'nonpaired', 'paired'],
[edge[0], edge[1], 0, specific_edges.iloc[0, 1], False, 'ipsilateral', 'nonpaired', 'paired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
# is edge over threshold?
# don't check both because one will be missing
# strict==True/False doesn't apply for the same reason
for index in specific_edges.index:
if(((specific_edges.loc[index].left + specific_edges.loc[index].right)>threshold)):
specific_edges.loc[index, 'overthres'] = True
all_edges.append(specific_edges.values[0])
all_edges.append(specific_edges.values[1])
# check for edges between two nonpaired neurons
if((us_pair_status == 'nonpaired') & (ds_pair_status == 'nonpaired') & (include_nonpaired==True)):
edge_weight = specific_edges.values[0][0]
if(edge[0] in left):
if(edge[1] in right):
specific_edges = pd.DataFrame([[edge[0], edge[1], 0, edge_weight, False, 'contralateral', 'nonpaired', 'nonpaired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
if(edge[1] in left):
specific_edges = pd.DataFrame([[edge[0], edge[1], edge_weight, 0, False, 'ipsilateral', 'nonpaired', 'nonpaired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
if(edge[0] in right):
if(edge[1] in left):
specific_edges = pd.DataFrame([[edge[0], edge[1], edge_weight, 0, False, 'contralateral', 'nonpaired', 'nonpaired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
if(edge[1] in right):
specific_edges = pd.DataFrame([[edge[0], edge[1], 0, edge_weight, False, 'ipsilateral', 'nonpaired', 'nonpaired']],
columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
# is edge over threshold?
# only one edge so strict==True/False doesn't apply
if(edge_weight>threshold):
specific_edges.loc[:, 'overthres'] = True
all_edges.append(specific_edges.values[0])
all_edges = pd.DataFrame(all_edges, columns = ['upstream_pair_id', 'downstream_pair_id', 'left', 'right', 'overthres', 'type', 'upstream_status', 'downstream_status'])
if(direction=='downstream'):
partner_skids = np.unique(all_edges[all_edges.overthres==True].downstream_pair_id) # identify downstream pairs
if(direction=='upstream'):
partner_skids = np.unique(all_edges[all_edges.overthres==True].upstream_pair_id) # identify upstream pairs
partner_skids = [x[2] for x in adj.loc[(slice(None), partner_skids), :].index] # convert from pair_id to skids
return(all_edges, partner_skids)
# select edges from results of edge_threshold that are over threshold; include non_paired edges as specified by user
def select_edges(self, pair_id, threshold, edges_only=False, include_nonpaired=[], exclude_nonpaired=[], left=[], right=[]):
_, ds, ds_edges = self.downstream(pair_id, threshold)
ds_edges, _ = self.edge_threshold(ds_edges, threshold, 'downstream', include_nonpaired=include_nonpaired, left=left, right=right)
overthres_ds_edges = ds_edges[ds_edges.overthres==True]
overthres_ds_edges.reset_index(inplace=True)
overthres_ds_edges.drop(labels=['index', 'overthres'], axis=1, inplace=True)
if(edges_only==False):
return(overthres_ds_edges, np.unique(overthres_ds_edges.downstream_pair_id))
if(edges_only):
return(overthres_ds_edges)
# generate edge list for whole matrix with some threshold
def threshold_edge_list(self, all_sources, matrix_nonpaired, threshold, left, right):
all_edges = Parallel(n_jobs=-1)(delayed(self.select_edges)(pair, threshold, edges_only=True, include_nonpaired=matrix_nonpaired, left=left, right=right) for pair in tqdm(all_sources))
all_edges_combined = [x for x in all_edges if type(x)==pd.DataFrame]
all_edges_combined = pd.concat(all_edges_combined, axis=0)
all_edges_combined.reset_index(inplace=True, drop=True)
return(all_edges_combined)
# convert paired edge list with pair-wise threshold back to normal edge list, input from threshold_edge_list()
# note that neurons with bilateral dendrites aren't treated in any special way, so they may be indicated as contralateral edges even if that's inaccurate/complicated
def split_paired_edges(self, all_edges_combined, left, right, flip_weirdos=True):
pairs = self.pairs
# note that edge_weights are from the perspective of the downstream neuron, i.e. %input onto their dendrite
all_edges_combined_split = []
for i in range(len(all_edges_combined.index)):
row = all_edges_combined.iloc[i]
if((row.upstream_status=='paired') & (row.downstream_status=='paired')):
if(row.type=='ipsilateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'left', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
all_edges_combined_split.append([Promat.identify_pair(row.upstream_pair_id, pairs), Promat.identify_pair(row.downstream_pair_id, pairs), 'right', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
if(row.type=='contralateral'):
all_edges_combined_split.append([row.upstream_pair_id, Promat.identify_pair(row.downstream_pair_id, pairs), 'left', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
all_edges_combined_split.append([Promat.identify_pair(row.upstream_pair_id, pairs), row.downstream_pair_id, 'right', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
# note that pair_ids are really skeleton IDs for nonpaired neurons; this allows one to compare to left/right annotations
# this comparison is required because the location of nonpaired -> pair edges depends on whether the nonpaired is left or right
if((row.upstream_status=='nonpaired') & (row.downstream_status=='paired')):
if(row.upstream_pair_id in left):
if(row.type=='ipsilateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'left', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
if(row.type=='contralateral'):
all_edges_combined_split.append([row.upstream_pair_id, Promat.identify_pair(row.downstream_pair_id, pairs), 'left', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
if(row.upstream_pair_id in right):
if(row.type=='ipsilateral'):
all_edges_combined_split.append([row.upstream_pair_id, Promat.identify_pair(row.downstream_pair_id, pairs), 'right', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
if(row.type=='contralateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'right', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
# use the downstream_pair_id because this is really just skeleton ID for nonpaired neurons
# therefore one can compare to left/right annotations to determine which hemisphere it belongs to
if((row.upstream_status=='paired') & (row.downstream_status=='nonpaired')):
if(row.downstream_pair_id in left):
if(row.type=='ipsilateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'left', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
if(row.type=='contralateral'):
all_edges_combined_split.append([Promat.identify_pair(row.upstream_pair_id, pairs), row.downstream_pair_id, 'right', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
if(row.downstream_pair_id in right):
if(row.type=='ipsilateral'):
all_edges_combined_split.append([Promat.identify_pair(row.upstream_pair_id, pairs), row.downstream_pair_id, 'right', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
if(row.type=='contralateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'left', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
# use the downstream_pair_id because this is really just skeleton ID for nonpaired neurons
# therefore one can compare to left/right annotations to determine which hemisphere it belongs to
if((row.upstream_status=='nonpaired') & (row.downstream_status=='nonpaired')):
if(row.downstream_pair_id in left):
if(row.type=='ipsilateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'left', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
if(row.type=='contralateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'right', 'left', row.left, row.type, row.upstream_status, row.downstream_status])
if(row.downstream_pair_id in right):
if(row.type=='ipsilateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'right', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
if(row.type=='contralateral'):
all_edges_combined_split.append([row.upstream_pair_id, row.downstream_pair_id, 'left', 'right', row.right, row.type, row.upstream_status, row.downstream_status])
all_edges_combined_split = pd.DataFrame(all_edges_combined_split, columns = ['upstream_skid', 'downstream_skid', 'upstream_side', 'downstream_side', 'edge_weight', 'type', 'upstream_status', 'downstream_status'])
return(all_edges_combined_split)
# generate edge list for whole matrix
def edge_list(self, exclude_loops=False):
edges = []
for i in range(len(self.adj.index)):
for j in range(len(self.adj.columns)):
if(exclude_loops):
if((self.adj.iloc[i, j]>0) & (i!=j)):
edges.append([self.adj.index[i], self.adj.columns[j]])
if(exclude_loops==False):
if(self.adj.iloc[i, j]>0):
edges.append([self.adj.index[i], self.adj.columns[j]])
edges = pd.DataFrame(edges, columns = ['upstream_pair_id', 'downstream_pair_id'])
return(edges)
# generate a binary connectivity matrix that displays number of hops between neuron types
def hop_matrix(self, layer_id_skids, source_leftid, destination_leftid, include_start=False):
mat = pd.DataFrame(np.zeros(shape = (len(source_leftid), len(destination_leftid))),
index = source_leftid,
columns = destination_leftid)
for index in mat.index:
data = layer_id_skids.loc[index, :]
for i, hop in enumerate(data):
for column in mat.columns:
if(column in hop):
if(include_start==True): # if the source of the hop signal is the first layer
mat.loc[index, column] = i
if(include_start==False): # if the first layer is the first layer downstream of source
mat.loc[index, column] = i+1
max_value = mat.values.max()
mat_plotting = mat.copy()
for index in mat_plotting.index:
for column in mat_plotting.columns:
if(mat_plotting.loc[index, column]>0):
mat_plotting.loc[index, column] = 1 - (mat_plotting.loc[index, column] - max_value)
return(mat, mat_plotting)
class Promat():
# default method to import pair list and process it to deal with duplicated neurons
@staticmethod
def get_pairs(pairs_path='data/pairs/pairs-2021-04-06.csv', flip_weirdos=True):
print(f'Path to pairs list is: {pairs_path}')
pairs = pd.read_csv(pairs_path, header = 0) # import pairs, manually determined with help from <NAME> and <NAME>'s scripts
pairs = pairs.loc[:, ['leftid', 'rightid']] # only include useful columns
# duplicated right-side neurons to throw out for simplicity
duplicated = pymaid.get_skids_by_annotation('mw duplicated neurons to delete')
duplicated_index = np.where(sum([pairs.rightid==x for x in duplicated])==1)[0]
pairs = pairs.drop(duplicated_index)
# change left/right ids of contra-contra neurons so they behave properly in downstream analysis
# these neurons have somas on one brain hemisphere and dendrites/axons on the other
# and so they functionally all completely contralateral and can therefore be considered ipsilateral neurons
if(flip_weirdos):
# identify contra-contra neurons
contra_contra = np.intersect1d(pymaid.get_skids_by_annotation('mw contralateral axon'), pymaid.get_skids_by_annotation('mw contralateral dendrite'))
contra_contra_pairs = Promat.extract_pairs_from_list(contra_contra, pairs)[0]
if(len(contra_contra_pairs)>0):
# flip left/right neurons in contra-contra neurons
for index in contra_contra_pairs.index:
cc_left = contra_contra_pairs.loc[index, 'leftid']
cc_right = contra_contra_pairs.loc[index, 'rightid']
pairs.loc[pairs[pairs.leftid==cc_left].index, 'rightid'] = cc_left
pairs.loc[pairs[pairs.leftid==cc_left].index, 'leftid'] = cc_right
return(pairs)
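    # Usage sketch (illustrative; requires a live pymaid connection and the
    # project's annotations):
    #
    #     pairs = Promat.get_pairs()
    #     pairs.head()   # two columns: leftid, rightid
    #
    # Contra-contra neurons have their left/right skids swapped here so that
    # downstream ipsilateral/contralateral edge typing treats them consistently.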
# returns all skids in left or right side of the brain, depending on whether side = 'left' or 'right'
    @staticmethod
    def get_hemis(side=None, flip_weirdos=True):
left = pymaid.get_skids_by_annotation('mw left')
right = pymaid.get_skids_by_annotation('mw right')
if(flip_weirdos):
# identifying contra-contra neurons so they can be flipped to opposite side of brain
neurons_to_flip = np.intersect1d(pymaid.get_skids_by_annotation('mw contralateral axon'), pymaid.get_skids_by_annotation('mw contralateral dendrite'))
neurons_to_flip_left = [skid for skid in neurons_to_flip if skid in left]
neurons_to_flip_right = [skid for skid in neurons_to_flip if skid in right]
# removing neurons_to_flip and adding to the other side
left = list(np.setdiff1d(left, neurons_to_flip_left)) + neurons_to_flip_right
right = list(np.setdiff1d(right, neurons_to_flip_right)) + neurons_to_flip_left
if(side=='left'): return(left)
if(side=='right'): return(right)
if(side==None): return([left, right])
# converts any df with df.index = list of skids to a multiindex with ['pair_status', 'pair_id', 'skid']
# 'pair_status': pairs / nonpaired
# 'pair_id': left skid of a pair or simply the skid of a nonpaired neuron
@staticmethod
def convert_df_to_pairwise(df, pairs=None):
        if(pairs is None):
pairs = Promat.get_pairs()
brain_pairs, brain_unpaired, brain_nonpaired = Promat.extract_pairs_from_list(df.index, pairList = pairs)
# left_right interlaced order for brain matrix
brain_pair_order = []
for i in range(0, len(brain_pairs)):
brain_pair_order.append(brain_pairs.iloc[i].leftid)
brain_pair_order.append(brain_pairs.iloc[i].rightid)
order = brain_pair_order + list(brain_nonpaired.nonpaired)
interlaced = df.loc[order, :]
index_df = pd.DataFrame([['pairs', Promat.get_paired_skids(skid, pairs)[0], skid] for skid in brain_pair_order] + [['nonpaired', skid, skid] for skid in list(brain_nonpaired.nonpaired)],
columns = ['pair_status', 'pair_id', 'skid'])
index = pd.MultiIndex.from_frame(index_df)
interlaced.index = index
return(interlaced)
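    # Hedged illustration (not from the original source): for a df indexed by
    # skids [left1, right1, lone1], convert_df_to_pairwise reindexes the rows to
    # a MultiIndex roughly like
    #   ('pairs',     left1, left1)
    #   ('pairs',     left1, right1)
    #   ('nonpaired', lone1, lone1)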
# trim out neurons not currently in the brain matrix
@staticmethod
def trim_missing(skidList, brainMatrix):
trimmedList = []
for i in skidList:
if(i in brainMatrix.index):
trimmedList.append(i)
else:
print("*WARNING* skid: %i is not in whole brain matrix" % (i))
return(trimmedList)
# identify skeleton ID of hemilateral neuron pair, based on CSV pair list
@staticmethod
def identify_pair(skid, pairList):
pair_skid = []
if(skid in pairList["leftid"].values):
pair_skid = pairList["rightid"][pairList["leftid"]==skid].iloc[0]
if(skid in pairList["rightid"].values):
pair_skid = pairList["leftid"][pairList["rightid"]==skid].iloc[0]
if((skid not in pairList['rightid'].values) & (skid not in pairList['leftid'].values)):
print(f'skid {skid} is not in paired list')
pair_skid = skid
return(pair_skid)
# returns paired skids in array [left, right]; can input either left or right skid of a pair to identify
@staticmethod
def get_paired_skids(skid, pairList):
if(type(skid)!=list):
if(skid in pairList["leftid"].values):
pair_right = pairList["rightid"][pairList["leftid"]==skid].iloc[0]
pair_left = skid
if(skid in pairList["rightid"].values):
pair_left = pairList["leftid"][pairList["rightid"]==skid].iloc[0]
pair_right = skid
if((skid in pairList["leftid"].values) == False and (skid in pairList["rightid"].values) == False):
print(f"skid {skid} is not in paired list")
return([skid])
return([pair_left, pair_right])
if(type(skid)==list):
data = [Promat.get_paired_skids(x, pairList) for x in skid]
df = pd.DataFrame(data, columns = ['leftid', 'rightid'])
return(df)
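    # Hedged illustration (not from the original source), using a toy pair list:
    #   pairList = pd.DataFrame({'leftid': [101], 'rightid': [201]})
    #   Promat.get_paired_skids(101, pairList)        # -> [101, 201]
    #   Promat.get_paired_skids([101, 201], pairList) # -> DataFrame with leftid/rightid columns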
# converts array of skids into left-right pairs in separate columns
# puts unpaired and nonpaired neurons in different lists
@staticmethod
def extract_pairs_from_list(skids, pairList):
pairs = pd.DataFrame([], columns = ['leftid', 'rightid'])
unpaired = pd.DataFrame([], columns = ['unpaired'])
nonpaired = pd.DataFrame([], columns = ['nonpaired'])
for i in skids:
if((int(i) not in pairList.leftid.values) & (int(i) not in pairList.rightid.values)):
nonpaired = nonpaired.append({'nonpaired': int(i)}, ignore_index=True)
continue
if((int(i) in pairList["leftid"].values) & (Promat.get_paired_skids(int(i), pairList)[1] in skids)):
pair = Promat.get_paired_skids(int(i), pairList)
pairs = pairs.append({'leftid': pair[0], 'rightid': pair[1]}, ignore_index=True)
if(((int(i) in pairList["leftid"].values) & (Promat.get_paired_skids(int(i), pairList)[1] not in skids)|
(int(i) in pairList["rightid"].values) & (Promat.get_paired_skids(int(i), pairList)[0] not in skids))):
unpaired = unpaired.append({'unpaired': int(i)}, ignore_index=True)
pairs = pd.DataFrame(pairs)
unpaired = pd.DataFrame(unpaired)
        nonpaired = pd.DataFrame(nonpaired)
import datetime
import hashlib
import os
import time
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
safe_close,
)
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
from pandas.io.pytables import (
HDFStore,
read_hdf,
)
pytestmark = pytest.mark.single_cpu
def test_context(setup_path):
with tm.ensure_clean(setup_path) as path:
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
with tm.ensure_clean(setup_path) as path:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
def test_no_track_times(setup_path):
# GH 32682
    # allows setting track_times (see `pytables` `create_table` documentation)
def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
h = hash_factory()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
h.update(chunk)
return h.digest()
def create_h5_and_return_checksum(track_times):
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": [1]})
with HDFStore(path, mode="w") as hdf:
hdf.put(
"table",
df,
format="table",
data_columns=True,
index=None,
track_times=track_times,
)
return checksum(path)
checksum_0_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_0_tt_true = create_h5_and_return_checksum(track_times=True)
# sleep is necessary to create h5 with different creation time
time.sleep(1)
checksum_1_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_1_tt_true = create_h5_and_return_checksum(track_times=True)
# checksums are the same if track_time = False
assert checksum_0_tt_false == checksum_1_tt_false
# checksums are NOT same if track_time = True
assert checksum_0_tt_true != checksum_1_tt_true
def test_iter_empty(setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@pytest.mark.filterwarnings("ignore:object name:tables.exceptions.NaturalNameWarning")
def test_contains(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
msg = "'NoneType' object has no attribute 'startswith'"
with pytest.raises(Exception, match=msg):
store.select("df2")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(where, expected):
# GH10143
objs = {
"df1": DataFrame([1, 2, 3]),
"df2": DataFrame([4, 5, 6]),
"df3": DataFrame([6, 7, 8]),
"df4": DataFrame([9, 10, 11]),
"s1": Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
msg = f"'HDFStore' object has no attribute '{x}'"
with pytest.raises(AttributeError, match=msg):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, f"_{x}")
def test_store_dropna(setup_path):
df_with_missing = DataFrame(
{"col1": [0.0, np.nan, 2.0], "col2": [1.0, np.nan, np.nan]},
index=list("abc"),
)
df_without_missing = DataFrame(
{"col1": [0.0, 2.0], "col2": [1.0, np.nan]}, index=list("ac")
)
# # Test to make sure defaults are to not drop.
# # Corresponding to Issue 9382
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table")
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=False)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=True)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_without_missing, reloaded)
def test_to_hdf_with_min_itemsize(setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "ss3"), concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(read_hdf(path, "ss4"), concat([df["B"], df2["B"]]))
@pytest.mark.parametrize("format", ["fixed", "table"])
def test_to_hdf_errors(format, setup_path):
data = ["\ud800foo"]
ser = Series(data, index=Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_create_table_index(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append("f2", df, index=["string"], data_columns=["string", "string2"])
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
msg = "cannot create table index on a Fixed format store"
with pytest.raises(TypeError, match=msg):
store.create_table_index("f2")
def test_create_table_index_data_columns_argument(setup_path):
# GH 28156
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
msg = "'Cols' object has no attribute 'string2'"
with pytest.raises(AttributeError, match=msg):
col("f", "string2").is_indexed
# try to index a col which isn't a data_column
msg = (
"column string2 is not a data_column.\n"
"In order to read column string2 you must reload the dataframe \n"
"into HDFStore and include string2 with the data_columns argument."
)
with pytest.raises(AttributeError, match=msg):
store.create_table_index("f", columns=["string2"])
def test_mi_data_columns(setup_path):
# GH 14435
idx = MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_table_mixed_dtypes(setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_calendar_roundtrip_issue(setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_remove(setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_same_name_scoping(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(20, 2), index=date_range("20130101", periods=20))
store.put("df", df, format="table")
expected = df[df.index > Timestamp("20130105")]
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
# changes what 'datetime' points to in the namespace where
# 'select' does the lookup
from datetime import datetime # noqa:F401
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_store_index_name(setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(table_format, setup_path):
# GH #13492
idx = Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@pytest.mark.filterwarnings("ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning")
def test_overwrite_node(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_coordinates(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame({"A": range(5), "B": range(5)})
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
        _maybe_remove(store, "df1")
import inspect
from PySide import QtCore, QtGui
import pandas
class Recorder(QtCore.QObject):
"""
Takes incoming frame dicts, stores them, and records them to csv file on command
"""
recordingChanged = QtCore.Signal(bool)
def __init__(self, statusLabel):
super().__init__()
self._statusLabel = statusLabel
self.data = [] # Save data as list of frame dicts, for now
self.extraData = {}
self.pathData = {}
self.recording = False
self._desktopWidget = QtGui.QDesktopWidget()
self.clear()
def _flatten(self, d, parent_key=''):
"""
Flatten a dict like {'avg':{'x':42, 'y':0}} into {'avg_x':42, 'avg_y':0}
"""
        import collections.abc
items = []
for k, v in d.items():
new_key = parent_key + '_' + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
items.extend(self._flatten(v, new_key).items())
else:
items.append((new_key, v))
return dict(items)
def handleFrame(self, frame):
"""
Save the gaze frame to memory, along with mouse position
"""
if self.recording:
# Flatten frame dict to one level
datapoint = self._flatten(frame)
# Add to memory
self.data.append(datapoint)
self._statusLabel.setText('Recorded {} frames'.format(len(self.data)))
def setRecording(self, rec):
if self.recording != rec:
self.recording = rec
self.recordingChanged.emit(rec)
def saveStim(self, stimItem, stimPos):
"""
        Keep track of what function was used to draw a stimulus, and its position (QPoint)
"""
if stimItem is None:
self.extraData.clear()
else:
self.extraData.update({'stim_item': stimItem.__name__,
'stim_module': stimItem.__module__,
'stim_x': stimPos.x(),
'stim_y': stimPos.y()})
def savePath(self, points):
"""
Similar to saveStim, save the drawn path (currently only one)
points = array of QPointF
"""
if points is None:
self.pathData.clear()
else:
self.pathData.update({'x': [p.x() for p in points],
'y': [p.y() for p in points]})
def clear(self):
self.data.clear()
self._statusLabel.setText('Recorder ready')
def toDataFrame(self):
""" Return a pandas.DataFrame with all data in memory up to this point """
df = pandas.DataFrame(self.data)
if len(df) > 0:
df.set_index('timestamp', inplace=True)
df.index.name = None
return df
def saveToFile(self):
""" Save data to csv file, after opening a file dialog """
filename, _ = QtGui.QFileDialog.getSaveFileName(self.parent(), 'Save File', '',
'Excel Workbook (*.xlsx);; Excel 97-2003 Workbook (*.xls)')
if filename:
writer = pandas.ExcelWriter(filename)
self.toDataFrame().to_excel(writer, 'GazeData')
pandas.DataFrame.from_dict(self.extraData, orient='index').to_excel(writer, 'Extra')
            pandas.DataFrame.from_dict(self.pathData)
import queue
import random
import threading
import itertools
from functools import reduce, partial
from pathlib import Path
from time import perf_counter
import sys
import os
#from numba import jit
#from numba.experimental import jitclass
#import panel as pn
import bokeh.layouts
import datashader
import numpy as np
from bokeh.models import ColumnDataSource, CustomJS, Button, Select, Div
from bokeh.plotting import curdoc
from holoviews.plotting.util import process_cmap
from holoviews.streams import Selection1D
from numpy import log
from pyopenms import MSExperiment, PeakMap, MzMLFile, PeakFileOptions, SignalToNoiseEstimatorMedian, PeakSpectrum, DRange1, \
DPosition1, FeatureMap, Feature, PeptideIdentification, PeptideHit, FeatureXMLFile
import pandas as pd
import holoviews as hv
import holoviews.operation.datashader as hd
from holoviews import opts, dim
from tornado import gen
class FeatureMapDF(FeatureMap):
def __init__(self):
super().__init__()
def get_df(self):
def gen(fmap: FeatureMap, fun):
for f in fmap:
yield from fun(f)
def extractMetaData(f: Feature):
# subfeatures = f.getFeatureList() # type: list[FeatureHandle]
pep = f.getPeptideIdentifications() # type: list[PeptideIdentification]
bb = f.getConvexHull().getBoundingBox2D()
if len(pep) != 0:
hits = pep[0].getHits()
if len(hits) != 0:
besthit = hits[0] # type: PeptideHit
# TODO what else
yield f.getUniqueId(), besthit.getSequence().toString(), f.getCharge(), f.getRT(), f.getMZ(), bb[0][0], bb[1][0], bb[0][1], bb[1][1], f.getOverallQuality(), f.getIntensity()
else:
yield f.getUniqueId(), None, f.getCharge(), f.getRT(), f.getMZ(), bb[0][0], bb[1][0], bb[0][1], bb[1][1], f.getOverallQuality(), f.getIntensity()
else:
yield f.getUniqueId(), None, f.getCharge(), f.getRT(), f.getMZ(), bb[0][0], bb[1][0], bb[0][1], bb[1][1], f.getOverallQuality(), f.getIntensity()
cnt = self.size()
mddtypes = [('id', np.dtype('uint64')), ('sequence', 'U200'), ('charge', 'i4'), ('RT', 'f'), ('mz', 'f'),
('RTstart', 'f'), ('RTend', 'f'), ('mzstart', 'f'), ('mzend', 'f'),
('quality', 'f'), ('intensity', 'f')]
mdarr = np.fromiter(iter=gen(self, extractMetaData), dtype=mddtypes, count=cnt)
return pd.DataFrame(mdarr).set_index('id')
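# Hedged usage sketch (not part of the original app): FeatureXMLFile.load is a
# standard pyopenms call, but 'features.featureXML' is a hypothetical path.
def _example_featuremap_to_df(path='features.featureXML'):
    fmap = FeatureMapDF()
    FeatureXMLFile().load(path, fmap)  # populate the feature map from disk
    return fmap.get_df()               # one row per feature, indexed by unique id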
def SpectrumGenerator(file_path):
q = queue.Queue(maxsize=1)
JOB_DONE = object()
TIMEOUT = 30
def task():
loader = MzMLFile()
loadopts = loader.getOptions() # type: PeakFileOptions
loadopts.setMSLevels([1])
loadopts.setSkipXMLChecks(True)
loadopts.setIntensity32Bit(True)
loadopts.setIntensityRange(DRange1(DPosition1(10000), DPosition1(sys.maxsize)))
loader.setOptions(loadopts)
loader.transform(file_path, MSCallback(q), True, True)
q.put(JOB_DONE)
t = threading.Thread(target=task)
t.start()
while True:
# better set a timeout, or if task in sub-threading fails, it will result in a deadlock
chunk = q.get(timeout=TIMEOUT)
if chunk is JOB_DONE:
break
yield from chunk
t.join()
#@jit(nopython=True)
def unpack(rt, mzs, intys):
return [(rt, point[0], point[1]) for point in zip(mzs, intys)]
class MSCallback:
def __init__(self, q):
self.q = q
def setExperimentalSettings(self, s):
pass
def setExpectedSize(self, a, b):
pass
def consumeChromatogram(self, c):
pass
def consumeSpectrum(self, s: PeakSpectrum):
#if s.getMSLevel() == 2:
self.q.put((s.getRT(), point[0], point[1]) for point in zip(*s.get_peaks()))
#pn.extension()
hv.extension('bokeh')
renderer = hv.renderer('bokeh').instance(mode='server')
spectradf = pd.DataFrame()
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from django.shortcuts import render,get_object_or_404,redirect
from django.db.models import Q
from django.http import Http404
from .models import Movie,Myrating,Movielens
from django.contrib import messages
from .forms import UserForm
from django.db.models import Case, When
import numpy as np
import pandas as pd
from collections import defaultdict
from surprise.model_selection import train_test_split
from surprise.model_selection import RandomizedSearchCV
from surprise import accuracy
from surprise.model_selection.validation import cross_validate
from surprise import KNNBasic
from surprise import KNNWithMeans
from surprise import Reader
from surprise import dataset
#-------------------------------------------------------------------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------------------------------------------------
def get_pred(predictions, n=10):
top_pred = defaultdict(list)
for uid, iid, true_r, est, _ in predictions:
top_pred[uid].append((iid, est)) #Mapping list of (movieid, predicted rating) to each userid
for uid, user_ratings in top_pred.items():
user_ratings.sort(key=lambda x: x[1], reverse=True)
top_pred[uid] = user_ratings[:n] #Sorting and displaying the top 'n' predictions
print(top_pred)
return top_pred
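# Hedged illustration (not part of the original view code): predictions returned
# by surprise's model.test() are (uid, iid, true_rating, estimate, details)
# tuples, so a hand-built list of the same shape is enough to exercise get_pred.
def _example_get_pred():
    fake_predictions = [
        ('u1', 'm1', 4.0, 4.5, {}),
        ('u1', 'm2', 3.0, 2.5, {}),
        ('u2', 'm1', 5.0, 4.8, {}),
    ]
    # -> {'u1': [('m1', 4.5), ('m2', 2.5)], 'u2': [('m1', 4.8)]}
    return get_pred(fake_predictions, n=2)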
class collab_filtering_based_recommender_model():
def __init__(self, model, trainset, testset, data):
self.model = model
self.trainset = trainset
self.testset = testset
self.data = data
self.pred_test = None
self.recommendations = None
self.top_pred = None
self.recommenddf = None
def fit_and_predict(self):
#print('-------Fitting the train data-------')
self.model.fit(self.trainset)
#print('-------Predicting the test data-------')
self.pred_test = self.model.test(self.testset)
rmse = accuracy.rmse(self.pred_test)
#print('-------RMSE for the predicted result is ' + str(rmse) + '-------')
self.top_pred = get_pred(self.pred_test)
self.recommenddf = pd.DataFrame(columns=['userId', 'MovieId', 'Rating'])
for item in self.top_pred:
            subdf = pd.DataFrame(self.top_pred[item], columns=['MovieId', 'Rating'])
'''
BSD 3-Clause License
Copyright (c) 2021, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import geopandas as gpd
import pandas as pd
import numpy as np
from .grids import GPS_to_grid
from .preprocess import id_reindex, clean_same
def plot_activity(data, col=['stime', 'etime', 'LONCOL', 'LATCOL'],
figsize=(10, 5), dpi=250):
'''
Plot the activity plot of individual
Parameters
----------------
data : DataFrame
activity information of one person
col : List
The column name.[starttime,endtime,LONCOL,LATCOL] of activities
'''
stime, etime, LONCOL, LATCOL = col
activity = data.copy()
activity['date'] = activity[stime].dt.date
dates = list(activity['date'].astype(str).drop_duplicates())
dates_all = []
minday = min(dates)
maxday = max(dates)
import datetime
thisdate = minday
while thisdate != maxday:
dates_all.append(thisdate)
thisdate = str((pd.to_datetime(thisdate+' 00:00:00') +
datetime.timedelta(days=1)).date())
dates = dates_all
import matplotlib.pyplot as plt
import numpy as np
activity['duration'] = (activity[etime]-activity[stime]).dt.total_seconds()
    activity = activity[~activity['duration'].isnull()]
import time
activity['ststmp'] = activity[stime].astype(str).apply(
lambda x: time.mktime(
time.strptime(x, '%Y-%m-%d %H:%M:%S'))).astype('int64')
activity['etstmp'] = activity[etime].astype(str).apply(
lambda x: time.mktime(
time.strptime(x, '%Y-%m-%d %H:%M:%S'))).astype('int64')
activityinfo = activity[[LONCOL, LATCOL]].drop_duplicates()
indexs = list(range(1, len(activityinfo)+1))
np.random.shuffle(indexs)
activityinfo['index'] = indexs
import matplotlib as mpl
norm = mpl.colors.Normalize(vmin=0, vmax=len(activityinfo))
from matplotlib.colors import ListedColormap
import seaborn as sns
cmap = ListedColormap(sns.hls_palette(
n_colors=len(activityinfo), l=.5, s=0.8))
plt.figure(1, figsize, dpi)
ax = plt.subplot(111)
plt.sca(ax)
for day in range(len(dates)):
plt.bar(day, height=24*3600, bottom=0, width=0.4, color=(0, 0, 0, 0.1))
stime = dates[day]+' 00:00:00'
etime = dates[day]+' 23:59:59'
bars = activity[(activity['stime'] < etime) &
(activity['etime'] > stime)].copy()
bars['ststmp'] = bars['ststmp'] - \
time.mktime(time.strptime(stime, '%Y-%m-%d %H:%M:%S'))
bars['etstmp'] = bars['etstmp'] - \
time.mktime(time.strptime(stime, '%Y-%m-%d %H:%M:%S'))
for row in range(len(bars)):
plt.bar(day,
height=bars['etstmp'].iloc[row]-bars['ststmp'].iloc[row],
bottom=bars['ststmp'].iloc[row],
color=cmap(
norm(
activityinfo[
(activityinfo[LONCOL] == bars[LONCOL].
iloc[row]) &
(activityinfo[LATCOL] ==
bars[LATCOL].iloc[row])
]['index'].iloc[0])))
plt.xlim(-0.5, len(dates))
plt.ylim(0, 24*3600)
plt.xticks(range(len(dates)), [i[-5:] for i in dates])
plt.yticks(range(0, 24*3600+1, 3600),
pd.DataFrame({'t': range(0, 25)})['t'].astype('str')+':00')
plt.show()
def traj_stay_move(data, params,
col=['ID', 'dataTime', 'longitude', 'latitude'],
activitytime=1800):
'''
Input trajectory data and gridding parameters, identify stay and move
Parameters
----------------
data : DataFrame
trajectory data
params : List
gridding parameters
col : List
The column name, in the order of ['ID','dataTime','longitude',
'latitude']
activitytime : Number
How much time to regard as activity
Returns
----------------
stay : DataFrame
stay information
move : DataFrame
move information
'''
uid, timecol, lon, lat = col
trajdata = data.copy()
trajdata[timecol] = pd.to_datetime(trajdata[timecol])
trajdata['LONCOL'], trajdata['LATCOL'] = GPS_to_grid(
trajdata[lon], trajdata[lat], params)
trajdata = clean_same(trajdata, col=[uid, timecol, 'LONCOL', 'LATCOL'])
trajdata['stime'] = trajdata[timecol]
trajdata['etime'] = trajdata[timecol].shift(-1)
trajdata[uid+'_next'] = trajdata[uid].shift(-1)
trajdata = trajdata[trajdata[uid+'_next'] == trajdata[uid]]
trajdata['duration'] = (
trajdata['etime'] - trajdata['stime']).dt.total_seconds()
activity = trajdata[[uid, lon, lat, 'stime',
'etime', 'duration', 'LONCOL', 'LATCOL']]
activity = activity[activity['duration'] >= activitytime].rename(
columns={lon: 'lon', lat: 'lat'})
stay = activity.copy()
activity['stime_next'] = activity['stime'].shift(-1)
activity['elon'] = activity['lon'].shift(-1)
activity['elat'] = activity['lat'].shift(-1)
activity['ELONCOL'] = activity['LONCOL'].shift(-1)
activity['ELATCOL'] = activity['LATCOL'].shift(-1)
activity[uid+'_next'] = activity[uid].shift(-1)
activity = activity[activity[uid+'_next'] == activity[uid]
].drop(['stime', 'duration', uid+'_next'], axis=1)
activity = activity.rename(columns={'lon': 'slon',
'lat': 'slat',
'etime': 'stime',
'stime_next': 'etime',
'LONCOL': 'SLONCOL',
'LATCOL': 'SLATCOL',
})
activity['duration'] = (
activity['etime'] - activity['stime']).dt.total_seconds()
move = activity.copy()
return stay, move
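# Hedged usage sketch (not part of the original module): `params` follows the
# (lonStart, latStart, deltaLon, deltaLat) grid convention assumed for
# GPS_to_grid, and the three-point trajectory below is made up for illustration.
def _example_traj_stay_move():
    traj = pd.DataFrame({
        'ID': [1, 1, 1],
        'dataTime': ['2021-01-01 08:00:00', '2021-01-01 09:00:00',
                     '2021-01-01 09:05:00'],
        'longitude': [113.90, 113.90, 114.10],
        'latitude': [22.50, 22.50, 22.60],
    })
    params = (113.75, 22.40, 0.005, 0.005)
    # a point kept for >= 30 min becomes a stay; the hop between stays is a move
    return traj_stay_move(traj, params,
                          col=['ID', 'dataTime', 'longitude', 'latitude'],
                          activitytime=1800)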
def traj_densify(data, col=['Vehicleid', 'Time', 'Lng', 'Lat'], timegap=15):
'''
    Trajectory densification: ensure that there is a trajectory point every
    `timegap` seconds
Parameters
-------
data : DataFrame
Data
col : List
The column name, in the sequence of [Vehicleid, Time, lng, lat]
timegap : number
The sampling interval (second)
Returns
-------
data1 : DataFrame
The processed data
'''
Vehicleid, Time, Lng, Lat = col
data[Time] = pd.to_datetime(data[Time])
data1 = data.copy()
data1 = data1.drop_duplicates([Vehicleid, Time])
data1 = id_reindex(data1, Vehicleid)
data1 = data1.sort_values(by=[Vehicleid+'_new', Time])
data1['utctime'] = data1[Time].apply(lambda r: int(r.value/1000000000))
data1['utctime_new'] = data1[Vehicleid+'_new']*10000000000+data1['utctime']
a = data1.groupby([Vehicleid+'_new']
)['utctime'].min().rename('mintime').reset_index()
b = data1.groupby([Vehicleid+'_new']
)['utctime'].max().rename('maxtime').reset_index()
minmaxtime = pd.merge(a, b)
mintime = data1['utctime'].min()
maxtime = data1['utctime'].max()
timedata = pd.DataFrame(range(mintime, maxtime, timegap), columns=[Time])
timedata['tmp'] = 1
minmaxtime['tmp'] = 1
minmaxtime = pd.merge(minmaxtime, timedata)
minmaxtime = minmaxtime[(minmaxtime['mintime'] <= minmaxtime[Time]) & (
minmaxtime['maxtime'] >= minmaxtime[Time])]
minmaxtime['utctime_new'] = minmaxtime[Vehicleid+'_new'] * \
10000000000+minmaxtime[Time]
minmaxtime[Time] = pd.to_datetime(minmaxtime[Time], unit='s')
data1 = pd.concat([data1, minmaxtime[['utctime_new', Time]]]
).sort_values(by=['utctime_new'])
data1 = data1.drop_duplicates(['utctime_new'])
data1[Lng] = data1.set_index('utctime_new')[
Lng].interpolate(method='index').values
data1[Lat] = data1.set_index('utctime_new')[
Lat].interpolate(method='index').values
data1[Vehicleid] = data1[Vehicleid].ffill()
data1[Vehicleid] = data1[Vehicleid].bfill()
data1 = data1.drop([Vehicleid+'_new', 'utctime', 'utctime_new'], axis=1)
return data1
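# Hedged usage sketch (not part of the original module): densify a toy
# two-point trajectory so that a point exists every 15 seconds.
def _example_traj_densify():
    traj = pd.DataFrame({
        'Vehicleid': [1, 1],
        'Time': ['2021-01-01 08:00:00', '2021-01-01 08:01:00'],
        'Lng': [113.90, 113.91],
        'Lat': [22.50, 22.51],
    })
    return traj_densify(traj, col=['Vehicleid', 'Time', 'Lng', 'Lat'], timegap=15)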
def traj_sparsify(data, col=['Vehicleid', 'Time', 'Lng', 'Lat'], timegap=15,
method='subsample'):
'''
    Trajectory sparsification. When the sampling frequency of trajectory data
    is too high, the data volume becomes unnecessarily large for analyses that
    only need a lower sampling frequency. This function enlarges the sampling
    interval to reduce the amount of data.
Parameters
-------
data : DataFrame
Data
col : List
The column name, in the sequence of [Vehicleid, Time, lng, lat]
timegap : number
Time gap between trajectory point
method : str
'interpolate' or 'subsample'
Returns
-------
data1 : DataFrame
Sparsified trajectory data
'''
Vehicleid, Time, Lng, Lat = col
data[Time] = pd.to_datetime(data[Time], unit='s')
data1 = data.copy()
data1 = data1.drop_duplicates([Vehicleid, Time])
data1 = id_reindex(data1, Vehicleid)
data1 = data1.sort_values(by=[Vehicleid+'_new', Time])
data1['utctime'] = data1[Time].apply(lambda r: int(r.value/1000000000))
data1['utctime_new'] = data1[Vehicleid+'_new']*10000000000+data1['utctime']
if method == 'interpolate':
a = data1.groupby([Vehicleid+'_new']
)['utctime'].min().rename('mintime').reset_index()
b = data1.groupby([Vehicleid+'_new']
)['utctime'].max().rename('maxtime').reset_index()
minmaxtime = pd.merge(a, b)
mintime = data1['utctime'].min()
maxtime = data1['utctime'].max()
timedata = pd.DataFrame(
range(mintime, maxtime, timegap), columns=[Time])
timedata['tmp'] = 1
minmaxtime['tmp'] = 1
minmaxtime = pd.merge(minmaxtime, timedata)
minmaxtime = minmaxtime[(minmaxtime['mintime'] <= minmaxtime[Time]) & (
minmaxtime['maxtime'] >= minmaxtime[Time])]
minmaxtime['utctime_new'] = minmaxtime[Vehicleid+'_new'] * \
10000000000+minmaxtime[Time]
        minmaxtime[Time] = pd.to_datetime(minmaxtime[Time], unit='s')
from linearmodels.compat.statsmodels import Summary
from itertools import product
import struct
from typing import Optional
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
import scipy.sparse as sp
from scipy.sparse import csc_matrix
from linearmodels.iv._utility import annihilate
from linearmodels.iv.absorbing import (
_VARIABLE_CACHE,
AbsorbingLS,
AbsorbingRegressor,
Interaction,
category_continuous_interaction,
category_interaction,
category_product,
clear_cache,
)
from linearmodels.iv.model import _OLS
from linearmodels.iv.results import AbsorbingLSResults, OLSResults
from linearmodels.panel.utility import (
AbsorbingEffectError,
AbsorbingEffectWarning,
dummy_matrix,
)
from linearmodels.shared.exceptions import MissingValueWarning
from linearmodels.shared.utility import AttrDict
NOBS = 100
pytestmark = pytest.mark.filterwarnings(
"ignore:the matrix subclass:PendingDeprecationWarning"
)
class Hasher(object):
@property
def hash_func(self):
try:
import xxhash
return xxhash.xxh64()
except ImportError:
import hashlib
return hashlib.sha256()
def single(self, value):
h = self.hash_func
h.update(np.ascontiguousarray(value))
return h.hexdigest()
hasher = Hasher()
@pytest.fixture(scope="function")
def random_gen(request):
return np.random.RandomState(12345678)
def random_cat(ncat, size, frame=False, rs=None):
if rs is None:
rs = np.random.RandomState()
series = pd.Series(pd.Categorical(rs.randint(0, ncat, size)))
if frame:
return pd.DataFrame(series)
return series
def random_cont(size, rs=None):
if rs is None:
rs = np.random.RandomState()
series = pd.Series(rs.standard_normal(size))
return pd.DataFrame(series)
@pytest.fixture(scope="module", params=[1, 2, 3])
def cat(request):
rs = np.random.RandomState(0)
return pd.DataFrame(
{str(i): random_cat(4, NOBS, rs=rs) for i in range(request.param)}
)
@pytest.fixture(scope="module", params=[1, 2])
def cont(request):
rs = np.random.RandomState(0)
return pd.DataFrame(
{
"cont" + str(i): pd.Series(rs.standard_normal(NOBS))
for i in range(request.param)
}
)
@pytest.fixture(scope="module", params=[True, False])
def weights(request):
if not request.param:
return None
rs = np.random.RandomState(0)
return rs.chisquare(10, NOBS) / 10.0
@pytest.fixture(scope="module", params=[0, 1, 2])
def interact(request):
if not request.param:
return None
rs = np.random.RandomState(0)
interactions = []
for _ in range(request.param):
cat = random_cat(4, 100, frame=True, rs=rs)
cont = random_cont(100, rs=rs)
interactions.append(Interaction(cat, cont))
return interactions
def generate_data(
k=3,
const=True,
nfactors=1,
factor_density=10,
nobs=2000,
cont_interactions=1,
factor_format="interaction",
singleton_interaction=False,
weighted=False,
ncont=0,
):
rs = np.random.RandomState(1234567890)
density = [factor_density] * max(nfactors, cont_interactions)
x = rs.standard_normal((nobs, k))
if const:
x = np.column_stack([np.ones(nobs), x])
e = rs.standard_normal(nobs)
y = x.sum(1) + e
factors = []
for i in range(nfactors):
ncat = nobs // density[min(i, len(density) - 1)]
fact = rs.randint(ncat, size=nobs)
effects = rs.standard_normal(ncat)
y += effects[fact]
factors.append(pd.Series(pd.Categorical(fact)))
for i in range(ncont):
cont = rs.standard_normal(size=nobs)
factors.append(pd.Series(cont))
if factors:
factors = pd.concat(factors, axis=1)
if factor_format == "interaction":
if nfactors and ncont:
factors = Interaction(
factors.iloc[:, :nfactors], factors.iloc[:, nfactors:]
)
elif nfactors:
factors = Interaction(factors, None)
else:
factors = Interaction(None, factors)
else:
factors = None
interactions = []
for i in range(cont_interactions):
ncat = nobs // density[min(i, len(density) - 1)]
fact = rs.randint(ncat, size=nobs)
effects = rs.standard_normal(nobs)
y += effects
df = pd.DataFrame(
pd.Series(pd.Categorical(fact)), columns=["fact{0}".format(i)]
)
df_eff = pd.DataFrame(effects[:, None], columns=["effect_{0}".format(i)])
interactions.append(Interaction(df, df_eff))
if factor_format == "pandas":
for i, interact in enumerate(interactions):
interactions[i] = pd.concat([interact.cat, interact.cont], axis=1)
interactions = interactions if interactions else None
if interactions and singleton_interaction:
interactions = interactions[0]
if weighted:
weights = pd.DataFrame(rs.chisquare(10, size=(nobs, 1)) / 10)
else:
weights = None
return AttrDict(
y=y, x=x, absorb=factors, interactions=interactions, weights=weights
)
# Permutations, k in (0,3), const in (True,False), factors=(0,1,2), interactions in (0,1)
# k=3, const=True, nfactors=1, factor_density=10, nobs=2000, cont_interactions=1,
# format='interaction', singleton_interaction=False
configs = product(
[0, 3], # k
[False, True], # constant
[1, 2, 0], # factors
[10], # density
[2000], # nobs
[0, 1], # cont interactions
["interaction", "pandas"], # format
[False, True], # singleton
[False, True], # weighted
[0, 1], # ncont
)
data_configs = [c for c in configs if (c[2] or c[5] or c[9])]
id_str = (
"k: {0}, const: {1}, nfactors: {2}, density: {3}, nobs: {4}, "
"cont_interacts: {5}, format:{6}, singleton:{7}, weighted: {8}, ncont: {9}"
)
data_ids = [id_str.format(*config) for config in configs]
@pytest.fixture(scope="module", params=data_configs, ids=data_ids)
def data(request):
return generate_data(*request.param)
configs_ols = product(
[0, 3], # k
[False, True], # constant
[1, 2, 0], # factors
[50], # density
[500], # nobs
[0, 1], # cont interactions
["interaction"], # format
[False], # singleton
[False, True], # weighted
[0, 1], # ncont
)
configs_ols_data = [c for c in configs_ols if (c[0] or c[1])]
id_str = (
"k: {0}, const: {1}, nfactors: {2}, density: {3}, nobs: {4}, "
"cont_interacts: {5}, format:{6}, singleton:{7}, weighted: {8}, ncont: {9}"
)
ids_ols_data = [id_str.format(*config) for config in configs_ols]
@pytest.fixture(scope="module", params=configs_ols_data, ids=ids_ols_data)
def ols_data(request):
return generate_data(*request.param)
@pytest.mark.smoke
def test_smoke(data):
mod = AbsorbingLS(
data.y,
data.x,
absorb=data.absorb,
interactions=data.interactions,
weights=data.weights,
)
res = mod.fit()
assert isinstance(res.summary, Summary)
assert isinstance(str(res.summary), str)
def test_absorbing_exceptions(random_gen):
with pytest.raises(TypeError):
absorbed = random_gen.standard_normal((NOBS, 2))
assert isinstance(absorbed, np.ndarray)
AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
absorb=absorbed,
)
with pytest.raises(ValueError):
AbsorbingLS(
random_gen.standard_normal(NOBS), random_gen.standard_normal((NOBS - 1, 2))
)
with pytest.raises(ValueError):
AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
absorb=pd.DataFrame(random_gen.standard_normal((NOBS - 1, 1))),
)
with pytest.raises(ValueError):
AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
interactions=random_cat(10, NOBS - 1, frame=True, rs=random_gen),
)
mod = AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
interactions=random_cat(10, NOBS, frame=True, rs=random_gen),
)
with pytest.raises(RuntimeError):
assert isinstance(mod.absorbed_dependent, pd.DataFrame)
with pytest.raises(RuntimeError):
assert isinstance(mod.absorbed_exog, pd.DataFrame)
with pytest.raises(TypeError):
interactions = random_gen.randint(0, 10, size=(NOBS, 2))
assert isinstance(interactions, np.ndarray)
AbsorbingLS(
random_gen.standard_normal(NOBS),
random_gen.standard_normal((NOBS, 2)),
interactions=interactions,
)
def test_clear_cache():
_VARIABLE_CACHE["key"] = {"a": np.empty(100)}
clear_cache()
assert len(_VARIABLE_CACHE) == 0
def test_category_product(cat):
prod = category_product(cat)
if cat.shape[1] == 1:
assert_series_equal(prod, cat.iloc[:, 0], check_names=False)
else:
alt = cat.iloc[:, 0].astype("int64")
for i in range(1, cat.shape[1]):
alt += 10 ** (4 * i) * cat.iloc[:, i].astype("int64")
alt = pd.Categorical(alt)
alt = pd.Series(alt)
df = pd.DataFrame([prod.cat.codes, alt.cat.codes], index=["cat_prod", "alt"]).T
g = df.groupby("cat_prod").alt
assert (g.nunique() == 1).all()
g = df.groupby("alt").cat_prod
assert (g.nunique() == 1).all()
def test_category_product_too_large(random_gen):
dfc = {}
for i in range(20):
dfc[str(i)] = random_cat(10, 1000)
cat = pd.DataFrame(dfc)
with pytest.raises(ValueError):
category_product(cat)
def test_category_product_not_cat(random_gen):
cat = pd.DataFrame(
{str(i): pd.Series(random_gen.randint(0, 10, 1000)) for i in range(3)}
)
with pytest.raises(TypeError):
category_product(cat)
def test_category_interaction():
c = pd.Series(pd.Categorical([0, 0, 0, 1, 1, 1]))
actual = category_interaction(c, precondition=False).A
expected = np.zeros((6, 2))
expected[:3, 0] = 1.0
expected[3:, 1] = 1.0
assert_allclose(actual, expected)
actual = category_interaction(c, precondition=True).A
cond = np.sqrt((expected**2).sum(0))
expected /= cond
assert_allclose(actual, expected)
def test_category_continuous_interaction():
c = pd.Series(pd.Categorical([0, 0, 0, 1, 1, 1]))
v = pd.Series(np.arange(6.0))
actual = category_continuous_interaction(c, v, precondition=False)
expected = np.zeros((6, 2))
expected[:3, 0] = v[:3]
expected[3:, 1] = v[3:]
assert_allclose(actual.A, expected)
actual = category_continuous_interaction(c, v, precondition=True)
cond = np.sqrt((expected**2).sum(0))
expected /= cond
assert_allclose(actual.A, expected)
def test_category_continuous_interaction_interwoven():
c = pd.Series(pd.Categorical([0, 1, 0, 1, 0, 1]))
v = pd.Series(np.arange(6.0))
actual = category_continuous_interaction(c, v, precondition=False)
expected = np.zeros((6, 2))
expected[::2, 0] = v[::2]
expected[1::2, 1] = v[1::2]
assert_allclose(actual.A, expected)
def test_interaction_cat_only(cat):
interact = Interaction(cat=cat)
assert interact.nobs == cat.shape[0]
    assert_frame_equal(cat, interact.cat)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 10:09:21 2019
@author: nmei
"""
from autoreject import (AutoReject,get_rejection_threshold)
import mne
from glob import glob
import re
import os
import numpy as np
import pandas as pd
import pickle
#import faster # https://gist.github.com/wmvanvliet/d883c3fe1402c7ced6fc
from sklearn.metrics import roc_auc_score,roc_curve
from sklearn.metrics import (
classification_report,
matthews_corrcoef,
confusion_matrix,
f1_score,
log_loss,
r2_score
)
from sklearn.preprocessing import (MinMaxScaler,
OneHotEncoder,
FunctionTransformer,
StandardScaler)
from sklearn.pipeline import make_pipeline
from sklearn.ensemble.forest import _generate_unsampled_indices
from sklearn.utils import shuffle
from sklearn.svm import SVC,LinearSVC
from sklearn.calibration import CalibratedClassifierCV
from sklearn.decomposition import PCA
from sklearn.dummy import DummyClassifier
from sklearn.feature_selection import (SelectFromModel,
SelectPercentile,
VarianceThreshold,
mutual_info_classif,
f_classif,
chi2,
f_regression,
GenericUnivariateSelect)
from sklearn.model_selection import (StratifiedShuffleSplit,
cross_val_score)
from sklearn.ensemble import RandomForestClassifier,BaggingClassifier,VotingClassifier
from sklearn.neural_network import MLPClassifier
from xgboost import XGBClassifier
from itertools import product,combinations
from sklearn.base import clone
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from collections import OrderedDict
from scipy import stats
from collections import Counter
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import pyplot as plt
from matplotlib.pyplot import cm
from nilearn.plotting.img_plotting import (_load_anat,
_utils,
_plot_img_with_bg,
_get_colorbar_and_data_ranges,
_safe_get_data)
import matplotlib.patches as patches
try:
#from mvpa2.datasets.base import Dataset
from mvpa2.mappers.fx import mean_group_sample
#from mvpa2.measures import rsa
#from mvpa2.measures.searchlight import sphere_searchlight
#from mvpa2.base.learner import ChainLearner
#from mvpa2.mappers.shape import TransposeMapper
#from mvpa2.generators.partition import NFoldPartitioner
except:
pass#print('pymvpa is not installed')
try:
# from tqdm import tqdm_notebook as tqdm
from tqdm.auto import tqdm
except:
print('why is tqdm not installed?')
def preprocessing_conscious(raw,
events,
session,
tmin = -0,
tmax = 1,
notch_filter = 50,
event_id = {'living':1,'nonliving':2},
baseline = (None,None),
perform_ICA = False,
lowpass = None,
interpolate_bad_channels = True,):
"""
0. re-reference - explicitly
"""
raw_ref ,_ = mne.set_eeg_reference(raw,
ref_channels = 'average',
projection = True,)
raw_ref.apply_proj() # it might tell you it already has been re-referenced, but do it anyway
# everytime before filtering, explicitly pick the type of channels you want
# to perform the filters
picks = mne.pick_types(raw_ref.info,
meg = False, # No MEG
eeg = True, # YES EEG
eog = perform_ICA, # depends on ICA
)
# regardless the bandpass filtering later, we should always filter
# for wire artifacts and their oscillations
raw_ref.notch_filter(np.arange(notch_filter,241,notch_filter),
picks = picks)
if lowpass is not None:
raw_ref.filter(None,lowpass,)
epochs = mne.Epochs(raw_ref,
events, # numpy array
event_id, # dictionary
tmin = tmin,
tmax = tmax,
baseline = baseline, # range of time for computing the mean references for each channel and subtract these values from all the time points per channel
picks = picks,
detrend = 1, # detrend
preload = True # must be true if we want to do further processing
)
"""
1. if necessary, perform ICA
"""
if perform_ICA:
picks = mne.pick_types(epochs.info,
eeg = True, # YES EEG
eog = False # NO EOG
)
if interpolate_bad_channels:
interpolation_list = faster_bad_channels(epochs,picks=picks)
for ch_name in interpolation_list:
epochs.info['bads'].append(ch_name)
epochs = epochs.interpolate_bads()
# ar = AutoReject(
# picks = picks,
# random_state = 12345,
# )
# ar.fit(epochs)
# _,reject_log = ar.transform(epochs,return_log=True)
# calculate the noise covariance of the epochs
noise_cov = mne.compute_covariance(epochs,#[~reject_log.bad_epochs],
tmin = baseline[0],
tmax = baseline[1],
method = 'empirical',
rank = None,)
# define an ica function
ica = mne.preprocessing.ICA(n_components = .99,
n_pca_components = .99,
max_pca_components = None,
method = 'infomax',
max_iter = int(3e3),
noise_cov = noise_cov,
random_state = 12345,)
picks = mne.pick_types(epochs.info,
eeg = True, # YES EEG
eog = False # NO EOG
)
ica.fit(epochs,#[~reject_log.bad_epochs],
picks = picks,
start = tmin,
stop = tmax,
decim = 3,
tstep = 1. # Length of data chunks for artifact rejection in seconds. It only applies if inst is of type Raw.
)
        # search for artifactual ICA components automatically
        # most of these hyperparameters were used in an unrelated published study
ica.detect_artifacts(epochs,#[~reject_log.bad_epochs],
eog_ch = ['FT9','FT10','TP9','TP10'],
eog_criterion = 0.4, # arbitary choice
skew_criterion = 1, # arbitary choice
kurt_criterion = 1, # arbitary choice
var_criterion = 1, # arbitary choice
)
picks = mne.pick_types(epochs.info,
eeg = True, # YES EEG
eog = False # NO EOG
)
epochs_ica = ica.apply(epochs,#,[~reject_log.bad_epochs],
exclude = ica.exclude,
)
epochs = epochs_ica.copy()
else:
picks = mne.pick_types(epochs.info,
eeg = True, # YES EEG
eog = False # NO EOG
)
if interpolate_bad_channels:
interpolation_list = faster_bad_channels(epochs,picks=picks)
for ch_name in interpolation_list:
epochs.info['bads'].append(ch_name)
epochs = epochs.interpolate_bads()
# pick the EEG channels for later use
clean_epochs = epochs.pick_types(eeg = True, eog = False)
return clean_epochs
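# Hedged usage sketch (not part of the original pipeline): the .vhdr path and
# the use of read_raw_brainvision/find_events are assumptions about how the
# raw data would be loaded; the original experiment scripts may differ.
def _example_preprocessing_conscious(vhdr_path='subject1_session1.vhdr'):
    raw = mne.io.read_raw_brainvision(vhdr_path, preload=True)
    events = mne.find_events(raw)
    return preprocessing_conscious(raw, events, session=1,
                                   event_id={'living': 1, 'nonliving': 2},
                                   perform_ICA=False)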
def preprocessing_unconscious(raw,
events,
session,
tmin = -0,
tmax = 1,
notch_filter = 50,
event_id = {'living':1,'nonliving':2},
baseline = (None,None),
perform_ICA = False,
eog_chs = [],
ecg_chs = [],):
# everytime before filtering, explicitly pick the type of channels you want
# to perform the filters
picks = mne.pick_types(raw.info,
meg = True, # No MEG
eeg = False, # NO EEG
eog = True, # YES EOG
ecg = True, # YES ECG
)
# regardless the bandpass filtering later, we should always filter
# for wire artifacts and their oscillations
if type(notch_filter) is list:
for item in notch_filter:
raw.notch_filter(np.arange(item,301,item),
picks = picks)
else:
raw.notch_filter(np.arange(notch_filter,301,notch_filter),
picks = picks)
# filter EOG and ECG channels
picks = mne.pick_types(raw.info,
meg = False,
eeg = False,
eog = True,
ecg = True,)
raw.filter(1,12,picks = picks,)
# epoch the data
picks = mne.pick_types(raw.info,
meg = True,
eog = True,
ecg = True,
)
epochs = mne.Epochs(raw,
events, # numpy array
event_id, # dictionary
tmin = tmin,
tmax = tmax,
baseline = baseline, # range of time for computing the mean references for each channel and subtract these values from all the time points per channel
picks = picks,
detrend = 1, # detrend
preload = True # must be true if we want to do further processing
)
"""
1. if necessary, perform ICA
"""
if perform_ICA:
picks = mne.pick_types(epochs.info,
meg = True, # YES MEG
eeg = False, # NO EEG
eog = False, # NO EOG
ecg = False, # NO ECG
)
# ar = AutoReject(
# picks = picks,
# random_state = 12345,
# )
# ar.fit(epochs)
# _,reject_log = ar.transform(epochs,return_log=True)
# calculate the noise covariance of the epochs
noise_cov = mne.compute_covariance(epochs,#[~reject_log.bad_epochs],
tmin = tmin,
tmax = 0,
method = 'empirical',
rank = None,)
# define an ica function
ica = mne.preprocessing.ICA(n_components = .99,
n_pca_components = .99,
max_pca_components = None,
method = 'extended-infomax',
max_iter = int(3e3),
noise_cov = noise_cov,
random_state = 12345,)
picks = mne.pick_types(epochs.info,
eeg = True, # YES EEG
eog = False # NO EOG
)
ica.fit(epochs,#[~reject_log.bad_epochs],
picks = picks,
start = tmin,
stop = tmax,
decim = 3,
tstep = 1. # Length of data chunks for artifact rejection in seconds. It only applies if inst is of type Raw.
)
        # search for artifactual ICA components automatically
        # most of these hyperparameters were used in an unrelated published study
ica.detect_artifacts(epochs,#[~reject_log.bad_epochs],
eog_ch = eog_chs,
ecg_ch = ecg_chs[0],
                             eog_criterion = 0.4, # arbitrary choice
                             ecg_criterion = 0.1, # arbitrary choice
                             skew_criterion = 1, # arbitrary choice
                             kurt_criterion = 1, # arbitrary choice
                             var_criterion = 1, # arbitrary choice
)
epochs_ica = ica.apply(epochs,#,[~reject_log.bad_epochs],
exclude = ica.exclude,
)
epochs = epochs_ica.copy()
# pick the EEG channels for later use
clean_epochs = epochs.pick_types(meg = True, eeg = True, eog = False)
return clean_epochs
def _preprocessing_conscious(
raw,events,session,
n_interpolates = np.arange(1,32,4),
consensus_pers = np.linspace(0,1.0,11),
event_id = {'living':1,'nonliving':2},
tmin = -0.15,
tmax = 0.15 * 6,
high_pass = 0.001,
low_pass = 30,
notch_filter = 50,
fix = False,
ICA = False,
logging = None,
filtering = False,):
"""
Preprocessing pipeline for conscious trials
Inputs
-------------------
raw: MNE Raw object, contineous EEG raw data
events: Numpy array with 3 columns, where the first column indicates time and the last column indicates event code
n_interpolates: list of values 1 <= N <= max number of channels
consensus_pers: ?? autoreject hyperparameter search grid
event_id: MNE argument, to control for epochs
tmin: first time stamp of the epoch
tmax: last time stamp of the epoch
high_pass: low cutoff of the bandpass filter
low_pass: high cutoff of the bandpass filter
notch_filter: frequency of the notch filter, 60 in US and 50 in Europe
fix : when "True", apply autoReject algorithm to remove artifacts that was not identifed in the ICA procedure
Output
ICA : when "True", apply ICA artifact correction in ICA space
logging: when not "None", output some log files for us to track the process
-------------------
Epochs: MNE Epochs object, segmented and cleaned EEG data (n_trials x n_channels x n_times)
"""
"""
0. re-reference - explicitly
"""
raw_ref ,_ = mne.set_eeg_reference(raw,
ref_channels = 'average',
projection = True,)
raw_ref.apply_proj() # it might tell you it already has been re-referenced, but do it anyway
"""
    1. high-pass filter
    (zero-phase FIR filter; see the `raw_ref.filter` call below)
"""
    # every time before filtering, explicitly pick the channel types
    # the filters should be applied to
picks = mne.pick_types(raw_ref.info,
meg = False, # No MEG
eeg = True, # YES EEG
eog = True, # YES EOG
)
    # regardless of the bandpass filtering applied later, we should always
    # notch-filter out power-line artifacts and their harmonics
raw_ref.notch_filter(np.arange(notch_filter,241,notch_filter),
picks = picks)
# high pass filtering
picks = mne.pick_types(raw_ref.info,
meg = False, # No MEG
eeg = True, # YES EEG
eog = False, # No EOG
)
if filtering:
raw_ref.filter(high_pass,
None,
picks = picks,
filter_length = 'auto', # the filter length is chosen based on the size of the transition regions (6.6 times the reciprocal of the shortest transition band for fir_window=’hamming’ and fir_design=”firwin2”, and half that for “firwin”)
l_trans_bandwidth= high_pass,
method = 'fir', # overlap-add FIR filtering
phase = 'zero', # the delay of this filter is compensated for
fir_window = 'hamming', # The window to use in FIR design
                       fir_design = 'firwin2', # FIR design method; 'firwin2' corresponds to scipy.signal.firwin2
)
"""
2. epoch the data
"""
picks = mne.pick_types(raw_ref.info,
eeg = True, # YES EEG
eog = True, # YES EOG
)
epochs = mne.Epochs(raw_ref,
events, # numpy array
event_id, # dictionary
tmin = tmin,
tmax = tmax,
baseline = (tmin,- (1 / 60 * 20)), # range of time for computing the mean references for each channel and subtract these values from all the time points per channel
picks = picks,
detrend = 1, # linear detrend
preload = True # must be true if we want to do further processing
)
"""
    3. ICA on the epoched data
"""
if ICA:
"""
        3a. fit autoreject first to flag bad epochs before fitting the ICA
"""
picks = mne.pick_types(epochs.info,
eeg = True, # YES EEG
eog = False # NO EOG
)
ar = AutoReject(
# n_interpolate = n_interpolates,
# consensus = consensus_pers,
# thresh_method = 'bayesian_optimization',
picks = picks,
random_state = 12345,
# n_jobs = 1,
# verbose = 'progressbar',
)
ar.fit(epochs)
_,reject_log = ar.transform(epochs,return_log=True)
if logging is not None:
fig = plot_EEG_autoreject_log(ar)
fig.savefig(logging,bbox_inches = 'tight')
for key in epochs.event_id.keys():
evoked = epochs[key].average()
fig_ = evoked.plot_joint(title = key)
fig_.savefig(logging.replace('.png',f'_{key}_pre.png'),
bbox_inches = 'tight')
plt.close('all')
# calculate the noise covariance of the epochs
noise_cov = mne.compute_covariance(epochs[~reject_log.bad_epochs],
tmin = tmin,
tmax = tmax,
method = 'empirical',
rank = None,)
        # define the ICA object
ica = mne.preprocessing.ICA(n_components = .99,
n_pca_components = .99,
max_pca_components = None,
method = 'extended-infomax',
max_iter = int(3e3),
noise_cov = noise_cov,
random_state = 12345,)
# # search for a global rejection threshold globally
# reject = get_rejection_threshold(epochs[~reject_log.bad_epochs],
# decim = 1,
# random_state = 12345)
picks = mne.pick_types(epochs.info,
eeg = True, # YES EEG
eog = False # NO EOG
)
ica.fit(epochs[~reject_log.bad_epochs],
picks = picks,
start = tmin,
stop = tmax,
# reject = reject, # if some data in a window has values that exceed the rejection threshold, this window will be ignored when computing the ICA
decim = 3,
tstep = 1. # Length of data chunks for artifact rejection in seconds. It only applies if inst is of type Raw.
)
        # automatically search for artifact-related ICA components
        # most of these hyperparameters were taken from an unrelated published study
        ica.detect_artifacts(epochs[~reject_log.bad_epochs],
                             eog_ch = ['FT9','FT10','TP9','TP10'],
                             eog_criterion = 0.4, # arbitrary choice
                             skew_criterion = 2, # arbitrary choice
                             kurt_criterion = 2, # arbitrary choice
                             var_criterion = 2, # arbitrary choice
)
# # explicitly search for eog ICAs
# eog_idx,scores = ica.find_bads_eog(raw_ref,
# start = tmin,
# stop = tmax,
# l_freq = 2,
# h_freq = 10,
# )
# ica.exclude += eog_idx
picks = mne.pick_types(epochs.info,
eeg = True, # YES EEG
eog = False # NO EOG
)
epochs_ica = ica.apply(epochs,#,[~reject_log.bad_epochs],
exclude = ica.exclude,
)
else:
picks = mne.pick_types(epochs.info,
eeg = True,
eog = False,)
# epochs.filter(None,
# low_pass,
# picks = picks,
# filter_length = 'auto', # the filter length is chosen based on the size of the transition regions (6.6 times the reciprocal of the shortest transition band for fir_window=’hamming’ and fir_design=”firwin2”, and half that for “firwin”)
# method = 'fir', # overlap-add FIR filtering
# phase = 'zero', # the delay of this filter is compensated for
# fir_window = 'hamming', # The window to use in FIR design
# fir_design = 'firwin2', # a time-domain design technique that generally gives improved attenuation using fewer samples than “firwin2”
# )
if logging is not None:
for key in epochs.event_id.keys():
evoked = epochs[key].average()
fig_ = evoked.plot_joint(title = key)
fig_.savefig(logging.replace('.png',f'_{key}_post.png'),
bbox_inches = 'tight')
plt.close('all')
return epochs
if fix:
"""
"""
ar = AutoReject(
# n_interpolate = n_interpolates,
# consensus = consensus_pers,
# thresh_method = 'bayesian_optimization',
picks = picks,
random_state = 12345,
# n_jobs = 1,
# verbose = 'progressbar',
)
epochs_clean = ar.fit_transform(epochs_ica,
)
return epochs_clean.pick_types(eeg=True,eog=False)
else:
clean_epochs = epochs_ica.pick_types(eeg = True,
eog = False)
picks = mne.pick_types(clean_epochs.info,
eeg = True,
eog = False,)
# clean_epochs.filter(None,
# low_pass,
# picks = picks,
# filter_length = 'auto', # the filter length is chosen based on the size of the transition regions (6.6 times the reciprocal of the shortest transition band for fir_window=’hamming’ and fir_design=”firwin2”, and half that for “firwin”)
# method = 'fir', # overlap-add FIR filtering
# phase = 'zero', # the delay of this filter is compensated for
# fir_window = 'hamming', # The window to use in FIR design
# fir_design = 'firwin2', # a time-domain design technique that generally gives improved attenuation using fewer samples than “firwin2”
# )
if logging is not None:
for key in clean_epochs.event_id.keys():
evoked = epochs[key].average()
fig_ = evoked.plot_joint(title = key)
fig_.savefig(logging.replace('.png',f'_{key}_post.png'),
bbox_inches = 'tight')
plt.close('all')
return clean_epochs
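# A minimal usage sketch for `_preprocessing_conscious`, kept commented out so the
# module stays importable; the file name and stim channel below are hypothetical,
# not taken from the original pipeline:
# raw    = mne.io.read_raw_fif('sub-01_session-01_raw.fif', preload = True)
# events = mne.find_events(raw, stim_channel = 'STI 014')
# epochs = _preprocessing_conscious(raw, events, session = 1,
#                                   ICA       = True,
#                                   logging   = None,
#                                   filtering = True,)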
def plot_temporal_decoding(times,
scores,
frames,
ii,
conscious_state,
plscores,
n_splits,
ylim = (0.2,0.8)):
scores_mean = scores.mean(0)
scores_se = scores.std(0) / np.sqrt(n_splits)
fig,ax = plt.subplots(figsize = (16,8))
ax.plot(times,scores_mean,
color = 'k',
alpha = .9,
label = f'Average across {n_splits} folds',
)
ax.fill_between(times,
scores_mean + scores_se,
scores_mean - scores_se,
color = 'red',
alpha = 0.4,
label = 'Standard Error',)
ax.axhline(0.5,
linestyle = '--',
color = 'k',
alpha = 0.7,
label = 'Chance level')
ax.axvline(0,
linestyle = '--',
color = 'blue',
alpha = 0.7,
label = 'Probe onset',)
if ii is not None:
ax.axvspan(frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100),
frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100),
color = 'blue',
alpha = 0.3,
label = 'probe offset ave +/- std',)
ax.set(xlim = (times.min(),
times.max()),
ylim = ylim,#(0.4,0.6),
title = f'Temporal decoding of {conscious_state} = {plscores.mean():.3f}+/-{plscores.std():.3f}',
)
ax.legend()
return fig,ax
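# A minimal, hypothetical call to `plot_temporal_decoding`, kept commented out; the
# variable names and shapes are assumptions: `scores` is an n_splits x n_times array
# of decoding scores and `plscores` holds one cross-validated score per fold:
# fig, ax = plot_temporal_decoding(times, scores,
#                                  frames          = None,
#                                  ii              = None,
#                                  conscious_state = 'conscious',
#                                  plscores        = scores.mean(1),
#                                  n_splits        = scores.shape[0])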
def plot_temporal_generalization(scores_gen_,
times,
ii,
conscious_state,
frames,
vmin = 0.4,
vmax = 0.6):
fig, ax = plt.subplots(figsize = (10,10))
if len(scores_gen_.shape) > 2:
scores_gen_ = scores_gen_.mean(0)
im = ax.imshow(
scores_gen_,
interpolation = 'hamming',
origin = 'lower',
cmap = 'RdBu_r',
extent = times[[0, -1, 0, -1]],
vmin = vmin,
vmax = vmax,
)
ax.set_xlabel('Testing Time (s)')
ax.set_ylabel('Training Time (s)')
ax.set_title(f'Temporal generalization of {conscious_state}')
ax.axhline(0.,
linestyle = '--',
color = 'black',
alpha = 0.7,
label = 'Probe onset',)
ax.axvline(0.,
linestyle = '--',
color = 'black',
alpha = 0.7,
)
if ii is not None:
ax.axhspan(frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100),
frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100),
color = 'black',
alpha = 0.2,
label = 'probe offset ave +/- std',)
ax.axvspan(frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100),
frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100),
color = 'black',
alpha = 0.2,
)
plt.colorbar(im, ax = ax)
ax.legend()
return fig,ax
def plot_t_stats(T_obs,
clusters,
cluster_p_values,
times,
ii,
conscious_state,
frames,):
    # since the p value of each cluster is corrected for multiple comparisons,
    # we can directly use 0.05 as the threshold to filter clusters
    T_obs_plot = np.zeros_like(T_obs)
k = np.array([np.sum(c) for c in clusters])
if np.max(k) > 1000:
c_thresh = 1000
elif 1000 > np.max(k) > 500:
c_thresh = 500
elif 500 > np.max(k) > 100:
c_thresh = 100
elif 100 > np.max(k) > 10:
c_thresh = 10
else:
c_thresh = 0
for c, p_val in zip(clusters, cluster_p_values):
if (p_val <= 0.01) and (np.sum(c) >= c_thresh):# and (distance.cdist(np.where(c == True)[0].reshape(1,-1),np.where(c == True)[1].reshape(1,-1))[0][0] < 200):# and (np.sum(c) >= c_thresh):
T_obs_plot[c] = T_obs[c]
    # define the range of the colorbar
vmax = np.max(np.abs(T_obs))
vmin = -vmax# - 2 * t_threshold
plt.close('all')
fig,ax = plt.subplots(figsize=(10,10))
im = ax.imshow(T_obs_plot,
origin = 'lower',
cmap = plt.cm.RdBu_r,# to emphasize the clusters
extent = times[[0, -1, 0, -1]],
vmin = vmin,
vmax = vmax,
interpolation = 'lanczos',
)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right",
size = "5%",
pad = 0.2)
cb = plt.colorbar(im,
cax = cax,
ticks = np.linspace(vmin,vmax,3))
cb.ax.set(title = 'T Statistics')
ax.plot([times[0],times[-1]],[times[0],times[-1]],
linestyle = '--',
color = 'black',
alpha = 0.7,
)
ax.axhline(0.,
linestyle = '--',
color = 'black',
alpha = 0.7,
label = 'Probe onset',)
ax.axvline(0.,
linestyle = '--',
color = 'black',
alpha = 0.7,
)
if ii is not None:
ax.axhspan(frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100),
frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100),
color = 'black',
alpha = 0.2,
label = 'probe offset ave +/- std',)
ax.axvspan(frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100),
frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100),
color = 'black',
alpha = 0.2,
)
ax.set(xlabel = 'Test time',
ylabel = 'Train time',
title = f'nonparametric t test of {conscious_state}')
ax.legend()
return fig,ax
def plot_p_values(times,
clusters,
cluster_p_values,
ii,
conscious_state,
frames):
width = len(times)
p_clust = np.ones((width, width))# * np.nan
k = np.array([np.sum(c) for c in clusters])
if np.max(k) > 1000:
c_thresh = 1000
elif 1000 > np.max(k) > 500:
c_thresh = 500
elif 500 > np.max(k) > 100:
c_thresh = 100
elif 100 > np.max(k) > 10:
c_thresh = 10
else:
c_thresh = 0
for c, p_val in zip(clusters, cluster_p_values):
if (np.sum(c) >= c_thresh):
p_val_ = p_val.copy()
if p_val_ > 0.05:
p_val_ = 1.
p_clust[c] = p_val_
    # define the range of the colorbar
vmax = 1.
vmin = 0.
plt.close('all')
fig,ax = plt.subplots(figsize = (10,10))
im = ax.imshow(p_clust,
origin = 'lower',
cmap = plt.cm.RdBu_r,# to emphasize the clusters
extent = times[[0, -1, 0, -1]],
vmin = vmin,
vmax = vmax,
interpolation = 'hanning',
)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right",
size = "5%",
pad = 0.2)
cb = plt.colorbar(im,
cax = cax,
ticks = [0,0.05,1])
cb.ax.set(title = 'P values')
ax.plot([times[0],times[-1]],[times[0],times[-1]],
linestyle = '--',
color = 'black',
alpha = 0.7,
)
ax.axhline(0.,
linestyle = '--',
color = 'black',
alpha = 0.7,
label = 'Probe onset',)
ax.axvline(0.,
linestyle = '--',
color = 'black',
alpha = 0.7,
)
if ii is not None:
ax.axhspan(frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100),
frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100),
color = 'black',
alpha = 0.2,
label = 'probe offset ave +/- std',)
ax.axvspan(frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100),
frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100),
color = 'black',
alpha = 0.2,
)
ax.set(xlabel = 'Test time',
ylabel = 'Train time',
title = f'p value map of {conscious_state}')
ax.legend()
return fig,ax
def plot_EEG_autoreject_log(autoreject_object,):
ar = autoreject_object
loss = ar.loss_['eeg'].mean(axis=-1) # losses are stored by channel type.
fig,ax = plt.subplots(figsize=(10,6))
im = ax.matshow(loss.T * 1e6, cmap=plt.get_cmap('viridis'))
ax.set(xticks = range(len(ar.consensus)),
xticklabels = ar.consensus.round(2),
yticks = range(len(ar.n_interpolate)),
yticklabels = ar.n_interpolate)
# Draw rectangle at location of best parameters
idx, jdx = np.unravel_index(loss.argmin(), loss.shape)
rect = patches.Rectangle((idx - 0.5, jdx - 0.5), 1, 1, linewidth=2,
edgecolor='r', facecolor='none')
ax.add_patch(rect)
ax.xaxis.set_ticks_position('bottom')
ax.set(xlabel = r'Consensus percentage $\kappa$',
ylabel = r'Max sensors interpolated $\rho$',
title = 'Mean cross validation error (x 1e6)')
plt.colorbar(im)
return fig
def str2int(x):
if type(x) is str:
return float(re.findall(r'\d+',x)[0])
else:
return x
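# quick illustration (hypothetical input): str2int('key_3') returns 3.0 because the
# first run of digits is extracted and cast to float; non-string inputs such as 7
# are returned unchanged.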
def simple_load(f,idx):
df = pd.read_csv(f)
df['run'] = idx
return df
def get_frames(directory,new = True,EEG = True):
if EEG:
files = glob(os.path.join(directory,'*trials.csv'))
# elif EEG == 'fMRI':
# files = glob(os.path.join(directory,'*trials.csv'))
else:
files = glob(os.path.join(directory,'*','*.csv'))
empty_temp = ''
for ii,f in enumerate(files):
df = pd.read_csv(f).dropna()
for vis,df_sub in df.groupby(['visible.keys_raw']):
try:
print(f'session {ii+1}, vis = {vis}, n_trials = {df_sub.shape[0]}')
empty_temp += f'session {ii+1}, vis = {vis}, n_trials = {df_sub.shape[0]}'
empty_temp += '\n'
except:
print('session {}, vis = {}, n_trials = {}'.format(ii+1,
vis,df_sub.shape[0]))
df = pd.concat([simple_load(f,ii).dropna() for ii,f in enumerate(files)])
try:
for col in ['probeFrames_raw',
'response.keys_raw',
'visible.keys_raw']:
# print(df[col])
df[col] = df[col].apply(str2int)
except:
for col in ['probe_Frames_raw',
'response.keys_raw',
'visible.keys_raw']:
# print(df[col])
df[col] = df[col].apply(str2int)
df["probeFrames_raw"] = df["probe_Frames_raw"]
df = df[df['probeFrames_raw'] != 999]
df = df.sort_values(['run','order'])
for vis,df_sub in df.groupby(['visible.keys_raw']):
df_press1 = df_sub[df_sub['response.keys_raw'] == 1]
df_press2 = df_sub[df_sub['response.keys_raw'] == 2]
prob1 = df_press1.shape[0] / df_sub.shape[0]
prob2 = df_press2.shape[0] / df_sub.shape[0]
try:
print(f"\nvis = {vis},mean frames = {np.median(df_sub['probeFrames_raw']):.5f}")
print(f"vis = {vis},prob(press 1) = {prob1:.4f}, p(press 2) = {prob2:.4f}")
empty_temp += f"\nvis = {vis},mean frames = {np.median(df_sub['probeFrames_raw']):.5f}\n"
empty_temp += f"vis = {vis},prob(press 1) = {prob1:.4f}, p(press 2) = {prob2:.4f}\n"
except:
print("\nvis = {},mean frames = {:.5f}".format(
vis,np.median(df_sub['probeFrames_raw'])))
print(f"vis = {vis},prob(press 1) = {prob1:.4f}, p(press 2) = {prob2:.4f}")
if new:
df = []
for f in files:
temp = pd.read_csv(f).dropna()
try:
temp[['probeFrames_raw','visible.keys_raw']]
except:
temp['probeFrames_raw'] = temp['probe_Frames_raw']
probeFrame = []
for ii,row in temp.iterrows():
if int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 1:
probeFrame.append(row['probeFrames_raw'])
elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 2:
probeFrame.append(row['probeFrames_raw'])
elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 3:
probeFrame.append(row['probeFrames_raw'])
elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 4:
probeFrame.append(row['probeFrames_raw'])
temp['probeFrames'] = probeFrame
df.append(temp)
df = pd.concat(df)
else:
df = []
for f in files:
temp = pd.read_csv(f).dropna()
temp[['probeFrames_raw','visible.keys_raw']]
probeFrame = []
for ii,row in temp.iterrows():
if int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 1:
probeFrame.append(row['probeFrames_raw'] - 2)
elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 2:
probeFrame.append(row['probeFrames_raw'] - 1)
elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 3:
probeFrame.append(row['probeFrames_raw'] + 1)
elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 4:
probeFrame.append(row['probeFrames_raw'] + 2)
temp['probeFrames'] = probeFrame
df.append(temp)
df = pd.concat(df)
df['probeFrames'] = df['probeFrames'].apply(str2int)
df = df[df['probeFrames'] != 999]
results = []
for vis,df_sub in df.groupby(['visible.keys_raw']):
corrects = df_sub['response.corr_raw'].sum() / df_sub.shape[0]
try:
print(f"vis = {vis},N = {df_sub.shape[0]},mean frames = {np.mean(df_sub['probeFrames']):.2f} +/- {np.std(df_sub['probeFrames']):.2f}\np(correct) = {corrects:.4f}")
empty_temp += f"vis = {vis},N = {df_sub.shape[0]},mean frames = {np.mean(df_sub['probeFrames']):.2f} +/- {np.std(df_sub['probeFrames']):.2f}\np(correct) = {corrects:.4f}\n"
empty_temp += f"RT = {np.mean(df_sub['visible.rt_raw']):.3f} +/- {np.std(df_sub['visible.rt_raw']):.3f}\n"
except:
print("vis = {},mean frames = {:.2f} +/- {:.2f}".format(
vis,np.mean(df_sub['probeFrames']),np.std(df_sub['probeFrames'])))
results.append([vis,np.mean(df_sub['probeFrames']),np.std(df_sub['probeFrames'])])
return results,empty_temp
def preprocess_behavioral_file(f):
df = read_behavorial_file(f)
for col in ['probeFrames_raw',
'response.keys_raw',
'visible.keys_raw']:
df[col] = df[col].apply(str2int)
df = df.sort_values(['order'])
return df
def read_behavorial_file(f):
temp = pd.read_csv(f).iloc[:-12,:]
return temp
def preload(f):
temp = pd.read_csv(f).iloc[-12:,:2]
return temp
def extract(x):
try:
return int(re.findall(r'\d',x)[0])
except:
return int(99)
#def extract_session_run_from_MRI(x):
# temp = re.findall(r'\d+',x)
# session = temp[1]
# if int(session) == 7:
# session = '1'
# run = temp[-1]
# return session,run
#def check_behaviral_data_session_block(x):
# temp = preload(x)
# temp.index = temp['category']
# temp = temp.T
# session = int(temp['session'].values[-1])
# block = int(temp['block'].values[-1])
# return session,block
#def compare_match(behavorial_file_name,session,block):
# behav_session,behav_block = check_behaviral_data_session_block(behavorial_file_name)
# if np.logical_and(behav_session == session, behav_block == block):
# return True
# else:
# return False
def add_track(df_sub):
n_rows = df_sub.shape[0]
if len(df_sub.index.values) > 1:
temp = '+'.join(str(item + 10) for item in df_sub.index.values)
else:
temp = str(df_sub.index.values[0])
    df_sub = df_sub.iloc[0,:].to_frame().T # keep the first row as the representative metadata row for this group
df_sub['n_volume'] = n_rows
df_sub['time_indices'] = temp
return df_sub
def groupby_average(fmri,df,groupby = ['trials']):
BOLD_average = np.array([np.mean(fmri[df_sub.index],0) for _,df_sub in df.groupby(groupby)])
df_average = pd.concat([add_track(df_sub) for ii,df_sub in df.groupby(groupby)])
return BOLD_average,df_average
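# sketch of the intended use (shapes are assumptions, not from the original code):
# `fmri` is an (n_volumes x n_voxels) array and `df` a per-volume event table whose
# index matches the rows of `fmri`; averaging within each trial yields one BOLD
# pattern and one metadata row per trial.
# BOLD_average, df_average = groupby_average(fmri, df_events, groupby = ['trials'])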
def get_brightness_threshold(thresh):
return [0.75 * val for val in thresh]
def get_brightness_threshold_double(thresh):
return [2 * 0.75 * val for val in thresh]
def cartesian_product(fwhms, in_files, usans, btthresh):
from nipype.utils.filemanip import ensure_list
# ensure all inputs are lists
in_files = ensure_list(in_files)
fwhms = [fwhms] if isinstance(fwhms, (int, float)) else fwhms
# create cartesian product lists (s_<name> = single element of list)
cart_in_file = [
s_in_file for s_in_file in in_files for s_fwhm in fwhms
]
cart_fwhm = [
s_fwhm for s_in_file in in_files for s_fwhm in fwhms
]
cart_usans = [
s_usans for s_usans in usans for s_fwhm in fwhms
]
cart_btthresh = [
s_btthresh for s_btthresh in btthresh for s_fwhm in fwhms
]
return cart_in_file, cart_fwhm, cart_usans, cart_btthresh
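# worked example (hypothetical values): with in_files = ['run1.nii.gz','run2.nii.gz']
# and fwhms = [3, 6], the function returns
# cart_in_file = ['run1.nii.gz','run1.nii.gz','run2.nii.gz','run2.nii.gz'] and
# cart_fwhm    = [3, 6, 3, 6], so the expanded lists can be zipped element-wise.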
def getusans(x):
return [[tuple([val[0], 0.5 * val[1]])] for val in x]
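# each element of `x` is expected to be a [mean_func_file, 2 * brightness_threshold]
# pair coming from the `merge` node in the workflow below; the helper returns the
# (file, threshold) tuples that fsl.SUSAN expects, e.g. (hypothetical values):
# getusans([['mean_func.nii.gz', 1500.0]]) -> [[('mean_func.nii.gz', 750.0)]]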
def create_fsl_FEAT_workflow_func(whichrun = 0,
whichvol = 'middle',
workflow_name = 'nipype_mimic_FEAT',
first_run = True,
func_data_file = 'temp',
fwhm = 3):
"""
Works with fsl-5.0.9 and fsl-5.0.11, but not fsl-6.0.0
"""
from nipype.workflows.fmri.fsl import preprocess
from nipype.interfaces import fsl
from nipype.interfaces import utility as util
from nipype.pipeline import engine as pe
"""
Setup some functions and hyperparameters
"""
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
pickrun = preprocess.pickrun
pickvol = preprocess.pickvol
getthreshop = preprocess.getthreshop
getmeanscale = preprocess.getmeanscale
# chooseindex = preprocess.chooseindex
"""
Start constructing the workflow graph
"""
preproc = pe.Workflow(name = workflow_name)
"""
Initialize the input and output spaces
"""
inputnode = pe.Node(
interface = util.IdentityInterface(fields = ['func',
'fwhm',
'anat']),
name = 'inputspec')
outputnode = pe.Node(
interface = util.IdentityInterface(fields = ['reference',
'motion_parameters',
'realigned_files',
'motion_plots',
'mask',
'smoothed_files',
'mean']),
name = 'outputspec')
"""
first step: convert Images to float values
"""
img2float = pe.MapNode(
interface = fsl.ImageMaths(
out_data_type = 'float',
op_string = '',
suffix = '_dtype'),
iterfield = ['in_file'],
name = 'img2float')
preproc.connect(inputnode,'func',
img2float,'in_file')
"""
delete first 10 volumes
"""
develVolume = pe.MapNode(
interface = fsl.ExtractROI(t_min = 10,
t_size = 508),
iterfield = ['in_file'],
name = 'remove_volumes')
preproc.connect(img2float, 'out_file',
develVolume, 'in_file')
if first_run == True:
"""
extract example fMRI volume: middle one
"""
extract_ref = pe.MapNode(
interface = fsl.ExtractROI(t_size = 1,),
iterfield = ['in_file'],
name = 'extractref')
# connect to the deleteVolume node to get the data
preproc.connect(develVolume,'roi_file',
extract_ref,'in_file')
# connect to the deleteVolume node again to perform the extraction
preproc.connect(develVolume,('roi_file',pickvol,0,whichvol),
extract_ref,'t_min')
# connect to the output node to save the reference volume
preproc.connect(extract_ref,'roi_file',
outputnode, 'reference')
if first_run == True:
"""
Realign the functional runs to the reference (`whichvol` volume of first run)
"""
motion_correct = pe.MapNode(
interface = fsl.MCFLIRT(save_mats = True,
save_plots = True,
save_rms = True,
stats_imgs = True,
interpolation = 'spline'),
iterfield = ['in_file','ref_file'],
name = 'MCFlirt',
)
# connect to the develVolume node to get the input data
preproc.connect(develVolume, 'roi_file',
motion_correct, 'in_file',)
######################################################################################
        ################# the part where we would replace the reference image if one exists ####
######################################################################################
# connect to the develVolume node to get the reference
preproc.connect(extract_ref, 'roi_file',
motion_correct, 'ref_file')
######################################################################################
# connect to the output node to save the motion correction parameters
preproc.connect(motion_correct, 'par_file',
outputnode, 'motion_parameters')
# connect to the output node to save the other files
preproc.connect(motion_correct, 'out_file',
outputnode, 'realigned_files')
else:
"""
Realign the functional runs to the reference (`whichvol` volume of first run)
"""
motion_correct = pe.MapNode(
interface = fsl.MCFLIRT(ref_file = first_run,
save_mats = True,
save_plots = True,
save_rms = True,
stats_imgs = True,
interpolation = 'spline'),
iterfield = ['in_file','ref_file'],
name = 'MCFlirt',
)
# connect to the develVolume node to get the input data
preproc.connect(develVolume, 'roi_file',
motion_correct, 'in_file',)
# connect to the output node to save the motion correction parameters
preproc.connect(motion_correct, 'par_file',
outputnode, 'motion_parameters')
# connect to the output node to save the other files
preproc.connect(motion_correct, 'out_file',
outputnode, 'realigned_files')
"""
plot the estimated motion parameters
"""
plot_motion = pe.MapNode(
interface = fsl.PlotMotionParams(in_source = 'fsl'),
iterfield = ['in_file'],
name = 'plot_motion',
)
plot_motion.iterables = ('plot_type',['rotations',
'translations',
'displacement'])
preproc.connect(motion_correct, 'par_file',
plot_motion, 'in_file')
preproc.connect(plot_motion, 'out_file',
outputnode, 'motion_plots')
"""
extract the mean volume of the first functional run
"""
meanfunc = pe.Node(
interface = fsl.ImageMaths(op_string = '-Tmean',
suffix = '_mean',),
name = 'meanfunc')
preproc.connect(motion_correct, ('out_file',pickrun,whichrun),
meanfunc, 'in_file')
"""
strip the skull from the mean functional to generate a mask
"""
meanfuncmask = pe.Node(
interface = fsl.BET(mask = True,
no_output = True,
frac = 0.3,
surfaces = True,),
name = 'bet2_mean_func')
preproc.connect(meanfunc, 'out_file',
meanfuncmask, 'in_file')
"""
Mask the motion corrected functional data with the mask to create the masked (bet) motion corrected functional data
"""
maskfunc = pe.MapNode(
interface = fsl.ImageMaths(suffix = '_bet',
op_string = '-mas'),
iterfield = ['in_file'],
name = 'maskfunc')
preproc.connect(motion_correct, 'out_file',
maskfunc, 'in_file')
preproc.connect(meanfuncmask, 'mask_file',
maskfunc, 'in_file2')
"""
determine the 2nd and 98th percentiles of each functional run
"""
getthreshold = pe.MapNode(
interface = fsl.ImageStats(op_string = '-p 2 -p 98'),
iterfield = ['in_file'],
name = 'getthreshold')
preproc.connect(maskfunc, 'out_file',
getthreshold, 'in_file')
"""
threshold the functional data at 10% of the 98th percentile
"""
threshold = pe.MapNode(
interface = fsl.ImageMaths(out_data_type = 'char',
suffix = '_thresh',
op_string = '-Tmin -bin'),
iterfield = ['in_file','op_string'],
name = 'tresholding')
preproc.connect(maskfunc, 'out_file',
threshold,'in_file')
"""
define a function to get 10% of the intensity
"""
preproc.connect(getthreshold,('out_stat',getthreshop),
threshold, 'op_string')
"""
Determine the median value of the functional runs using the mask
"""
medianval = pe.MapNode(
interface = fsl.ImageStats(op_string = '-k %s -p 50'),
iterfield = ['in_file','mask_file'],
name = 'cal_intensity_scale_factor')
preproc.connect(motion_correct, 'out_file',
medianval, 'in_file')
preproc.connect(threshold, 'out_file',
medianval, 'mask_file')
"""
dilate the mask
"""
dilatemask = pe.MapNode(
interface = fsl.ImageMaths(suffix = '_dil',
op_string = '-dilF'),
iterfield = ['in_file'],
name = 'dilatemask')
preproc.connect(threshold, 'out_file',
dilatemask, 'in_file')
preproc.connect(dilatemask, 'out_file',
outputnode, 'mask')
"""
mask the motion corrected functional runs with the dilated mask
"""
dilateMask_MCed = pe.MapNode(
interface = fsl.ImageMaths(suffix = '_mask',
op_string = '-mas'),
iterfield = ['in_file','in_file2'],
name = 'dilateMask_MCed')
preproc.connect(motion_correct, 'out_file',
dilateMask_MCed, 'in_file',)
preproc.connect(dilatemask, 'out_file',
dilateMask_MCed, 'in_file2')
"""
    We now take the motion-corrected, masked functional data and
    create a "mean_func" image that is the mean across time (Tmean)
"""
meanfunc2 = pe.MapNode(
interface = fsl.ImageMaths(suffix = '_mean',
op_string = '-Tmean',),
iterfield = ['in_file'],
name = 'meanfunc2')
preproc.connect(dilateMask_MCed, 'out_file',
meanfunc2, 'in_file')
"""
smooth each run using SUSAN with the brightness threshold set to
    75% of the median value for each run and a mask constituting the
mean functional
"""
merge = pe.Node(
interface = util.Merge(2, axis = 'hstack'),
name = 'merge')
preproc.connect(meanfunc2, 'out_file',
merge, 'in1')
preproc.connect(medianval,('out_stat',get_brightness_threshold_double),
merge, 'in2')
smooth = pe.MapNode(
interface = fsl.SUSAN(dimension = 3,
use_median = True),
iterfield = ['in_file',
'brightness_threshold',
'fwhm',
'usans'],
name = 'susan_smooth')
preproc.connect(dilateMask_MCed, 'out_file',
smooth, 'in_file')
preproc.connect(medianval, ('out_stat',get_brightness_threshold),
smooth, 'brightness_threshold')
preproc.connect(inputnode, 'fwhm',
smooth, 'fwhm')
preproc.connect(merge, ('out',getusans),
smooth, 'usans')
"""
mask the smoothed data with the dilated mask
"""
maskfunc3 = pe.MapNode(
interface = fsl.ImageMaths(suffix = '_mask',
op_string = '-mas'),
iterfield = ['in_file','in_file2'],
name = 'dilateMask_smoothed')
    # connect the output of the susan smooth component to the maskfunc3 node
preproc.connect(smooth, 'smoothed_file',
maskfunc3, 'in_file')
# connect the output of the dilated mask to the maskfunc3 node
preproc.connect(dilatemask, 'out_file',
maskfunc3, 'in_file2')
"""
    scale each run so that its median value is set to 10000
"""
meanscale = pe.MapNode(
interface = fsl.ImageMaths(suffix = '_intnorm'),
iterfield = ['in_file','op_string'],
name = 'meanscale')
preproc.connect(maskfunc3, 'out_file',
meanscale, 'in_file')
preproc.connect(meanscale, 'out_file',
outputnode,'smoothed_files')
"""
define a function to get the scaling factor for intensity normalization
"""
preproc.connect(medianval,('out_stat',getmeanscale),
meanscale,'op_string')
"""
generate a mean functional image from the first run
should this be the 'mean.nii.gz' we will use in the future?
"""
meanfunc3 = pe.MapNode(
interface = fsl.ImageMaths(suffix = '_mean',
op_string = '-Tmean',),
iterfield = ['in_file'],
name = 'gen_mean_func_img')
preproc.connect(meanscale, 'out_file',
meanfunc3, 'in_file')
preproc.connect(meanfunc3, 'out_file',
outputnode,'mean')
# initialize some of the input files
preproc.inputs.inputspec.func = os.path.abspath(func_data_file)
preproc.inputs.inputspec.fwhm = 3
preproc.base_dir = os.path.abspath('/'.join(
func_data_file.split('/')[:-1]))
output_dir = os.path.abspath(os.path.join(
preproc.base_dir,
'outputs',
'func'))
MC_dir = os.path.join(output_dir,'MC')
for directories in [output_dir,MC_dir]:
if not os.path.exists(directories):
os.makedirs(directories)
# initialize all the output files
if first_run == True:
preproc.inputs.extractref.roi_file = os.path.abspath(os.path.join(
output_dir,'example_func.nii.gz'))
preproc.inputs.dilatemask.out_file = os.path.abspath(os.path.join(
output_dir,'mask.nii.gz'))
preproc.inputs.meanscale.out_file = os.path.abspath(os.path.join(
output_dir,'prefiltered_func.nii.gz'))
preproc.inputs.gen_mean_func_img.out_file = os.path.abspath(os.path.join(
output_dir,'mean_func.nii.gz'))
return preproc,MC_dir,output_dir
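# A minimal usage sketch, kept commented out (the functional file path below is
# hypothetical, not from the original study):
# preproc, MC_dir, output_dir = create_fsl_FEAT_workflow_func(
#         whichrun       = 0,
#         whichvol       = 'middle',
#         first_run      = True,
#         func_data_file = 'data/MRI/sub-01/func/session-01_run-01_bold.nii.gz',
#         fwhm           = 3)
# preproc.run()   # nipype executes the FEAT-like preprocessing graph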
def create_registration_workflow(
anat_brain,
anat_head,
example_func,
standard_brain,
standard_head,
standard_mask,
workflow_name = 'registration',
output_dir = 'temp'):
from nipype.interfaces import fsl
from nipype.interfaces import utility as util
from nipype.pipeline import engine as pe
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    registration = pe.Workflow(name = workflow_name)
inputnode = pe.Node(
interface = util.IdentityInterface(
fields = [
'highres', # anat_brain
'highres_head', # anat_head
'example_func',
'standard', # standard_brain
'standard_head',
'standard_mask'
]),
name = 'inputspec')
outputnode = pe.Node(
interface = util.IdentityInterface(
fields = ['example_func2highres_nii_gz',
'example_func2highres_mat',
'linear_example_func2highres_log',
'highres2example_func_mat',
'highres2standard_linear_nii_gz',
'highres2standard_mat',
'linear_highres2standard_log',
'highres2standard_nii_gz',
'highres2standard_warp_nii_gz',
'highres2standard_head_nii_gz',
# 'highres2standard_apply_warp_nii_gz',
'highres2highres_jac_nii_gz',
'nonlinear_highres2standard_log',
'highres2standard_nii_gz',
'standard2highres_mat',
'example_func2standard_mat',
'example_func2standard_warp_nii_gz',
'example_func2standard_nii_gz',
'standard2example_func_mat',
]),
name = 'outputspec')
"""
fslmaths /bcbl/home/public/Consciousness/uncon_feat/data/MRI/sub-01/anat/sub-01-T1W_mprage_sag_p2_1iso_MGH_day_6_nipy_brain highres
fslmaths /bcbl/home/public/Consciousness/uncon_feat/data/MRI/sub-01/anat/sub-01-T1W_mprage_sag_p2_1iso_MGH_day_6_nipy_brain highres_head
fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm_brain standard
fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm standard_head
fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm_brain_mask_dil standard_mask
"""
# skip
"""
/opt/fsl/fsl-5.0.10/fsl/bin/flirt
-in example_func
-ref highres
-out example_func2highres
-omat example_func2highres.mat
-cost corratio
-dof 7
-searchrx -180 180
-searchry -180 180
-searchrz -180 180
-interp trilinear
"""
linear_example_func2highres = pe.MapNode(
interface = fsl.FLIRT(cost = 'corratio',
interp = 'trilinear',
dof = 7,
save_log = True,
searchr_x = [-180, 180],
searchr_y = [-180, 180],
searchr_z = [-180, 180],),
iterfield = ['in_file','reference'],
name = 'linear_example_func2highres')
registration.connect(inputnode, 'example_func',
linear_example_func2highres, 'in_file')
registration.connect(inputnode, 'highres',
linear_example_func2highres, 'reference')
registration.connect(linear_example_func2highres, 'out_file',
outputnode, 'example_func2highres_nii_gz')
registration.connect(linear_example_func2highres, 'out_matrix_file',
outputnode, 'example_func2highres_mat')
registration.connect(linear_example_func2highres, 'out_log',
outputnode, 'linear_example_func2highres_log')
"""
/opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm
-inverse -omat highres2example_func.mat example_func2highres.mat
"""
get_highres2example_func = pe.MapNode(
interface = fsl.ConvertXFM(invert_xfm = True),
iterfield = ['in_file'],
name = 'get_highres2example_func')
registration.connect(linear_example_func2highres,'out_matrix_file',
get_highres2example_func,'in_file')
registration.connect(get_highres2example_func,'out_file',
outputnode,'highres2example_func_mat')
"""
/opt/fsl/fsl-5.0.10/fsl/bin/flirt
-in highres
-ref standard
-out highres2standard
-omat highres2standard.mat
-cost corratio
-dof 12
-searchrx -180 180
-searchry -180 180
-searchrz -180 180
-interp trilinear
"""
linear_highres2standard = pe.MapNode(
interface = fsl.FLIRT(cost = 'corratio',
interp = 'trilinear',
dof = 12,
save_log = True,
searchr_x = [-180, 180],
searchr_y = [-180, 180],
searchr_z = [-180, 180],),
iterfield = ['in_file','reference'],
name = 'linear_highres2standard')
registration.connect(inputnode,'highres',
linear_highres2standard,'in_file')
registration.connect(inputnode,'standard',
linear_highres2standard,'reference',)
registration.connect(linear_highres2standard,'out_file',
outputnode,'highres2standard_linear_nii_gz')
registration.connect(linear_highres2standard,'out_matrix_file',
outputnode,'highres2standard_mat')
registration.connect(linear_highres2standard,'out_log',
outputnode,'linear_highres2standard_log')
"""
/opt/fsl/fsl-5.0.10/fsl/bin/fnirt
--iout=highres2standard_head
--in=highres_head
--aff=highres2standard.mat
--cout=highres2standard_warp
--iout=highres2standard
--jout=highres2highres_jac
--config=T1_2_MNI152_2mm
--ref=standard_head
--refmask=standard_mask
--warpres=10,10,10
"""
nonlinear_highres2standard = pe.MapNode(
interface = fsl.FNIRT(warp_resolution = (10,10,10),
config_file = "T1_2_MNI152_2mm"),
iterfield = ['in_file','ref_file','affine_file','refmask_file'],
name = 'nonlinear_highres2standard')
# -- iout
registration.connect(nonlinear_highres2standard,'warped_file',
outputnode,'highres2standard_head_nii_gz')
# --in
registration.connect(inputnode,'highres',
nonlinear_highres2standard,'in_file')
# --aff
registration.connect(linear_highres2standard,'out_matrix_file',
nonlinear_highres2standard,'affine_file')
# --cout
registration.connect(nonlinear_highres2standard,'fieldcoeff_file',
outputnode,'highres2standard_warp_nii_gz')
# --jout
registration.connect(nonlinear_highres2standard,'jacobian_file',
outputnode,'highres2highres_jac_nii_gz')
# --ref
registration.connect(inputnode,'standard_head',
nonlinear_highres2standard,'ref_file',)
# --refmask
registration.connect(inputnode,'standard_mask',
nonlinear_highres2standard,'refmask_file')
# log
registration.connect(nonlinear_highres2standard,'log_file',
outputnode,'nonlinear_highres2standard_log')
"""
/opt/fsl/fsl-5.0.10/fsl/bin/applywarp
-i highres
-r standard
-o highres2standard
-w highres2standard_warp
"""
warp_highres2standard = pe.MapNode(
interface = fsl.ApplyWarp(),
iterfield = ['in_file','ref_file','field_file'],
name = 'warp_highres2standard')
registration.connect(inputnode,'highres',
warp_highres2standard,'in_file')
registration.connect(inputnode,'standard',
warp_highres2standard,'ref_file')
registration.connect(warp_highres2standard,'out_file',
outputnode,'highres2standard_nii_gz')
registration.connect(nonlinear_highres2standard,'fieldcoeff_file',
warp_highres2standard,'field_file')
"""
/opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm
-inverse -omat standard2highres.mat highres2standard.mat
"""
get_standard2highres = pe.MapNode(
interface = fsl.ConvertXFM(invert_xfm = True),
iterfield = ['in_file'],
name = 'get_standard2highres')
registration.connect(linear_highres2standard,'out_matrix_file',
get_standard2highres,'in_file')
registration.connect(get_standard2highres,'out_file',
outputnode,'standard2highres_mat')
"""
/opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm
-omat example_func2standard.mat -concat highres2standard.mat example_func2highres.mat
"""
get_exmaple_func2standard = pe.MapNode(
interface = fsl.ConvertXFM(concat_xfm = True),
iterfield = ['in_file','in_file2'],
name = 'get_exmaple_func2standard')
registration.connect(linear_example_func2highres, 'out_matrix_file',
get_exmaple_func2standard,'in_file')
registration.connect(linear_highres2standard,'out_matrix_file',
get_exmaple_func2standard,'in_file2')
registration.connect(get_exmaple_func2standard,'out_file',
outputnode,'example_func2standard_mat')
"""
/opt/fsl/fsl-5.0.10/fsl/bin/convertwarp
--ref=standard
--premat=example_func2highres.mat
--warp1=highres2standard_warp
--out=example_func2standard_warp
"""
convertwarp_example2standard = pe.MapNode(
interface = fsl.ConvertWarp(),
iterfield = ['reference','premat','warp1'],
name = 'convertwarp_example2standard')
registration.connect(inputnode,'standard',
convertwarp_example2standard,'reference')
registration.connect(linear_example_func2highres,'out_matrix_file',
convertwarp_example2standard,'premat')
registration.connect(nonlinear_highres2standard,'fieldcoeff_file',
convertwarp_example2standard,'warp1')
registration.connect(convertwarp_example2standard,'out_file',
outputnode,'example_func2standard_warp_nii_gz')
"""
/opt/fsl/fsl-5.0.10/fsl/bin/applywarp
--ref=standard
--in=example_func
--out=example_func2standard
--warp=example_func2standard_warp
"""
warp_example2stand = pe.MapNode(
interface = fsl.ApplyWarp(),
iterfield = ['ref_file','in_file','field_file'],
name = 'warp_example2stand')
registration.connect(inputnode,'standard',
warp_example2stand,'ref_file')
registration.connect(inputnode,'example_func',
warp_example2stand,'in_file')
registration.connect(warp_example2stand,'out_file',
outputnode,'example_func2standard_nii_gz')
registration.connect(convertwarp_example2standard,'out_file',
warp_example2stand,'field_file')
"""
/opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm
-inverse -omat standard2example_func.mat example_func2standard.mat
"""
get_standard2example_func = pe.MapNode(
interface = fsl.ConvertXFM(invert_xfm = True),
iterfield = ['in_file'],
name = 'get_standard2example_func')
registration.connect(get_exmaple_func2standard,'out_file',
get_standard2example_func,'in_file')
registration.connect(get_standard2example_func,'out_file',
outputnode,'standard2example_func_mat')
registration.base_dir = output_dir
registration.inputs.inputspec.highres = anat_brain
registration.inputs.inputspec.highres_head= anat_head
registration.inputs.inputspec.example_func = example_func
registration.inputs.inputspec.standard = standard_brain
registration.inputs.inputspec.standard_head = standard_head
registration.inputs.inputspec.standard_mask = standard_mask
    # define all the output file names with the directory
registration.inputs.linear_example_func2highres.out_file = os.path.abspath(os.path.join(output_dir,
'example_func2highres.nii.gz'))
registration.inputs.linear_example_func2highres.out_matrix_file = os.path.abspath(os.path.join(output_dir,
'example_func2highres.mat'))
registration.inputs.linear_example_func2highres.out_log = os.path.abspath(os.path.join(output_dir,
'linear_example_func2highres.log'))
registration.inputs.get_highres2example_func.out_file = os.path.abspath(os.path.join(output_dir,
'highres2example_func.mat'))
registration.inputs.linear_highres2standard.out_file = os.path.abspath(os.path.join(output_dir,
'highres2standard_linear.nii.gz'))
registration.inputs.linear_highres2standard.out_matrix_file = os.path.abspath(os.path.join(output_dir,
'highres2standard.mat'))
registration.inputs.linear_highres2standard.out_log = os.path.abspath(os.path.join(output_dir,
'linear_highres2standard.log'))
# --iout
registration.inputs.nonlinear_highres2standard.warped_file = os.path.abspath(os.path.join(output_dir,
'highres2standard.nii.gz'))
# --cout
registration.inputs.nonlinear_highres2standard.fieldcoeff_file = os.path.abspath(os.path.join(output_dir,
'highres2standard_warp.nii.gz'))
# --jout
registration.inputs.nonlinear_highres2standard.jacobian_file = os.path.abspath(os.path.join(output_dir,
'highres2highres_jac.nii.gz'))
registration.inputs.nonlinear_highres2standard.log_file = os.path.abspath(os.path.join(output_dir,
'nonlinear_highres2standard.log'))
registration.inputs.warp_highres2standard.out_file = os.path.abspath(os.path.join(output_dir,
'highres2standard.nii.gz'))
registration.inputs.get_standard2highres.out_file = os.path.abspath(os.path.join(output_dir,
'standard2highres.mat'))
registration.inputs.get_exmaple_func2standard.out_file = os.path.abspath(os.path.join(output_dir,
'example_func2standard.mat'))
registration.inputs.convertwarp_example2standard.out_file = os.path.abspath(os.path.join(output_dir,
'example_func2standard_warp.nii.gz'))
registration.inputs.warp_example2stand.out_file = os.path.abspath(os.path.join(output_dir,
'example_func2standard.nii.gz'))
registration.inputs.get_standard2example_func.out_file = os.path.abspath(os.path.join(output_dir,
'standard2example_func.mat'))
return registration
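# A minimal usage sketch, kept commented out (all paths below are hypothetical):
# registration = create_registration_workflow(
#         anat_brain     = 'anat/sub-01_brain.nii.gz',
#         anat_head      = 'anat/sub-01_head.nii.gz',
#         example_func   = 'outputs/func/example_func.nii.gz',
#         standard_brain = 'MNI152_T1_2mm_brain.nii.gz',
#         standard_head  = 'MNI152_T1_2mm.nii.gz',
#         standard_mask  = 'MNI152_T1_2mm_brain_mask_dil.nii.gz',
#         output_dir     = 'outputs/reg')
# registration.run()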
def _create_registration_workflow(anat_brain,
anat_head,
func_ref,
standard_brain,
standard_head,
standard_mask,
output_dir = 'temp'):
from nipype.interfaces import fsl
"""
fslmaths /bcbl/home/public/Consciousness/uncon_feat/data/MRI/sub-01/anat/sub-01-T1W_mprage_sag_p2_1iso_MGH_day_6_nipy_brain highres
fslmaths /bcbl/home/public/Consciousness/uncon_feat/data/MRI/sub-01/anat/sub-01-T1W_mprage_sag_p2_1iso_MGH_day_6_nipy_brain highres_head
fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm_brain standard
fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm standard_head
fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm_brain_mask_dil standard_mask
"""
fslmaths = fsl.ImageMaths()
fslmaths.inputs.in_file = anat_brain
fslmaths.inputs.out_file = os.path.abspath(os.path.join(output_dir,'highres.nii.gz'))
fslmaths.cmdline
fslmaths.run()
fslmaths = fsl.ImageMaths()
fslmaths.inputs.in_file = anat_head
fslmaths.inputs.out_file = os.path.abspath(os.path.join(output_dir,'highres_head.nii.gz'))
fslmaths.cmdline
fslmaths.run()
fslmaths = fsl.ImageMaths()
fslmaths.inputs.in_file = standard_brain
fslmaths.inputs.out_file = os.path.abspath(os.path.join(output_dir,'standard.nii.gz'))
fslmaths.cmdline
fslmaths.run()
fslmaths = fsl.ImageMaths()
fslmaths.inputs.in_file = standard_head
fslmaths.inputs.out_file = os.path.abspath(os.path.join(output_dir,'standard_head.nii.gz'))
fslmaths.cmdline
fslmaths.run()
fslmaths = fsl.ImageMaths()
fslmaths.inputs.in_file = standard_mask
fslmaths.inputs.out_file = os.path.abspath(os.path.join(output_dir,'standard_mask.nii.gz'))
fslmaths.cmdline
fslmaths.run()
"""
/opt/fsl/fsl-5.0.10/fsl/bin/flirt
-in example_func
-ref highres
-out example_func2highres
-omat example_func2highres.mat
-cost corratio
-dof 7
-searchrx -180 180
-searchry -180 180
-searchrz -180 180
-interp trilinear
"""
flt = fsl.FLIRT()
flt.inputs.in_file = func_ref
flt.inputs.reference = anat_brain
flt.inputs.out_file = os.path.abspath(os.path.join(output_dir,'example_func2highres.nii.gz'))
flt.inputs.out_matrix_file = os.path.abspath(os.path.join(output_dir,'example_func2highres.mat'))
flt.inputs.out_log = os.path.abspath(os.path.join(output_dir,'example_func2highres.log'))
flt.inputs.cost = 'corratio'
flt.inputs.interp = 'trilinear'
flt.inputs.searchr_x = [-180, 180]
flt.inputs.searchr_y = [-180, 180]
flt.inputs.searchr_z = [-180, 180]
flt.inputs.dof = 7
flt.inputs.save_log = True
flt.cmdline
flt.run()
"""
/opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm
-inverse -omat highres2example_func.mat example_func2highres.mat
"""
inverse_transformer = fsl.ConvertXFM()
inverse_transformer.inputs.in_file = os.path.abspath(os.path.join(output_dir,"example_func2highres.mat"))
inverse_transformer.inputs.invert_xfm = True
inverse_transformer.inputs.out_file = os.path.abspath(os.path.join(output_dir,'highres2example_func.mat'))
inverse_transformer.cmdline
inverse_transformer.run()
"""
/opt/fsl/fsl-5.0.10/fsl/bin/flirt
-in highres
-ref standard
-out highres2standard
-omat highres2standard.mat
-cost corratio
-dof 12
-searchrx -180 180
-searchry -180 180
-searchrz -180 180
-interp trilinear
"""
flt = fsl.FLIRT()
flt.inputs.in_file = anat_brain
flt.inputs.reference = standard_brain
flt.inputs.out_file = os.path.abspath(os.path.join(output_dir,'highres2standard_linear.nii.gz'))
flt.inputs.out_matrix_file = os.path.abspath(os.path.join(output_dir,'highres2standard.mat'))
flt.inputs.out_log = os.path.abspath(os.path.join(output_dir,'highres2standard.log'))
flt.inputs.cost = 'corratio'
flt.inputs.interp = 'trilinear'
flt.inputs.searchr_x = [-180, 180]
flt.inputs.searchr_y = [-180, 180]
flt.inputs.searchr_z = [-180, 180]
flt.inputs.dof = 12
flt.inputs.save_log = True
flt.cmdline
flt.run()
"""
/opt/fsl/fsl-5.0.10/fsl/bin/fnirt
--iout=highres2standard_head
--in=highres_head
--aff=highres2standard.mat
--cout=highres2standard_warp
--iout=highres2standard
--jout=highres2highres_jac
--config=T1_2_MNI152_2mm
--ref=standard_head
--refmask=standard_mask
--warpres=10,10,10
"""
fnirt_mprage = fsl.FNIRT()
fnirt_mprage.inputs.warp_resolution = (10, 10, 10)
# --iout name of output image
fnirt_mprage.inputs.warped_file = os.path.abspath(os.path.join(output_dir,
'highres2standard.nii.gz'))
# --in input image
fnirt_mprage.inputs.in_file = anat_head
# --aff affine transform
fnirt_mprage.inputs.affine_file = os.path.abspath(os.path.join(output_dir,
'highres2standard.mat'))
# --cout output file with field coefficients
fnirt_mprage.inputs.fieldcoeff_file = os.path.abspath(os.path.join(output_dir,
'highres2standard_warp.nii.gz'))
# --jout
fnirt_mprage.inputs.jacobian_file = os.path.abspath(os.path.join(output_dir,
'highres2highres_jac.nii.gz'))
# --config
fnirt_mprage.inputs.config_file = 'T1_2_MNI152_2mm'
# --ref
fnirt_mprage.inputs.ref_file = os.path.abspath(standard_head)
# --refmask
fnirt_mprage.inputs.refmask_file = os.path.abspath(standard_mask)
    # log file (--warpres was already set via warp_resolution above)
fnirt_mprage.inputs.log_file = os.path.abspath(os.path.join(output_dir,
'highres2standard.log'))
fnirt_mprage.cmdline
fnirt_mprage.run()
"""
/opt/fsl/fsl-5.0.10/fsl/bin/applywarp
-i highres
-r standard
-o highres2standard
-w highres2standard_warp
"""
aw = fsl.ApplyWarp()
aw.inputs.in_file = anat_brain
aw.inputs.ref_file = os.path.abspath(standard_brain)
aw.inputs.out_file = os.path.abspath(os.path.join(output_dir,
'highres2standard.nii.gz'))
aw.inputs.field_file = os.path.abspath(os.path.join(output_dir,
'highres2standard_warp.nii.gz'))
aw.cmdline
aw.run()
"""
/opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm
-inverse -omat standard2highres.mat highres2standard.mat
"""
inverse_transformer = fsl.ConvertXFM()
inverse_transformer.inputs.in_file = os.path.abspath(os.path.join(output_dir,"highres2standard.mat"))
inverse_transformer.inputs.invert_xfm = True
inverse_transformer.inputs.out_file = os.path.abspath(os.path.join(output_dir,'standard2highres.mat'))
inverse_transformer.cmdline
inverse_transformer.run()
"""
/opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm
-omat example_func2standard.mat -concat highres2standard.mat example_func2highres.mat
"""
inverse_transformer = fsl.ConvertXFM()
inverse_transformer.inputs.in_file2 = os.path.abspath(os.path.join(output_dir,"highres2standard.mat"))
inverse_transformer.inputs.in_file = os.path.abspath(os.path.join(output_dir,
"example_func2highres.mat"))
inverse_transformer.inputs.concat_xfm = True
inverse_transformer.inputs.out_file = os.path.abspath(os.path.join(output_dir,'example_func2standard.mat'))
inverse_transformer.cmdline
inverse_transformer.run()
"""
/opt/fsl/fsl-5.0.10/fsl/bin/convertwarp
--ref=standard
--premat=example_func2highres.mat
--warp1=highres2standard_warp
--out=example_func2standard_warp
"""
warputils = fsl.ConvertWarp()
warputils.inputs.reference = os.path.abspath(standard_brain)
warputils.inputs.premat = os.path.abspath(os.path.join(output_dir,
"example_func2highres.mat"))
warputils.inputs.warp1 = os.path.abspath(os.path.join(output_dir,
"highres2standard_warp.nii.gz"))
warputils.inputs.out_file = os.path.abspath(os.path.join(output_dir,
"example_func2standard_warp.nii.gz"))
warputils.cmdline
warputils.run()
"""
/opt/fsl/fsl-5.0.10/fsl/bin/applywarp
--ref=standard
--in=example_func
--out=example_func2standard
--warp=example_func2standard_warp
"""
aw = fsl.ApplyWarp()
aw.inputs.ref_file = os.path.abspath(standard_brain)
aw.inputs.in_file = os.path.abspath(func_ref)
aw.inputs.out_file = os.path.abspath(os.path.join(output_dir,
"example_func2standard.nii.gz"))
aw.inputs.field_file = os.path.abspath(os.path.join(output_dir,
"example_func2standard_warp.nii.gz"))
aw.run()
"""
/opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm
-inverse -omat standard2example_func.mat example_func2standard.mat
"""
inverse_transformer = fsl.ConvertXFM()
inverse_transformer.inputs.in_file = os.path.abspath(os.path.join(output_dir,
"example_func2standard.mat"))
inverse_transformer.inputs.out_file = os.path.abspath(os.path.join(output_dir,
"standard2example_func.mat"))
inverse_transformer.inputs.invert_xfm = True
inverse_transformer.cmdline
inverse_transformer.run()
######################
###### plotting ######
example_func2highres = os.path.abspath(os.path.join(output_dir,
'example_func2highres'))
example_func2standard = os.path.abspath(os.path.join(output_dir,
"example_func2standard"))
highres2standard = os.path.abspath(os.path.join(output_dir,
'highres2standard'))
highres = os.path.abspath(anat_brain)
standard = os.path.abspath(standard_brain)
plot_example_func2highres = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {example_func2highres} {highres} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2highres}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {highres} {example_func2highres} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2highres}2.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend {example_func2highres}1.png - {example_func2highres}2.png {example_func2highres}.png;
/bin/rm -f sl?.png {example_func2highres}2.png
/bin/rm {example_func2highres}1.png
""".replace("\n"," ")
plot_highres2standard = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {highres2standard} {standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {highres2standard}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {standard} {highres2standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {highres2standard}2.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend {highres2standard}1.png - {highres2standard}2.png {highres2standard}.png;
/bin/rm -f sl?.png {highres2standard}2.png
/bin/rm {highres2standard}1.png
""".replace("\n"," ")
plot_example_func2standard = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {example_func2standard} {standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2standard}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {standard} {example_func2standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2standard}2.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend {example_func2standard}1.png - {example_func2standard}2.png {example_func2standard}.png;
/bin/rm -f sl?.png {example_func2standard}2.png
""".replace("\n"," ")
for cmdline in [plot_example_func2highres,plot_example_func2standard,plot_highres2standard]:
os.system(cmdline)
def create_simple_struc2BOLD(roi,
roi_name,
preprocessed_functional_dir,
output_dir):
from nipype.interfaces import fsl
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as util
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
simple_workflow = pe.Workflow(name = 'struc2BOLD')
inputnode = pe.Node(interface = util.IdentityInterface(
fields = ['flt_in_file',
'flt_in_matrix',
'flt_reference',
'mask']),
name = 'inputspec')
outputnode = pe.Node(interface = util.IdentityInterface(
fields = ['BODL_mask']),
name = 'outputspec')
"""
flirt
-in /export/home/dsoto/dsoto/fmri/$s/sess2/label/$i
-ref /export/home/dsoto/dsoto/fmri/$s/sess2/run1_prepro1.feat/example_func.nii.gz
-applyxfm
-init /export/home/dsoto/dsoto/fmri/$s/sess2/run1_prepro1.feat/reg/highres2example_func.mat
-out /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i}
"""
flirt_convert = pe.MapNode(
interface = fsl.FLIRT(apply_xfm = True),
iterfield = ['in_file',
'reference',
'in_matrix_file'],
name = 'flirt_convert')
simple_workflow.connect(inputnode, 'flt_in_file',
flirt_convert, 'in_file')
simple_workflow.connect(inputnode, 'flt_reference',
flirt_convert, 'reference')
simple_workflow.connect(inputnode, 'flt_in_matrix',
flirt_convert, 'in_matrix_file')
"""
fslmaths /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i} -mul 2
-thr `fslstats /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i} -p 99.6`
-bin /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i}
"""
def getthreshop(thresh):
return ['-mul 2 -thr %.10f -bin' % (val) for val in thresh]
getthreshold = pe.MapNode(
interface = fsl.ImageStats(op_string='-p 99.6'),
iterfield = ['in_file','mask_file'],
name = 'getthreshold')
simple_workflow.connect(flirt_convert, 'out_file',
getthreshold, 'in_file')
simple_workflow.connect(inputnode, 'mask',
getthreshold, 'mask_file')
threshold = pe.MapNode(
interface = fsl.ImageMaths(
suffix = '_thresh',
op_string = '-mul 2 -bin'),
iterfield = ['in_file','op_string'],
name = 'thresholding')
simple_workflow.connect(flirt_convert, 'out_file',
threshold, 'in_file')
simple_workflow.connect(getthreshold, ('out_stat',getthreshop),
threshold, 'op_string')
# simple_workflow.connect(threshold,'out_file',outputnode,'BOLD_mask')
bound_by_mask = pe.MapNode(
interface = fsl.ImageMaths(
suffix = '_mask',
op_string = '-mas'),
iterfield = ['in_file','in_file2'],
name = 'bound_by_mask')
simple_workflow.connect(threshold, 'out_file',
bound_by_mask, 'in_file')
simple_workflow.connect(inputnode, 'mask',
bound_by_mask, 'in_file2')
simple_workflow.connect(bound_by_mask, 'out_file',
outputnode, 'BOLD_mask')
# setup inputspecs
simple_workflow.inputs.inputspec.flt_in_file = roi
simple_workflow.inputs.inputspec.flt_in_matrix = os.path.abspath(os.path.join(preprocessed_functional_dir,
'reg',
'highres2example_func.mat'))
simple_workflow.inputs.inputspec.flt_reference = os.path.abspath(os.path.join(preprocessed_functional_dir,
'func',
'example_func.nii.gz'))
simple_workflow.inputs.inputspec.mask = os.path.abspath(os.path.join(preprocessed_functional_dir,
'func',
'mask.nii.gz'))
simple_workflow.inputs.bound_by_mask.out_file = os.path.abspath(os.path.join(output_dir,
roi_name.replace('_fsl.nii.gz',
'_BOLD.nii.gz')))
return simple_workflow
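# Illustrative usage sketch (not part of the original module). The paths below
# are hypothetical placeholders; the returned nipype workflow still has to be
# given a base_dir and run explicitly, and FSL must be available on the system.
def _example_create_simple_struc2BOLD():
    wf = create_simple_struc2BOLD(
        roi                         = '../data/ROIs/lh-fusiform_fsl.nii.gz',  # hypothetical ROI file
        roi_name                    = 'lh-fusiform_fsl.nii.gz',
        preprocessed_functional_dir = '../data/sub-01/session-01',            # hypothetical FEAT-style folder
        output_dir                  = '../results/BOLD_masks')                # hypothetical output folder
    wf.base_dir = '../tmp/nipype_work'
    wf.run()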
def registration_plotting(output_dir,
anat_brain,
standard_brain):
######################
###### plotting ######
try:
example_func2highres = os.path.abspath(os.path.join(output_dir,
'example_func2highres'))
example_func2standard = os.path.abspath(os.path.join(output_dir,
'example_func2standard_warp'))
highres2standard = os.path.abspath(os.path.join(output_dir,
'highres2standard'))
highres = os.path.abspath(anat_brain)
standard = os.path.abspath(standard_brain)
plot_example_func2highres = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {example_func2highres} {highres} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2highres}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {highres} {example_func2highres} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2highres}2.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend {example_func2highres}1.png - {example_func2highres}2.png {example_func2highres}.png;
/bin/rm -f sl?.png {example_func2highres}2.png
/bin/rm {example_func2highres}1.png
""".replace("\n"," ")
plot_highres2standard = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {highres2standard} {standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {highres2standard}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {standard} {highres2standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {highres2standard}2.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend {highres2standard}1.png - {highres2standard}2.png {highres2standard}.png;
/bin/rm -f sl?.png {highres2standard}2.png
/bin/rm {highres2standard}1.png
""".replace("\n"," ")
plot_example_func2standard = f"""
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {example_func2standard} {standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2standard}1.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/slicer {standard} {example_func2standard} -s 2 -x 0.35 sla.png -x 0.45 slb.png -x 0.55 slc.png -x 0.65 sld.png -y 0.35 sle.png -y 0.45 slf.png -y 0.55 slg.png -y 0.65 slh.png -z 0.35 sli.png -z 0.45 slj.png -z 0.55 slk.png -z 0.65 sll.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend sla.png + slb.png + slc.png + sld.png + sle.png + slf.png + slg.png + slh.png + sli.png + slj.png + slk.png + sll.png {example_func2standard}2.png ;
/opt/fsl/fsl-5.0.10/fsl/bin/pngappend {example_func2standard}1.png - {example_func2standard}2.png {example_func2standard}.png;
/bin/rm -f sl?.png {example_func2standard}2.png
""".replace("\n"," ")
for cmdline in [plot_example_func2highres,
plot_example_func2standard,
plot_highres2standard]:
os.system(cmdline)
    except Exception as e:
        print(f'registration plotting failed: {e} '
              '(note: the f-string commands above require Python 3.6+)')
def create_highpass_filter_workflow(workflow_name = 'highpassfiler',
HP_freq = 60,
TR = 0.85):
from nipype.workflows.fmri.fsl import preprocess
from nipype.interfaces import fsl
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as util
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
getthreshop = preprocess.getthreshop
getmeanscale = preprocess.getmeanscale
highpass_workflow = pe.Workflow(name = workflow_name)
inputnode = pe.Node(interface = util.IdentityInterface(
fields = ['ICAed_file',]),
name = 'inputspec')
outputnode = pe.Node(interface = util.IdentityInterface(
fields = ['filtered_file']),
name = 'outputspec')
img2float = pe.MapNode(interface = fsl.ImageMaths(out_data_type = 'float',
op_string = '',
suffix = '_dtype'),
iterfield = ['in_file'],
name = 'img2float')
highpass_workflow.connect(inputnode,'ICAed_file',
img2float,'in_file')
getthreshold = pe.MapNode(interface = fsl.ImageStats(op_string = '-p 2 -p 98'),
iterfield = ['in_file'],
name = 'getthreshold')
highpass_workflow.connect(img2float, 'out_file',
getthreshold, 'in_file')
thresholding = pe.MapNode(interface = fsl.ImageMaths(out_data_type = 'char',
suffix = '_thresh',
op_string = '-Tmin -bin'),
iterfield = ['in_file','op_string'],
name = 'thresholding')
highpass_workflow.connect(img2float, 'out_file',
thresholding, 'in_file')
highpass_workflow.connect(getthreshold,('out_stat',getthreshop),
thresholding,'op_string')
dilatemask = pe.MapNode(interface = fsl.ImageMaths(suffix = '_dil',
op_string = '-dilF'),
iterfield = ['in_file'],
name = 'dilatemask')
highpass_workflow.connect(thresholding,'out_file',
dilatemask,'in_file')
maskfunc = pe.MapNode(interface = fsl.ImageMaths(suffix = '_mask',
op_string = '-mas'),
iterfield = ['in_file','in_file2'],
name = 'apply_dilatemask')
highpass_workflow.connect(img2float, 'out_file',
maskfunc, 'in_file')
highpass_workflow.connect(dilatemask, 'out_file',
maskfunc, 'in_file2')
medianval = pe.MapNode(interface = fsl.ImageStats(op_string = '-k %s -p 50'),
iterfield = ['in_file','mask_file'],
name = 'cal_intensity_scale_factor')
highpass_workflow.connect(img2float, 'out_file',
medianval, 'in_file')
highpass_workflow.connect(thresholding, 'out_file',
medianval, 'mask_file')
meanscale = pe.MapNode(interface = fsl.ImageMaths(suffix = '_intnorm'),
iterfield = ['in_file','op_string'],
name = 'meanscale')
highpass_workflow.connect(maskfunc, 'out_file',
meanscale, 'in_file')
highpass_workflow.connect(medianval, ('out_stat',getmeanscale),
meanscale, 'op_string')
meanfunc = pe.MapNode(interface = fsl.ImageMaths(suffix = '_mean',
op_string = '-Tmean'),
iterfield = ['in_file'],
name = 'meanfunc')
highpass_workflow.connect(meanscale, 'out_file',
meanfunc, 'in_file')
hpf = pe.MapNode(interface = fsl.ImageMaths(suffix = '_tempfilt',
op_string = '-bptf %.10f -1' % (HP_freq/2/TR)),
iterfield = ['in_file'],
name = 'highpass_filering')
highpass_workflow.connect(meanscale,'out_file',
hpf, 'in_file',)
addMean = pe.MapNode(interface = fsl.BinaryMaths(operation = 'add'),
iterfield = ['in_file','operand_file'],
name = 'addmean')
highpass_workflow.connect(hpf, 'out_file',
addMean, 'in_file')
highpass_workflow.connect(meanfunc, 'out_file',
addMean, 'operand_file')
highpass_workflow.connect(addMean, 'out_file',
outputnode,'filtered_file')
return highpass_workflow
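# Illustrative usage sketch (not part of the original module). The input path
# is a hypothetical placeholder; because the workflow nodes are MapNodes, the
# ICAed_file input is given as a list.
def _example_create_highpass_filter_workflow():
    hp = create_highpass_filter_workflow(workflow_name = 'highpassfiler',
                                         HP_freq       = 60,
                                         TR            = 0.85)
    hp.base_dir = '../tmp/nipype_work'                                             # hypothetical
    hp.inputs.inputspec.ICAed_file = ['../data/sub-01/filtered_func_ICAed.nii.gz'] # hypothetical
    hp.run()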
def load_csv(f,print_ = False):
temp = re.findall(r'\d+',f)
n_session = int(temp[-2])
n_run = int(temp[-1])
if print_:
print(n_session,n_run)
df = pd.read_csv(f)
df['session'] = n_session
df['run'] = n_run
df['id'] = df['session'] * 1000 + df['run'] * 100 + df['trials']
return df
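# Illustrative usage sketch (not part of the original module). The file name
# is hypothetical; load_csv expects the last two integers in the name to
# encode session and run, and the CSV itself to contain a 'trials' column.
def _example_load_csv():
    df = load_csv('../data/behavioral/sub-01_session-2_run-3.csv', print_ = True)
    print(df[['session', 'run', 'id']].head())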
def build_model_dictionary(print_train = False,
class_weight = 'balanced',
remove_invariant = True,
n_jobs = 1):
np.random.seed(12345)
svm = LinearSVC(penalty = 'l2', # default
dual = True, # default
tol = 1e-3, # not default
random_state = 12345, # not default
max_iter = int(1e3), # default
class_weight = class_weight, # not default
)
svm = CalibratedClassifierCV(base_estimator = svm,
method = 'sigmoid',
cv = 8)
xgb = XGBClassifier(
learning_rate = 1e-3, # not default
max_depth = 10, # not default
n_estimators = 100, # not default
objective = 'binary:logistic', # default
booster = 'gbtree', # default
subsample = 0.9, # not default
colsample_bytree = 0.9, # not default
reg_alpha = 0, # default
reg_lambda = 1, # default
random_state = 12345, # not default
importance_type = 'gain', # default
n_jobs = n_jobs,# default to be 1
)
bagging = BaggingClassifier(base_estimator = svm,
n_estimators = 30, # not default
max_features = 0.9, # not default
max_samples = 0.9, # not default
bootstrap = True, # default
bootstrap_features = True, # default
random_state = 12345, # not default
)
RF = SelectFromModel(xgb,
prefit = False,
threshold = 'median' # induce sparsity
)
uni = SelectPercentile(mutual_info_classif,50) # so annoying that I cannot control the random state
knn = KNeighborsClassifier()
tree = DecisionTreeClassifier(random_state = 12345,
class_weight = class_weight)
dummy = DummyClassifier(strategy = 'uniform',random_state = 12345,)
if remove_invariant:
RI = VarianceThreshold()
models = OrderedDict([
['None + Dummy', make_pipeline(RI,MinMaxScaler(),
dummy,)],
['None + Linear-SVM', make_pipeline(RI,MinMaxScaler(),
svm,)],
['None + Ensemble-SVMs', make_pipeline(RI,MinMaxScaler(),
bagging,)],
['None + KNN', make_pipeline(RI,MinMaxScaler(),
knn,)],
['None + Tree', make_pipeline(RI,MinMaxScaler(),
tree,)],
['PCA + Dummy', make_pipeline(RI,MinMaxScaler(),
PCA(),
dummy,)],
['PCA + Linear-SVM', make_pipeline(RI,MinMaxScaler(),
PCA(),
svm,)],
['PCA + Ensemble-SVMs', make_pipeline(RI,MinMaxScaler(),
PCA(),
bagging,)],
['PCA + KNN', make_pipeline(RI,MinMaxScaler(),
PCA(),
knn,)],
['PCA + Tree', make_pipeline(RI,MinMaxScaler(),
PCA(),
tree,)],
['Mutual + Dummy', make_pipeline(RI,MinMaxScaler(),
uni,
dummy,)],
['Mutual + Linear-SVM', make_pipeline(RI,MinMaxScaler(),
uni,
svm,)],
['Mutual + Ensemble-SVMs', make_pipeline(RI,MinMaxScaler(),
uni,
bagging,)],
['Mutual + KNN', make_pipeline(RI,MinMaxScaler(),
uni,
knn,)],
['Mutual + Tree', make_pipeline(RI,MinMaxScaler(),
uni,
tree,)],
['RandomForest + Dummy', make_pipeline(RI,MinMaxScaler(),
RF,
dummy,)],
['RandomForest + Linear-SVM', make_pipeline(RI,MinMaxScaler(),
RF,
svm,)],
['RandomForest + Ensemble-SVMs', make_pipeline(RI,MinMaxScaler(),
RF,
bagging,)],
['RandomForest + KNN', make_pipeline(RI,MinMaxScaler(),
RF,
knn,)],
['RandomForest + Tree', make_pipeline(RI,MinMaxScaler(),
RF,
tree,)],]
)
else:
models = OrderedDict([
['None + Dummy', make_pipeline(MinMaxScaler(),
dummy,)],
['None + Linear-SVM', make_pipeline(MinMaxScaler(),
svm,)],
['None + Ensemble-SVMs', make_pipeline(MinMaxScaler(),
bagging,)],
['None + KNN', make_pipeline(MinMaxScaler(),
knn,)],
['None + Tree', make_pipeline(MinMaxScaler(),
tree,)],
['PCA + Dummy', make_pipeline(MinMaxScaler(),
PCA(),
dummy,)],
['PCA + Linear-SVM', make_pipeline(MinMaxScaler(),
PCA(),
svm,)],
['PCA + Ensemble-SVMs', make_pipeline(MinMaxScaler(),
PCA(),
bagging,)],
['PCA + KNN', make_pipeline(MinMaxScaler(),
PCA(),
knn,)],
['PCA + Tree', make_pipeline(MinMaxScaler(),
PCA(),
tree,)],
['Mutual + Dummy', make_pipeline(MinMaxScaler(),
uni,
dummy,)],
['Mutual + Linear-SVM', make_pipeline(MinMaxScaler(),
uni,
svm,)],
['Mutual + Ensemble-SVMs', make_pipeline(MinMaxScaler(),
uni,
bagging,)],
['Mutual + KNN', make_pipeline(MinMaxScaler(),
uni,
knn,)],
['Mutual + Tree', make_pipeline(MinMaxScaler(),
uni,
tree,)],
['RandomForest + Dummy', make_pipeline(MinMaxScaler(),
RF,
dummy,)],
['RandomForest + Linear-SVM', make_pipeline(MinMaxScaler(),
RF,
svm,)],
['RandomForest + Ensemble-SVMs', make_pipeline(MinMaxScaler(),
RF,
bagging,)],
['RandomForest + KNN', make_pipeline(MinMaxScaler(),
RF,
knn,)],
['RandomForest + Tree', make_pipeline(MinMaxScaler(),
RF,
tree,)],]
)
return models
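# Illustrative usage sketch (not part of the original module), fitting one of
# the returned pipelines on synthetic data just to show the expected calling
# pattern.
def _example_build_model_dictionary():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples = 60, n_features = 20, random_state = 12345)
    models = build_model_dictionary(n_jobs = 1)
    pipeline = models['PCA + Linear-SVM']
    pipeline.fit(X, y)
    print(pipeline.predict_proba(X[:5]))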
def get_blocks(df__,label_map,):
ids = df__['id'].values
chunks = df__['session'].values
words = df__['labels'].values
labels = np.array([label_map[item] for item in df__['targets'].values])[:,-1]
sample_indecies = np.arange(len(labels))
blocks = [np.array([ids[ids == target],
chunks[ids == target],
words[ids == target],
labels[ids == target],
sample_indecies[ids == target]
]) for target in np.unique(ids)
]
block_labels = np.array([np.unique(ll[-2]) for ll in blocks]).ravel()
return blocks,block_labels
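# Illustrative usage sketch (not part of the original module). The tiny
# dataframe and the one-hot style label_map below are made up; they only show
# the column layout get_blocks expects ('id', 'session', 'labels', 'targets').
def _example_get_blocks():
    df_demo = pd.DataFrame({
        'id':      [1101, 1101, 1102, 1102],
        'session': [1, 1, 1, 1],
        'labels':  ['cat', 'cat', 'spoon', 'spoon'],
        'targets': ['Living_Things', 'Living_Things',
                    'Nonliving_Things', 'Nonliving_Things']})
    label_map = {'Living_Things': [0, 1], 'Nonliving_Things': [1, 0]}
    blocks, block_labels = get_blocks(df_demo, label_map)
    print(block_labels)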
def make_unique_class_target(df_data):
make_class = {name:[] for name in pd.unique(df_data['targets'])}
for ii,df_sub in df_data.groupby(['labels']):
target = pd.unique(df_sub['targets'])
label = pd.unique(df_sub['labels'])
make_class[target[0]].append(label[0])
return make_class
def Find_Optimal_Cutoff(target, predicted):
""" Find the optimal probability cutoff point for a classification model related to event rate
Parameters
----------
target : Matrix with dependent or target data, where rows are observations
predicted : Matrix with predicted data, where rows are observations
Returns
-------
list type, with optimal cutoff value
"""
fpr, tpr, threshold = roc_curve(target, predicted)
i = np.arange(len(tpr))
roc = pd.DataFrame({'tf' : pd.Series(tpr-(1-fpr), index=i), 'threshold' : pd.Series(threshold, index=i)})
roc_t = roc.iloc[(roc.tf-0).abs().argsort()[:1]]
return list(roc_t['threshold'])
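# Illustrative usage sketch (not part of the original module); assumes, as the
# function itself does, that roc_curve is imported from sklearn.metrics at the
# top of this module.
def _example_find_optimal_cutoff():
    y_true  = np.array([0, 0, 1, 1, 1, 0, 1, 0])
    y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.7, 0.2, 0.9, 0.5])
    print(Find_Optimal_Cutoff(y_true, y_score))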
def customized_partition(df_data,groupby_column = ['id','labels'],n_splits = 100,):
"""
modified for unaveraged volumes
"""
idx_object = dict(ids = [],idx = [],labels = [])
for label,df_sub in df_data.groupby(groupby_column):
idx_object['ids'].append(label[0])
idx_object['idx'].append(df_sub.index.tolist())
idx_object['labels'].append(label[-1])
df_object = pd.DataFrame(idx_object)
idxs_test = []
for counter in range(int(1e4)):
idx_test = [np.random.choice(item['idx'].values) for ii,item in df_object.groupby(groupby_column[-1])]
        if counter >= n_splits:
            return [np.concatenate(item) for item in idxs_test]
if counter > 0:
temp = []
for used in idxs_test:
used_temp = [','.join(str(ii) for ii in item) for item in used]
idx_test_temp = [','.join(str(ii) for ii in item) for item in idx_test]
a = set(used_temp)
b = set(idx_test_temp)
temp.append(len(a.intersection(b)) != len(idx_test))
if all(temp) == True:
idxs_test.append(idx_test)
else:
idxs_test.append(idx_test)
def check_train_test_splits(idxs_test):
"""
check if we get repeated test sets
"""
temp = []
for ii,item1 in enumerate(idxs_test):
for jj,item2 in enumerate(idxs_test):
if not ii == jj:
if len(item1) == len(item2):
sample1 = np.sort(item1)
sample2 = np.sort(item2)
temp.append(len(set(sample1).intersection(set(sample2))) == len(sample1))
temp = np.array(temp)
return any(temp)
def check_train_balance(df,idx_train,keys):
"""
    Check the balance of the training set.
    If one of the classes has more than 2 instances in excess of the other,
    randomly remove the 'extra' instances from the majority class.
"""
Counts = dict(Counter(df.iloc[idx_train]['targets'].values))
if np.abs(Counts[keys[0]] - Counts[keys[1]]) > 2:
if Counts[keys[0]] > Counts[keys[1]]:
key_major = keys[0]
key_minor = keys[1]
else:
key_major = keys[1]
key_minor = keys[0]
ids_major = df.iloc[idx_train]['id'][df.iloc[idx_train]['targets'] == key_major]
idx_train_new = idx_train.copy()
for n in range(len(idx_train_new)):
random_pick = np.random.choice(np.unique(ids_major),size = 1)[0]
# print(random_pick,np.unique(ids_major))
idx_train_new = np.array([item for item,id_temp in zip(idx_train_new,df.iloc[idx_train_new]['id']) if (id_temp != random_pick)])
ids_major = np.array([item for item in ids_major if (item != random_pick)])
new_counts = dict(Counter(df.iloc[idx_train_new]['targets']))
if np.abs(new_counts[keys[0]] - new_counts[keys[1]]) > 3:
if new_counts[keys[0]] > new_counts[keys[1]]:
key_major = keys[0]
key_minor = keys[1]
else:
key_major = keys[1]
key_minor = keys[0]
ids_major = df.iloc[idx_train_new]['id'][df.iloc[idx_train_new]['targets'] == key_major]
elif np.abs(new_counts[keys[0]] - new_counts[keys[1]]) < 3:
break
return idx_train_new
else:
return idx_train
def LOO_partition(df_data,target_column = 'labels'):
temp = {'targets':[],target_column:[]}
for (targets,labels),df_sub in df_data.groupby(['targets',target_column]):
temp['targets'].append(targets)
temp[target_column].append(labels)
temp = pd.DataFrame(temp)
temp = temp.sort_values(['targets',target_column])
living = temp[temp['targets'] == 'Living_Things'][target_column].values
nonliving = temp[temp['targets'] == 'Nonliving_Things'][target_column].values
test_pairs = [[a,b] for a in living for b in nonliving]
idxs_train,idxs_test = [],[]
for test_pair in test_pairs:
idx_test = np.logical_or(df_data[target_column] == test_pair[0],
df_data[target_column] == test_pair[1])
idx_train = np.invert(idx_test)
idxs_train.append(np.where(idx_train == True)[0])
idxs_test.append(np.where(idx_test == True)[0])
return idxs_train,idxs_test
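# Illustrative usage sketch (not part of the original module). The dataframe
# is synthetic and only mimics the expected 'targets'/'labels' columns with
# Living_Things vs Nonliving_Things categories.
def _example_LOO_partition():
    df_demo = pd.DataFrame({
        'labels':  ['cat', 'dog', 'hammer', 'spoon'] * 3,
        'targets': ['Living_Things', 'Living_Things',
                    'Nonliving_Things', 'Nonliving_Things'] * 3})
    idxs_train, idxs_test = LOO_partition(df_demo, target_column = 'labels')
    print(len(idxs_train))  # one split per living/nonliving label pair -> 2 * 2 = 4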
def resample_ttest(x,
baseline = 0.5,
n_ps = 100,
n_permutation = 10000,
one_tail = False,
n_jobs = 12,
verbose = 0,
full_size = True
):
"""
http://www.stat.ucla.edu/~rgould/110as02/bshypothesis.pdf
https://www.tau.ac.il/~saharon/StatisticsSeminar_files/Hypothesis.pdf
Inputs:
----------
x: numpy array vector, the data that is to be compared
baseline: the single point that we compare the data with
n_ps: number of p values we want to estimate
one_tail: whether to perform one-tailed comparison
"""
import numpy as np
import gc
from joblib import Parallel,delayed
# statistics with the original data distribution
t_experiment = np.mean(x)
null = x - np.mean(x) + baseline # shift the mean to the baseline but keep the distribution
if null.shape[0] > int(1e4): # catch for big data
full_size = False
if not full_size:
size = int(1e3)
else:
size = null.shape[0]
gc.collect()
def t_statistics(null,size,):
"""
null: shifted data distribution
size: tuple of 2 integers (n_for_averaging,n_permutation)
"""
null_dist = np.random.choice(null,size = size,replace = True)
t_null = np.mean(null_dist,0)
if one_tail:
return ((np.sum(t_null >= t_experiment)) + 1) / (size[1] + 1)
else:
return ((np.sum(np.abs(t_null) >= np.abs(t_experiment))) + 1) / (size[1] + 1) /2
ps = Parallel(n_jobs = n_jobs,verbose = verbose)(delayed(t_statistics)(**{
'null':null,
'size':(size,int(n_permutation)),}) for i in range(n_ps))
return np.array(ps)
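# Illustrative usage sketch (not part of the original module). The scores
# vector is synthetic; it stands in for e.g. cross-validated decoding scores
# compared against a 0.5 chance baseline.
def _example_resample_ttest():
    scores = np.random.uniform(0.45, 0.75, size = 48)
    ps = resample_ttest(scores,
                        baseline      = 0.5,
                        n_ps          = 20,
                        n_permutation = 5000,
                        one_tail      = True,
                        n_jobs        = 1)
    print(ps.mean())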
def resample_ttest_2sample(a,b,
n_ps = 100,
n_permutation = 10000,
one_tail = False,
match_sample_size = True,
n_jobs = 6,
verbose = 0):
from joblib import Parallel,delayed
import gc
# when the samples are dependent just simply test the pairwise difference against 0
# which is a one sample comparison problem
if match_sample_size:
difference = a - b
ps = resample_ttest(difference,
baseline = 0,
n_ps = n_ps,
n_permutation = n_permutation,
one_tail = one_tail,
n_jobs = n_jobs,
verbose = verbose,)
return ps
else: # when the samples are independent
t_experiment = np.mean(a) - np.mean(b)
if not one_tail:
t_experiment = np.abs(t_experiment)
def t_statistics(a,b):
group = np.concatenate([a,b])
np.random.shuffle(group)
new_a = group[:a.shape[0]]
new_b = group[a.shape[0]:]
t_null = np.mean(new_a) - np.mean(new_b)
if not one_tail:
t_null = np.abs(t_null)
return t_null
gc.collect()
ps = np.zeros(n_ps)
for ii in range(n_ps):
t_null_null = Parallel(n_jobs = n_jobs,verbose = verbose)(delayed(t_statistics)(**{
'a':a,
'b':b}) for i in range(n_permutation))
if one_tail:
ps[ii] = ((np.sum(t_null_null >= t_experiment)) + 1) / (n_permutation + 1)
else:
ps[ii] = ((np.sum(np.abs(t_null_null) >= np.abs(t_experiment))) + 1) / (n_permutation + 1) / 2
return ps
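# Illustrative usage sketch (not part of the original module). The two samples
# are synthetic and matched in size, so the paired (one-sample) branch is used.
def _example_resample_ttest_2sample():
    a = np.random.normal(0.60, 0.05, size = 30)
    b = np.random.normal(0.50, 0.05, size = 30)
    ps = resample_ttest_2sample(a, b,
                                n_ps              = 10,
                                n_permutation     = 2000,
                                one_tail          = False,
                                match_sample_size = True,
                                n_jobs            = 1)
    print(ps.mean())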
class MCPConverter(object):
import statsmodels as sms
"""
https://gist.github.com/naturale0/3915e2def589553e91dce99e69d138cc
https://en.wikipedia.org/wiki/Holm%E2%80%93Bonferroni_method
input: array of p-values.
* convert p-value into adjusted p-value (or q-value)
"""
def __init__(self, pvals, zscores = None):
self.pvals = pvals
self.zscores = zscores
self.len = len(pvals)
if zscores is not None:
srted = np.array(sorted(zip(pvals.copy(), zscores.copy())))
self.sorted_pvals = srted[:, 0]
self.sorted_zscores = srted[:, 1]
else:
self.sorted_pvals = np.array(sorted(pvals.copy()))
self.order = sorted(range(len(pvals)), key=lambda x: pvals[x])
def adjust(self, method = "holm"):
import statsmodels as sms
"""
methods = ["bonferroni", "holm", "bh", "lfdr"]
(local FDR method needs 'statsmodels' package)
"""
if method == "bonferroni":
return [np.min([1, i]) for i in self.sorted_pvals * self.len]
elif method == "holm":
return [np.min([1, i]) for i in (self.sorted_pvals * (self.len - np.arange(1, self.len+1) + 1))]
elif method == "bh":
p_times_m_i = self.sorted_pvals * self.len / np.arange(1, self.len+1)
return [np.min([p, p_times_m_i[i+1]]) if i < self.len-1 else p for i, p in enumerate(p_times_m_i)]
elif method == "lfdr":
if self.zscores is None:
raise ValueError("Z-scores were not provided.")
return sms.stats.multitest.local_fdr(abs(self.sorted_zscores))
else:
raise ValueError("invalid method entered: '{}'".format(method))
def adjust_many(self, methods = ["bonferroni", "holm", "bh", "lfdr"]):
if self.zscores is not None:
df = pd.DataFrame(np.c_[self.sorted_pvals, self.sorted_zscores], columns=["p_values", "z_scores"])
for method in methods:
df[method] = self.adjust(method)
else:
df = pd.DataFrame(self.sorted_pvals, columns=["p_values"])
for method in methods:
if method != "lfdr":
df[method] = self.adjust(method)
return df
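# Illustrative usage sketch (not part of the original module). The p-values
# are made up; 'lfdr' is left out because it needs z-scores.
def _example_MCPConverter():
    pvals = np.array([0.001, 0.02, 0.04, 0.30, 0.50])
    converter = MCPConverter(pvals = pvals)
    print(converter.adjust_many(methods = ["bonferroni", "holm", "bh"]))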
def define_roi_category():
roi_dict = {'fusiform':'Visual',
'parahippocampal':'Visual',
'pericalcarine':'Visual',
'precuneus':'Visual',
'superiorparietal':'Working Memory',
'inferiortemporal':'Visual',
'lateraloccipital':'Visual',
'lingual':'Visual',
'rostralmiddlefrontal':'Working Memory',
'superiorfrontal':'Working Memory',
'ventrolateralPFC':'Working Memory',
'inferiorparietal':'Visual',
}
return roi_dict
def stars(x):
if x < 0.001:
return '***'
elif x < 0.01:
return '**'
elif x < 0.05:
return '*'
else:
return 'n.s.'
def get_fs(x):
return x.split(' + ')[0]
def get_clf(x):
return x.split(' + ')[1]
def rename_roi(x):
return x.split('-')[-1] + '-' + x.split('-')[1]
def strip_interaction_names(df_corrected):
results = []
for ii,row in df_corrected.iterrows():
row['window'] = row['level1'].split('_')[0]
try:
row['attribute1']= row['level1'].split('_')[1]
row['attribute2']= row['level2'].split('_')[1]
except:
row['attr1']= row['level1'].split('_')[1]
row['attr2']= row['level2'].split('_')[1]
results.append(row.to_frame().T)
results = pd.concat(results)
return results
def compute_xy(df_sub,position_map,hue_map):
df_add = []
for ii,row in df_sub.iterrows():
xtick = int(row['window']) - 1
attribute1_x = xtick + position_map[hue_map[row['attribute1']]]
attribute2_x = xtick + position_map[hue_map[row['attribute2']]]
row['x1'] = attribute1_x
row['x2'] = attribute2_x
df_add.append(row.to_frame().T)
df_add = | pd.concat(df_add) | pandas.concat |
"""
Implementation of statistical analysis tools used to test for a significant difference between two trajectories.
(1) Splinectomy longitudinal statistical analysis tools
References:
    - https://github.com/RRShieldsCutler/splinectomeR
    - https://www.frontiersin.org/articles/10.3389/fmicb.2018.00785/full
The following three methods are translated from R to Python and adapted for our project.
The original package, splinectomeR, is implemented in R; see the reference links for more details.
In short, the test compares whether the area between two polynomial curves is significantly different.
Our trajectory lines are the polynomial fits with degree 2 or 3.
(2) Linear regression statistical analysis tools
Besides the three splinectomeR methods, there is one more test, based on comparing two straight lines (y = k*x + n).
To compare two linear fits, we test for a significant difference in the two coefficients that define each line, k and n.
In the microbiome trajectory, the only linear part is where the infant is younger than 2 years. After that, the
trajectory plateaus; therefore, this test can be used only before 2 years of age.
"""
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import statsmodels.api as sm
def permuspliner(
data,
xvar,
yvar,
category,
degree,
cases,
groups=None,
perms=999,
test_direction="more",
ints=1000,
quiet=False,
cut_sparse=4,
):
"""Permuspliner tests for a significant difference between two groups overall.
Permutation to test whether there is a non-zero trend among a set
of individuals/samples over a continuous variable -- such as time. So, there
does not need to be two groups in this test. The x variable datapoints are
permuated within each case/individual, thus maintaining the distribution in
the y component but shuffling the hypothesized trend.
Note: the description is taken from splinectomeR
Parameters
----------
data: pd.DataFrame
A dataframe object containing your data.
xvar: str
The independent variable; is continuous, e.g. y.
yvar: str
The dependent variable; is continuous, e.g. y_pred.
category: str
The column name of the category to be tested, e.g. country.
cases:
The column name defining the individual cases, e.g. subjectID.
groups: dict
If more than two groups, the two groups to compare as character vector, e.g. ["RUS", "FIN"].
perms: int
The number of permutations to generate
cut_sparse: int
Minimum number of total observations necessary per group to fit a spline (default 4)
ints: int
Number of x intervals over which to measure area
quiet: bool
Silence all text outputs
Returns
-------
result: dict
        Dictionary with the fitted splines, the permutation results and, most importantly, the p-value under the key 'pval'.
"""
perm_output = {}
equation = lambda a, b: np.polyval(a, b)
if test_direction not in ["more", "less"]:
raise Exception(
"Error in test direction option: must be either 'more' or 'less'"
)
in_df = data.copy()
# Determine the two groups to compare
    if groups is None:
        if len(in_df[category].unique()) > 2:
            raise Exception(
                'More than two groups in the category column. Define the two groups to compare with groups=["Name1", "Name2"]'
            )
        else:
            cats = in_df[category].unique()
            v1 = cats[0]
            v2 = cats[1]
else:
v1 = groups[0]
v2 = groups[1]
if quiet == False:
print(f"\nGroups detected:", v1, "and", v2, ".\n")
print(
f"\nNow testing between variables",
v1,
"and",
v2,
"for a difference in the response labeled",
yvar,
"\n",
)
print(
f"\nScalpel please: performing permusplinectomy with",
perms,
"permutations...\n",
)
# The experimentally reported response
df_v1 = in_df[(in_df[category] == v1) & (~in_df[xvar].isna())]
df_v2 = in_df[(in_df[category] == v2) & (~in_df[xvar].isna())]
if len(df_v1[xvar]) < cut_sparse or len(df_v2[xvar]) < cut_sparse:
raise Exception("Not enough data in each group to fit spline")
# Prevent issues arising from identical case labels across groups
if len(list(set(df_v1[cases]).intersection(set(df_v2[cases])))) > 0:
raise Exception(
"\nIt appears there may be identically labeled cases in both groups.\n...Please ensure that the cases are uniquely labeled between the two groups\n"
)
# Fit the splines for each group
p_v1, _ = np.polyfit(
df_v1[xvar], df_v1[yvar], degree, cov=True
) # parameters and covariance from of the fit of 1-D polynom.
p_v2, _ = np.polyfit(
df_v2[xvar], df_v2[yvar], degree, cov=True
) # parameters and covariance from of the fit of 1-D polynom.
x0 = max(min(df_v1[xvar]), min(df_v2[xvar]))
x1 = min(max(df_v1[xvar]), max(df_v2[xvar]))
x0 = x0 + (
(x1 - x0) * 0.1
) # Trim the first and last 10% to avoid low-density artifacts
x1 = x1 - ((x1 - x0) * 0.1)
xby = (x1 - x0) / (ints - 1)
xx = np.arange(x0, x1, xby) # Set the interval range
# var1 , x
v1_spl_f = pd.DataFrame(data={"var1": equation(p_v1, xx), "x": xx})
v2_spl_f = pd.DataFrame(data={"var2": equation(p_v2, xx), "x": xx})
real_spl_dist = (
v1_spl_f.set_index("x").join(v2_spl_f.set_index("x"), on="x").reset_index()
)
real_spl_dist["abs_dist"] = np.abs(
real_spl_dist.var1 - real_spl_dist.var2
) # Measure the real group distance
real_area = (
np.sum(real_spl_dist.abs_dist) / ints
) # Calculate the area between the groups
if quiet == False:
print(
"\nArea between groups successfully calculated, now spinning up permutations...\n"
)
# Define the permutation function
case_shuff = "case_shuff" # Dummy label
def spline_permute(randy, ix, perm_output):
randy_meta = randy.drop_duplicates(subset=cases, keep="first").copy(
deep=False
) # Pull out the individual IDs
randy_meta[case_shuff] = np.random.choice(
randy_meta[category].values,
size=len(randy_meta[category].values),
replace=True,
p=None,
)
randy_meta = randy_meta[[cases, case_shuff]]
randy = (
randy.set_index(cases)
.join(randy_meta.set_index(cases), on=cases)
.reset_index()
)
randy_v1 = randy[(randy[case_shuff] == v1) & (~randy[xvar].isna())]
randy_v2 = randy[(randy[case_shuff] == v2) & (~randy[xvar].isna())]
# Fit the splines for the permuted groups
p_randy_v1, _ = np.polyfit(randy_v1[xvar], randy_v1[yvar], degree, cov=True)
p_randy_v2, _ = np.polyfit(randy_v2[xvar], randy_v2[yvar], degree, cov=True)
# var1 , x
randy_v1_fit = pd.DataFrame(data={"var1": equation(p_randy_v1, xx), "x": xx})
randy_v2_fit = pd.DataFrame(data={"var2": equation(p_randy_v2, xx), "x": xx})
spl_dist = (
randy_v1_fit.set_index("x")
.join(randy_v2_fit.set_index("x"), on="x")
.reset_index()
)
spl_dist["abs_dist"] = np.abs(
spl_dist.var1 - spl_dist.var2
) # Measure the real group distance
real_area = (
np.sum(real_spl_dist.abs_dist) / ints
) # Calculate the area between the groups
transfer_perms = spl_dist[["var1", "var2", "abs_dist"]]
transfer_perms = transfer_perms.rename(
columns={
"var1": f"v1perm_{ix}",
"var2": f"v2perm_{ix}",
"abs_dist": f"pdistance_{ix}",
}
)
if ix > 0:
perm_retainer = perm_output["perm_retainer"]
else:
perm_retainer = pd.DataFrame({"x": xx})
perm_retainer = pd.concat([perm_retainer, transfer_perms], axis=1)
perm_output["perm_retainer"] = perm_retainer
perm_area = [
np.nansum(spl_dist["abs_dist"]) / np.sum(~spl_dist["abs_dist"].isna())
] # Calculate the area between permuted groups
if ix > 0:
permuted = perm_output["permuted"]
else:
permuted = []
permuted = np.concatenate([permuted, perm_area])
perm_output["permuted"] = permuted
return perm_output
# Run the permutation over desired number of iterations
in_rand = pd.concat([df_v1, df_v2])
pval = None
for ix in range(perms):
perm_output = spline_permute(in_rand, ix, perm_output)
if quiet == False:
print("...permutations completed...\n")
if test_direction == "more":
pval = (np.sum(perm_output["permuted"] >= real_area) + 1) / (perms + 1)
elif test_direction == "less":
pval = (sum(perm_output["permuted"] <= real_area) + 1) / (perms + 1)
if quiet == False:
print("p-value: ", pval)
result = {
"pval": pval,
"category_1": v1,
"category_2": v2,
"v1_interpolated": v1_spl_f,
"v2_interpolated": v2_spl_f,
# "v1_spline": df_v1_spl, "v2_spline": df_v2_spl,
"permuted_splines": perm_output["perm_retainer"],
"true_distance": real_spl_dist,
"v1_data": df_v1,
"v2_data": df_v2,
}
return result
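# Illustrative usage sketch (not part of the original module). The dataframe
# is synthetic: 20 hypothetical subjects split between two countries, with a
# fake trajectory value per month. perms is kept small only to make the
# example fast.
def _example_permuspliner():
    rng = np.random.RandomState(0)
    demo = pd.DataFrame({
        'subjectID': np.repeat(np.arange(20), 5),
        'country':   np.repeat(['RUS', 'FIN'], 50),
        'month':     np.tile(np.arange(5), 20)})
    demo['y_pred'] = (demo['month'] * 0.1
                      + (demo['country'] == 'FIN') * 0.5
                      + rng.normal(0, 0.1, len(demo)))
    result = permuspliner(demo,
                          xvar     = 'month',
                          yvar     = 'y_pred',
                          category = 'country',
                          degree   = 2,
                          cases    = 'subjectID',
                          groups   = ['RUS', 'FIN'],
                          perms    = 99,
                          quiet    = True)
    print(result['pval'])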
def permuspliner_plot_permdistance(data, xlabel=None, ylabel=""):
"""Plot permuted distance distribution
Compare the permuted distances to the true distance. Requires permuspliner run with the "retain_perm" option.
Parameters
----------
data: pd.DataFrame
The results object from the permuspliner function
xlabel: str
Name for the x axis in the plot
ylabel: str
Name for the y axis in the plot
"""
sns.set_style("whitegrid")
if xlabel is None:
xlabel = "longitudinal parameter"
dists = data["permuted_splines"]
cols = dists.columns[dists.columns.str.startswith("pdistance_")]
dists = dists.set_index("x")
dists = dists[cols]
num_perms = dists.shape[1]
dists = dists.reset_index()
dists = pd.melt(
dists, id_vars="x", var_name="permutation", value_name="permuted_distance"
)
if num_perms > 1000:
alpha_level = 0.002
elif num_perms >= 100:
alpha_level = 0.02
elif num_perms < 100:
alpha_level = 0.25
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
for p in dists.permutation.unique():
ax.plot(
dists[dists.permutation == p]["x"].values,
dists[dists.permutation == p]["permuted_distance"].values,
alpha=alpha_level,
solid_capstyle="butt",
c="k",
)
true_dist = data["true_distance"]
ax.plot(true_dist["x"].values, true_dist["abs_dist"].values, color="r")
plt.show()
def permuspliner_plot_permsplines(
data=None, xvar=None, yvar=None, xlabel="", ylabel=""
):
"""Plot permuted splines along the real data
Compare how the permuted splines fit with the real data. Provides visual support for p values.
Parameters
----------
data: pd.DataFrame
The results object from the permuspliner function
xvar: str
Name (as string) of the longitudinal x variable in the data
yvar: str
Name (as string) of the response/y variable in the data
xlabel: str
Name for the x axis in the plot
ylabel: str
Name for the y axis in the plot
"""
sns.set_style("whitegrid")
permsplines = data["permuted_splines"]
cols = permsplines.columns[permsplines.columns.str.contains("perm")]
permsplines = permsplines.set_index("x")
permsplines = permsplines[cols]
permsplines = permsplines.reset_index()
num_perms = permsplines.shape[1] / 2
permsplines = pd.melt(
permsplines, id_vars="x", var_name="permutation", value_name="y.par"
)
var_1 = data["category_1"]
var_2 = data["category_2"]
true_v1 = data["v1_interpolated"]
true_v1["Group"] = var_1
true_v2 = data["v2_interpolated"]
true_v2["Group"] = var_2
true_data = | pd.concat([true_v1, true_v2], axis=0) | pandas.concat |
"""
"Stacking: LGB, XGB, Cat with and without imputation (old & new LGBs),tsne,logistic"
"""
import os
from timeit import default_timer as timer
from datetime import datetime
from functools import reduce
import pandas as pd
import src.common as common
import src.config.constants as constants
import src.munging as process_data
import src.modeling as model
from sklearn.model_selection import KFold
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import LogisticRegression
common.set_timezone()
start = timer()
# Create RUN_ID
RUN_ID = datetime.now().strftime("%m%d_%H%M")
MODEL_NAME = os.path.basename(__file__).split(".")[0]
SEED = 42
EXP_DETAILS = "Stacking: LGB, XGB, Cat with and without imputation (old & new LGBs),tsne,logistic"
IS_TEST = False
PLOT_FEATURE_IMPORTANCE = False
TARGET = "claim"
MODEL_TYPE = "Ranking"
LOGGER_NAME = "ranking"
logger = common.get_logger(LOGGER_NAME, MODEL_NAME, RUN_ID, constants.LOG_DIR)
common.set_seed(SEED)
logger.info(f"Running for Model Number [{MODEL_NAME}] & [{RUN_ID}]")
common.update_tracking(RUN_ID, "model_number", MODEL_NAME, drop_incomplete_rows=True)
common.update_tracking(RUN_ID, "model_type", MODEL_TYPE)
common.update_tracking(RUN_ID, "metric", "roc_auc")
train_df, test_df, sample_submission_df = process_data.read_processed_data(
logger, constants.PROCESSED_DATA_DIR, train=True, test=True, sample_submission=True
)
# Read different submission files and merge them to create dataset
# for level 2
sub_1_predition_name = (
"sub_lgb_K5_nonull_mean_sum_max_no_imp_no_scaler_params_K_0924_1159_0.81605.gz"
)
sub_1_oof_name = (
"oof_lgb_K5_nonull_mean_sum_max_no_imp_no_scaler_params_K_0924_1159_0.81605.csv"
)
sub_1_test_pred = pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_1_predition_name}")
sub_1_oof_pred = pd.read_csv(f"{constants.OOF_DIR}/{sub_1_oof_name}")
logger.info(
f"Shape of submission and oof file {sub_1_test_pred.shape}, {sub_1_oof_pred.shape}"
)
sub_2_predition_name = (
"sub_lgb_K10_nonull_mean_sum_max_mean_imp_no_scaler_params_K_0924_1406_0.81633.gz"
)
sub_2_oof_name = (
"oof_lgb_K10_nonull_mean_sum_max_mean_imp_no_scaler_params_K_0924_1406_0.81633.csv"
)
sub_2_test_pred = | pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_2_predition_name}") | pandas.read_csv |
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
import re
import os
import time
import wget
timestr = time.strftime("%Y%m%d")
listaFinal = []
def rasparArtigo(itemFinal, pasta, nomeRevista, linkIssues):
'''
função para baixar PDF e dados do artigo
'''
item = itemFinal.find_all('td')
timestr = time.strftime("%Y-%m-%d")
# Acessa e itera em cada edição da revista
for issue in item:
try:
linkissue = issue.find('a')['href']
print('Acessando a edição: ' + linkissue +'\n')
req = urlopen(linkissue)
bs = BeautifulSoup(req.read(), 'lxml')
try:
em = bs.find('em').parent.parent
em = em.text
start = em.find('versão impressa ISSN ') + len('versão impressa ISSN ')
end = em.find('On-line')
ISSN1 = em[start:end - 7]
ISSN2 = em[end + len('On-line ISSN '):]
except:
ISSN1 = ''
ISSN2 = ''
tabelaGeral = bs.find(class_='content')
pdfArtigos = tabelaGeral.find_all('a',href=re.compile(r'/pdf/[a-z]*/'))
# Itera na lista de PDF de cada edição
for artigo in pdfArtigos:
link = artigo['href']
idArtigo = link.replace('/pdf/', '')
linkFinal = 'http://www.scielo.br' + link
fullName = idArtigo.replace('/','_')
path = os.path.join(pasta, fullName)
listaInterna = [nomeRevista, ISSN1, ISSN2, linkIssues, fullName]
listaFinal.append(listaInterna)
df = | pd.DataFrame(listaFinal, columns=['Nome da Revista', 'ISSN - impresso', 'ISSN - digital', 'Link', 'ID do artigo']) | pandas.DataFrame |
"""
Copyright (C) 2016 <NAME>.
Licensed under the Apache 2.0 License.
functions.py
~~~~~~~
Subfunctions definitions to allow overall analysis.
"""
import os
import shutil
import time
import yaml
import calliope
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from calliope import utils
def clean_temp(path):
"""Deletes allchanged and new files and copies the raw data."""
if os.path.exists(path + '/example_model'):
shutil.rmtree(path + '/example_model')
if os.path.exists(path + '/temp'):
shutil.rmtree(path + '/temp')
shutil.copytree(path+'/paste-folder/example_model',
path + '/example_model')
def get_time(t_start):
"""Returns the runtime since the input time."""
t_end = time.time()
t_running = t_end - t_start
time_string = 'Process executed after ' + str(round(t_running, 1)) \
+ ' seconds (i.e. ' + str(int(t_running/60)) + ':' \
+ str(int(round(t_running % 60, 0))).zfill(2) + ').'
return time_string
def get_stuff(solution):
"""
    Extracts the relevant information from a calliope solution into a more
    convenient form. Returns a calliope.utils.AttrDict with that information.
"""
info_attrdict = utils.AttrDict()
# Get model config (nodes, edges, capacities, ...)
info_attrdict['nodepower'] = solution['node']['e:power']
# Node_power
info_attrdict['e_cap'] = solution['parameters']['e_cap']
# Parameter_e_cap
info_attrdict['config_model'] = solution['config_model']
# Locations & metadata
info_attrdict['costs'] = solution['costs']
# Costs for each y in each x
info_attrdict['node_power'] = solution['node']['e:power']
# Node power for all times
info_attrdict['totals'] = solution['totals']
# Totals
frame_index = info_attrdict['costs']
location_list = list(frame_index.axes[1])
techs_list = list(frame_index.axes[2])
i = 0
for each_link in info_attrdict['config_model']['links'].keys():
for each_tech in \
info_attrdict['config_model']['links'][each_link].keys():
i += 1
frame_index = frame_index.to_frame()
info_attrdict['labels'] = {'frame': {'index': frame_index.axes,
'shape': frame_index.shape},
'techs': techs_list,
'locations': location_list,
'connections': i}
return info_attrdict
def get_stuff_from_hdf(path_to_solution):
"""
    Extracts the relevant information from a calliope solution.hdf file into a
    more convenient form. Returns a calliope.utils.AttrDict with that information.
"""
info_attrdict = utils.AttrDict()
info_compleate = calliope.read.read_hdf(path_to_solution)
# Get model config (nodes, edges, capacities, ...)
info_attrdict['nodepower'] = info_compleate['node']['e:power']
# Node_power
info_attrdict['e_cap'] = info_compleate['parameters']['e_cap']
# Parameter_e_cap
info_attrdict['config_model'] = info_compleate['config_model']
# Locations & metadata
info_attrdict['costs'] = info_compleate['costs']
# Costs for each y in each x
info_attrdict['node_power'] = info_compleate['node']['e:power']
# Node power for all times
info_attrdict['totals'] = info_compleate['totals']
# Totals
frame_index = info_attrdict['costs']
location_list = list(frame_index.axes[1])
techs_list = list(frame_index.axes[2])
frame_index = frame_index.to_frame()
i = 0
for each_link in info_attrdict['config_model']['links'].keys():
for each_tech in \
info_attrdict['config_model']['links'][each_link].keys():
i += 1
info_attrdict['labels'] = {'frame': {'index': frame_index.axes,
'shape': frame_index.shape},
'techs': techs_list,
'locations': location_list,
'connections': i}
return info_attrdict
def run_model(path):
"""Runs the model. Returns the calliope.solution."""
model = calliope.Model(config_run=path)
model.run()
solution = model.solution
return solution
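# Illustrative usage sketch (not part of the original module); it assumes the
# example_model folder used throughout this file is in place relative to the
# working directory.
def _example_run_model():
    solution = run_model('example_model/run.yaml')
    info = get_stuff(solution)
    print(info['labels']['techs'])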
def get_original_caps(modelinfo):
"""
    Reshapes the capacity pd.DataFrame for consistent downstream use.
    Returns the reshaped pd.Series.
"""
e_cap = modelinfo['e_cap'].transpose()
original_cap_frame = e_cap.unstack()
return original_cap_frame
def copy_models_frame_based(modelinfos, initial_constraints,
create_parallel_script=False):
"""
    Prepares the copy-run files with all needed overrides.
    Returns a dict with the capacity pd.DataFrame and the folder name.
"""
with open('example_model/model_config/locations.yaml') as loc_reader:
locations_dict = yaml.load(loc_reader)
with open('example_model/model_config/techs.yaml') as tec_reader:
techs_dict = yaml.load(tec_reader)
# Force settings per override
cost_frame = modelinfos.costs.to_frame()
caps_frame = pd.DataFrame({
'max_caps': np.full(cost_frame.shape[0], np.nan),
'min_caps(original_model)': np.zeros(cost_frame.shape[0])
}, cost_frame.axes[0])
override_list = list()
for x in list(modelinfos.e_cap.index): # Locations and links
        for y in list(pd.DataFrame(modelinfos.e_cap,
index=[x])): # Techs per location and link
if modelinfos['config_model']['locations'][x]['level'] != 0:
caps_frame.loc[x, y]['min_caps(original_model)'] = \
modelinfos.e_cap.ix[x][y]
else:
if ':' in y:
if int(str(x).split('r')[1]) < int(str(y).split(':r')[1]):
# Link overrides
split_link = str(x) + ',' + str(y).split(':')[1]
# Recreate link by location & destination
# ATTENTION:
# override structure in nested dict must already exist!
locations_dict['links'][split_link][
str(y).split(':')[0]
]['constraints']['e_cap.min'] = \
float(modelinfos.e_cap.loc[x][y])
caps_frame.loc[x, y]['min_caps(original_model)'] = \
modelinfos.e_cap.loc[x][y]
caps_frame.loc[x, y]['max_caps'] = np.inf
new_override_dict = {
'override.links.' + str(split_link) + '.'
+ str(y).split(':')[0]
+ '.constraints.e_cap.max': 0,
'override.links.' + str(split_link) + '.'
+ str(y).split(':')[0]
+ '.constraints.e_cap.min': 0
}
total_override_dict = {**new_override_dict,
**initial_constraints}
override_list.append(total_override_dict)
elif int(str(x).split('r')[1]) != \
int(str(y).split(':r')[1]):
# Necessary? see loop above
caps_frame.loc[x, y]['min_caps(original_model)'] = \
modelinfos.e_cap.loc[x][y]
else:
if techs_dict['techs'][y]['parent'] == 'supply' \
and y in locations_dict['locations'][x]['techs']:
locations_dict['locations'][
x
]['override'][y]['constraints']['e_cap.min'] = \
float(modelinfos.e_cap.loc[x][y])
caps_frame.loc[x, y][
'min_caps(original_model)'
] = modelinfos.e_cap.loc[x][y]
caps_frame.loc[x, y]['max_caps'] = \
techs_dict['techs'][
y
]['constraints']['e_cap.max']
else:
caps_frame.loc[x, y]['min_caps(original_model)'] = \
modelinfos.e_cap.loc[x][y]
# At this point all settings for the operate setting
# should be ready to create the exact copy
with open('example_model/model_config/copy_locations.yaml', 'w') \
as loc_override:
loc_override.write(yaml.dump(locations_dict))
# Set up copy_model to run copy settings
with open('example_model/model_config/model.yaml') as model_reader:
model_dict = yaml.load(model_reader)
model_dict['import'] = [item.replace('locations', 'copy_locations') for
item in model_dict['import']]
model_dict['name'] = 'copy '+model_dict['name']
with open('example_model/model_config/copy_model.yaml', 'w') \
as model_writer:
model_writer.write(yaml.dump(model_dict))
with open('example_model/run.yaml') as run_reader:
run_dict = yaml.load(run_reader)
cut_number = run_dict['model'].find('model.yaml')
run_dict['model'] = run_dict['model'][:cut_number] + 'copy_' \
+ run_dict['model'][cut_number:]
run_dict['output']['path'] += '/copy_runs'
run_dict['parallel']['iterations'] = override_list
folder_name = run_dict['parallel']['name']
with open('example_model/copy_run.yaml', 'w') as run_writer:
run_writer.write(yaml.dump(run_dict))
if create_parallel_script is True:
path_extension = os.path.abspath('')
parallel_sh = '#!/bin/bash\ncalliope generate '\
+ path_extension\
+ '/example_model/copy_run.yaml '\
+ path_extension \
+ '/example_model\n' # local paths!!
with open('example_model/parallel.sh', 'w') as create_shell:
create_shell.write(parallel_sh)
return_dict = {'caps': caps_frame, 'name': folder_name}
return return_dict
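# Illustrative usage sketch (not part of the original module); it assumes the
# example_model folder is present and passes an empty dict as the initial
# constraints, which leaves only the per-link overrides generated here.
def _example_copy_models_frame_based():
    solution = run_model('example_model/run.yaml')
    model_info = get_stuff(solution)
    copy_info = copy_models_frame_based(model_info,
                                        initial_constraints={},
                                        create_parallel_script=True)
    print(copy_info['name'])
    print(copy_info['caps'].head())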
def read_results(model_info, run):
"""
Reads the costs and capacities from one run.
Returns a dict with pd.Dataframe of costs and capacities.
"""
if not type(run) == str:
run = str(run).zfill(4)
# else:
# run = run.zfill(4)
frame_index = model_info['labels']['frame']['index'][0]
cost_dict = {run+'_cost': list()}
for item in list(model_info['costs'].items):
for major in list(model_info['costs'].major_axis):
for minor in list(model_info['costs'].minor_axis):
cost_dict[run + '_cost'] += \
[model_info['costs'].loc[item, major, minor]]
cost_frame = pd.DataFrame(cost_dict, frame_index)
caps_dict = {run + '_caps': list()}
for x in model_info['labels']['locations']:
for y in model_info['labels']['techs']:
caps_dict[run + '_caps'] += [model_info['e_cap'].loc[x, y]]
caps_frame = pd.DataFrame(caps_dict, frame_index)
return_dict = {'costs': cost_frame, 'caps': caps_frame}
return return_dict
def create_parallel_sh(path_to_run, model_info, folder_name, folder_name_2):
"""
    Creates, prepares and runs a bash script with the corresponding
    folder structure for the parallel calliope run.
"""
# I need to add an intermediate step to give parallel script output
# and run read_results(see above) with consistent input
os.system('bash ' + path_to_run + 'parallel.sh')
os.mkdir(path_to_run + folder_name + '/Runs/model_config')
shutil.copytree(path_to_run + '/model_config/data', path_to_run + '/'
+ folder_name + '/Runs/model_config/data')
for i in range(model_info['labels']['connections']):
# number of all connections
cmd_base = 'cd '+os.path.abspath('') + '/' + folder_name_2 + '/' \
+ folder_name + ' && '
cmd_special = './run.sh ' + str(i+1)
cmd = cmd_base + cmd_special
os.system(cmd)
def create_parallel_sequentially(path_to_run, model_info):
"""Runs all calliope.Models sequencially instead of parallel."""
# Some problems and not necessary/usefull in most cases
with open(path_to_run + 'copy_run.yaml') as run_reader:
run_dict = yaml.load(run_reader)
folder_name = run_dict['parallel']['name']
if not os.path.exists(path_to_run + '/' + folder_name + '/Output'):
os.makedirs(path_to_run + '/' + folder_name + '/Output')
initial_override = run_dict['override']
initial_path = run_dict['output']['path']
for i in range(model_info['labels']['connections']):
if initial_override is None:
temp_dict = run_dict['parallel']['iterations'][i]
new_dict = dict()
for key in temp_dict:
new_dict[key] = temp_dict[key]
run_dict['override'] = new_dict
del new_dict
del temp_dict
else:
for keys in run_dict['parallel']['iterations'][i].keys():
run_dict['override'][keys] = \
run_dict['parallel']['iterations'][i][keys]
run_dict['output']['path'] = \
path_to_run + folder_name + '/Output/'+str(i+1).zfill(4)
if not os.path.exists(
path_to_run + '/' + folder_name + '/Output/'+str(i+1).zfill(4)
):
os.makedirs(
path_to_run + '/' + folder_name + '/Output/'+str(i+1).zfill(4)
)
with open(path_to_run + 'copy_run.yaml', 'w') as run_writer:
run_writer.write(yaml.dump(run_dict))
temp_model = calliope.Model(config_run=path_to_run + 'copy_run.yaml')
temp_model.run()
temp_model.save_solution('hdf')
run_dict['output']['path'] = initial_path
run_dict['override'] = initial_override
with open(path_to_run + 'copy_run.yaml', 'w') as run_writer:
run_writer.write(yaml.dump(run_dict))
def concat_frames(model_info, folder_name, path_name):
"""
    Collects the information from all runs.
    Returns a dict with pd.DataFrames of costs and capacities.
"""
cap_dict = dict()
cost_dict = dict()
tot_list = list()
for i in range(1, model_info['labels']['connections']+1):
# i runs from first to last Model (starting with 1)
temp_copy_info = get_stuff_from_hdf(path_name + '/' + folder_name
+ '/Output/' + str(i).zfill(4)
+ '/solution.hdf')
answer_dict = read_results(temp_copy_info, i)
tot_list = tot_list + [answer_dict]
cap_dict[str(i).zfill(4)] = answer_dict['caps']
cost_dict[str(i).zfill(4)] = answer_dict['costs']
cap_frame = pd.concat(cap_dict, axis=1)
cost_frame = pd.concat(cost_dict, axis=1)
max_cap_list = list()
for i in range(int(cap_frame.shape[0])):
max_cap_list = max_cap_list + [max(cap_frame.iloc[i][:])]
cap_frame['max'] = max_cap_list
max_cost_list = list()
for i in range(cost_frame.shape[0]):
max_cost_list = max_cost_list + [max(cost_frame.iloc[i][:])]
cost_frame['max'] = max_cost_list
frame_dict = {'costs': cost_frame, 'caps': cap_frame}
return frame_dict
def analyse_results(cap_cost_dict, folder_name):
"""
Adds a column with the maximal values of each row.
Saves the tables of costs and capacities as csv.
Returns pd.Dataframe with capacities.
"""
path_extension = os.path.abspath('')
cost = cap_cost_dict['costs']
caps = cap_cost_dict['caps']
max_list = list()
for i in range(len(cost)):
max_list += [max(cost.iloc[i][:])]
cost['max_cost'] = pd.Series(max_list, index=cost.index)
cost.to_csv(path_extension + '/example_model/'
+ folder_name + '/Overview/cost.csv')
max_list = list()
for i in range(len(caps)):
max_list += [max(caps.iloc[i][:])]
caps['max_caps'] = pd.Series(max_list, index=caps.index)
caps.to_csv(path_extension + '/example_model/'
+ folder_name + '/Overview/caps.csv')
return caps
def plot_bars(frame, content='caps', fixed_scale=False):
"""Creates bar plots and saves them in respective directories."""
max_list = list()
for i in range(frame.shape[0]):
temp_frame = frame.iloc[i][:]
if not temp_frame[-1] == 0 and not temp_frame.eq(temp_frame[0]).all():
max_list.append(max(temp_frame))
if content == 'caps':
y_pos = list(range(frame.shape[1]-2))
x_label = [str(i) for i in range(len(y_pos))]
x_label[0] = 'original'
x_label[-1] = 'safe'
for i in range(frame.shape[0]):
if not frame.iloc[i][-1] < abs(10**-10) \
and not frame.iloc[i][1:].eq(frame.iloc[i][1]).all():
temp_fig = plt.figure()
plt.bar(y_pos[0], frame.iloc[i][1], align='center',
alpha=0.75, color='red')
plt.bar(y_pos[1:-1], frame.iloc[i][2:-2], align='center',
alpha=0.75, color='black')
plt.bar(y_pos[-1], frame.iloc[i][-1], align='center',
alpha=0.75, color='blue')
plt.xticks(y_pos, x_label)
plt.xticks(rotation=30)
plt.xlim(-2, 23)
plt.ylabel('installed capacity')
plt.title('Capacity comparison for '
+ str(frame.axes[0][i][1])
+ ' at ' + str(frame.axes[0][i][0]))
if os.path.exists('temp/figures/caps') is False:
os.makedirs('temp/figures/caps/')
if os.path.exists('temp/figures/cost') is False:
os.makedirs('temp/figures/cost/')
if os.path.exists('temp/figures/load') is False:
os.makedirs('temp/figures/load/')
if fixed_scale is True:
plt.ylim((0, max(max_list)))
temp_fig.savefig('temp/figures/caps/cap_fig_'
+ str(frame.axes[0][i][0]) + '_'
+ str(frame.axes[0][i][1])
+ '_fixed_scale.png')
else:
temp_fig.savefig('temp/figures/caps/cap_fig_'
+ str(frame.axes[0][i][0]) + '_'
+ str(frame.axes[0][i][1])+'.png')
elif content == 'costs':
y_pos = list(range(frame.shape[1]-1))
x_label = [str(i) for i in range(len(y_pos))]
x_label[0] = 'original'
x_label[-1] = 'safe'
for i in range(frame.shape[0]):
if not frame.iloc[i][-1] == 0 \
and not frame.iloc[i][:].eq(frame.iloc[i][0]).all():
temp_fig = plt.figure()
plt.bar(y_pos[0], frame.iloc[i][0], align='center',
alpha=0.75, color='red')
plt.bar(y_pos[1:-1], frame.iloc[i][1:-2], align='center',
alpha=0.75, color='black')
plt.bar(y_pos[-1], frame.iloc[i][-1], align='center',
alpha=0.75, color='blue')
plt.xticks(y_pos, x_label)
plt.xticks(rotation=30)
plt.ylabel('costs')
plt.title('Costs comparison for ' + str(frame.axes[0][i][1])
+ ' at ' + str(frame.axes[0][i][0]))
if fixed_scale is True:
plt.ylim((0, max(max_list)))
temp_fig.savefig('temp/figures/cost/cost_fig_'
+ str(frame.axes[0][i][0]) + '_'
+ str(frame.axes[0][i][1])
+ '_fixed_scale.png')
else:
temp_fig.savefig('temp/figures/cost/cost_fig_'
+ str(frame.axes[0][i][0]) + '_'
+ str(frame.axes[0][i][1]) + '.png')
def plot_lines(model_info, frame, cost_frame,
safe_info=False, bars=False, fixed_scale=False):
"""
Creates line plots and pie plots and saves them in
the respective directories.
"""
loc_dict = dict()
for i in range(model_info['node_power'].shape[2]):
if model_info['config_model']['locations'][
model_info['node_power'].axes[2][i]
]['level'] == 0:
loc_dict[model_info['node_power'].axes[2][i]] = dict()
for j in range(model_info['node_power'].shape[0]):
if model_info['config_model']['techs'][
model_info['node_power'].axes[0][j].split(':')[0]
]['parent'] == 'transmission' \
and not model_info[
'node_power'
].axes[0][j].split(':')[1] == \
model_info['node_power'].axes[2][i]:
if model_info['node_power'].axes[0][j].split(':')[-1] \
not in loc_dict[
model_info['node_power'].axes[2][i]
].keys():
loc_dict[model_info['node_power'].axes[2][i]][
model_info['node_power'].axes[0][j].split(':')[-1]
] = list()
loc_dict[model_info['node_power'].axes[2][i]][
model_info['node_power'].axes[0][j].split(':')[-1]
].append(model_info['node_power'].axes[0][j])
font = {'weight': 'light', 'size': 12}
matplotlib.rc('font', **font)
if fixed_scale is True:
max_list = list()
for l in range(frame.shape[0]):
temp_frame = frame.iloc[l][:]
if not temp_frame[-1] == 0 \
and not temp_frame.eq(temp_frame[0]).all():
max_list.append(max(temp_frame))
key_list = list(loc_dict.keys())
for i in range(len(key_list)-1):
for j in range(i+1, len(key_list)):
connections = len(loc_dict[key_list[i]][key_list[j]])
sub_plot = plt.figure(figsize=(12, 10), dpi=300)
for k in range(connections):
if bars is True:
plt.subplot(int(str(connections) + '2' + str(2*k+1)))
else:
plt.subplot(int(str(connections) + '1' + str(k+1)))
plt.plot(abs(model_info['node_power'].ix[
loc_dict[key_list[i]][key_list[j]][k],
:,
key_list[i]
]), 'r')
if safe_info:
plt.plot(abs(safe_info['node_power'].ix[
loc_dict[key_list[i]][key_list[j]][k],
:,
key_list[i]
]), 'b')
tmp = safe_info['node_power'].to_frame()
tmp.to_csv('safe_nodepower.csv')
tmp2 = model_info['node_power'].to_frame()
tmp2.to_csv('orig_nodepower.csv')
caps_origin = frame.ix[(key_list[i],
loc_dict[key_list[i]][key_list[j]][k]),
1]
caps_safe = frame.ix[(key_list[i],
loc_dict[key_list[i]][key_list[j]][k]),
-1]
origin_series = \
pd.Series(caps_origin, model_info['node_power'].axes[1])
safe_series = \
| pd.Series(caps_safe, model_info['node_power'].axes[1]) | pandas.Series |
# Copyright 2016 <NAME> and The Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import combinations
from pandas import melt, DataFrame
from bokeh.charts import Bar
from bokeh.io import show
def summary_plot(summary, dbs=['chebi', 'kegg', 'drugbank', 'pubchem']):
data = | DataFrame(summary) | pandas.DataFrame |
import numpy as np
import pandas as pd
#######################################################################################################################
def stringify_tuple(tup, sep=","):
"""
    Take a tuple and concatenate its elements into a string separated by a given character.
    Parameters
    ----------
    tup: tuple
        Tuple to convert to a string
    sep: str, default ","
        Separator between the tuple elements in the concatenated string
"""
return str(sep.join([str(e) for e in tup]))
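# Illustrative usage of stringify_tuple (values are made up, not from the
# original module):
#   stringify_tuple((1, "a", 2.5))        # -> "1,a,2.5"
#   stringify_tuple(("x", "y"), sep="|")  # -> "x|y"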
#######################################################################################################################
def to_utc(self, time_col="raw_time", reindex=False, drop=False):
"""
    Convert a raw epoch-millisecond time column to UTC datetimes.
    Parameters
    ----------
    self : pandas DataFrame
        DataFrame with a raw time column (epoch milliseconds)
    time_col : str, default 'raw_time'
        Column name that holds the raw time data
    reindex : bool, default False
        If True, use the UTC time as the index
    drop : bool, default False
        If True, the original raw time column is dropped from the returned DataFrame
    Returns
    -------
    df : pandas DataFrame
        Copy of the DataFrame with a 'utc' column, or indexed by UTC time
        when ``reindex`` is True
"""
if time_col in self.columns:
df = self.copy()
utc_col = pd.to_datetime(df[time_col], unit="ms")
if reindex:
df.index = utc_col
df.index.name = "utc"
else:
df["utc"] = utc_col
else:
raise KeyError("There's no time column in your dataframe!")
if drop:
df = df.drop([time_col], axis=1)
return df
pd.DataFrame.to_utc = to_utc
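# Minimal usage sketch of the patched accessor above (hypothetical values;
# 1609459200000 ms is 2021-01-01 00:00:00 UTC):
#
#   df = pd.DataFrame({"raw_time": [1609459200000, 1609459260000], "v": [1, 2]})
#   df.to_utc(reindex=True, drop=True)
#   # -> DataFrame with column "v", indexed by the UTC timestamps
#   #    2021-01-01 00:00:00 and 2021-01-01 00:01:00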
#######################################################################################################################
def count_na(self):
"""
    Count NaN values in every column of self
"""
num_of_values = self.count().rename("count") # number of values (not NaN) in each column
num_of_nan = self.isna().sum().rename("na_count") # number of NaN values in each column
    nan_pct = self.isna().mean().rename("na_pct")  # fraction of NaN values in each column
return | pd.concat([num_of_values, num_of_nan, nan_pct], axis=1) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import subprocess
import argparse
from argparse import ArgumentParser
import re
import glob
import collections
import time
import os
import pysam
import pandas as pd
class parseError(Exception):
pass
def get_option():
argparser = ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
argparser.add_argument('-sam', '--sam', type=str,required=True,help="Sam file with MD-tag (by samtools calmd)")
# argparser.add_argument('-ref', '--needleReference', type=str,required=True,help="Reference fasta file for needle alignment")
    argparser.add_argument('-annot', '--annotation', type=str,required=True,help="Reference annotation which includes the cut site")
    argparser.add_argument('-idenrate', '--idenrate', type=float,default=0.85,help="Minimum hit ratio in the aligned region between query and reference")
argparser.add_argument('-identity', '--identity', type=int,default=90,help="Minimum number of matches between query and reference")
    argparser.add_argument('-trim5prime', '--trim5prime', type=int,default=19,help="Number of bases to trim from the 5' end")
    argparser.add_argument('-primer5', '--primer5', type=int,default=17,help="Primer binding site length starting from the 5' end (after trimming)")
argparser.add_argument('-primer5ed', '--primer5ed', type=int,default=2,help="Maximum edit distance allowed at the 5' primer binding site")
argparser.add_argument('-noRemoveFiles', '--noRemoveFiles', action="store_true",help="Don't remove intermediate files")
argparser.add_argument('-no_annot', '--no_annot', action="store_true",help="Don't use cut site selection")
argparser.add_argument('-o', '--outname', type=str,default="gestalt",help='output file name prefix')
argparser.add_argument('-d', '--outdir', type=str, default=".", help='output directory')
argparser.add_argument('-extend', '--extend', type=int,default=0,help="Additional N bp upstream of cut site")
argparser.add_argument('-amplicon_length', '--amplicon_length', type=int,default=255,help="Amplicon size")
return argparser.parse_args()
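# Illustrative invocation (file names are placeholders, not real data):
#
#   python this_script.py -sam aligned.calmd.sam -annot cutsite_annotation.csv \
#       -o gestalt -d results
#
# Only -sam and -annot are required; every other option falls back to the
# defaults declared in get_option() above.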
def parseSam(sam):
# outParsedFile=outdir+outname+".mutationpattern"
samfile=pysam.AlignmentFile(sam,mode="r")
parsedMutationPattern={}
for line in samfile:
mut_temp={}
query=line.query_name
cigar=line.cigarstring
# cigar_tup=line.cigartuples
# md_tag=line.tags
seq=line.query_sequence
#parse cigar string
parsed_cigar=""
n=""
for c in cigar:
if re.search("[0-9]",c):
n+=c
else:
parsed_cigar+=c*int(n)
n=""
#parse seq
parsed_seq=""
d_len=0
for cnt,c in enumerate(parsed_cigar):
if c=="D":
d_len+=1
parsed_seq+="D"
else:
parsed_seq+=seq[cnt-d_len]
parsed_cigar=re.sub("D+$","",parsed_cigar)
parsed_seq=re.sub("D+$","",parsed_seq)
mut_temp={"cigar":parsed_cigar,"seq":parsed_seq}
parsedMutationPattern[query]=mut_temp
return parsedMutationPattern
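# Illustrative sketch of the expansion above (made-up record, not real data;
# assumes samtools calmd -e style sequences where "=" marks a reference match):
# a record with CIGAR "2M2D2M" and query sequence "A=C=" expands to
#   parsed_cigar = "MMDDMM"
#   parsed_seq   = "A=DDC="
# i.e. deleted reference positions are re-inserted as "D" so that cigar and
# seq line up position by position; trailing deletion runs are stripped.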
def alignmentQC(parsedMutationPattern,ref_annot,primer5ed,idenrate,identity):
cigar_now=parsedMutationPattern.split(";")[0]
seq_now=parsedMutationPattern.split(";")[1]
primer5_matching=seq_now[ref_annot["primer_5prime"][0]:ref_annot["primer_5prime"][1]]
editDist=0
for c in primer5_matching:
if not c=="=":
editDist+=1
seq_match=0
seq_ident=0
for pos,cigar_c in enumerate(cigar_now):
if cigar_c=="M":
seq_match+=1
if seq_now[pos]=="=":
seq_ident+=1
seq_idenrate=seq_ident/seq_match
if seq_idenrate<=idenrate or seq_ident<=identity or editDist>primer5ed:
return None
else:
return parsedMutationPattern
def generateMutationTable(parsedMutationPattern):
detectedPatternList=[]
delflag=False
insflag=False
subflag=False
n_ins=0
mut_index=0
cigar_now=parsedMutationPattern.split(";")[0]
seq_now=parsedMutationPattern.split(";")[1]
for cnt,cigstr in enumerate(cigar_now):
if cigstr=="I":
n_ins+=1
pos=cnt-n_ins
if cigstr=="M":
delflag=False
insflag=False
if not seq_now[cnt]=="=":
if not subflag:
mut_index+=1
subflag=True
pat_now="_".join(["S",str(pos),seq_now[cnt]])
detectedPatternList.append(pat_now)
else:
detectedPatternList[mut_index-1]+=seq_now[cnt]
else:
subflag=False
if cigstr=="I":
delflag=False
subflag=False
if not insflag:
mut_index+=1
insflag=True
pat_now="_".join(["I",str(pos+0.5),seq_now[cnt]])
detectedPatternList.append(pat_now)
else:
detectedPatternList[mut_index-1]+=seq_now[cnt]
if cigstr=="D":
insflag=False
subflag=False
if not delflag:
mut_index+=1
delflag=True
pat_now="_".join(["D",str(pos),str(pos)])
detectedPatternList.append(pat_now)
else:
detectedPatternList[mut_index-1]=re.sub("[0-9]+$",str(pos),detectedPatternList[mut_index-1])
for pos,pat in enumerate(detectedPatternList):
pat_split=pat.split("_")
if pat_split[0]=="I" or pat_split[0]=="S":
insertion=pat_split[2]
if re.search("^N+$",insertion):
del detectedPatternList[pos]
return ";".join(detectedPatternList)
def judge_in_cutsite(pos,cutsite):
for each_cut in cutsite:
if pos>=each_cut[0] and pos<=each_cut[1]:
return True
return False
def judge_include_cutsite(pos1,pos2,cutsite):
for each_cut in cutsite:
if pos1<=each_cut[0] and pos2>=each_cut[1]:
return True
return False
def return_cutsite_edit(pats,cutsite):
pats=pats.split(";")
return_list=[]
for p in pats:
p_split=p.split("_")
if p_split[0]=="I":
pos=float(p_split[1])
if judge_in_cutsite(pos,cutsite):
return_list.append(p)
elif p_split[0]=="S":
pos=int(p_split[1])+len(p_split[2])-1
if judge_in_cutsite(pos,cutsite):
return_list.append(p)
elif p_split[0]=="D":
pos_1=int(p_split[1])
pos_2=int(p_split[2])
if judge_in_cutsite(pos_1,cutsite) or judge_in_cutsite(pos_2,cutsite) or judge_include_cutsite(pos_1,pos_2,cutsite):
return_list.append(p)
return ";".join(return_list)
def extract_valid_edits(table,cutsite,outfile):
table_df = pd.read_csv(table,sep="\t",header=None)
table_df[1]=table_df[1].apply(return_cutsite_edit,cutsite=cutsite)
table_df.to_csv(outfile,sep="\t",index=False,header=False)
return table_df
def generate_ins_set(extracted_pattern):
ins_set=set()
for row in extracted_pattern:
pat_split=row.split(";")
for each_pat in pat_split:
indelsub=each_pat.split("_")[0]
if indelsub=="I":
ins_set.add(each_pat)
return ins_set
def generate_ins_series(extracted_pattern):
ins_string=""
pat_split=extracted_pattern.split(";")
for each_pat in pat_split:
indelsub=each_pat.split("_")[0]
if indelsub=="I":
ins_string+=";"+each_pat
ins_string=re.sub("^;","",ins_string)
return ins_string
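# Illustrative sketch (made-up pattern): generate_ins_series keeps only the
# insertion events of a pattern string, e.g.
#   generate_ins_series("S_2_T;I_2.5_G;D_5_6")  # -> "I_2.5_G"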
def generate_delsub(extracted_pattern,amplicon_length):
pat_split=extracted_pattern.split(";")
pointer=-1
matching_string=""
for each_pat in pat_split:
indelsub=each_pat.split("_")[0]
if indelsub=="I":
continue
pos=int(each_pat.split("_")[1])
additional=each_pat.split("_")[2]
if indelsub=="D":
additional=int(additional)
if additional>=amplicon_length:
additional=amplicon_length-1
matching_string+="=;"*(pos-pointer-1)
matching_string+="D;"*(additional-pos+1)
pointer=additional
elif indelsub=="S":
matching_string+="=;"*(pos-pointer-1)
p=[i for i in additional]
p=";".join(p)+";"
matching_string+=p
pointer=pos+len(additional)-1
if pointer < amplicon_length-1:
        # scGST case: the read length is fixed, so a deletion at the 3' end is
        # treated as a failure of the read to reach that far and filled with matches
        matching_string+="=;"*((amplicon_length-1)-pointer)
        # PRESUME case: the full amplicon length is covered, so fill with deletions instead
        #matching_string+="D;"*((amplicon_length-1)-pointer)
matching_string=re.sub(";$","",matching_string)
return matching_string
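# Illustrative sketch (made-up pattern): with amplicon_length=10,
#   generate_delsub("S_2_TG;D_5_6", 10)
# returns "=;=;T;G;=;D;D;=;=;=", i.e. one character per reference position with
# "=" for matches, mismatching bases spelled out, and "D" for deletions
# (insertions are handled separately via generate_ins_series/generate_ins_table).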
def generate_ins_table(insertion_pattern,list_ins):
insertion_pattern=insertion_pattern.split(";")
binary_list=[]
for ref_ins in list_ins:
if ref_ins in insertion_pattern:
binary_list.append("1")
else:
binary_list.append("0")
return ";".join(binary_list)
if __name__ == "__main__":
opt=get_option()
sam=opt.sam
annotation=opt.annotation
idenrate=opt.idenrate
identity=opt.identity
trim5prime=opt.trim5prime
primer5=opt.primer5
primer5ed=opt.primer5ed
noremove=opt.noRemoveFiles
outname=opt.outname
extend=opt.extend
amplicon_length=opt.amplicon_length
no_annot=opt.no_annot
outdir=re.sub("/$","",opt.outdir)+"/"
outfile=outdir+"/"+outname+".MUTATIONTABLE.tsv"
ref_annot={}
with open(annotation,mode="rt") as r:
for nrow,line in enumerate(r):
if nrow>0:
line=line.split(",")
ref_annot[line[0]]=list(map(int,line[1:]))
parsedMutationPattern=parseSam(sam)
df_parsed_from_sam=pd.DataFrame.from_dict(parsedMutationPattern,orient="index")
ser_concat_parsedSam=df_parsed_from_sam["cigar"].str.cat(df_parsed_from_sam["seq"],sep=";")
ser_concat_parsedSam=ser_concat_parsedSam.map(lambda x:alignmentQC(x,ref_annot,primer5ed,idenrate,identity))
concat_df= | pd.DataFrame(ser_concat_parsedSam) | pandas.DataFrame |
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns(Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
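    # 0b00101101 read LSB-first marks rows 0, 2, 3 and 5 as valid,
    # so rows 1 and 4 are null (null_count == 2 below)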
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
    # valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
    # numerical columns are upcast to float in cudf.DataFrame.to_pandas(),
    # which casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
# For some reason PyArrow to_pandas() converts to numpy array and has
# better type compatibility
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
q = q if isinstance(q, list) else [q]
assert_eq(
            pdf.quantile(q, numeric_only=False),
gdf.quantile(q, numeric_only=False),
)
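# In the numeric_only=False branch above, a scalar q is wrapped in a list so
# that both libraries return a DataFrame (a scalar q would yield a Series),
# which keeps the datetime and timedelta columns in the comparison.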
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's hash of a string can vary between runs, which occasionally
    # makes enc_with_name_arr and enc_arr identical; there is no reliable way
    # to pin the hash of a string name, so an integer name is used to get a
    # deterministic value back from hash().
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
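# hash_encode buckets each value's hash into the range [0, num_features),
# which the bound checks above verify; use_name=True additionally mixes the
# Series name into the hash, so the first encoded value is expected to differ
# between the two calls.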
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
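# Masking a DataFrame with a boolean DataFrame keeps the original shape and
# nulls out entries where the mask is False or missing (as pandas does via
# df.where), which is why the values are compared after fillna(-1) above.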
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
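# Series.digitize mirrors np.digitize: each value is mapped to the index of
# the bin it falls into, with `right` controlling whether the right bin edge
# is inclusive. For example, np.digitize([1, 5, 10], bins=[2, 8]) returns
# [0, 1, 2].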
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Cast pandas explicitly: None entries would otherwise produce an
    # `object`-dtype Series.
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
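# to_frame(name=...) uses the supplied name, even a non-string such as False,
# as the resulting column label; the identity assertion above confirms the
# label object is carried through unchanged.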
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_multiindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas only added ignore_index to sort_index in v1.0, so the expected
    # result is computed without it and the reset is applied manually below.
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
    # Work around an Arrow 0.14.1 bug in NaN handling by comparing as int64.
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
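# DataFrame.round accepts a scalar, a dict, or a Series of per-column decimal
# counts; columns absent from a dict/Series are left unrounded, which is the
# pandas behavior the parametrization above exercises.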
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
# Pandas treats `None` in object type columns as True for some reason, so
# replacing with `False`
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
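# all(bool_only=True) restricts the reduction to boolean columns; for mixed
# frames without a boolean "b" column, the test instead asserts that cudf
# raises NotImplementedError for bool_only=False and for level-based
# reductions.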
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
    # The frame's total footprint should decompose into the index plus the
    # sum of its column buffers.
    cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
    assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
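# Casting a datetime Series to str keeps the full precision of its storage
# unit, so the expected strings above carry 0, 3, 6, or 9 fractional digits
# for [s], [ms], [us], and [ns] respectively.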
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
            # The misspelling "squences" is kept verbatim so the pattern
            # matches the error text actually raised by the library.
            expected_error_message=re.escape(
                "values need to be a Multi-Index or set/list-like tuple "
                "squences when `level=None`."
            ),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
except ValueError as e:
if str(e) == "Lengths must match.":
                # pytest.xfail() accepts only a reason string; the length
                # mismatch is a known pandas bug prior to pandas 1.1.0.
                if not PANDAS_GE_110:
                    pytest.xfail(
                        "https://github.com/pandas-dev/pandas/issues/34256"
                    )
                raise
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
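# Illustrative sketch (not part of the original test-suite): DataFrame.isin
# treats a dict as per-column value lists with no index alignment, while a
# DataFrame argument must match on both index and columns. The frame below is
# made-up data used only to show that difference.
def _isin_alignment_sketch():
    df = pd.DataFrame(
        {"num_legs": [8, 2], "num_wings": [0, 2]}, index=["spider", "falcon"]
    )
    by_dict = df.isin({"num_wings": [0, 3]})  # checked column-wise, index ignored
    by_frame = df.isin(
        pd.DataFrame({"num_legs": [8, 2]}, index=["spider", "falcon"])
    )  # True only where index, column, and value all line up
    return by_dict, by_frame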
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
# Pandas uses NaN and typecasts to float64 if there's missing values on
# alignment, so need to typecast to float64 for equality comparison
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
| pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}) | pandas.DataFrame |
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import math
import time
import ruptures as rpt
from datetime import datetime
data = pd.read_csv("test_anom2.csv")
print(data.head())
data.set_index(np.arange(len(data.index)), inplace=True)
def check_if_shift_v0(data, column_name, start_index, end_index, check_period):
""" using median to see if it changes significantly in shift """
period_before = data[column_name][start_index - check_period: start_index]
period_in_the_middle = data[column_name][start_index:end_index]
period_after = data[column_name][end_index: end_index + check_period]
period_before_median = abs(np.nanmedian(period_before))
period_in_the_middle_median = abs(np.nanmedian(period_in_the_middle))
period_after_median = abs(np.nanmedian(period_after))
upper_threshold = period_in_the_middle_median * 2
down_threshold = period_in_the_middle_median / 2
if (upper_threshold < period_before_median and upper_threshold < period_after_median) or\
(down_threshold > period_before_median and down_threshold > period_after_median):
return True
else:
return False
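# Minimal usage sketch (made-up data, not from the original script): build a
# series whose 60-sample window [200, 260) sits well below its surroundings,
# so the shift check should return True. Assumes the positional integer index
# set up by set_index(np.arange(...)) above.
def _check_if_shift_demo():
    demo = pd.DataFrame({"flow": np.full(400, 4.0)})
    demo.loc[200:259, "flow"] = 1.0  # depressed window
    return check_if_shift_v0(demo, "flow", start_index=200, end_index=260, check_period=50)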
def prepare_data_to_test(data, data_name: str):
""" datetime type """
data["time"] = pd.to_datetime(data.time)
""" sort values """
data.sort_values(by=['time'], inplace=True)
""" set index """
data.set_index("time", inplace=True)
""" drop duplicate time"""
data = data[~data.index.duplicated(keep='first')]
""" resample """
data = data.resample('5T').pad()
""" reset index """
data.reset_index("time", inplace=True)
""" rename column names """
data.columns = ['time', data_name]
data.drop_duplicates(subset="time", inplace=True)
return data
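# Usage sketch (hypothetical input; assumes the raw frame has a "time" column
# plus exactly one value column, which is what the renaming step expects).
# The helper returns an evenly spaced 5-minute series named after data_name.
def _prepare_data_demo():
    raw = pd.DataFrame(
        {
            "time": ["2021-01-01 00:10", "2021-01-01 00:00", "2021-01-01 00:05"],
            "value": [3.0, 1.0, 2.0],
        }
    )
    return prepare_data_to_test(raw, data_name="flow")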
def prepare_data_dp(data, column_to_fix):
data['time'] = pd.to_datetime(data['time'])
data['hour'] = pd.to_datetime(data.time, unit='m').dt.strftime('%H:%M')
daily_pattern = data[column_to_fix].groupby(by=[data.time.map(lambda x: (x.hour, x.minute))]).mean()
daily_pattern = daily_pattern.reset_index()
daily_pattern['hour'] = pd.date_range('00:00:00', periods=len(daily_pattern), freq='5min')
daily_pattern['hour'] = pd.to_datetime(daily_pattern.hour, unit='m').dt.strftime('%H:%M')
daily_pattern = daily_pattern[['hour', column_to_fix]]
data['dp'] = data['hour']
mapping = dict(daily_pattern[['hour', column_to_fix]].values)
final_ = dict(data[['dp', column_to_fix]].values)
z = {**final_, **mapping}
data.index = np.arange(len(data))
data['daily_pattern_flow'] = data['dp'].map(z)
return data
class DpMissingValuesV0:
def __init__(self, df_data, fit_data):
self.data = df_data
self.fit_period = fit_data
@staticmethod
def prepare_data_dp(data, column_to_fix):
data['time'] = pd.to_datetime(data['time'])
data['hour'] = | pd.to_datetime(data.time, unit='m') | pandas.to_datetime |
import pandas as pd
import numpy as np
import os
try:
rows, columns = os.popen('stty size', 'r').read().split()
pd.set_option('display.width', int(columns))
pd.set_option('display.max_columns', 600)
pd.set_option('display.max_rows', 1000)
except Exception:
    print("Could not detect a terminal size (probably running on a server); "
          "using pandas display defaults.")
read_public = pd.read_csv('tmp/READ_useful_final_public_merge_cnv_purity.csv', sep='\t')
read_protected = pd.read_csv('tmp/READ_useful_final_qc_merge_cnv_purity.csv', sep='\t')
coad_public = pd.read_csv('tmp/COAD_useful_final_public_merge_cnv_purity.csv', sep='\t')
coad_protected = | pd.read_csv('tmp/COAD_useful_final_qc_merge_cnv_purity.csv', sep='\t') | pandas.read_csv |
import warnings
warnings.filterwarnings("ignore")
import os
import math
import numpy as np
import tensorflow as tf
import pandas as pd
import argparse
import json
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import TSNE
from scipy.stats import spearmanr, pearsonr
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
plt.rcParams["figure.figsize"] = (8,8)
import seaborn as sns
from umap import UMAP
import configparser as cp
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
###################################################
# Read in configuration file for network parameters
###################################################
config = cp.ConfigParser()
config.read('config.py')
rna_hidden_dim_1 = int(config.get('AE', 'RNA_Layer1'))
rna_hidden_dim_2 = int(config.get('AE', 'RNA_Layer2'))
rna_latent_dim = int(config.get('AE', 'RNA_Latent'))
translate_hidden_dim = int(config.get('AE', 'Translate_Layer1'))
l2_norm_dec = float(config.get('AE', 'L2_Norm_AE'))
l2_norm_kl = float(config.get('AE', 'L2_Norm_KL'))
learning_rate_kl = float(config.get('AE', 'Learning_Rate_KL'))
learning_rate_ae = float(config.get('AE', 'Learning_Rate_AE'))
Z_dim = int(config.get('CGAN', 'Z_dim'))
gen_dim1 = int(config.get('CGAN', 'Gen_Layer1'))
gen_dim2 = int(config.get('CGAN', 'Gen_Layer2'))
det_dim = int(config.get('CGAN', 'Det_Layer1'))
learning_rate_cgan = float(config.get('CGAN', 'Learning_Rate'))
l2_norm_cgan = float(config.get('CGAN', 'L2_Lambda'))
epoch_cgan = int(config.get('CGAN', 'Max_Epoch'))
###################################################
# Read in command line arguments
###################################################
parser = argparse.ArgumentParser(description='Perform Pseudocell Tracer Algorithm')
parser.add_argument('-d', '--data', help='Tab delimited file representing matrix of samples by genes', required=True)
parser.add_argument('-s', '--side_data', help= 'Tab delimited file for side information to be used', required=True)
parser.add_argument('-o', '--output', help='Output directory', required=True)
parser.add_argument('-p', '--plot_style', help= 'Plotting style to be used (UMAP or tSNE)', default="UMAP")
parser.add_argument('-n', '--num_cells_gen', help='Number of pseudocells to generate at each step', default=100, type=int)
parser.add_argument('-k', '--num_steps', help='Number of pseudocell states', default=100, type=int)
parser.add_argument('-a', '--start_states', help='Starting pseudocell state(s)', nargs="+", required=True)
parser.add_argument('-b', '--end_states', help='Ending pseudocell state(s)', nargs="+", required=True)
parser.add_argument('-g', '--genes_to_plot', help='Genes to plot in pseudocell trajectory', nargs="+")
args = parser.parse_args()
dset = args.data
sset = args.side_data
out = args.output
plot_method = args.plot_style
num_cells_gen = args.num_cells_gen
num_steps = args.num_steps
start_states = args.start_states
end_states = args.end_states
genes_to_plot = args.genes_to_plot
###################################################
# Function to plot
###################################################
def plot_data(rna, side):
if plot_method == "tSNE":
tsne = TSNE()
results = np.array(tsne.fit_transform(rna.values))
if plot_method == "UMAP":
umap = UMAP()
results = np.array(umap.fit_transform(rna.values))
plot_df = pd.DataFrame(data=results, columns=["X","Y"])
cl = np.argmax(side.values, axis=1)
plot_df["subtype"] = [side.columns[x] for x in cl]
return sns.scatterplot(x="X", y="Y", hue="subtype", data=plot_df)
###################################################
# Load Data
###################################################
rna_data = pd.read_csv(dset, header=0, index_col=0, sep="\t")
side_data = pd.read_csv(sset, index_col=0, header=0, sep="\t")
###################################################
# Filter common samples
###################################################
sample_list = np.intersect1d(rna_data.columns.values, side_data.columns.values)
gene_list = rna_data.index.values
side_data = side_data.filter(sample_list, axis=1)
rna_data = rna_data.filter(sample_list, axis=1)
rna_data = rna_data.transpose()
side_data = side_data.transpose()
print("Loaded Data...")
###################################################
# Create output directory
###################################################
print("Creating output directory...")
try:
os.mkdir("results")
except FileExistsError:
    pass
out_dir = "results/" + out
try:
os.mkdir(out_dir)
print("Directory " , out_dir , " Created...")
except FileExistsError:
print("Warning: Directory " , out , " already exists...")
with open(out_dir + '/run_parameters.txt', 'w') as f:
json.dump(args.__dict__, f, indent=2)
with open(out_dir + '/run_network_config.txt', 'w') as f:
config.write(f)
###################################################
# Plot input data
###################################################
scatter = plot_data(rna_data, side_data)
plt.title("Input Data (" + str(plot_method) + ")")
plt.savefig(out_dir + "/input_scatter.png")
plt.clf()
###################################################
# Train supervised encoder
###################################################
print("Training supervised encoder...")
scaler = StandardScaler().fit(rna_data.values)
norm_rna_data = np.clip(scaler.transform(rna_data.values),-3,3)
reg_kl = tf.keras.regularizers.l2(l2_norm_kl)
kl_model = tf.keras.Sequential([
tf.keras.layers.Dense(rna_hidden_dim_1, activation='relu',
kernel_regularizer=reg_kl, input_shape=(len(gene_list),)),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(rna_hidden_dim_2, activation='relu',
kernel_regularizer=reg_kl),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(rna_latent_dim, activation='sigmoid', name='latent_layer',
kernel_regularizer=reg_kl),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(translate_hidden_dim, activation='relu',
kernel_regularizer=reg_kl),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(side_data.values.shape[1], activation=tf.nn.softmax,
kernel_regularizer=reg_kl, name='relative_prediction')
])
es_cb_kl = tf.keras.callbacks.EarlyStopping('val_kullback_leibler_divergence', patience=100, restore_best_weights=True)
kl_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate_kl), loss=tf.keras.losses.KLD, metrics=['kullback_leibler_divergence'])
kl_model.fit(norm_rna_data, side_data.values, epochs=10000, callbacks=[es_cb_kl], validation_split=0.1, verbose=0, batch_size=1024)
kl_model.fit(norm_rna_data, side_data.values, epochs=5, verbose=0, batch_size=1024)
kl_model.save(out_dir + "/encoder.h5")
###################################################
# Get latent data
###################################################
print("Getting latent data...")
latent_model = tf.keras.Model(inputs=kl_model.input, outputs=kl_model.get_layer('latent_layer').output)
latent = latent_model.predict(norm_rna_data)
latent_df = pd.DataFrame(data=latent, index=sample_list)
latent_df.to_csv("latent_values.tsv", sep="\t")
scatter = plot_data(latent_df, side_data)
plt.title("Latent Data (" + str(plot_method) + ")")
plt.savefig(out_dir + "/latent_scatter.png")
plt.clf()
###################################################
# Train decoder
###################################################
print("Training decoder...")
reg_dec = tf.keras.regularizers.l2(l2_norm_dec)
dec_model = tf.keras.Sequential([
tf.keras.layers.Dense(rna_hidden_dim_2, activation='relu', input_shape=(rna_latent_dim,),
kernel_regularizer=reg_dec, bias_regularizer=reg_dec),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(rna_hidden_dim_1, activation='relu',
kernel_regularizer=reg_dec, bias_regularizer=reg_dec),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Dense(len(gene_list), activation=None, name='rna_reconstruction',
kernel_regularizer=reg_dec, bias_regularizer=reg_dec)
])
es_cb_dec = tf.keras.callbacks.EarlyStopping('val_mean_squared_error', patience=100, restore_best_weights=True)
dec_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate_ae), loss=tf.keras.losses.MSE, metrics=['mse'])
dec_model.fit(latent, norm_rna_data, epochs=100000, callbacks=[es_cb_dec], validation_split=0.1,
verbose=0, batch_size=1024)
dec_model.fit(latent, norm_rna_data, epochs=5, verbose=0, batch_size=1024)
dec_model.save(out_dir + "/decoder.h5")
###################################################
# Get reconstructed values
###################################################
decoded = dec_model.predict(latent)
decoded_df = | pd.DataFrame(data=decoded, index=sample_list, columns=gene_list) | pandas.DataFrame |
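# Illustrative follow-up (an assumption, not part of the original pipeline):
# the models saved above as encoder.h5 / decoder.h5 can be reloaded later to
# encode and reconstruct new expression profiles, provided the same
# StandardScaler fitted above is reused.
def _reload_models_sketch(saved_dir, new_rna_values, fitted_scaler):
    encoder = tf.keras.models.load_model(saved_dir + "/encoder.h5")
    decoder = tf.keras.models.load_model(saved_dir + "/decoder.h5")
    latent_only = tf.keras.Model(
        inputs=encoder.input, outputs=encoder.get_layer("latent_layer").output
    )
    normed = np.clip(fitted_scaler.transform(new_rna_values), -3, 3)
    return decoder.predict(latent_only.predict(normed))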
"""Extended DataFrame functionality."""
from typing import Any, Iterator, Optional, Sequence, Text, Tuple, Union, cast
import numpy as np
import pandas as pd
from snmp_fetch.utils import cuint8_to_int
from .types import ip_address as ip
def column_names(
n: Optional[int] = None, alphabet: Sequence[Text] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
) -> Iterator[Text]:
"""Generate unique temporary column names."""
base_gen = column_names(alphabet=alphabet)
base = ''
letters = alphabet
while True:
if n is not None:
if n <= 0:
return
n = n - 1
if not letters:
base = next(base_gen) # pylint: disable=stop-iteration-return # infinite generator
letters = alphabet
column, letters = letters[0], letters[1:]
yield base + column
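# Quick illustration (not part of the public API): the generator yields
# 'A'..'Z' and then recurses into two-letter names, so the first 28 names end
# with 'AA' and 'AB'.
def _column_names_example():
    """Return the first 28 generated names: 'A' .. 'Z', 'AA', 'AB'."""
    return list(column_names(28))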
@pd.api.extensions.register_dataframe_accessor('inet')
class InetDataFrameAccessor:
# pylint: disable=too-few-public-methods
"""Inet DataFrame accessor."""
obj: Any
def __init__(self, obj: Any) -> None:
"""Initialize the pandas extension."""
self.obj = obj
def to_interface_address(self) -> Any:
"""Return a composable function to convert an IP address and mask/prefix to a network."""
def _to_int_address(
objs: Tuple[ip.IP_ADDRESS_T, Union[int, ip.IP_ADDRESS_T]]
) -> ip.IP_INTERFACE_T:
ip_address, cidr_or_mask = objs
if isinstance(cidr_or_mask, ip.IPv4Address):
cidr_or_mask = ip.IPV4_PREFIX_LOOKUP_TABLE[int(cidr_or_mask)]
if isinstance(cidr_or_mask, ip.IPv6Address):
cidr_or_mask = ip.IPV6_PREFIX_LOOKUP_TABLE[int(cidr_or_mask)]
return cast(
ip.IP_INTERFACE_T,
ip.ip_interface((ip_address, cidr_or_mask))
)
if self.obj.empty:
return pd.Series([])
return self.obj.apply(_to_int_address, axis=1)
def to_cidr_address(self, strict: bool = False) -> Any:
"""Return a composable function to convert an IP address and mask/prefix to a network."""
def _to_cidr_address(
objs: Tuple[ip.IP_ADDRESS_T, Union[int, ip.IP_ADDRESS_T]]
) -> ip.IP_NETWORK_T:
ip_address, cidr_or_mask = objs
if isinstance(cidr_or_mask, ip.IPv4Address):
cidr_or_mask = ip.IPV4_PREFIX_LOOKUP_TABLE[int(cidr_or_mask)]
if isinstance(cidr_or_mask, ip.IPv6Address):
cidr_or_mask = ip.IPV6_PREFIX_LOOKUP_TABLE[int(cidr_or_mask)]
return cast(
ip.IP_NETWORK_T,
ip.ip_network((ip_address, cidr_or_mask), strict=strict)
)
if self.obj.empty:
return pd.Series([])
return self.obj.apply(_to_cidr_address, axis=1)
@pd.api.extensions.register_series_accessor('inet')
class InetSeriesAccessor:
# pylint: disable=too-few-public-methods
"""Inet Series accessor."""
obj: Any
buffer: 'InetSeriesAccessor.InetSeriesBufferAccessor'
class InetSeriesBufferAccessor:
"""Inet Series buffer accessor."""
obj: Any
def __init__(self, obj: Any) -> None:
"""Initialize the pandas extension."""
self.obj = obj
def __getitem__(self, ss: Union[int, slice, Tuple[Union[int, slice], ...]]) -> Any:
"""Slice the buffer."""
if isinstance(ss, (int, slice)):
return self.obj.apply(lambda x: x[ss])
if isinstance(ss, tuple):
return pd.DataFrame(
self.obj.apply(lambda x: [x[s] for s in ss]).tolist(),
columns=list(column_names(len(ss))),
index=self.obj.index
)
raise RuntimeError(f'Not a valid input slice: {ss}')
def chunk(self) -> Any:
"""Slice the buffer by a sized parameter."""
return pd.DataFrame(
self.obj.apply(lambda x: (x[1:int(x[0])+1], x[int(x[0])+1:])).tolist(),
columns=list(column_names(2)),
index=self.obj.index
)
def __init__(self, obj: Any) -> None:
"""Initialize the pandas extension."""
self.obj = obj
self.buffer = self.InetSeriesBufferAccessor(obj)
def to_inet_address(self, default_zone: Any = None) -> Any:
"""Extract an inet address with leading address size and possible zone."""
def _to_inet_address(buffer: np.ndarray) -> Tuple[ip.IP_ADDRESS_T, Any]:
if len(buffer) == 4:
return (
ip.IPv4Address(cuint8_to_int(buffer)),
default_zone
)
if len(buffer) == 16:
return (
ip.IPv6Address(cuint8_to_int(buffer)),
default_zone
)
if len(buffer) == 8:
return (
ip.IPv4Address(cuint8_to_int(buffer[:4])),
cuint8_to_int(buffer[4:])
)
if len(buffer) == 20:
return (
ip.IPv6Address(cuint8_to_int(buffer[:16])),
cuint8_to_int(buffer[16:])
)
raise TypeError(f'INET Address size not understood: {buffer}')
return pd.DataFrame(
self.obj.apply(_to_inet_address).tolist(),
columns=list(column_names(2)),
index=self.obj.index
)
def to_object_identifier(self) -> Any:
"""Extract an object identifier buffer as a string."""
def _to_object_identifier(buffer: np.ndarray) -> Text:
return '.'+'.'.join(buffer.astype(str))
return self.obj.apply(_to_object_identifier)
def to_timedelta(
self, denominator: int = 1, unit: Text = 'seconds'
) -> Any:
"""Return a composable function to convert to a time delta."""
return | pd.to_timedelta(self.obj // denominator, unit=unit) | pandas.to_timedelta |
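# Usage sketch for the accessors above (hypothetical buffers): a 4-byte uint8
# buffer decodes to a plain IPv4 address, an 8-byte buffer to an IPv4 address
# plus a zone/index, and an integer buffer can be rendered as an object
# identifier string.
def _inet_accessor_demo():
    buffers = pd.Series(
        [
            np.array([192, 0, 2, 1], dtype=np.uint8),
            np.array([192, 0, 2, 2, 0, 0, 0, 7], dtype=np.uint8),
        ]
    )
    addresses = buffers.inet.to_inet_address(default_zone=0)
    oids = pd.Series([np.array([1, 3, 6, 1, 2, 1])]).inet.to_object_identifier()
    return addresses, oids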
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta, datetime
plt.style.use('seaborn')
# # download the new datasheet form JH
# url ="https://s3-us-west-1.amazonaws.com/starschema.covid/JHU_COVID-19.csv"
# url ="https://covid.ourworldindata.org/data/ecdc/full_data.csv"
# output_directory = "."
printimage = 'on'  # 'on' or 'off'
# read a csv to DataFrame with pandas
data = pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/jhu/full_data.csv")
# data = pd.read_csv('/home/claudio/Documents/profissao_DS/projetos/corona/JHU_COVID-19.csv')
# data = pd.read_csv('/home/claudio/Documents/profissao_DS/projetos/corona/full_data.csv')
data.index = pd.to_datetime(data.date)
## select data for Brazil
ctry = data[data["location"] == 'Brazil']
tested = ctry.copy()
dateCase = []
# do fitting
ydata = tested.last("60W").resample('1W').mean().total_cases.values
# ydata = tested.last("3W").total_cases.values
xdata = np.arange(len(ydata))
ndate = tested.last("1D").index[0]
for i in range(5):
dateCase.append((ndate + timedelta(days=i)).date())
dateCase = | pd.to_datetime(dateCase) | pandas.to_datetime |
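# The original projection step is not included in this excerpt; as a
# stand-in, the sketch below fits a 2nd-degree polynomial to the weekly
# averages and evaluates it over the horizon collected in dateCase. It is
# only an illustration of how xdata, ydata and dateCase fit together.
def _naive_projection(x, y, n_future=5):
    coeffs = np.polyfit(x, y, deg=2)
    future_x = np.arange(len(x), len(x) + n_future)
    return np.polyval(coeffs, future_x)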
# -*- coding: utf-8 -*-
from __future__ import annotations
import typing
import warnings
import numpy as np
import pandas as pd
import scipy.signal
from endaq.calc import utils
def _np_histogram_nd(array, bins=10, weights=None, axis=-1, **kwargs):
"""Compute histograms over a specified axis."""
array = np.asarray(array)
bins = np.asarray(bins)
weights = np.asarray(weights) if weights is not None else None
# Collect all ND inputs
nd_params = {}
(nd_params if array.ndim > 1 else kwargs)["a"] = array
(nd_params if bins.ndim > 1 else kwargs)["bins"] = bins
if weights is not None:
(nd_params if weights.ndim > 1 else kwargs)["weights"] = weights
if len(nd_params) == 0:
return np.histogram(**kwargs)[0]
# Move the desired axes to the back
for k, v in nd_params.items():
nd_params[k] = np.moveaxis(v, axis, -1)
# Broadcast ND arrays to the same shape
ks, vs = zip(*nd_params.items())
vs_broadcasted = np.broadcast_arrays(*vs)
for k, v in zip(ks, vs_broadcasted):
nd_params[k] = v
# Generate output
bins = nd_params.get("bins", bins)
result_shape = ()
if len(nd_params) != 0:
result_shape = v.shape[:-1]
if bins.ndim >= 1:
result_shape = result_shape + (bins.shape[-1] - 1,)
else:
result_shape = result_shape + (bins,)
result = np.empty(
result_shape,
dtype=(weights.dtype if weights is not None else int),
)
loop_shape = v.shape[:-1]
for nd_i in np.ndindex(*loop_shape):
nd_kwargs = {k: v[nd_i] for k, v in nd_params.items()}
result[nd_i], edges = np.histogram(**nd_kwargs, **kwargs)
result = np.moveaxis(result, -1, axis)
return result
def welch(
df: pd.DataFrame,
bin_width: float = 1,
scaling: typing.Literal[None, "density", "spectrum", "parseval"] = None,
**kwargs,
) -> pd.DataFrame:
"""
Perform `scipy.signal.welch` with a specified frequency spacing.
:param df: the input data
:param bin_width: the desired width of the resulting frequency bins, in Hz;
defaults to 1 Hz
:param scaling: the scaling of the output; `"density"` & `"spectrum"`
correspond to the same options in `scipy.signal.welch`; `"parseval"`
will maintain the "energy" between the input & output, s.t.
`welch(df, scaling="parseval").sum(axis="rows")` is roughly equal to
`df.abs().pow(2).sum(axis="rows")`
:param kwargs: other parameters to pass directly to `scipy.signal.welch`
:return: a periodogram
.. seealso::
`SciPy Welch's method <https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.welch.html>`_
Documentation for the periodogram function wrapped internally.
        `Parseval's Theorem <https://en.wikipedia.org/wiki/Parseval's_theorem>`_
- the theorem relating the RMS of a time-domain signal to that of its
frequency spectrum
"""
dt = utils.sample_spacing(df)
fs = 1 / dt
if scaling == "parseval":
kwargs["scaling"] = "density"
elif scaling is not None:
kwargs["scaling"] = scaling
freqs, psd = scipy.signal.welch(
df.values, fs=fs, nperseg=int(fs / bin_width), **kwargs, axis=0
)
if scaling == "parseval":
psd = psd * freqs[1]
return pd.DataFrame(
psd, index=pd.Series(freqs, name="frequency (Hz)"), columns=df.columns
)
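# Usage sketch (synthetic signal, assuming utils.sample_spacing accepts a
# seconds-valued index): with scaling="parseval" the column-wise sum of the
# returned PSD approximately equals the signal's mean-square value, which is
# 0.5 for a unit-amplitude sine.
def _welch_parseval_demo():
    fs = 1000.0
    t = np.arange(0, 2, 1 / fs)
    sig = pd.DataFrame(
        {"x": np.sin(2 * np.pi * 50 * t)},
        index=pd.Series(t, name="time (s)"),
    )
    psd = welch(sig, bin_width=1, scaling="parseval")
    # psd.sum(axis="rows") comes out close to 0.5 here
    return psd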
def differentiate(df: pd.DataFrame, n: float = 1) -> pd.DataFrame:
"""
Perform time-domain differentiation on periodogram data.
:param df: a periodogram
:param n: the time derivative order; negative orders represent integration
:return: a periodogram of the time-derivated data
"""
# Involves a division by zero for n < 0
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
factor = (2 * np.pi * df.index.values) ** (2 * n) # divide by zero
if n < 0:
factor[df.index == 0] = 0
return df * factor[..., np.newaxis]
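# Worked example of the scaling above (illustration only): integrating once
# (n = -1) divides every bin by (2*pi*f)**2, so a flat acceleration PSD turns
# into a 1/f**2 velocity PSD with the DC bin zeroed out.
def _differentiate_demo():
    freqs = np.array([0.0, 1.0, 2.0, 4.0])
    accel_psd = pd.DataFrame(
        {"x": np.ones_like(freqs)},
        index=pd.Series(freqs, name="frequency (Hz)"),
    )
    return differentiate(accel_psd, n=-1)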
def to_jagged(
df: pd.DataFrame,
freq_splits: np.array,
agg: typing.Union[
typing.Literal["mean", "sum"],
typing.Callable[[np.ndarray, int], float],
] = "mean",
) -> pd.DataFrame:
"""
Calculate a periodogram over non-uniformly spaced frequency bins.
:param df: the returned values from `endaq.calc.psd.welch`
:param freq_splits: the boundaries of the frequency bins; must be strictly
increasing
:param agg: the method for aggregating values into bins; 'mean' preserves
the PSD's area-under-the-curve, 'sum' preserves the PSD's "energy"
:return: a periodogram with the given frequency spacing
"""
freq_splits = np.asarray(freq_splits)
if len(freq_splits) < 2:
raise ValueError("need at least two frequency bounds")
if not np.all(freq_splits[:-1] <= freq_splits[1:]):
raise ValueError("frequency bounds must be strictly increasing")
# Check that PSD samples do not skip any frequency bins
spacing_test = np.diff(np.searchsorted(freq_splits, df.index))
if np.any(spacing_test < 1):
warnings.warn(
"empty frequency bins in re-binned PSD; "
"original PSD's frequency spacing is too coarse",
RuntimeWarning,
)
if isinstance(agg, str):
if agg not in ("sum", "mean"):
raise ValueError(f'invalid aggregation mode "{agg}"')
# Reshape frequencies for histogram function
f_ndim = np.broadcast_to(df.index.to_numpy()[..., np.newaxis], df.shape)
# Calculate sum via histogram function
psd_jagged = _np_histogram_nd(f_ndim, bins=freq_splits, weights=df, axis=0)
# Adjust values for mean calculation
if agg == "mean":
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning) # x/0
psd_jagged = np.nan_to_num( # <- fix divisions by zero
psd_jagged
/ np.histogram(df.index, bins=freq_splits)[0][..., np.newaxis]
)
else:
psd_binned = np.split(df, np.searchsorted(df.index, freq_splits), axis=0)[1:-1]
psd_jagged = np.stack([agg(a, axis=0) for a in psd_binned], axis=0)
return pd.DataFrame(
psd_jagged,
index=pd.Series((freq_splits[1:] + freq_splits[:-1]) / 2, name=df.index.name),
columns=df.columns,
)
def to_octave(
df: pd.DataFrame, fstart: float = 1, octave_bins: float = 12, **kwargs
) -> pd.DataFrame:
"""
Calculate a periodogram over log-spaced frequency bins.
:param df: the returned values from `endaq.calc.psd.welch`
:param fstart: the first frequency bin, in Hz; defaults to 1 Hz
:param octave_bins: the number of frequency bins in each octave; defaults
to 12
:param kwargs: other parameters to pass directly to `to_jagged`
:return: a periodogram with the given logarithmic frequency spacing
"""
max_f = df.index.max()
octave_step = 1 / octave_bins
center_freqs = 2 ** np.arange(
np.log2(fstart),
np.log2(max_f) + octave_step / 2,
octave_step,
)
freq_splits = 2 ** np.arange(
np.log2(fstart) - octave_step / 2,
np.log2(max_f) + octave_step,
octave_step,
)
assert len(center_freqs) + 1 == len(freq_splits)
result = to_jagged(df, freq_splits=freq_splits, **kwargs)
result.index = | pd.Series(center_freqs, name=df.index.name) | pandas.Series |
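# Usage sketch chaining the helpers above (synthetic data): compute a
# 1 Hz-spaced PSD, then rebin it into 1/3-octave bands. agg="sum" preserves
# each band's "energy", while agg="mean" preserves the area under the curve.
def _octave_psd_demo():
    fs = 1000.0
    t = np.arange(0, 4, 1 / fs)
    accel = pd.DataFrame(
        {"x": np.random.default_rng(0).normal(size=t.size)},
        index=pd.Series(t, name="time (s)"),
    )
    psd = welch(accel, bin_width=1, scaling="parseval")
    return to_octave(psd, fstart=10, octave_bins=3, agg="sum")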
"""
Evaluates the estimated results of the Segmentation dataset against the
ground truth (human annotated data).
.. autosummary::
:toctree: generated/
process
"""
import jams
from joblib import Parallel, delayed
import logging
import mir_eval
import numpy as np
import os
import pandas as pd
import six
# Local stuff
import msaf
from msaf.exceptions import NoReferencesError
import msaf.input_output as io
from msaf import utils
def print_results(results):
"""Print all the results.
Parameters
----------
results: pd.DataFrame
Dataframe with all the results
"""
if len(results) == 0:
logging.warning("No results to print!")
return
res = results.mean()
logging.info("Results:\n%s" % res)
def compute_results(ann_inter, est_inter, ann_labels, est_labels, bins,
est_file, weight=0.58):
"""Compute the results using all the available evaluations.
Parameters
----------
ann_inter : np.array
Annotated intervals in seconds.
est_inter : np.array
Estimated intervals in seconds.
ann_labels : np.array
Annotated labels.
est_labels : np.array
Estimated labels
bins : int
Number of bins for the information gain.
est_file : str
Path to the output file to store results.
weight: float
Weight the Precision and Recall values of the hit rate boundaries
differently (<1 will weight Precision higher, >1 will weight Recall
higher).
The default parameter (0.58) is the one proposed in (Nieto et al. 2014)
Return
------
results : dict
Contains the results of all the evaluations for the given file.
Keys are the following:
track_id: Name of the track
HitRate_3F: F-measure of hit rate at 3 seconds
HitRate_3P: Precision of hit rate at 3 seconds
HitRate_3R: Recall of hit rate at 3 seconds
HitRate_0.5F: F-measure of hit rate at 0.5 seconds
HitRate_0.5P: Precision of hit rate at 0.5 seconds
HitRate_0.5R: Recall of hit rate at 0.5 seconds
HitRate_w3F: F-measure of hit rate at 3 seconds weighted
HitRate_w0.5F: F-measure of hit rate at 0.5 seconds weighted
HitRate_wt3F: F-measure of hit rate at 3 seconds weighted and
trimmed
HitRate_wt0.5F: F-measure of hit rate at 0.5 seconds weighted
and trimmed
HitRate_t3F: F-measure of hit rate at 3 seconds (trimmed)
HitRate_t3P: Precision of hit rate at 3 seconds (trimmed)
            HitRate_t3R: Recall of hit rate at 3 seconds (trimmed)
HitRate_t0.5F: F-measure of hit rate at 0.5 seconds (trimmed)
HitRate_t0.5P: Precision of hit rate at 0.5 seconds (trimmed)
HitRate_t0.5R: Recall of hit rate at 0.5 seconds (trimmed)
            DevR2E: Median deviation of reference to estimation
            DevE2R: Median deviation of estimation to reference
            DevtR2E: Median deviation of reference to estimation (trimmed)
            DevtE2R: Median deviation of estimation to reference (trimmed)
D: Information gain
PWF: F-measure of pair-wise frame clustering
PWP: Precision of pair-wise frame clustering
PWR: Recall of pair-wise frame clustering
Sf: F-measure normalized entropy score
So: Oversegmentation normalized entropy score
Su: Undersegmentation normalized entropy score
"""
res = {}
# --Boundaries-- #
# Hit Rate standard
res["HitRate_3P"], res["HitRate_3R"], res["HitRate_3F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=3, trim=False)
res["HitRate_0.5P"], res["HitRate_0.5R"], res["HitRate_0.5F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=.5, trim=False)
# Hit rate trimmed
res["HitRate_t3P"], res["HitRate_t3R"], res["HitRate_t3F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=3, trim=True)
res["HitRate_t0.5P"], res["HitRate_t0.5R"], res["HitRate_t0.5F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=.5, trim=True)
# Hit rate weighted
_, _, res["HitRate_w3F"] = mir_eval.segment.detection(
ann_inter, est_inter, window=3, trim=False, beta=weight)
_, _, res["HitRate_w0.5F"] = mir_eval.segment.detection(
ann_inter, est_inter, window=.5, trim=False, beta=weight)
# Hit rate weighted and trimmed
_, _, res["HitRate_wt3F"] = mir_eval.segment.detection(
ann_inter, est_inter, window=3, trim=True, beta=weight)
_, _, res["HitRate_wt0.5F"] = mir_eval.segment.detection(
ann_inter, est_inter, window=.5, trim=True, beta=weight)
# Information gain
res["D"] = compute_information_gain(ann_inter, est_inter, est_file,
bins=bins)
# Median Deviations
res["DevR2E"], res["DevE2R"] = mir_eval.segment.deviation(
ann_inter, est_inter, trim=False)
res["DevtR2E"], res["DevtE2R"] = mir_eval.segment.deviation(
ann_inter, est_inter, trim=True)
# --Labels-- #
if est_labels is not None and ("-1" in est_labels or "@" in est_labels):
est_labels = None
if est_labels is not None and len(est_labels) != 0:
# Align labels with intervals
ann_labels = list(ann_labels)
est_labels = list(est_labels)
ann_inter, ann_labels = mir_eval.util.adjust_intervals(ann_inter,
ann_labels)
est_inter, est_labels = mir_eval.util.adjust_intervals(
est_inter, est_labels, t_min=0.0, t_max=ann_inter.max())
# Pair-wise frame clustering
res["PWP"], res["PWR"], res["PWF"] = mir_eval.segment.pairwise(
ann_inter, ann_labels, est_inter, est_labels)
# Normalized Conditional Entropies
res["So"], res["Su"], res["Sf"] = mir_eval.segment.nce(
ann_inter, ann_labels, est_inter, est_labels)
# Names
base = os.path.basename(est_file)
res["track_id"] = base[:-5]
res["ds_name"] = base.split("_")[0]
return res
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config,
bins=251, annotator_id=0):
"""Computes the results by using the ground truth dataset identified by
the annotator parameter.
Return
------
results : dict
Dictionary of the results (see function compute_results).
"""
if config["hier"]:
ref_times, ref_labels, ref_levels = \
msaf.io.read_hier_references(
ref_file, annotation_id=annotator_id,
exclude_levels=["segment_salami_function"])
else:
jam = jams.load(ref_file, validate=False)
ann = jam.search(namespace='segment_.*')[annotator_id]
ref_inter, ref_labels = ann.to_interval_values()
# Read estimations with correct configuration
est_inter, est_labels = io.read_estimations(est_file, boundaries_id,
labels_id, **config)
# Compute the results and return
logging.info("Evaluating %s" % os.path.basename(est_file))
if config["hier"]:
# Hierarchical
assert len(est_inter) == len(est_labels), "Same number of levels " \
"are required in the boundaries and labels for the hierarchical " \
"evaluation."
est_times = []
est_labels = []
# Sort based on how many segments per level
est_inter = sorted(est_inter, key=lambda level: len(level))
for inter in est_inter:
est_times.append(msaf.utils.intervals_to_times(inter))
# Add fake labels (hierarchical eval does not use labels --yet--)
est_labels.append(np.ones(len(est_times[-1]) - 1) * -1)
# Align the times
utils.align_end_hierarchies(est_times, ref_times, thres=1)
# To intervals
est_hier = [utils.times_to_intervals(times) for times in est_times]
ref_hier = [utils.times_to_intervals(times) for times in ref_times]
# Compute evaluations
res = {}
res["t_recall10"], res["t_precision10"], res["t_measure10"] = \
mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=10)
res["t_recall15"], res["t_precision15"], res["t_measure15"] = \
mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=15)
res["track_id"] = os.path.basename(est_file)[:-5]
return res
else:
# Flat
return compute_results(ref_inter, est_inter, ref_labels, est_labels,
bins, est_file)
def compute_information_gain(ann_inter, est_inter, est_file, bins):
"""Computes the information gain of the est_file from the annotated
intervals and the estimated intervals."""
ann_times = utils.intervals_to_times(ann_inter)
est_times = utils.intervals_to_times(est_inter)
return mir_eval.beat.information_gain(ann_times, est_times, bins=bins)
def process_track(file_struct, boundaries_id, labels_id, config,
annotator_id=0):
"""Processes a single track.
Parameters
----------
file_struct : object (FileStruct) or str
File struct or full path of the audio file to be evaluated.
boundaries_id : str
Identifier of the boundaries algorithm.
labels_id : str
Identifier of the labels algorithm.
config : dict
Configuration of the algorithms to be evaluated.
annotator_id : int
Number identifiying the annotator.
Returns
-------
one_res : dict
Dictionary of the results (see function compute_results).
"""
# Convert to file_struct if string is passed
if isinstance(file_struct, six.string_types):
file_struct = io.FileStruct(file_struct)
est_file = file_struct.est_file
ref_file = file_struct.ref_file
# Sanity check
assert os.path.basename(est_file)[:-4] == \
os.path.basename(ref_file)[:-4], "File names are different %s --- %s" \
% (os.path.basename(est_file)[:-4], os.path.basename(ref_file)[:-4])
if not os.path.isfile(ref_file):
raise NoReferencesError("Reference file %s does not exist. You must "
"have annotated references to run "
"evaluations." % ref_file)
one_res = compute_gt_results(est_file, ref_file, boundaries_id, labels_id,
config, annotator_id=annotator_id)
return one_res
def get_results_file_name(boundaries_id, labels_id, config,
annotator_id):
"""Based on the config and the dataset, get the file name to store the
results."""
utils.ensure_dir(msaf.config.results_dir)
file_name = os.path.join(msaf.config.results_dir, "results")
file_name += "_boundsE%s_labelsE%s" % (boundaries_id, labels_id)
file_name += "_annotatorE%d" % (annotator_id)
sorted_keys = sorted(config.keys(), key=str.lower)
for key in sorted_keys:
file_name += "_%sE%s" % (key, str(config[key]).replace("/", "_"))
# Check for max file length
if len(file_name) > 255 - len(msaf.config.results_ext):
file_name = file_name[:255 - len(msaf.config.results_ext)]
return file_name + msaf.config.results_ext
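# Illustrative sketch (hypothetical identifiers): for boundaries_id="sf", labels_id="fmc2d",
# annotator_id=0 and config={"feature": "pcp", "hier": False}, the name built above becomes
#   <results_dir>/results_boundsEsf_labelsEfmc2d_annotatorE0_featureEpcp_hierEFalse<ext>
# i.e. each config key/value pair is appended as "_<key>E<value>" in case-insensitive
# alphabetical key order, and the name is truncated to respect the 255-character limit.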
def process(in_path, boundaries_id=msaf.config.default_bound_id,
labels_id=msaf.config.default_label_id, annot_beats=False,
framesync=False, feature="pcp", hier=False, save=False,
out_file=None, n_jobs=4, annotator_id=0, config=None,
feat_def=None):
"""Main process to evaluate algorithms' results.
Parameters
----------
in_path : str
Path to the dataset root folder.
boundaries_id : str
Boundaries algorithm identifier (e.g. siplca, cnmf)
labels_id : str
Labels algorithm identifier (e.g. siplca, cnmf)
    annot_beats : boolean
        Whether to use the annotated beats or not.
    framesync: boolean
        Whether to use framesync features or not (default: False -> beatsync)
feature: str
String representing the feature to be used (e.g. pcp, mfcc, tonnetz)
hier : bool
Whether to compute a hierarchical or flat segmentation.
save: boolean
Whether to save the results into the `out_file` csv file.
out_file: str
Path to the csv file to save the results (if `None` and `save = True`
it will save the results in the default file name obtained by
calling `get_results_file_name`).
n_jobs: int
Number of processes to run in parallel. Only available in collection
mode.
annotator_id : int
        Number identifying the annotator.
config: dict
Dictionary containing custom configuration parameters for the
algorithms. If None, the default parameters are used.
Return
------
results : pd.DataFrame
DataFrame containing the evaluations for each file.
"""
# Set up configuration based on algorithms parameters
if config is None:
config = io.get_configuration(feature, annot_beats, framesync,
boundaries_id, labels_id, feat_def)
if isinstance(feature, list):
feature.sort()
# Hierarchical segmentation
config["hier"] = hier
# Remove actual features
config.pop("features", None)
# Get out file in case we want to save results
if out_file is None:
out_file = get_results_file_name(boundaries_id, labels_id, config,
annotator_id)
# If out_file already exists, read and return them
if os.path.exists(out_file):
logging.warning("Results already exists, reading from file %s" %
out_file)
results = | pd.read_csv(out_file) | pandas.read_csv |
import pandas as pd
import numpy as np
import pickle
import os
import configparser
import argparse
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.preprocessing import QuantileTransformer
from sklearn.linear_model import LinearRegression, LogisticRegression
from keras.models import model_from_json
from utils import evaluate_clf, evaluate_reg, roc_cutoff
def load_lgb(path):
if 'LGB' in path:
with open(path, 'rb') as f:
model = pickle.load(f)
return model
def load_xgb(path):
if 'XGB' in path:
with open(path, 'rb') as f:
model = pickle.load(f)
return model
def load_nn(path_json, path_h5, path_transformer):
if 'NN' in path_json:
with open(path_json, 'r') as f:
json_string = f.read()
model = model_from_json(json_string)
model.load_weights(path_h5)
if os.path.exists(path_transformer):
with open(path_transformer, 'rb') as f:
transformer = pickle.load(f)
else:
transformer = None
return model, transformer
def sort_preds(PREDS, IDXES):
va_idxes = np.concatenate(IDXES)
order = np.argsort(va_idxes)
va_preds = np.concatenate(PREDS)
return va_preds[order]
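# Minimal sketch (made-up numbers, not part of the original script): sort_preds
# stitches per-fold out-of-fold predictions back into the original sample order.
def _example_sort_preds():
    PREDS = [np.array([0.9, 0.1]), np.array([0.4, 0.7])]  # fold 0, fold 1 predictions
    IDXES = [np.array([2, 0]), np.array([1, 3])]          # matching validation indices
    return sort_preds(PREDS, IDXES)                       # -> array([0.1, 0.4, 0.9, 0.7])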
def gen_stk_features(models_dict, models_dir, task, X_train, y_train,
X_test, n_splits, random_state):
# settings of cross validation
if task == 'clf':
cv = StratifiedKFold(n_splits=n_splits, random_state=random_state, shuffle=True)
elif task == 'reg':
cv = KFold(n_splits=n_splits, random_state=random_state, shuffle=True)
# generate prediction features
DF_PRED_VA = []
DF_PRED_TE = []
for model_name, use_this_model in models_dict.items():
if use_this_model:
VA_IDXES = []
VA_PREDS = []
TE_PREDS = []
# cross validation
for i, (_, va_idx) in enumerate(cv.split(X_train, y_train)):
if model_name == 'LGB':
if task == 'clf':
path_model = os.path.join(models_dir, f'LGBMClassifier_{i}_trainedmodel.pkl')
model = load_lgb(path_model)
y_pred_va = model.predict_proba(
X_train[va_idx],
num_iterations=model.best_iteration_
)[:, 1]
y_pred_te = model.predict_proba(
X_test,
num_iterations=model.best_iteration_
)[:, 1]
elif task == 'reg':
path_model = os.path.join(models_dir, f'LGBMRegressor_{i}_trainedmodel.pkl')
model = load_lgb(path_model)
y_pred_va = model.predict(
X_train[va_idx],
num_iterations=model.best_iteration_
)
y_pred_te = model.predict(
X_test,
num_iterations=model.best_iteration_
)
elif model_name == 'XGB':
if task == 'clf':
path_model = os.path.join(models_dir, f'XGBClassifier_{i}_trainedmodel.pkl')
model = load_xgb(path_model)
y_pred_va = model.predict_proba(
X_train[va_idx],
ntree_limit=model.best_iteration
)[:, 1]
y_pred_te = model.predict(
X_test,
ntree_limit=model.best_iteration
)[:, 1]
elif task == 'reg':
path_model = os.path.join(models_dir, f'XGBRegressor_{i}_trainedmodel.pkl')
model = load_xgb(path_model)
y_pred_va = model.predict(
X_train[va_idx],
ntree_limit=model.best_iteration
)
y_pred_te = model.predict(
X_test,
ntree_limit=model.best_iteration
)
elif model_name == 'NN':
if task == 'clf':
path_json = os.path.join(models_dir, 'NNClassifier_architecture.json')
path_h5 = os.path.join(models_dir, f'NNClassifier_{i}_trainedweight.h5')
path_transformer = os.path.join(models_dir, f'NNClassifier_{i}_transformer.pkl')
elif task == 'reg':
path_json = os.path.join(models_dir, 'NNRegressor_architecture.json')
path_h5 = os.path.join(models_dir, f'NNRegressor_{i}_trainedweight.h5')
path_transformer = os.path.join(models_dir, f'NNRegressor_{i}_transformer.pkl')
model, transformer = load_nn(path_json, path_h5, path_transformer)
if transformer:
x_va = transformer.transform(np.nan_to_num(X_train[va_idx]))
x_te = transformer.transform(np.nan_to_num(X_test))
else:
x_va = np.nan_to_num(X_train[va_idx])
x_te = np.nan_to_num(X_test)
y_pred_va = model.predict(x_va).astype("float64")
y_pred_va = y_pred_va.flatten()
y_pred_te = model.predict(x_te).astype("float64")
y_pred_te = y_pred_te.flatten()
# stack prediction features
VA_IDXES.append(va_idx)
VA_PREDS.append(y_pred_va)
TE_PREDS.append(y_pred_te)
# sort prediction features
va_preds = sort_preds(VA_PREDS, VA_IDXES)
te_preds = np.mean(TE_PREDS, axis=0)
# convert prediction features to dataframe
df_pred_va = pd.DataFrame(va_preds, columns=[model_name])
df_pred_te = pd.DataFrame(te_preds, columns=[model_name])
DF_PRED_VA.append(df_pred_va)
DF_PRED_TE.append(df_pred_te)
return pd.concat(DF_PRED_VA, axis=1), pd.concat(DF_PRED_TE, axis=1)
if __name__ == '__main__':
# argments settings
parser = argparse.ArgumentParser()
parser.add_argument('conf')
args = parser.parse_args()
# load config
config = configparser.ConfigParser()
config.read(f'{args.conf}')
use_lgb = config.getboolean('stacking', 'LGB')
use_xgb = config.getboolean('stacking', 'XGB')
use_nn = config.getboolean('stacking', 'NN')
models_dir = config.get('stacking', 'models_dir')
train_data = config.get('general', 'train_data')
test_data = config.get('stacking', 'test_data')
# load logging
config = configparser.ConfigParser()
config.read('../results/logging/logging.ini')
n_splits = int(config.get('logging_trainer', 'n_splits'))
random_state = int(config.get('logging_trainer', 'random_state'))
# load datasets
df_train, df_test = pd.read_csv(train_data), pd.read_csv(test_data)
X_train, y_train = np.array(df_train.iloc[:, 2:]), np.array(df_train.iloc[:, 1])
X_test = np.array(df_test.iloc[:, 2:])
# task declaration
if len(set(y_train)) <= 2:
task = 'clf'
elif len(set(y_train)) > 2:
task = 'reg'
    # generate prediction features
models_dict = {'LGB': use_lgb, 'XGB': use_xgb, "NN": use_nn}
df_pred_va, df_pred_te = gen_stk_features(
models_dict,
models_dir,
task,
X_train,
y_train,
X_test,
n_splits,
random_state
)
df_pred_va.to_csv('../results/stk_feature/prediction_features_valid.csv')
df_pred_te.to_csv('../results/stk_feature/prediction_features_test.csv')
# load features to create stacking model
X_train = np.array( | pd.read_csv('../results/stk_feature/prediction_features_valid.csv') | pandas.read_csv |
import os
import sys
from numpy.core.numeric import zeros_like
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-poster')
# I hate this too but it allows everything to use the same helper functions.
sys.path.insert(0, 'model')
from helper_functions import read_in_NNDSS
from Reff_constants import *
from params import alpha_start_date, delta_start_date, omicron_start_date, vaccination_start_date
def read_in_posterior(date):
"""
read in samples from posterior from inference
"""
df = pd.read_hdf("results/soc_mob_posterior"+date+".h5", key='samples')
return df
def read_in_google(Aus_only=True, local=False, moving=False):
"""
Read in the Google data set
"""
if local:
if type(local) == str:
df = | pd.read_csv(local, parse_dates=['date']) | pandas.read_csv |
'''
We will create an ANN to output the class of a wheat seed based on 7 numerical inputs,
there are a total of 3 classes in the wheat-seeds.csv dataset, and our ANN, built from
fully connected Dense layers, will predict the class of the inputs.
Data src: https://raw.githubusercontent.com/jbrownlee/Datasets/master/wheat-seeds.csv
Our ANN is made with the tensorflow 2.0 keras API.
Input example:
15.26,14.84,0.871,5.763,3.312,2.221,5.22
Output example:
1
Our model will predict the class, as seen in the input and output examples above.
'''
#We read the libraries
import pandas as pd
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler,LabelEncoder
from sklearn.metrics import confusion_matrix, classification_report
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
#We read the csv file
dataset = pd.read_csv('wheat-seeds.csv')
#CHANGE THE FILE PATH TO wheat-seeds.csv TO READ THE FILE CORRECTLY
dataset.columns = ['t1','t2','t3','t4','t5','t6','t7','class']
X = dataset.drop(['class'],axis=1)
Y = dataset['class']
#We encode Y
encoder = LabelEncoder()
encoder.fit(Y)
encoded_labels = encoder.transform(Y)
cat_y = to_categorical(encoded_labels,dtype='float64')
#We create the model
X_train, X_test, y_train, y_test = train_test_split(X, cat_y, test_size=0.2, random_state=10)
model = Sequential()
model.add(Dense(7,activation='relu'))
model.add(Dense(6,activation='relu'))
model.add(Dense(6,activation='relu'))
model.add(Dense(3,activation='softmax'))
model.compile(loss='categorical_crossentropy',optimizer='adam')
#We create an early stopping callback
early_stop = EarlyStopping(monitor='val_loss',mode='min',patience=20)
#We train the model for up to 500 epochs with early stopping
model.fit(X_train,y_train,epochs=500,batch_size=32,callbacks=[early_stop],validation_data=(X_test,y_test))
#We plot the model's loss history
history = | pd.DataFrame(model.history.history) | pandas.DataFrame |
import itertools
import numpy as np
import pandas as pd
import random
import time
from overlappingspheres.functionality import advance
from overlappingspheres.functionality import fractional
from overlappingspheres.functionality import metricpoints
from overlappingspheres.functionality import shift
from typing import List
Vector = List[float]
def perform_experiment(relax: bool, spring_constant: Vector, random_strength: Vector, seed: Vector):
"""
A function that takes potential values of parameters and returns a dataframe with experiment results
for all possible combinations of these parameters.
Parameters
----------
relax : bool
Whether we implement Lloyd's algorithm before the mechanical descriptions of cell-cell interactions are derived
spring_constant : list[float]
List of values of the spring constant parameter to test
random_strength : list[float]
List of values of the random force strength parameter to test
seed : list[float]
List of values of the state of random functions to observe varying behaviour
Returns
-------
pandas.DataFrame
The dataframe with experiment results
"""
d = []
# choosing the maximum number of iterations we want performed in the experiment
iterations = int(1e3)
# We want to loop through all possible combinations of parameter values
for iterator in itertools.product(spring_constant, random_strength, seed):
seconds = time.time()
test = shift(205, iterator[2])
for i in range(iterations):
if i == int(1e1):
test10 = advance(test, 0.015, "news", i, iterator[0], iterator[1])
if i == int(1e2):
test100 = advance(test, 0.015, "news", i, iterator[0], iterator[1])
test = advance(test, 0.015, "news", i, iterator[0], iterator[1])
metric1 = metricpoints(test)
metric2 = fractional(test)
m10etric1 = metricpoints(test10)
m10etric2 = fractional(test10)
m100etric1 = metricpoints(test100)
m100etric2 = fractional(test100)
runningtime = (time.time() - seconds)
filter0 = np.asarray([0])
filter1 = np.asarray([1])
filterd0 = test[np.in1d(test[:, 2], filter0)]
filterd1 = test[np.in1d(test[:, 2], filter1)]
f10ilterd0 = test10[np.in1d(test10[:, 2], filter0)]
f10ilterd1 = test10[np.in1d(test10[:, 2], filter1)]
f100ilterd0 = test100[np.in1d(test100[:, 2], filter0)]
f100ilterd1 = test100[np.in1d(test100[:, 2], filter1)]
d.append(
{
'relax': relax,
'spring_constant': iterator[0],
'random_strength': iterator[1],
'seed': iterator[2],
'iterations': iterations,
'runningtime': runningtime,
'Distance metric': metric1,
'Fractional length': metric2,
'0 points': np.delete(filterd0, np.s_[2:3], axis=1),
'1 points': np.delete(filterd1, np.s_[2:3], axis=1)
}
)
d.append(
{
'relax': relax,
'spring_constant': iterator[0],
'random_strength': iterator[1],
'seed': iterator[2],
'iterations': 101,
'runningtime': runningtime,
'Distance metric': m100etric1,
'Fractional length': m100etric2,
'0 points': np.delete(f100ilterd0, np.s_[2:3], axis=1),
'1 points': np.delete(f100ilterd1, np.s_[2:3], axis=1)
}
)
d.append(
{
'relax': relax,
'spring_constant': iterator[0],
'random_strength': iterator[1],
'seed': iterator[2],
'iterations': 11,
'runningtime': runningtime,
'Distance metric': m10etric1,
'Fractional length': m10etric2,
'0 points': np.delete(f10ilterd0, np.s_[2:3], axis=1),
'1 points': np.delete(f10ilterd1, np.s_[2:3], axis=1)
}
)
df1 = | pd.DataFrame(d) | pandas.DataFrame |
from pyspark.sql.types import ArrayType, DoubleType
import glow.gwas.log_reg as lr
import glow.gwas.functions as gwas_fx
import statsmodels.api as sm
import pandas as pd
import numpy as np
import pytest
def run_score_test(genotype_df,
phenotype_df,
covariate_df,
correction=lr.correction_none,
add_intercept=True):
C = covariate_df.to_numpy(copy=True)
if add_intercept:
C = gwas_fx._add_intercept(C, phenotype_df.shape[0])
Y = phenotype_df.to_numpy(copy=True)
Y_mask = ~np.isnan(Y)
Y[~Y_mask] = 0
state_rows = [
lr._prepare_one_phenotype(C, pd.Series({
'label': p,
'values': phenotype_df[p]
}), correction, add_intercept) for p in phenotype_df
]
phenotype_names = phenotype_df.columns.to_series().astype('str')
state = lr._pdf_to_log_reg_state(pd.DataFrame(state_rows), phenotype_names, C.shape[1])
values_df = pd.DataFrame({gwas_fx._VALUES_COLUMN_NAME: list(genotype_df.to_numpy().T)})
return lr._logistic_regression_inner(values_df, state, C, Y, Y_mask, None, lr.correction_none,
0.05,
phenotype_df.columns.to_series().astype('str'), None)
def statsmodels_baseline(genotype_df,
phenotype_df,
covariate_df,
offset_dfs=None,
add_intercept=True):
if add_intercept:
covariate_df = sm.add_constant(covariate_df)
p_values = []
chisq = []
for phenotype in phenotype_df:
for genotype_idx in range(genotype_df.shape[1]):
mask = ~np.isnan(phenotype_df[phenotype].to_numpy())
if offset_dfs is not None:
offset = offset_dfs[genotype_idx][phenotype]
else:
offset = None
model = sm.GLM(phenotype_df[phenotype],
covariate_df,
offset=offset,
family=sm.families.Binomial(),
missing='drop')
params = model.fit().params
results = model.score_test(params,
exog_extra=genotype_df.iloc[:, genotype_idx].array[mask])
chisq.append(results[0])
p_values.append(results[1])
return pd.DataFrame({
'chisq': np.concatenate(chisq),
'pvalue': np.concatenate(p_values),
'phenotype': phenotype_df.columns.to_series().astype('str').repeat(genotype_df.shape[1])
})
def run_logistic_regression_spark(spark,
genotype_df,
phenotype_df,
covariate_df=pd.DataFrame({}),
extra_cols=pd.DataFrame({}),
values_column='values',
**kwargs):
pdf = pd.DataFrame({values_column: genotype_df.to_numpy().T.tolist()})
if not extra_cols.empty:
pdf = pd.concat([pdf, extra_cols], axis=1)
pdf['idx'] = pdf.index
results = (lr.logistic_regression(spark.createDataFrame(pdf),
phenotype_df,
covariate_df,
correction=lr.correction_none,
values_column=values_column,
**kwargs).toPandas().sort_values(['phenotype',
'idx']).drop('idx', axis=1))
return results
def regression_results_equal(df1, df2, rtol=1e-5):
df1 = df1.sort_values('phenotype', kind='mergesort')
df2 = df2.sort_values('phenotype', kind='mergesort')
strings_equal = np.array_equal(df1.phenotype.array, df2.phenotype.array)
numerics_equal = np.allclose(df1.select_dtypes(exclude=['object']),
df2.select_dtypes(exclude=['object']),
rtol=rtol)
return strings_equal and numerics_equal
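# Tiny illustrative check (made-up numbers): the helper above compares the string
# 'phenotype' column exactly and all numeric columns within a relative tolerance.
def _example_regression_results_equal():
    a = pd.DataFrame({'phenotype': ['p1', 'p2'], 'pvalue': [0.01, 0.5]})
    b = pd.DataFrame({'phenotype': ['p1', 'p2'], 'pvalue': [0.0100000001, 0.5]})
    return regression_results_equal(a, b)  # True: difference is within rtol=1e-5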
def assert_glow_equals_golden(genotype_df, phenotype_df, covariate_df, add_intercept=True):
glow = run_score_test(genotype_df, phenotype_df, covariate_df, add_intercept=add_intercept)
golden = statsmodels_baseline(genotype_df,
phenotype_df,
covariate_df,
add_intercept=add_intercept)
assert regression_results_equal(glow, golden)
def random_phenotypes(shape, rg):
return rg.integers(low=0, high=2, size=shape).astype(np.float64)
def test_spector_non_missing():
ds = sm.datasets.spector.load_pandas()
phenotype_df = pd.DataFrame({ds.endog.name: ds.endog})
genotype_df = ds.exog.loc[:, ['GPA']]
covariate_df = ds.exog.drop('GPA', axis=1)
assert_glow_equals_golden(genotype_df, phenotype_df, covariate_df)
def test_spector_missing():
ds = sm.datasets.spector.load_pandas()
phenotype_df = pd.DataFrame({ds.endog.name: ds.endog})
phenotype_df.iloc[[0, 3, 10, 25], 0] = np.nan
genotype_df = ds.exog.loc[:, ['GPA']]
covariate_df = ds.exog.drop('GPA', axis=1)
assert_glow_equals_golden(genotype_df, phenotype_df, covariate_df)
def test_spector_no_intercept():
ds = sm.datasets.spector.load_pandas()
phenotype_df = pd.DataFrame({ds.endog.name: ds.endog})
phenotype_df.iloc[[0, 3, 10, 25], 0] = np.nan
genotype_df = ds.exog.loc[:, ['GPA']]
covariate_df = ds.exog.drop('GPA', axis=1)
assert_glow_equals_golden(genotype_df, phenotype_df, covariate_df, add_intercept=False)
def test_multiple(rg):
n_sample = 50
n_cov = 10
n_pheno = 25
phenotype_df = pd.DataFrame(random_phenotypes((n_sample, n_pheno), rg))
covariate_df = pd.DataFrame(rg.random((n_sample, n_cov)))
genotype_df = pd.DataFrame(rg.random((n_sample, 1)))
assert_glow_equals_golden(genotype_df, phenotype_df, covariate_df)
def test_multiple_missing(rg):
n_sample = 50
n_cov = 2
n_pheno = 31
phenotype_df = pd.DataFrame(random_phenotypes((n_sample, n_pheno), rg))
Y = phenotype_df.to_numpy()
Y[np.tril_indices_from(Y, k=-20)] = np.nan
assert phenotype_df.isna().sum().sum() > 0
covariate_df = pd.DataFrame(rg.random((n_sample, n_cov)))
genotype_df = pd.DataFrame(rg.random((n_sample, 1)))
assert_glow_equals_golden(genotype_df, phenotype_df, covariate_df)
@pytest.mark.min_spark('3')
def test_multiple_spark(spark, rg):
n_sample = 40
n_cov = 5
n_pheno = 5
phenotype_df = pd.DataFrame(random_phenotypes((n_sample, n_pheno), rg))
covariate_df = pd.DataFrame(rg.random((n_sample, n_cov)))
genotype_df = pd.DataFrame(rg.random((n_sample, 1)))
run_score_test(genotype_df, phenotype_df, covariate_df)
glow = run_logistic_regression_spark(spark, genotype_df, phenotype_df, covariate_df)
golden = statsmodels_baseline(genotype_df, phenotype_df, covariate_df)
assert regression_results_equal(glow, golden)
def random_mask(size, missing_per_column, rg):
base = np.ones(size[0], dtype=bool)
base[:missing_per_column] = False
return np.column_stack([rg.permutation(base) for _ in range(size[1])])
@pytest.mark.min_spark('3')
def test_multiple_spark_missing(spark, rg):
n_sample = 50
n_cov = 5
n_pheno = 5
phenotype_df = pd.DataFrame(random_phenotypes((n_sample, n_pheno), rg))
Y = phenotype_df.to_numpy()
Y[~random_mask(Y.shape, 10, rg)] = np.nan
assert phenotype_df.isna().sum().sum() > 0
covariate_df = pd.DataFrame(rg.random((n_sample, n_cov)))
genotype_df = pd.DataFrame(rg.random((n_sample, 1)))
glow = run_logistic_regression_spark(spark, genotype_df, phenotype_df, covariate_df)
golden = statsmodels_baseline(genotype_df, phenotype_df, covariate_df)
assert regression_results_equal(glow, golden)
@pytest.mark.min_spark('3')
def test_missing_and_intersect_samples_spark(spark, rg):
n_sample = 50
n_cov = 5
n_pheno = 5
genotype_df = pd.DataFrame(rg.random((n_sample, 1)))
#here we're simulating dropping a sample from phenotypes and covariates
phenotype_df = pd.DataFrame(random_phenotypes((n_sample, n_pheno), rg))[1:]
phenotype_df.loc[[1, 3, 5], 1] = np.nan
covariate_df = pd.DataFrame(rg.random((n_sample, n_cov)))[1:]
glow = run_logistic_regression_spark(
spark,
genotype_df,
phenotype_df,
covariate_df,
intersect_samples=True,
genotype_sample_ids=genotype_df.index.values.astype(str).tolist())
#drop sample from genotypes so that input samples are aligned
baseline = statsmodels_baseline(genotype_df[1:], phenotype_df, covariate_df)
assert regression_results_equal(glow, baseline)
@pytest.mark.min_spark('3')
def test_spark_no_intercept(spark, rg):
n_sample = 50
n_cov = 5
n_pheno = 5
phenotype_df = pd.DataFrame(random_phenotypes((n_sample, n_pheno), rg))
Y = phenotype_df.to_numpy()
Y[~random_mask(Y.shape, 15, rg)] = np.nan
assert phenotype_df.isna().sum().sum() > 0
covariate_df = pd.DataFrame(rg.random((n_sample, n_cov)))
genotype_df = pd.DataFrame(rg.random((n_sample, 1)))
glow = run_logistic_regression_spark(spark,
genotype_df,
phenotype_df,
covariate_df,
add_intercept=False)
golden = statsmodels_baseline(genotype_df, phenotype_df, covariate_df, add_intercept=False)
assert regression_results_equal(glow, golden)
@pytest.mark.min_spark('3')
def test_simple_offset(spark, rg):
num_samples = 25
num_pheno = 6
num_geno = 10
genotype_df = pd.DataFrame(rg.random((num_samples, num_geno)))
phenotype_df = pd.DataFrame(random_phenotypes((num_samples, num_pheno), rg))
covariate_df = pd.DataFrame(rg.random((num_samples, 2)))
offset_df = pd.DataFrame(rg.random((num_samples, num_pheno)))
results = run_logistic_regression_spark(spark,
genotype_df,
phenotype_df,
covariate_df,
offset_df=offset_df)
baseline = statsmodels_baseline(genotype_df, phenotype_df, covariate_df, [offset_df] * num_geno)
assert regression_results_equal(results, baseline)
@pytest.mark.min_spark('3')
def test_missing_and_simple_offset_out_of_order_with_intersect(spark, rg):
n_sample = 50
n_cov = 5
n_pheno = 5
num_geno = 10
genotype_df = pd.DataFrame(rg.random((n_sample, num_geno)))
#here we're simulating dropping samples from phenotypes and covariates
phenotype_df = pd.DataFrame(random_phenotypes((n_sample, n_pheno), rg))[0:-2]
phenotype_df.loc[[1, 3, 5], 1] = np.nan
covariate_df = pd.DataFrame(rg.random((n_sample, n_cov)))[0:-2]
offset_df = pd.DataFrame(rg.random((n_sample, n_pheno)))
glow = run_logistic_regression_spark(
spark,
genotype_df,
phenotype_df,
covariate_df,
offset_df=offset_df.sample(frac=1),
intersect_samples=True,
genotype_sample_ids=genotype_df.index.values.astype(str).tolist())
#drop samples from genotypes so that input samples are aligned
baseline = statsmodels_baseline(genotype_df[0:-2], phenotype_df, covariate_df,
[offset_df[0:-2]] * num_geno)
assert regression_results_equal(glow, baseline)
@pytest.mark.min_spark('3')
def test_multi_offset(spark, rg):
num_samples = 50
num_pheno = 25
num_geno = 10
genotype_df = pd.DataFrame(rg.random((num_samples, num_geno)))
phenotype_df = pd.DataFrame(random_phenotypes((num_samples, num_pheno), rg))
covariate_df = pd.DataFrame(rg.random((num_samples, 10)))
offset_index = pd.MultiIndex.from_product([phenotype_df.index, ['chr1', 'chr2']])
offset_df = pd.DataFrame(rg.random((num_samples * 2, num_pheno)), index=offset_index)
extra_cols = | pd.DataFrame({'contigName': ['chr1', 'chr2'] * 5}) | pandas.DataFrame |
"""
"""
__version__='192.168.3.11.dev1'
import sys
import os
import logging
import pandas as pd
import re
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
logger = logging.getLogger('PT3S')
try:
from PT3S import Rm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Rm - trying import Rm instead ... maybe pip install -e . is active ...'))
import Rm
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
def addResVecToDfAlarmEreignisse(
dfAlarmEreignisse
,TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
):
"""
dfAlarmEreignisse:
        Nr: running number (built according to NrBy and NrAsc)
        tA: start time
        tE: end time
        tD: duration of the alarm
        ZHKNR: ZHKNR (the chronologically first one, if the alarm spans several ZHKNRs)
        tD_ZHKNR: lifetime of the ZHKNR; x-annotations at start/end if the ZHK starts at the beginning of Res12 / is still running at the end of Res12; '-1' if the lifetime could not be determined
        ZHKNRn: sorted list of the ZHKNRs of the alarm; one of them is ZHKNR, typically the first in the list
        LDSResBaseType: SEG or Druck (pressure)
        OrteIDs: OrteIDs (location IDs) of the alarm
        Orte: short form of the alarm's OrteIDs
        Ort: the first location of Orte
        SEGName: segment to which the first location of the alarm belongs
        DIVPipelineName:
        Voralarm: pre-alarm determined for the alarm; -1 if no pre-alarm could be found in Res12
        Type: type of the control volume; e.g. p-p for complete flow balances; '' if no type could be found
        Name: name of the balance volume
        NrSD: running alarm number per BaseType
        NrName: running alarm number per Name
        NrSEGName: running alarm number per SEGName
        AlarmEvent: AlarmEvent object
        BZKat: operating-state category of the alarm
    Returns:
        dfAlarmEreignisse with 2 Cols added:
            resIDBase: the first OrtID from OrteIDs
            dfResVec: the resVec of the alarm
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
dfAlarmEreignisse['resIDBase']=dfAlarmEreignisse['OrteIDs'].apply(lambda x: x[0])
        ### determine the result vector for all locations
dfResVecs={}
dfResVecsLst=[]
for indexAlarm, rowAlarm in dfAlarmEreignisse.iterrows():
resIDBase=rowAlarm['resIDBase']
if resIDBase in dfResVecs.keys():
# resIDBase schon behandelt
dfResVecsLst.append(dfResVecs[resIDBase])
continue
# Spalten basierend auf resIDBase bestimmen
ErgIDs=[resIDBase+ext for ext in Rm.ResChannelTypesAll]
IMDIErgIDs=['IMDI.'+ID for ID in ErgIDs] # jede Spalte koennte anstatt "normal" als IMDI. vorhanden sein
ErgIDsAll=[*ErgIDs,*IMDIErgIDs]
            # result columns
if rowAlarm['LDSResBaseType']=='SEG':
dfFiltered=TCsLDSRes1.filter(items=ErgIDsAll,axis=1)
else:
dfFiltered=TCsLDSRes2.filter(items=ErgIDsAll,axis=1)
            # rename the result columns
colDct={}
for col in dfFiltered.columns:
m=re.search(Lx.pID,col)
colDct[col]=m.group('E')
dfFiltered.name=resIDBase
dfResVec=dfFiltered.rename(columns=colDct)
            # keep the result vector for reuse
dfResVecs[resIDBase]=dfResVec
dfResVecsLst.append(dfResVec)
            logger.debug("{:s}resIDBase: {:50s} number of columns found in TCsLDSRes: {:d}".format(logStr, resIDBase, len(dfResVec.columns.to_list())))
dfAlarmEreignisse['dfResVec']=dfResVecsLst
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise e
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse
def fGenAlarmVisTimeSpan(
tA
,tE
    # all of the following values should share the same unit and be integer-valued in that unit
,timeSpan=pd.Timedelta('25 Minutes')
,timeRoundStr='1T'
,timeBoundaryMin=pd.Timedelta('3 Minutes')
    ,timeRef='A' # for alarms that are longer: either the start or the end is shown with timeSpan
):
"""
    generates a time span in which an alarm is displayed for analysis
    tA, tE are the start and end of the alarm
    they are rounded down (tA) and up (tE) using timeRoundStr
    at least timeBoundaryMin should lie between the rounded times and tA/tE
    if not, timeBoundaryMin is applied to tA/tE first and the rounding is done afterwards
    timeSpan is the desired minimum time span
    alarms that are shorter are displayed with timeSpan
    alarms that are longer: either the start or the end is displayed with timeSpan
"""
    # round the times down and up
timeStart=tA.floor(freq=timeRoundStr)
timeEnd=tE.ceil(freq=timeRoundStr)
    # check that the rounded times keep the minimum distance
if tA-timeStart < timeBoundaryMin:
timeStart=tA-timeBoundaryMin
timeStart= timeStart.floor(freq=timeRoundStr)
if timeEnd-tE < timeBoundaryMin:
timeEnd=tE+timeBoundaryMin
timeEnd= timeEnd.ceil(freq=timeRoundStr)
    # check the rounded times against the time span
timeLeft=timeSpan-(timeEnd-timeStart)
    if timeLeft > pd.Timedelta('0 Seconds'): # the rounded alarm time is shorter than timeSpan; timeSpan is displayed
timeStart=timeStart-timeLeft/2
timeStart= timeStart.floor(freq=timeRoundStr)
timeEnd=timeEnd+timeLeft/2
timeEnd= timeEnd.ceil(freq=timeRoundStr)
else:
        # the rounded alarm time is longer than timeSpan; A or E is displayed with timeSpan
if timeRef=='A':
timeM=tA.floor(freq=timeRoundStr)
else:
timeM=tE.ceil(freq=timeRoundStr)
timeStart=timeM-timeSpan/2
timeEnd=timeM+timeSpan/2
if timeEnd-timeStart > timeSpan:
timeEnd=timeStart+timeSpan
ZeitbereichSel=timeEnd-timeStart
if ZeitbereichSel <= pd.Timedelta('1 Minutes'):
bysecond=list(np.arange(0,60,1))
byminute=None
elif ZeitbereichSel <= pd.Timedelta('3 Minutes'):
bysecond=list(np.arange(0,60,5))
byminute=None
elif ZeitbereichSel > pd.Timedelta('3 Minutes') and ZeitbereichSel <= pd.Timedelta('5 Minutes'):
bysecond=list(np.arange(0,60,15))
byminute=None
elif ZeitbereichSel > pd.Timedelta('5 Minutes') and ZeitbereichSel <= pd.Timedelta('20 Minutes'):
bysecond=list(np.arange(0,60,30))
byminute=None
elif ZeitbereichSel > pd.Timedelta('20 Minutes') and ZeitbereichSel <= pd.Timedelta('30 Minutes'):
bysecond=None
byminute=list(np.arange(0,60,1))
else:
bysecond=None
byminute=list(np.arange(0,60,3))
return timeStart, timeEnd, byminute, bysecond
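# Illustrative call (made-up timestamps, not part of the original module): a roughly
# four-minute alarm is padded and centred inside the default 25-minute window.
def _example_fGenAlarmVisTimeSpan():
    tA = pd.Timestamp('2021-01-01 10:03:20')
    tE = pd.Timestamp('2021-01-01 10:07:10')
    return fGenAlarmVisTimeSpan(tA, tE)  # -> (timeStart, timeEnd, byminute, bysecond)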
def rptAlarms(
pdfErgFile='rptAlarms.pdf'
,figsize=Rm.DINA2q
,dpi=Rm.dpiSize
    ,dfAlarmStatistik=pd.DataFrame() # 1 row per SEG; columns with alarm information for the SEG
    ,dfAlarmEreignisse=pd.DataFrame() # 1 row per alarm; columns with information about the alarm
,TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2= | pd.DataFrame() | pandas.DataFrame |
# -*- coding:utf-8 -*-
import trdb2py.trading2_pb2
from trdb2py.utils import str2asset, asset2str
from datetime import datetime
import time
import pandas as pd
import numpy as np
import math
from trdb2py.timeutils import str2timestamp, getDayInYear, getYearDays, calcYears
CtrlTypeStr = [
'INIT',
'BUY',
'SELL',
'STOPLOSS',
'TAKEPROFIT',
'WITHDRAW',
'DEPOSIT',
]
def buildPNLReport(lstpnl: list) -> pd.DataFrame:
"""
    buildPNLReport - convert a list of PNL results into a pandas.DataFrame for easier computation
"""
fv0 = {
'title': [],
'asset': [],
'maxDrawdown': [],
'maxDrawdownStart': [],
'maxDrawdownEnd': [],
'maxDrawup': [],
'maxDrawupStart': [],
'maxDrawupEnd': [],
'sharpe': [],
'annualizedReturns': [],
'annualizedVolatility': [],
'totalReturns': [],
'variance': [],
'buyTimes': [],
'sellTimes': [],
'stoplossTimes': [],
'maxUpDay': [],
'maxPerUpDay': [],
'maxDownDay': [],
'maxPerDownDay': [],
'maxUpWeek': [],
'maxPerUpWeek': [],
'maxDownWeek': [],
'maxPerDownWeek': [],
'maxUpMonth': [],
'maxPerUpMonth': [],
'maxDownMonth': [],
'maxPerDownMonth': [],
'maxUpYear': [],
'maxPerUpYear': [],
'maxDownYear': [],
'maxPerDownYear': [],
'perWinRate': [],
'values': [],
}
for v in lstpnl:
fv0['title'].append(v['title'])
fv0['asset'].append(asset2str(v['pnl'].asset))
fv0['maxDrawdown'].append(v['pnl'].maxDrawdown)
fv0['maxDrawdownStart'].append(datetime.fromtimestamp(
v['pnl'].maxDrawdownStartTs).strftime('%Y-%m-%d'))
fv0['maxDrawdownEnd'].append(datetime.fromtimestamp(
v['pnl'].maxDrawdownEndTs).strftime('%Y-%m-%d'))
fv0['maxDrawup'].append(v['pnl'].maxDrawup)
fv0['maxDrawupStart'].append(datetime.fromtimestamp(
v['pnl'].maxDrawupStartTs).strftime('%Y-%m-%d'))
fv0['maxDrawupEnd'].append(datetime.fromtimestamp(
v['pnl'].maxDrawupEndTs).strftime('%Y-%m-%d'))
fv0['sharpe'].append(v['pnl'].sharpe)
fv0['annualizedReturns'].append(v['pnl'].annualizedReturns)
fv0['annualizedVolatility'].append(v['pnl'].annualizedVolatility)
fv0['totalReturns'].append(v['pnl'].totalReturns)
fv0['variance'].append(v['pnl'].variance)
fv0['buyTimes'].append(v['pnl'].buyTimes)
fv0['sellTimes'].append(v['pnl'].sellTimes)
fv0['stoplossTimes'].append(v['pnl'].stoplossTimes)
fv0['maxUpDay'].append(datetime.fromtimestamp(
v['pnl'].maxUpDayTs).strftime('%Y-%m-%d'))
fv0['maxPerUpDay'].append(v['pnl'].maxPerUpDay)
fv0['maxDownDay'].append(datetime.fromtimestamp(
v['pnl'].maxDownDayTs).strftime('%Y-%m-%d'))
fv0['maxPerDownDay'].append(v['pnl'].maxPerDownDay)
fv0['maxUpWeek'].append(datetime.fromtimestamp(
v['pnl'].maxUpWeekTs).strftime('%Y-%m-%d'))
fv0['maxPerUpWeek'].append(v['pnl'].maxPerUpWeek)
fv0['maxDownWeek'].append(datetime.fromtimestamp(
v['pnl'].maxDownWeekTs).strftime('%Y-%m-%d'))
fv0['maxPerDownWeek'].append(v['pnl'].maxPerDownWeek)
fv0['maxUpMonth'].append(datetime.fromtimestamp(
v['pnl'].maxUpMonthTs).strftime('%Y-%m-%d'))
fv0['maxPerUpMonth'].append(v['pnl'].maxPerUpMonth)
fv0['maxDownMonth'].append(datetime.fromtimestamp(
v['pnl'].maxDownMonthTs).strftime('%Y-%m-%d'))
fv0['maxPerDownMonth'].append(v['pnl'].maxPerDownMonth)
fv0['maxUpYear'].append(datetime.fromtimestamp(
v['pnl'].maxUpYearTs).strftime('%Y-%m-%d'))
fv0['maxPerUpYear'].append(v['pnl'].maxPerUpYear)
fv0['maxDownYear'].append(datetime.fromtimestamp(
v['pnl'].maxDownYearTs).strftime('%Y-%m-%d'))
fv0['maxPerDownYear'].append(v['pnl'].maxPerDownYear)
fv0['values'].append(len(v['pnl'].values))
if v['pnl'].sellTimes + v['pnl'].stoplossTimes == 0:
fv0['perWinRate'].append(0)
else:
fv0['perWinRate'].append(
v['pnl'].winTimes * 1.0 / (v['pnl'].sellTimes + v['pnl'].stoplossTimes))
return | pd.DataFrame(fv0) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 10:06:21 2018
@author: rucsa
"""
import pandas as pd
import datetime
import numpy as np
import tables
import check_data_and_prices_helpers as help
def add_returns():
#fundamentals_2016 = pd.read_hdf("../sources/fundamentals_2016_msci_regions.hdf5", "dataset1/x")
fundamentals_2017 = pd.read_hdf("../sources/fundamentals_2017_msci_regions.hdf5", "dataset1/x")
print ("Uploaded from: ../sources/fundamentals_2017_msci_regions.hdf5 ")
common_tickers = fundamentals_2017.T.columns.tolist()
st_2017 = datetime.datetime(2017, 1, 3, 0, 0)
en_2017 = datetime.datetime(2017, 12, 29, 0, 0)
prices_2017 = pd.read_hdf("../sources/Prices_nasdaq_allmydata_total_data.hdf5")[st_2017:en_2017]#.dropna(axis = 1, how = 'any')
print ("Uploaded from: ../sources/Prices_nasdaq_allmydata_total_data.hdf5")
prices_2017 = prices_2017[common_tickers]
# parse IRX prices
prices_irx = pd.read_csv("../sources/IRX_prices_2017.csv")[['Date', 'Adj Close']].rename(columns = {'Adj Close':'IRX'})
rf_date = prices_irx.Date
prices_irx = prices_irx.drop('Date', axis = 1)
prices_irx['Date'] = help.parse_date(rf_date)
prices_irx = prices_irx.set_index('Date')
prices_irx = prices_irx[st_2017:en_2017]
# parse S&P prices
prices_snp = | pd.read_csv("../sources/GSPC_prices_2017.csv") | pandas.read_csv |
import numpy as np
from scipy import signal
import math
import matplotlib.pyplot as plt
import pandas as pd
from peakutils.peak import indexes
class DataExtractor:
"""Provides functions to extract relevant features (i.e. breathing or heartbeat features)
    from the wirelessly acquired data (from GNU Radio)."""
def __init__(self, t, y, samp_rate):
"""
:param t: (Array) Timestamps of phase values
:param y: (Array) Phase values
:param samp_rate: sampling rate
"""
self.t = t
self.y = y
self.samp_rate = int(samp_rate)
def raw_windowing_breathing(self, window_size, step, highpass_beat_f=5):
"""Return breathing features across the signal, according to the given window size, step and cutoff freq
:param window_size: (int) Sliding time window length (in seconds)
:param step: (int) Sliding time window step (in seconds)
:param highpass_beat_f: Cutoff frequency to remove slow drift noise.
:return: Dataframe of features for each time window.
"""
pd_df = pd.DataFrame()
times = []
for i in range(min(window_size*self.samp_rate, len(self.t)-1), len(self.t), step*self.samp_rate):
end_ind = min(len(self.t)-1, window_size*self.samp_rate)
win_t = self.t[i-end_ind:i]
win_y = self.y[i-end_ind:i]
curr_df = self.extract_from_breathing_time(win_t, win_y)
times = np.append(times, win_t[-1])
pd_df = pd_df.append(curr_df, ignore_index=True)
pd_df['times'] = times
return pd_df
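    # Illustrative usage sketch (not part of the original class; the signal below is
    # made up and extract_from_breathing_time is assumed to be defined further down):
    #   t = np.arange(0, 120, 1.0 / 100)                 # 2 minutes sampled at 100 Hz
    #   y = np.sin(2 * np.pi * 0.25 * t)                 # ~15 "breaths" per minute
    #   feats = DataExtractor(t, y, samp_rate=100).raw_windowing_breathing(30, 5)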
def raw_windowing_heartrate(self, window_size, step, bandpass_beat_fs=[60, 150]):
"""Return heartbeat features across the signal, according to the given window size, step and cutoff freq
:param window_size: (int) Sliding time window length (in seconds)
:param step: (int) Sliding time window step (in seconds)
:param bandpass_beat_fs: Array with two values. Bandpass frequency values (low and high).
:return: Dataframe of features for each time window.
"""
pd_df = | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
import jaydebeapi
import sys
import requests
import jks
import base64, textwrap
import time
import warnings
import json
from jpype import JException
from xml.etree import ElementTree
from jolokia import JolokiaClient
class H2Tools(object):
def get_latest_version(self):
"""Returns the latest version string for the h2 database"""
r = requests.get('http://h2database.com/m2-repo/com/h2database/h2/maven-metadata.xml')
tree = ElementTree.fromstring(r.content)
return tree.find('versioning').find('release').text
def get_h2jar_url(self,version):
url = "https://mvnrepository.com/artifact/com.h2database/h2/"
url += version + "/h2-" + version + ".jar"
return url
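    # Illustrative only (hypothetical version string): the URL assembled above looks like
    #   H2Tools().get_h2jar_url("1.4.200")
    #   -> 'https://mvnrepository.com/artifact/com.h2database/h2/1.4.200/h2-1.4.200.jar'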
def download_h2jar(self,filepath='./h2.jar', version=None):
"""Downloads h2 jar and copies it to a file in filepath"""
if version is None:
version = self.get_latest_version()
r = requests.get(self.get_h2jar_url(version))
with open(filepath, 'wb') as jarfile:
jarfile.write(r.content)
class Node(object):
"""Node object for connecting to H2 database and getting table dataframes
Use get_tbname methods to get dataframe for table TBNAME. For example,
calling node.get_vault_states() will return the dataframe for VAULT_STATES.
After using the node, call the close() method to close the connection.
"""
# --- Notes for future support of PostgreSQL clients ---
# To support pg clients, url must be parsed into psycopg2.connect arguments
# and the password must not be empty
# After parsing, use "self._conn = psycopg2.connect(...)"
# --- Notes regarding get methods ---
# If table names will change often, it may be worth to
# dynamically generate methods with some careful metaprogramming
path_to_jar = None
def __init__(self, url, username, password, path_to_jar='./h2.jar',node_root=None,web_server_url=None,name=''):
"""
Parameters
----------
url : str
JDBC url to be connected
username : str
username of database user
        password : str
password of <PASSWORD> user
path_to_jar : str
path to h2 jar file
"""
if self.path_to_jar:
warnings.warn("Node constructor has already been called, causing the JVM to start.")
warnings.warn("The JVM cannot be restarted, so the original path to jar " + self.path_to_jar + " will be used.")
else:
self.path_to_jar = path_to_jar
self.set_name(name)
try:
self._conn = jaydebeapi.connect(
"org.h2.Driver",
url,
[username, password],
self.path_to_jar,
)
except TypeError as e:
raise OSError('path to jar is invalid')
except JException as e:
raise OSError('cannot connect to ' + url)
self._curs = self._conn.cursor()
if node_root != None:
self.set_node_root(node_root)
if web_server_url != None:
self.set_web_server_url(web_server_url)
self.rpc_server_nid = 'org.apache.activemq.artemis:broker="RPC",component=addresses,address="rpc.server",subcomponent=queues,routing-type="multicast",queue="rpc.server"'
def set_name(self,name):
self._name = name
def send_api_get_request(self, api_path):
if self._web_server_url != None:
request_url = self._web_server_url + api_path
resp = requests.get(request_url)
return resp.text
else:
return "No web_server set i.e. http://localhost:10007. Call set_web_server_url()"
def send_api_post_request(self, api_path, data):
if self._web_server_url != None:
request_url = self._web_server_url + api_path
try:
resp = requests.post(request_url, json=data, timeout=3)
return resp.json()
except requests.exceptions.ConnectionError as e:
raise OSError('cannot connect to ' + request_url)
except json.decoder.JSONDecodeError as e:
raise OSError('request fails with response status ' + str(resp.status_code))
else:
return "No web_server set i.e. http://localhost:10007. Call set_web_server_url()"
def set_web_server_url(self,web_server_url):
self._web_server_url = web_server_url
def set_node_root(self,node_root):
self._node_root = node_root
self._node_cert = node_root + '/certificates'
self._node_cert_jks = self._node_cert + '/nodekeystore.jks'
def display_keys_from_jks(self,password='<PASSWORD>'):
ks = jks.KeyStore.load(self._node_cert_jks,password)
columns = ['ALIAS','PRIVATE_KEY']
keys = | pd.DataFrame([],columns=columns) | pandas.DataFrame |
import sys
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from popsyntools import plotstyle
try:
from astropy.timeseries import LombScargle
except ModuleNotFoundError:
# the timeserie module moved at astropy 3.1 -> 3.2
from astropy.stats import LombScargle
# plt.ion()
activityFile = 'data/TIC237913194_activity.dat'
plot_dir = 'plots/'
transit_per = 15.168914
def plot_periodograms(activityFile, plot_dir, results):
"""
Produce periodograms of RV and activity indices
@author: <NAME>
adapted by <NAME>
Parameters
------------
activityFile : string
file containing the reduced time series
plot_dir : string
directory for the created plots
results : results object
a results object returned by juliet.fit()
Returns
--------
fig : matplotlib figure
figure containing the plot
ax : matplotlib axis
axis object with the plot
"""
font = {'size' : 15}
mpl.rc('font', **font)
bbox_props = {'boxstyle':"round",
'fc':"w",
'edgecolor':"w",
# 'ec':0.5,
'alpha':0.9
}
# =============================================================================
# Read data in
# FEROS data
feros_dat = np.genfromtxt(activityFile, names=True)
feros_dat = | pd.DataFrame(feros_dat) | pandas.DataFrame |
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso
from utils import *
pio.templates.default = "simple_white"
pio.renderers.default = "browser"
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
"""
Simulate data from a polynomial model and use cross-validation to select the best fitting degree
Parameters
----------
n_samples: int, default=100
Number of samples to generate
noise: float, default = 5
Noise level to simulate in responses
"""
# Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
# and split into training- and testing portions
X = np.random.uniform(low=-1.2,high=2,size=n_samples)
eps = np.random.normal(0, noise, n_samples)
f= lambda x: (x+3)*(x+2)*(x+1)*(x-1)*(x-2)
y_noiseless = f(X)
y = y_noiseless + eps
x_train, y_train, x_test, y_test = split_train_test(pd.DataFrame(X), | pd.Series(y) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Consensus non-negative matrix factorization (cNMF) adapted from (Kotliar, et al. 2019)
"""
import numpy as np
import pandas as pd
import os, errno
import glob
import shutil
import datetime
import uuid
import itertools
import yaml
import subprocess
import scipy.sparse as sp
import warnings
from scipy.spatial.distance import squareform
from sklearn.decomposition import non_negative_factorization
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.utils import sparsefuncs
from sklearn.preprocessing import normalize
from fastcluster import linkage
from scipy.cluster.hierarchy import leaves_list
import matplotlib.pyplot as plt
import scanpy as sc
from ._version import get_versions
def save_df_to_npz(obj, filename):
"""
Saves numpy array to `.npz` file
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
np.savez_compressed(
filename,
data=obj.values,
index=obj.index.values,
columns=obj.columns.values,
)
def save_df_to_text(obj, filename):
"""
Saves numpy array to tab-delimited text file
"""
obj.to_csv(filename, sep="\t")
def load_df_from_npz(filename):
"""
Loads numpy array from `.npz` file
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
with np.load(filename, allow_pickle=True) as f:
obj = pd.DataFrame(**f)
return obj
def check_dir_exists(path):
"""
Checks if directory already exists or not and creates it if it doesn't
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def worker_filter(iterable, worker_index, total_workers):
return (
p for i, p in enumerate(iterable) if (i - worker_index) % total_workers == 0
)
def fast_euclidean(mat):
D = mat.dot(mat.T)
squared_norms = np.diag(D).copy()
D *= -2.0
D += squared_norms.reshape((-1, 1))
D += squared_norms.reshape((1, -1))
    D[D < 0] = 0  # clamp tiny negatives from floating-point error before taking the square root
    D = np.sqrt(D)
return squareform(D, checks=False)
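# Sketch (illustrative, not part of the original module): fast_euclidean expands
# ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b and should agree with scipy's pdist.
def _example_fast_euclidean():
    from scipy.spatial.distance import pdist
    mat = np.random.rand(6, 3)
    return np.allclose(fast_euclidean(mat), pdist(mat, metric="euclidean"))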
def fast_ols_all_cols(X, Y):
pinv = np.linalg.pinv(X)
beta = np.dot(pinv, Y)
return beta
def fast_ols_all_cols_df(X, Y):
beta = fast_ols_all_cols(X, Y)
beta = pd.DataFrame(beta, index=X.columns, columns=Y.columns)
return beta
def var_sparse_matrix(X):
mean = np.array(X.mean(axis=0)).reshape(-1)
Xcopy = X.copy()
Xcopy.data **= 2
var = np.array(Xcopy.mean(axis=0)).reshape(-1) - (mean ** 2)
return var
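# Sketch (illustrative): the helper above uses Var[X] = E[X^2] - (E[X])^2 column-wise
# and should match the dense ddof=0 variance on a small random matrix.
def _example_var_sparse_matrix():
    X = sp.csr_matrix(np.random.rand(8, 4))
    return np.allclose(var_sparse_matrix(X), X.toarray().var(axis=0, ddof=0))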
def get_highvar_genes_sparse(
expression, expected_fano_threshold=None, minimal_mean=0.01, numgenes=None
):
# Find high variance genes within those cells
gene_mean = np.array(expression.mean(axis=0)).astype(float).reshape(-1)
E2 = expression.copy()
E2.data **= 2
gene2_mean = np.array(E2.mean(axis=0)).reshape(-1)
gene_var = pd.Series(gene2_mean - (gene_mean ** 2))
del E2
gene_mean = pd.Series(gene_mean)
gene_fano = gene_var / gene_mean
# Find parameters for expected fano line
top_genes = gene_mean.sort_values(ascending=False)[:20].index
A = (np.sqrt(gene_var) / gene_mean)[top_genes].min()
w_mean_low, w_mean_high = gene_mean.quantile([0.10, 0.90])
w_fano_low, w_fano_high = gene_fano.quantile([0.10, 0.90])
winsor_box = (
(gene_fano > w_fano_low)
& (gene_fano < w_fano_high)
& (gene_mean > w_mean_low)
& (gene_mean < w_mean_high)
)
fano_median = gene_fano[winsor_box].median()
B = np.sqrt(fano_median)
gene_expected_fano = (A ** 2) * gene_mean + (B ** 2)
fano_ratio = gene_fano / gene_expected_fano
# Identify high var genes
if numgenes is not None:
highvargenes = fano_ratio.sort_values(ascending=False).index[:numgenes]
high_var_genes_ind = fano_ratio.index.isin(highvargenes)
T = None
else:
if not expected_fano_threshold:
            T = 1.0 + gene_fano[winsor_box].std()
else:
T = expected_fano_threshold
        high_var_genes_ind = (fano_ratio > T) & (gene_mean > minimal_mean)
gene_counts_stats = pd.DataFrame(
{
"mean": gene_mean,
"var": gene_var,
"fano": gene_fano,
"expected_fano": gene_expected_fano,
"high_var": high_var_genes_ind,
"fano_ratio": fano_ratio,
}
)
gene_fano_parameters = {
"A": A,
"B": B,
"T": T,
"minimal_mean": minimal_mean,
}
return (gene_counts_stats, gene_fano_parameters)
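# Illustrative usage sketch (hypothetical variable names): given a cells x genes sparse
# expression matrix, keep the top 2000 over-dispersed genes flagged by the function above.
#   stats, fano_params = get_highvar_genes_sparse(norm_counts, numgenes=2000)
#   highvar_genes = stats.index[stats['high_var']]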
def get_highvar_genes(
input_counts, expected_fano_threshold=None, minimal_mean=0.01, numgenes=None
):
# Find high variance genes within those cells
gene_counts_mean = pd.Series(input_counts.mean(axis=0).astype(float))
gene_counts_var = pd.Series(input_counts.var(ddof=0, axis=0).astype(float))
gene_counts_fano = | pd.Series(gene_counts_var / gene_counts_mean) | pandas.Series |
# INSERT LICENSE
from collections import Counter
from typing import List, Optional
import navis
import navis.interfaces.neuprint as nvneu
import pandas as pd
from neuroboom import morphoelectro as nbm
# Create an adjacency matrix from synapse connections (neuprint)
def adjx_from_syn_conn(
x: List[int],
presyn_postsyn: str = "pre",
roi: Optional = None,
ct: tuple = (0.0, 0.0),
rename_index: bool = False,
):
"""
Creates an adjacency matrix from synapse connections
Parameters
----------
presyn_postsyn:
x : list
        a list of bodyIds whose synaptic (pre/post) connections you want to find
presyn_postsyn : str
a string of either 'pre' or 'post'
If 'pre', the function will search for the
presynaptic connections / downstream neurons of x
If 'post', the function will search for the
postsynaptic connections / upstream neurons of x
roi : navis.Volume
A region of interest within which you are filtering the connections for
ct : tuple
Confidence threshold tuple containing the confidence value to filter above
The first value is presynaptic confidence and the second value postsynaptic confidence
e.g. (0.9, 0.8) will filter for connections where the presynaptic confidence > 0.9
and the postsynaptic confidence > 0.8
rename_index : bool
Whether to rename the index using the type of the connected neuron
Returns
-------
    df : a DataFrame where x are the columns, the partner bodyIds are the rows
    and the values are the number of connections
partner_type_dict : a dictionary where the keys are bodyIds of the
upstream/downstream neurons and the values are their types
Examples
--------
"""
if presyn_postsyn == "pre":
con = nvneu.fetch_synapse_connections(source_criteria=x)
if roi:
tt = navis.in_volume(con[["x_pre", "y_pre", "z_pre"]].values, roi)
con = con[tt].copy()
if ct[0] or ct[1] > 0.0:
con = con[(con.confidence_pre > ct[0]) & (con.confidence_post > ct[1])].copy()
neurons = con.bodyId_post.unique()
n, _ = nvneu.fetch_neurons(neurons)
partner_type_dict = dict(zip(n.bodyId.tolist(), n.type.tolist()))
count = Counter(con.bodyId_post)
count = count.most_common()
count_dict = dict(count)
df = pd.DataFrame(columns=[x], index=[i for i, j in count], data=[count_dict[i] for i, j in count])
elif presyn_postsyn == "post":
con = nvneu.fetch_synapse_connections(target_criteria=x)
if roi:
tt = navis.in_volume(con[["x_post", "y_post", "z_post"]].values, roi)
con = con[tt].copy()
if ct[0] or ct[1] > 0.0:
con = con[(con.confidence_pre > ct[0]) & (con.confidence_post > ct[1])].copy()
neurons = con.bodyId_pre.unique()
n, _ = nvneu.fetch_neurons(neurons)
partner_type_dict = dict(zip(n.bodyId.tolist(), n.type.tolist()))
count = Counter(con.bodyId_pre)
count = count.most_common()
count_dict = dict(count)
df = | pd.DataFrame(index=[i for i, j in count], columns=[x], data=[count_dict[i] for i, j in count]) | pandas.DataFrame |
from rdkit import Chem
from .utils import *
from rdkit.Chem.MolStandardize import rdMolStandardize
import os
import pandas as pd
#==========================================================
# process SMILES of chemotypes
def normChemotypes(compounds,
getChemotypes=False,
getChemotypesIdx=False,
normalizeChemotypes=False,
printlogs=True):
#------------------------
if getChemotypesIdx:
if getChemotypes == False:
print("!!!ERROR: 'getChemotypesIdx=True' argument goes with 'getChemotypes=True'!!!")
return None
#------------------------
if isinstance(compounds, pd.core.series.Series):
compounds = compounds.tolist()
if isinstance(compounds, pd.core.frame.DataFrame):
compounds = compounds.iloc[:,0].tolist()
if isinstance(compounds, str):
compounds = [compounds]
if isinstance(compounds, list):
compounds = compounds
#------------------------
compounds_ = molStructVerify(compounds, printlogs=False)
Unverified_count = len(molStructVerify(compounds, getFailedStruct=True, printlogs=False))
NonChemotypesList, ChemotypesList, ChemotypesIdxList = [], [], []
Chemotype_count = 0
idx = 0
for compound in compounds_:
Premol = Chem.MolFromSmiles(compound)
canonicalized_mol = rdMolStandardize.Normalize(Premol)
canonicalized_SMILES = Chem.MolToSmiles(canonicalized_mol)
if canonicalized_SMILES == compound:
NonChemotypesList.append(canonicalized_SMILES)
else:
Chemotype_count +=1
if normalizeChemotypes:
NonChemotypesList.append(canonicalized_SMILES)
ChemotypesList.append(compound)
ChemotypesIdxList.append(idx)
else:
ChemotypesList.append(compound)
ChemotypesIdxList.append(idx)
idx += 1
if printlogs:
print("=======================================================")
if Unverified_count > 0:
print("Succeeded to verify {}/{} structures".format(len(compounds_), len(compounds)))
print("Failed to verify {}/{} structures".format(Unverified_count, len(compounds)))
print("Use function 'utils.molValidate' and set 'getFailedStruct=True' to get the list of unverified structures")
if Chemotype_count > 0:
if normalizeChemotypes:
print("{}/{} structures are NOT chemotypes".format(len(NonChemotypesList)-Chemotype_count, len(compounds)))
print("{}/{} structure(s) is/are chemotype(s) BUT was/were normalized".format(Chemotype_count, len(compounds)))
print("=======================================================")
print("!!!!!Notice: Chemotype normalization has been applied!!!!!")
else:
print("{}/{} structures are NOT chemotypes".format(len(NonChemotypesList), len(compounds)))
print("{}/{} structure(s) is/are chemotype(s) BUT was/were NOT normalized".format(Chemotype_count, len(compounds)))
else:
print("{}/{} structures are NOT chemotypes".format(len(NonChemotypesList), len(compounds)))
else:
print("Succeeded to verify {}/{} structures".format(len(compounds_), len(compounds)))
if Chemotype_count > 0:
if normalizeChemotypes:
print("{}/{} structures are NOT chemotypes".format(len(NonChemotypesList)-Chemotype_count, len(compounds)))
print("{}/{} structure(s) is/are chemotype(s) BUT was/were normalized".format(Chemotype_count, len(compounds)))
print("=======================================================")
print("!!!!!Notice: Chemotype normalization has been applied!!!!!")
else:
print("{}/{} structures are NOT chemotypes".format(len(NonChemotypesList), len(compounds)))
print("{}/{} structure(s) is/are chemotype(s) BUT was/were NOT normalized".format(Chemotype_count, len(compounds)))
else:
print("{}/{} structures are NOT chemotypes".format(len(NonChemotypesList), len(compounds)))
print("=======================================================")
if getChemotypes:
if getChemotypesIdx:
return ChemotypesList, ChemotypesIdxList
else:
return ChemotypesList
else:
return NonChemotypesList
#==========================================================
# process SMILES of tautomers
def normTautomers(compounds,
getTautomers=False,
getTautomersIdx=False,
deTautomerize=False,
printlogs=True):
#------------------------
if getTautomersIdx:
if getTautomers == False:
print("!!!ERROR: 'getTautomersIdx=True' argument goes with 'getTautomers=True'!!!")
return None
#------------------------
if isinstance(compounds, pd.core.series.Series):
compounds = compounds.tolist()
if isinstance(compounds, pd.core.frame.DataFrame):
compounds = compounds.iloc[:,0].tolist()
if isinstance(compounds, str):
compounds = [compounds]
if isinstance(compounds, list):
compounds = compounds
#------------------------
compounds_ = molStructVerify(compounds, printlogs=False)
Unverified_count = len(molStructVerify(compounds, getFailedStruct=True, printlogs=False))
NonTautomersList, TautomersList, TautomersIdxList = [], [], []
enumerator = rdMolStandardize.TautomerEnumerator()
Tautomer_count = 0
idx = 0
for compound in compounds_:
Premol = Chem.MolFromSmiles(compound)
canonicalized_mol = enumerator.Canonicalize(Premol)
canonicalized_SMILES = Chem.MolToSmiles(canonicalized_mol)
if canonicalized_SMILES == compound:
NonTautomersList.append(canonicalized_SMILES)
else:
Tautomer_count +=1
if deTautomerize:
NonTautomersList.append(canonicalized_SMILES)
TautomersList.append(compound)
TautomersIdxList.append(idx)
else:
TautomersList.append(compound)
TautomersIdxList.append(idx)
idx += 1
if printlogs:
print("=======================================================")
if Unverified_count > 0:
print("Succeeded to verify {}/{} structures".format(len(compounds_), len(compounds)))
print("Failed to verify {}/{} structures".format(Unverified_count, len(compounds)))
print("Use function 'utils.molValidate' and set 'getFailedStruct=True' to get the list of unverified structures")
if Tautomer_count > 0:
if deTautomerize:
print("{}/{} structures are NOT tautomers".format(len(NonTautomersList)-Tautomer_count, len(compounds)))
print("{}/{} structure(s) is/are tautomer(s) BUT was/were detautomerized".format(Tautomer_count, len(compounds)))
print("=======================================================")
print("!!!!!Notice: Detautomerizing has been applied!!!!!")
else:
print("{}/{} structures are NOT tautomers".format(len(NonTautomersList), len(compounds)))
print("{}/{} structure(s) is/are tautomer(s) BUT was/were NOT detautomerized".format(Tautomer_count, len(compounds)))
else:
print("{}/{} structures are NOT tautomers".format(len(NonTautomersList), len(compounds)))
else:
print("Succeeded to verify {}/{} structures".format(len(compounds_), len(compounds)))
if Tautomer_count > 0:
if deTautomerize:
print("{}/{} structures are NOT tautomers".format(len(NonTautomersList)-Tautomer_count, len(compounds)))
print("{}/{} structure(s) í/are tautomer(s) BUT was/were detautomerized".format(Tautomer_count, len(compounds)))
print("=======================================================")
print("!!!!!Notice: Detautomerizing has been applied!!!!!")
else:
print("{}/{} structures are NOT tautomers".format(len(NonTautomersList), len(compounds)))
print("{}/{} structure(s) is/are tautomer(s) BUT was/were NOT detautomerized".format(Tautomer_count, len(compounds)))
else:
print("{}/{} structures are NOT tautomers".format(len(NonTautomersList), len(compounds)))
print("=======================================================")
if getTautomers:
if getTautomersIdx:
return TautomersList, TautomersIdxList
else:
return TautomersList
else:
return NonTautomersList
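# Illustrative usage sketch (not part of the original API): how normTautomers
# might be called on an assumed keto/enol pair; the SMILES below were chosen
# only for demonstration.
def _example_normTautomers():
    smiles = ["CC(=O)C", "CC(O)=C"]  # acetone and its enol form (assumed inputs)
    # canonicalize the tautomers and return the detautomerized SMILES list
    return normTautomers(smiles, deTautomerize=True, printlogs=False)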
#==========================================================
# process SMILES of stereoisomers
def normStereoisomers(compounds,
getStereoisomers=False,
getStereoisomersIdx=False,
deSterioisomerize=False,
printlogs=True):
#------------------------
if getStereoisomersIdx:
if getStereoisomers == False:
print("!!!ERROR: 'getStereoisomersIdx=True' argument goes with 'getStereoisomers=True'!!!")
return None
#------------------------
if isinstance(compounds, pd.core.series.Series):
compounds = compounds.tolist()
if isinstance(compounds, pd.core.frame.DataFrame):
compounds = compounds.iloc[:,0].tolist()
if isinstance(compounds, str):
compounds = [compounds]
if isinstance(compounds, list):
compounds = compounds
#------------------------
compounds_ = molStructVerify(compounds, printlogs=False)
Unverified_count = len(molStructVerify(compounds, getFailedStruct=True, printlogs=False))
NonStereoisomersList, StereoisomersList, StereoisomersIdxList = [], [], []
Stereoisomer_count = 0
idx = 0
for compound in compounds_:
if compound.find("@") == -1 and compound.find("/") == -1 and compound.find("\\") == -1:
NonStereoisomersList.append(compound)
else:
Stereoisomer_count +=1
if deSterioisomerize:
NonStereoisomersList.append(compound.replace("@", "").replace("/","").replace("\\",""))
StereoisomersList.append(compound)
StereoisomersIdxList.append(idx)
else:
StereoisomersList.append(compound)
StereoisomersIdxList.append(idx)
idx += 1
if printlogs:
print("=======================================================")
if Unverified_count > 0:
print("Succeeded to verify {}/{} structures".format(len(compounds_), len(compounds)))
print("Failed to verify {}/{} structures".format(Unverified_count, len(compounds)))
print("Use function 'utils.molValidate' and set 'getFailedStruct=True' to get the list of unverified structures")
if Stereoisomer_count > 0:
if deSterioisomerize:
print("{}/{} structures are NOT stereoisomers".format(len(NonStereoisomersList)-Stereoisomer_count, len(compounds)))
print("{}/{} structure(s) is/are stereoisomer(s) BUT was/were destereoisomerized".format(Stereoisomer_count, len(compounds)))
print("=======================================================")
print("!!!!!Notice: Destereoisomerization has been applied!!!!!")
else:
print("{}/{} structures are NOT stereoisomers".format(len(NonStereoisomersList), len(compounds)))
print("{}/{} structure(s) is/are stereoisomer(s) BUT was/were NOT destereoisomerized".format(Stereoisomer_count, len(compounds)))
else:
print("{}/{} structures are NOT stereoisomers".format(len(NonStereoisomersList), len(compounds)))
else:
print("Succeeded to verify {}/{} structures".format(len(compounds_), len(compounds)))
if Stereoisomer_count > 0:
if deSterioisomerize:
print("{}/{} structures are NOT stereoisomers".format(len(NonStereoisomersList)-Stereoisomer_count, len(compounds)))
print("{}/{} structure(s) is/are stereoisomer(s) BUT was/were destereoisomerized".format(Stereoisomer_count, len(compounds)))
print("=======================================================")
print("!!!!!Notice: Destereoisomerization has been applied!!!!!")
else:
print("{}/{} structures are NOT stereoisomers".format(len(NonStereoisomersList), len(compounds)))
print("{}/{} structure(s) is/are stereoisomer(s) BUT was/were NOT destereoisomerized".format(Stereoisomer_count, len(compounds)))
else:
print("{}/{} structures are NOT stereoisomers".format(len(NonStereoisomersList), len(compounds)))
print("=======================================================")
if getStereoisomers:
if getStereoisomersIdx:
return StereoisomersList, StereoisomersIdxList
else:
return StereoisomersList
else:
return NonStereoisomersList
#==========================================================
# Complete normalization
def normalizeComplete(compounds,
getUnnormalizedStruct=False,
normalizeChemotypes=True,
deTautomerize=True,
deSterioisomerize=True,
removeDuplicates=False,
getDuplicatedIdx=False,
exportCSV=False,
outputPath=None,
printlogs=True):
#------------------------
if getUnnormalizedStruct:
if removeDuplicates:
print("!!!ERROR: 'removeDuplicates=True' argument goes with 'getUnnormalizedStruct=False' only !!!")
return None
        if getDuplicatedIdx:
            print("!!!ERROR: 'getDuplicatedIdx=True' argument goes with 'getUnnormalizedStruct=False' only !!!")
            return None
if getDuplicatedIdx:
if removeDuplicates == False:
print("!!!ERROR: 'getDuplicatedIdx=True' argument goes with 'removeDuplicates=True'!!!")
return None
if exportCSV:
if outputPath == None:
print("!!!ERROR 'exportCSV=True' needs 'outputPath=<Directory>' to be filled !!!")
return None
if outputPath:
if exportCSV == False:
print("!!!ERROR 'outputPath=<Directory>' needs to set 'exportCSV=True' !!!")
return None
#------------------------
if isinstance(compounds, pd.core.series.Series):
compounds = compounds.tolist()
if isinstance(compounds, pd.core.frame.DataFrame):
compounds = compounds.iloc[:,0].tolist()
if isinstance(compounds, str):
compounds = [compounds]
if isinstance(compounds, list):
compounds = compounds
#------------------------
compounds_r1 = molStructVerify(compounds, printlogs=False)
UnverifiedList, UnverifiedIdxList = molStructVerify(compounds, getFailedStruct=True, getFailedStructIdx=True, printlogs=False)
Unverified_count = len(UnverifiedList)
#------------------------
if deSterioisomerize:
compounds_r2 = normStereoisomers(compounds_r1, deSterioisomerize=True, printlogs=False)
else:
compounds_r2 = normStereoisomers(compounds_r1, deSterioisomerize=False, printlogs=False)
StereoisomersList, StereoisomersIdxList = normStereoisomers(compounds_r1, getStereoisomers=True, getStereoisomersIdx=True, printlogs=False)
Stereoisomers_count = len(StereoisomersList)
#------------------------
if normalizeChemotypes:
compounds_r3 = normChemotypes(compounds_r2, normalizeChemotypes=True, printlogs=False)
else:
compounds_r3 = normChemotypes(compounds_r2, normalizeChemotypes=False, printlogs=False)
ChemotypesList, ChemotypesIdxList = normChemotypes(compounds_r1, getChemotypes=True, getChemotypesIdx=True, printlogs=False)
Chemotypes_count = len(ChemotypesList)
#------------------------
if deTautomerize:
compounds_r4 = normTautomers(compounds_r3, deTautomerize=True, printlogs=False)
else:
compounds_r4 = normTautomers(compounds_r3, deTautomerize=False, printlogs=False)
TautomersList, TautomersIdxList = normTautomers(compounds_r1, getTautomers=True, getTautomersIdx=True, printlogs=False)
Tautomers_count = len(TautomersList)
#------------------------
if printlogs:
if Unverified_count > 0:
print("=======================================================")
print("Succeeded to verify {}/{} structures".format(len(compounds_r1), len(compounds)))
print("Failed to verify {} structures \n".format(Unverified_count))
else:
print("=======================================================")
print("Succeeded to validate {}/{} structures \n".format(len(compounds_r1), len(compounds)))
if Stereoisomers_count > 0:
if deSterioisomerize:
print("=======================================================")
print("{}/{} structures are NOT stereoisomers".format(len(compounds_r2)-Stereoisomers_count, len(compounds)))
print("{}/{} structure(s) is/are stereoisomer(s) BUT was/were destereoisomerized \n".format(Stereoisomers_count, len(compounds)))
else:
print("=======================================================")
print("{}/{} structures are NOT stereoisomers".format(len(compounds_r2), len(compounds)))
print("{}/{} structure(s) is/are stereoisomer(s) BUT was/were NOT destereoisomerized \n".format(Stereoisomers_count, len(compounds)))
else:
print("=======================================================")
print("{}/{} structures are NOT stereoisomers".format(len(compounds_r2), len(compounds)))
if Chemotypes_count > 0:
if normalizeChemotypes:
print("=======================================================")
compounds_r3_ = normChemotypes(compounds_r2, normalizeChemotypes=True, printlogs=False)
print("{}/{} structures are NOT chemotypes".format(len(compounds_r3_)-Chemotypes_count, len(compounds)))
print("{}/{} structure(s) is/are tautomer(s) BUT was/were normalized \n".format(Chemotypes_count, len(compounds)))
else:
print("=======================================================")
compounds_r3_ = normChemotypes(compounds_r2, normalizeChemotypes=False, printlogs=False)
print("{}/{} structures are NOT chemotypes".format(len(compounds_r3_), len(compounds)))
print("{}/{} structure(s) is/are tautomer(s) but was/were NOT normalized \n".format(Chemotypes_count, len(compounds)))
else:
print("=======================================================")
print("{}/{} structures are NOT chemotypes \n".format(len(compounds_r2), len(compounds)))
if Tautomers_count > 0:
if deTautomerize:
print("=======================================================")
compounds_r4_ = normTautomers(compounds_r2, deTautomerize=True, printlogs=False)
print("{}/{} structures are NOT tautomers".format(len(compounds_r4_)-Tautomers_count, len(compounds)))
print("{}/{} structure(s) is/are tautomer(s) BUT was/were detautomerized \n".format(Tautomers_count, len(compounds)))
else:
print("=======================================================")
compounds_r4_ = normTautomers(compounds_r2, deTautomerize=False, printlogs=False)
print("{}/{} structures are NOT tautomers".format(len(compounds_r4_), len(compounds)))
print("{}/{} structure(s) is/are tautomer(s) but was/were NOT detautomerized \n".format(Tautomers_count, len(compounds)))
else:
print("=======================================================")
print("{}/{} structures are NOT tautomers \n".format(len(compounds_r2), len(compounds)))
#------------------------
NormalizedList = compounds_r4
    UnNormalizedList = UnverifiedList + ChemotypesList + TautomersList + StereoisomersList
UnNormalizedLabel = len(UnverifiedList)*["UnverifiedStruct"] + len(ChemotypesList)*["Chemotype"] + len(TautomersList)*["Tautomer"] + len(StereoisomersList)*["Stereoisomer"]
FunctionLabel = len(UnverifiedList)*["molStructVerify()"] + len(ChemotypesList)*["normChemotypes()"] + len(TautomersList)*["normTautomers()"] + len(StereoisomersList)*["normStereoisomers()"]
IdxLabel = UnverifiedIdxList + ChemotypesIdxList + TautomersIdxList + StereoisomersIdxList
df1 = pd.DataFrame(zip(UnNormalizedList, UnNormalizedLabel, FunctionLabel, IdxLabel), columns=['SMILES', 'errorTag', 'fromFunction', 'idx'])
#------------------------
if printlogs:
print("=======================================================")
print("SUMMARY:")
if len(UnverifiedList) > 0:
print("{}/{} structures were successfully verfied".format(len(compounds_r1), len(compounds)))
print("{}/{} structure(s) was/were unsuccessfully verfied and need to be rechecked".format(len(UnverifiedList), len(compounds)))
else:
print("{}/{} structure were successfully verfied".format(len(compounds_r1), len(compounds)))
if len(UnNormalizedList) > 0:
print("{}/{} structures were successfully normalized".format(len(NormalizedList), len(compounds)))
if len(compounds_r1) > len(NormalizedList):
print("{}/{} structure(s) was/were unsuccessfully normalized and need to be rechecked".format(len(compounds_r1)-len(NormalizedList), len(compounds)))
print("=======================================================")
else:
print("{}/{} structures were successfully normalized".format(len(NormalizedList), len(compounds)))
print("-------------------------------------------------------")
if len(UnNormalizedList) > 0:
if getUnnormalizedStruct == False:
print("set 'getUnnormalizedStruct=True' to get the list of all unnormalized structures. \n")
#------------------------
if getUnnormalizedStruct:
if exportCSV:
filePath = outputPath + "UnnormalizedList.csv"
if os.path.isdir(outputPath):
df1.to_csv(filePath, index=False)
else:
os.makedirs(outputPath)
df1.to_csv(filePath, index=False)
else:
return df1
else:
if removeDuplicates:
DeduplicatedNormalizedList = molDeduplicate(NormalizedList, printlogs=False)
_, DuplicatedNormalizedIdxList = molDeduplicate(NormalizedList, getDuplicates=True, getDuplicatesIdx=True, printlogs=False)
if len(DeduplicatedNormalizedList) == len(NormalizedList):
if printlogs:
print("No duplicate was found (in {} normalized structures)".format(len(NormalizedList)))
if getDuplicatedIdx:
df0 = pd.DataFrame(DuplicatedNormalizedIdxList)
df0.columns = ['idx', 'matchedIdx']
df0['fromFunction'] = 'normalizeComplete()'
if exportCSV:
filePath = outputPath + "DuplicatedNormalizedIdxList.csv"
if os.path.isdir(outputPath):
df0.to_csv(filePath, index=False)
else:
os.makedirs(outputPath)
df0.to_csv(filePath, index=False)
else:
return df0
else:
df0 = pd.DataFrame(DeduplicatedNormalizedList)
df0.columns = ['SMILES']
if exportCSV:
filePath = outputPath + "NoDuplicatedNormalizedIdxList.csv"
if os.path.isdir(outputPath):
df0.to_csv(filePath, index=False)
else:
os.makedirs(outputPath)
df0.to_csv(filePath, index=False)
else:
return df0
else:
if printlogs:
print("=============================================================================")
print("There are {} unique structures filtered from {} initial normalized structures".format(len(DeduplicatedNormalizedList), len(NormalizedList)))
print("=============================================================================")
print("To get detailed information, please follow steps below:")
print("(1) Rerun normalizeComplete() with setting 'removeDuplicates=False' to get the list of all normalized structures")
print("(2) Run ultils.molDeduplicate() with setting 'getDuplicates=True'to get the list of duplicated structures \n")
print("--OR--")
print("Rerun normalizeComplete() with setting 'getDuplicates=True', 'exportCSV'=True, and 'outputPath=<Directory>' to export a csv file containing the list of duplicated structures \n")
print("--OR--")
print("Run ultils.molDeduplicate() with settings 'getDuplicates=True' and 'getDuplicatesIndex=True' to get the list of duplicated structures with detailed indices")
if getDuplicatedIdx:
df2 = pd.DataFrame(DuplicatedNormalizedIdxList)
df2.columns = ['idx', 'matchedIdx']
df2['fromFunction'] = 'normalizeComplete()'
if exportCSV:
filePath = outputPath + "DuplicatedNormalizedIdxList.csv"
if os.path.isdir(outputPath):
df2.to_csv(filePath, index=False)
else:
os.makedirs(outputPath)
df2.to_csv(filePath, index=False)
else:
return df2
else:
df2 = | pd.DataFrame(DeduplicatedNormalizedList) | pandas.DataFrame |
import itertools
from collections import OrderedDict, Counter
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt # necessary?
# import seaborn as sns
import scipy.spatial.distance as sdist
import scipy.stats as stats
import scipy.sparse as sparse
from sklearn.neighbors import NearestNeighbors, kneighbors_graph
import fastcluster
import networkx
from networkx.drawing.nx_agraph import graphviz_layout
import plotly
import plotly.graph_objs as go
import common
import sparse_dataframe
import matplotlib.figure
import matplotlib.colors
from matplotlib.patches import Circle
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.legend_handler import HandlerPatch
class HandlerCircle(HandlerPatch):
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
center = 0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent
p = Circle(xy=center, radius=width / 4.0, alpha=0.4)
self.update_prop(p, orig_handle, legend)
p.set_transform(trans)
return [p]
def plot_labelprop_mpl(coords, communities, file_name=None, title=''):
u_community = np.unique(communities)
cmap = matplotlib.cm.tab20
cmap.set_over('black')
ix = np.random.permutation(np.arange(coords.shape[0], dtype=int))
x = coords[ix, 0]
y = coords[ix, 1]
fig = matplotlib.figure.Figure(figsize=(12, 12))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.scatter(x, y, s=60, alpha=0.8, linewidth=0,
color=cmap(communities[ix]))
ax.tick_params(left='off', labelleft='off', bottom='off', labelbottom='off')
ax.set_title(title)
lbl_rects = [(Circle((0, 0), 1, color=cmap(c)), c) for c in u_community]
fig.legend(*zip(*lbl_rects), **{'handler_map': {Circle: HandlerCircle()},
'loc': 7, 'fontsize': 'large'})
if file_name:
FigureCanvasAgg(fig).print_figure(file_name)
def plot_labelprop_mpl_tissues(coords, tissue_list, file_name=None, title=''):
u_tissue = set(tissue_list)
tissue_d = {t:i for i,t in enumerate(u_tissue)}
cmap = matplotlib.cm.tab20
cmap.set_over('black')
ix = np.random.permutation(np.arange(coords.shape[0], dtype=int))
x = coords[ix, 0]
y = coords[ix, 1]
fig = matplotlib.figure.Figure(figsize=(12, 12))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.scatter(x, y, s=60, alpha=0.8, linewidth=0,
color=[cmap(tissue_d[tissue_list[i]]) for i in ix])
ax.tick_params(left='off', labelleft='off', bottom='off', labelbottom='off')
ax.set_title(title)
lbl_rects = [(Circle((0, 0), 1, color=cmap(tissue_d[t])), t) for t in u_tissue]
fig.legend(*zip(*lbl_rects), **{'handler_map': {Circle: HandlerCircle()},
'loc': 7, 'fontsize': 'large'})
if file_name:
FigureCanvasAgg(fig).print_figure(file_name)
def label_propagation(G, verbose=False):
node_labels = {node: i for i, node in enumerate(G.nodes())}
n_changes = 1
while n_changes:
n_changes = 0
for node in G.nodes():
neighbor_labels = Counter([node_labels[n] for n in G.neighbors(node)])
pop_label = neighbor_labels.most_common(1)[0][0]
if node_labels[node] != pop_label:
node_labels[node] = pop_label
n_changes += 1
if (verbose):
print("Round with ", n_changes, " to labels.")
labels = list(set(node_labels.values()))
label_renames = {label: i for i, (label, c)
in enumerate(Counter(node_labels.values()).most_common())}
for node in node_labels:
node_labels[node] = label_renames[node_labels[node]]
if (verbose):
print("Most common labels, in the form label, frequency")
print(Counter(node_labels.values()).most_common())
return node_labels
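# Illustrative usage sketch (assumption: any small networkx graph will do).
# It shows the return value of label_propagation: a {node: community_id} dict
# with ids renumbered so that 0 is the most frequent community.
def _example_label_propagation():
    G = networkx.Graph()
    G.add_edges_from([(0, 1), (1, 2), (0, 2),   # triangle A
                      (3, 4), (4, 5), (3, 5),   # triangle B
                      (2, 3)])                  # bridge edge
    return label_propagation(G, verbose=False)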
def network_layout(matrix, k=30):
nbrs = NearestNeighbors(k, algorithm='brute', metric='cosine').fit(matrix)
G = networkx.from_scipy_sparse_matrix(nbrs.kneighbors_graph(matrix))
node_labels = label_propagation(G, verbose=True)
communities_labelprop = np.array([node_labels[i] for i in range(matrix.shape[0])])
pos = graphviz_layout(G, prog="sfdp")
coords = np.array([pos[i] for i in range(len(pos))])
print(coords.shape)
return coords, communities_labelprop
def plot_labelprop(coords, communities, filename=None, title=''):
scatters = []
n_communities = len(np.unique(communities))
scatter = go.Scatter(x=coords[0],
y=coords[1],
mode='markers',
name=communities,
hoverinfo='text',
text=communities,
marker=dict(color=communities,
cmin=0.0, cmax=n_communities - 1,
colorscale='Viridis'
))
fig = {
'data': [scatter],
"layout": go.Layout(title=f'Graph layout of cell type clusters: {title}',
xaxis={'showticklabels': False},
yaxis={'showticklabels': False},
hovermode='closest', dragmode='select',
                            showlegend=True)
}
if filename is None:
plotly.offline.iplot(fig)
else:
plotly.offline.plot(fig, filename=filename, image='png')
def tenx_diff_exp(matrix, columns, communities):
diff_expr_dfs = []
for c in np.unique(communities):
group1 = (communities == c)
group2 = (communities != c)
diff_expr_df = sparse_diff_exp(matrix,
group1, group2,
columns).sort_values('p')
diff_expr_df['community'] = c
diff_expr_dfs.append(diff_expr_df)
diff_expr = pd.concat(diff_expr_dfs)
print(diff_expr.shape)
print(diff_expr.head())
return diff_expr
def tenx_diff_exp_all(tenx_data, communities):
diff_expr_dfs = []
for c1, c2 in itertools.combinations(np.unique(communities), 2):
group1 = (communities == c1)
group2 = (communities == c2)
diff_expr_df = sparse_diff_exp(tenx_data.genes.matrix,
group1, group2, tenx_data.genes.columns).sort_values('p')
diff_expr_df['community1'] = c1
diff_expr_df['community2'] = c2
diff_expr_dfs.append(diff_expr_df)
diff_expr = pd.concat(diff_expr_dfs)
print(diff_expr.shape)
print(diff_expr.head())
return diff_expr
def diff_exp(counts, group1, group2):
"""Computes differential expression between group 1 and group 2
for each column in the dataframe counts.
Returns a dataframe of Z-scores and p-values."""
mean_diff = counts.loc[group1].mean() - counts.loc[group2].mean()
pooled_sd = np.sqrt(counts.loc[group1].var() / len(group1)
+ counts.loc[group2].var() / len(group2))
z_scores = mean_diff / pooled_sd
z_scores = z_scores.fillna(0)
    # two-sided p-value from a normal (z) approximation
p_vals = (1 - stats.norm.cdf(np.abs(z_scores))) * 2
df = pd.DataFrame({'z': z_scores})
df['p'] = p_vals
return df
def sparse_diff_exp(matrix, group1, group2, index):
"""Computes differential expression between group 1 and group 2
for each column in the dataframe counts.
Returns a dataframe of Z-scores and p-values."""
g1 = matrix[group1, :]
g2 = matrix[group2, :]
g1mu = np.asarray(g1.mean(0)).flatten()
g2mu = np.asarray(g2.mean(0)).flatten()
mean_diff = g1mu - g2mu
# E[X^2] - (E[X])^2
pooled_sd = np.sqrt(((g1.power(2)).mean(0) - g1mu ** 2) / len(group1)
+ ((g2.power(2)).mean(0) - g2mu ** 2) / len(group2))
pooled_sd = np.asarray(pooled_sd).flatten()
z_scores = np.zeros_like(pooled_sd)
nz = pooled_sd > 0
z_scores[nz] = np.nan_to_num(mean_diff[nz] / pooled_sd[nz])
    # two-sided p-values from a normal (z) approximation, Bonferroni-scaled by the number of genes
p_vals = np.clip((1 - stats.norm.cdf(np.abs(z_scores))) * 2 * matrix.shape[1], 0, 1)
df = pd.DataFrame(OrderedDict([('z', z_scores), ('p', p_vals),
('mean1', g1mu), ('mean2', g2mu)]),
index=index)
return df
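# Illustrative usage sketch with synthetic data (not part of the real pipeline):
# builds a tiny random sparse count matrix and compares the first ten cells
# against the rest using sparse_diff_exp defined above.
def _example_sparse_diff_exp():
    rng = np.random.RandomState(0)
    counts = sparse.csr_matrix(rng.poisson(1.0, size=(20, 5)))  # 20 cells x 5 genes
    genes = ['gene{}'.format(i) for i in range(5)]
    group1 = np.arange(20) < 10    # boolean mask for the first ten cells
    group2 = ~group1               # boolean mask for the remaining cells
    return sparse_diff_exp(counts, group1, group2, genes)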
def run_tissue(tissue, data_folder, samples, k, channels_to_drop = []):
genes_to_drop = 'Malat1|Rn45s|Rpl10|Rpl10a|Rpl10l|Rpl11|Rpl12|Rpl13|Rpl13a|Rpl14|Rpl15|Rpl17|Rpl18|Rpl18a|Rpl19|Rpl21|Rpl22|Rpl22l1|Rpl23|Rpl23a|Rpl24|Rpl26|Rpl27|Rpl27a|Rpl28|Rpl29|Rpl3|Rpl30|Rpl31|Rpl31-ps12|Rpl32|Rpl34|Rpl34-ps1|Rpl35|Rpl35a|Rpl36|Rpl36a|Rpl36al|Rpl37|Rpl37a|Rpl38|Rpl39|Rpl39l|Rpl3l|Rpl4|Rpl41|Rpl5|Rpl6|Rpl7|Rpl7a|Rpl7l1|Rpl8|Rpl9|Rplp0|Rplp1|Rplp2|Rplp2-ps1|Rps10|Rps11|Rps12|Rps13|Rps14|Rps15|Rps15a|Rps15a-ps4|Rps15a-ps6|Rps16|Rps17|Rps18|Rps19|Rps19-ps3|Rps19bp1|Rps2|Rps20|Rps21|Rps23|Rps24|Rps25|Rps26|Rps27|Rps27a|Rps27l|Rps28|Rps29|Rps3|Rps3a|Rps4x|Rps4y2|Rps5|Rps6|Rps6ka1|Rps6ka2|Rps6ka3|Rps6ka4|Rps6ka5|Rps6ka6|Rps6kb1|Rps6kb2|Rps6kc1|Rps6kl1|Rps7|Rps8|Rps9|Rpsa'.split(
'|')
file_prefix = data_folder + '/10x_data/tissues/'
file_suffix = f'-{tissue}-{samples}-{k}'
tenx = common.TenX_Runs(data_folder, tissue=tissue, verbose=True, genes_to_drop=genes_to_drop,
channels_to_drop=channels_to_drop)
skip = tenx.genes.matrix.shape[0] // samples
skip = max(skip, 1)
top_genes = pd.DataFrame(index=tenx.genes.columns, data=np.asarray(tenx.genes.matrix.mean(axis=0)).flatten())[
0].nlargest(10)
top_genes = pd.DataFrame(top_genes)
top_genes.columns = ['Mean UMIs']
top_genes.to_csv(f'{tissue} top genes')
coords, communities_labelprop = network_layout(tenx.genes.matrix[::skip], k=k)
coords_df = pd.DataFrame({'0': coords[:, 0], '1': coords[:, 1], 'cluster': communities_labelprop},
index=tenx.genes.rows[::skip])
coords_df.to_csv(file_prefix + 'smushed' + file_suffix + '.csv')
plot_labelprop_mpl(coords, communities_labelprop, title=f'{tissue}: Graph layout of clusters',
file_name=file_prefix + 'embedding' + file_suffix + '.png')
diff_expr_df = tenx_diff_exp(tenx.genes[::skip, :], tenx.genes.columns, communities_labelprop)
de = diff_expr_df[diff_expr_df['p'] < 0.001]
de = de[de['z'] > 0]
de['scale'] = np.abs(de['mean1'] - de['mean2'])
bigten = de.groupby('community')['mean1'].nlargest(50)
bigten = | pd.DataFrame(bigten) | pandas.DataFrame |
import pandas as pd
def row_to_tex(row):
if 'good_cand_ibata' in row.index:
flag = '' if pd.isnull(row['good_cand_ibata']) else row['good_cand_ibata']
else:
flag = ''
string = str(row['source_id']) + ' & '
string += "${0:.3f}$ & ".format(row['ra'])
string += "${0:.3f}$ & ".format(row['dec'])
string += "{0:.3f} & ".format(row['g0'])
string += "{0:.3f} & ".format(row['bp_rp0'])
string += "${0:.2f} \pm {1:.2f}$ & ".format(row['parallax'], row['parallax_error'])
string += "${0:.2f} \pm {1:.2f}$ & ".format(row['pmra'], row['pmra_error'])
string += "${0:.2f} \pm {1:.2f}$ & ".format(row['pmdec'], row['pmdec_error'])
if 'v_hel' in row.index:
if pd.isnull(row['v_hel']):
string += ''
else:
string += "${0:.2f} \pm {1:.2f}$".format(row['v_hel'], row['v_hel_error'])
if ~ | pd.isnull(row['other_id']) | pandas.isnull |
from datetime import datetime, timedelta
import operator
import pickle
import unittest
import numpy as np
from pandas.core.index import Index, Factor, MultiIndex, NULL_INDEX
from pandas.util.testing import assert_almost_equal
import pandas.util.testing as tm
import pandas._tseries as tseries
class TestIndex(unittest.TestCase):
def setUp(self):
self.strIndex = tm.makeStringIndex(100)
self.dateIndex = tm.makeDateIndex(100)
self.intIndex = tm.makeIntIndex(100)
self.empty = Index([])
self.tuples = Index(zip(['foo', 'bar', 'baz'], [1, 2, 3]))
def test_hash_error(self):
self.assertRaises(TypeError, hash, self.strIndex)
def test_deepcopy(self):
from copy import deepcopy
copy = deepcopy(self.strIndex)
self.assert_(copy is self.strIndex)
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertRaises(Exception, idx._verify_integrity)
def test_sort(self):
self.assertRaises(Exception, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(Exception, self.strIndex.__setitem__, 5, 0)
self.assertRaises(Exception, self.strIndex.__setitem__, slice(1,5), 0)
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = arr.view(Index)
tm.assert_contains_all(arr, index)
self.assert_(np.array_equal(self.strIndex, index))
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(Exception, Index, 0)
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assert_(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_asOfDate(self):
d = self.dateIndex[0]
self.assert_(self.dateIndex.asOfDate(d) is d)
self.assert_(self.dateIndex.asOfDate(d - timedelta(1)) is None)
d = self.dateIndex[-1]
self.assert_(self.dateIndex.asOfDate(d + timedelta(1)) is d)
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_(np.array_equal(result, expected))
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assert_(isinstance(index_result, np.ndarray))
self.assert_(not isinstance(index_result, Index))
self.assert_(np.array_equal(arr_result, index_result))
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
tm.assert_dict_equal(tseries.map_indices(subIndex),
subIndex.indexMap)
subIndex = self.strIndex[list(boolIdx)]
tm.assert_dict_equal(tseries.map_indices(subIndex),
subIndex.indexMap)
def test_fancy(self):
sl = self.strIndex[[1,2,3]]
for i in sl:
self.assertEqual(i, sl[sl.indexMap[i]])
def test_getitem(self):
arr = np.array(self.dateIndex)
self.assertEquals(self.dateIndex[5], arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assert_(shifted is self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_(np.array_equal(shifted, self.dateIndex + timedelta(5)))
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assert_(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assert_(inter is first)
# non-iterable input
self.assertRaises(Exception, first.intersection, 0.5)
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assert_(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assert_(union is first)
union = first.union([])
self.assert_(union is first)
# non-iterable input
self.assertRaises(Exception, first.union, 0.5)
def test_add(self):
firstCat = self.strIndex + self.dateIndex
secondCat = self.strIndex + self.strIndex
self.assert_(tm.equalContents(np.append(self.strIndex,
self.dateIndex), firstCat))
self.assert_(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat.indexMap)
tm.assert_contains_all(self.strIndex, secondCat.indexMap)
tm.assert_contains_all(self.dateIndex, firstCat.indexMap)
# this is valid too
shifted = self.dateIndex + timedelta(1)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assert_('a' not in index2.indexMap)
self.assert_('afoo' in index2.indexMap)
def test_diff(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
result = first - second
self.assert_(tm.equalContents(result, answer))
diff = first.diff(first)
self.assert_(len(diff) == 0)
# non-iterable input
self.assertRaises(Exception, first.diff, 0.5)
def test_pickle(self):
def testit(index):
pickled = pickle.dumps(index)
unpickled = pickle.loads(pickled)
self.assert_(isinstance(unpickled, Index))
self.assert_(np.array_equal(unpickled, index))
tm.assert_dict_equal(unpickled.indexMap, index.indexMap)
testit(self.strIndex)
testit(self.dateIndex)
# def test_always_get_null_index(self):
# empty = Index([])
# self.assert_(empty is NULL_INDEX)
# self.assert_(self.dateIndex[15:15] is NULL_INDEX)
def test_is_all_dates(self):
self.assert_(self.dateIndex.is_all_dates())
self.assert_(not self.strIndex.is_all_dates())
self.assert_(not self.intIndex.is_all_dates())
def test_summary(self):
self._check_method_works(Index.summary)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = str(index[0])
self.assertEquals(formatted, expected)
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assert_(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1, r2 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
assert_almost_equal(r2, [True, True, False])
r1, r2 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
assert_almost_equal(r2, [False, True, True, True, True])
rffill1, rffill2 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
assert_almost_equal(r2, rffill2)
r1, r2 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
assert_almost_equal(r2, [True, True, True, True, True])
rbfill1, rbfill2 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
assert_almost_equal(r2, rbfill2)
def test_slice_locs(self):
idx = | Index([0, 1, 2, 5, 6, 7, 9, 10]) | pandas.core.index.Index |
from PySDDP.dessem.script.templates.dadger import DadgerTemplate
import pandas as pd
import os
from typing import IO
COMENTARIO = '&'
class Dadger(DadgerTemplate):
"""
    Class containing every element common to any version of the Dessem "Entdados" file.
    Its purpose is to provide duck typing for the Dessem class and to add a level of
    specialization inside the factory. It also passes on the responsibility for
    implementing the read and write methods.
"""
def __init__(self):
super().__init__()
self.comentarios = list()
self.tm = dict()
self.sist = dict()
self.ree = dict()
self.uh = dict()
self.tviag = dict()
self.ut = dict()
self.usie = dict()
self.dp = dict()
self.de = dict()
self.cd = dict()
self.ri = dict()
self.ia = dict()
self.rd = dict()
self.rivar = dict()
self.it = dict()
self.gp = dict()
self.ni = dict()
self.ve = dict()
self.ci_ce = dict()
self.re = dict()
self.lu = dict()
self.fh = dict()
self.ft = dict()
self.fi = dict()
self.fe = dict()
self.fr = dict()
self.fc = dict()
self.ac = dict()
self.da = dict()
self.fp = dict()
self.ez = dict()
self.ag = dict()
self.mh = dict()
self.mt = dict()
self.tx = dict()
self.pq = dict()
self.secr = dict()
self.cr = dict()
self.r11 = dict()
self.vr = dict()
self.pd = dict()
self.vm = dict()
self.df = dict()
self.me = dict()
self.meta_cjsist = dict()
self.meta_sist = dict()
self.meta_usit = dict()
self.sh = dict()
self.tf = dict()
self.rs = dict()
self.sp = dict()
self.ps = dict()
self.pp = dict()
def ler(self, file_name: str) -> None:
self.entdados = list()
# listas referentes ao dicionário TM
self.tm['mne'] = list()
self.tm['dd'] = list()
self.tm['hr'] = list()
self.tm['mh'] = list()
self.tm['durac'] = list()
self.tm['rede'] = list()
self.tm['patamar'] = list()
# listas referentes ao dicionário SIST
self.sist['mne'] = list()
self.sist['num'] = list()
self.sist['mne_iden'] = list()
self.sist['flag'] = list()
self.sist['nome'] = list()
# listas referentes ao dicionário REE
self.ree['mne'] = list()
self.ree['num_ree'] = list()
self.ree['num_sub'] = list()
self.ree['nome'] = list()
# listas referentes ao dicionário UH
self.uh['mne'] = list()
self.uh['ind'] = list()
self.uh['nome'] = list()
self.uh['ss'] = list()
self.uh['vinic'] = list()
self.uh['evap'] = list()
self.uh['di'] = list()
self.uh['hi'] = list()
self.uh['m'] = list()
self.uh['vmor'] = list()
self.uh['prod'] = list()
self.uh['rest'] = list()
# listas referentes ao dicionário TVIAG
self.tviag['mne'] = list()
self.tviag['mont'] = list()
self.tviag['jus'] = list()
self.tviag['tp'] = list()
self.tviag['hr'] = list()
self.tviag['tpTviag'] = list()
# listas referentes ao dicionário UT
self.ut['mne'] = list()
self.ut['num'] = list()
self.ut['nome'] = list()
self.ut['ss'] = list()
self.ut['flag'] = list()
self.ut['di'] = list()
self.ut['hi'] = list()
self.ut['mi'] = list()
self.ut['df'] = list()
self.ut['hf'] = list()
self.ut['mf'] = list()
self.ut['rest'] = list()
self.ut['gmin'] = list()
self.ut['gmax'] = list()
self.ut['g_anterior'] = list()
# listas referentes ao dicionário USIE
self.usie['mne'] = list()
self.usie['num'] = list()
self.usie['ss'] = list()
self.usie['nome'] = list()
self.usie['mont'] = list()
self.usie['jus'] = list()
self.usie['qmin'] = list()
self.usie['qmax'] = list()
self.usie['taxa_consumo'] = list()
# listas referentes ao dicionário DP
self.dp['mne'] = list()
self.dp['ss'] = list()
self.dp['di'] = list()
self.dp['hi'] = list()
self.dp['mi'] = list()
self.dp['df'] = list()
self.dp['hf'] = list()
self.dp['mf'] = list()
self.dp['demanda'] = list()
# listas referentes ao dicionário DE
self.de['mne'] = list()
self.de['nde'] = list()
self.de['di'] = list()
self.de['hi'] = list()
self.de['mi'] = list()
self.de['df'] = list()
self.de['hf'] = list()
self.de['mf'] = list()
self.de['demanda'] = list()
self.de['justific'] = list()
# listas referentes ao dicionário CD
self.cd['mne'] = list()
self.cd['is'] = list()
self.cd['cd'] = list()
self.cd['di'] = list()
self.cd['hi'] = list()
self.cd['mi'] = list()
self.cd['df'] = list()
self.cd['hf'] = list()
self.cd['mf'] = list()
self.cd['custo'] = list()
self.cd['limsup'] = list()
# listas referentes ao dicionário RI
self.ri['mne'] = list()
self.ri['di'] = list()
self.ri['hi'] = list()
self.ri['mi'] = list()
self.ri['df'] = list()
self.ri['hf'] = list()
self.ri['mf'] = list()
self.ri['gh50min'] = list()
self.ri['gh50max'] = list()
self.ri['gh60min'] = list()
self.ri['gh60max'] = list()
self.ri['ande'] = list()
# listas referentes ao dicionário IA
self.ia['mne'] = list()
self.ia['ss1'] = list()
self.ia['ss2'] = list()
self.ia['di'] = list()
self.ia['hi'] = list()
self.ia['mi'] = list()
self.ia['df'] = list()
self.ia['hf'] = list()
self.ia['mf'] = list()
self.ia['ss1_ss2'] = list()
self.ia['ss2_ss1'] = list()
# listas referentes ao dicionário RD
self.rd['mne'] = list()
self.rd['flag_fol'] = list()
self.rd['ncirc'] = list()
self.rd['dbar'] = list()
self.rd['lim'] = list()
self.rd['dlin'] = list()
self.rd['perd'] = list()
self.rd['formato'] = list()
# listas referentes ao dicionário RIVAR
self.rivar['mne'] = list()
self.rivar['num'] = list()
self.rivar['ss'] = list()
self.rivar['cod'] = list()
self.rivar['penalidade'] = list()
# listas referentes ao dicionário IT
self.it['mne'] = list()
self.it['num'] = list()
self.it['coef'] = list()
# listas referentes ao dicionário GP
self.gp['mne'] = list()
self.gp['tol_conv'] = list()
self.gp['tol_prob'] = list()
# listas referentes ao dicionário NI
self.ni['mne'] = list()
self.ni['flag'] = list()
self.ni['nmax'] = list()
# listas referentes ao dicionário VE
self.ve['mne'] = list()
self.ve['ind'] = list()
self.ve['di'] = list()
self.ve['hi'] = list()
self.ve['mi'] = list()
self.ve['df'] = list()
self.ve['hf'] = list()
self.ve['mf'] = list()
self.ve['vol'] = list()
# listas referentes ao dicionário CI/CE
self.ci_ce['mne'] = list()
self.ci_ce['num'] = list()
self.ci_ce['nome'] = list()
self.ci_ce['ss_busf'] = list()
self.ci_ce['flag'] = list()
self.ci_ce['di'] = list()
self.ci_ce['hi'] = list()
self.ci_ce['mi'] = list()
self.ci_ce['df'] = list()
self.ci_ce['hf'] = list()
self.ci_ce['mf'] = list()
self.ci_ce['unid'] = list()
self.ci_ce['linf'] = list()
self.ci_ce['lsup'] = list()
self.ci_ce['custo'] = list()
self.ci_ce['energia'] = list()
# listas referentes ao dicionário RE
self.re['mne'] = list()
self.re['ind'] = list()
self.re['di'] = list()
self.re['hi'] = list()
self.re['mi'] = list()
self.re['df'] = list()
self.re['hf'] = list()
self.re['mf'] = list()
# listas referentes ao dicionário LU
self.lu['mne'] = list()
self.lu['ind'] = list()
self.lu['di'] = list()
self.lu['hi'] = list()
self.lu['mi'] = list()
self.lu['df'] = list()
self.lu['hf'] = list()
self.lu['mf'] = list()
self.lu['linf'] = list()
self.lu['lsup'] = list()
# listas referentes ao dicionário FH
self.fh['mne'] = list()
self.fh['ind'] = list()
self.fh['di'] = list()
self.fh['hi'] = list()
self.fh['mi'] = list()
self.fh['df'] = list()
self.fh['hf'] = list()
self.fh['mf'] = list()
self.fh['ush'] = list()
self.fh['unh'] = list()
self.fh['fator'] = list()
# listas referentes ao dicionário FT
self.ft['mne'] = list()
self.ft['ind'] = list()
self.ft['di'] = list()
self.ft['hi'] = list()
self.ft['mi'] = list()
self.ft['df'] = list()
self.ft['hf'] = list()
self.ft['mf'] = list()
self.ft['ust'] = list()
self.ft['fator'] = list()
# listas referentes ao dicionário FI
self.fi['mne'] = list()
self.fi['ind'] = list()
self.fi['di'] = list()
self.fi['hi'] = list()
self.fi['mi'] = list()
self.fi['df'] = list()
self.fi['hf'] = list()
self.fi['mf'] = list()
self.fi['ss1'] = list()
self.fi['ss2'] = list()
self.fi['fator'] = list()
# listas referentes ao dicionário FE
self.fe['mne'] = list()
self.fe['ind'] = list()
self.fe['di'] = list()
self.fe['hi'] = list()
self.fe['mi'] = list()
self.fe['df'] = list()
self.fe['hf'] = list()
self.fe['mf'] = list()
self.fe['num_contrato'] = list()
self.fe['fator'] = list()
# listas referentes ao dicionário FR
self.fr['mne'] = list()
self.fr['ind'] = list()
self.fr['di'] = list()
self.fr['hi'] = list()
self.fr['mi'] = list()
self.fr['df'] = list()
self.fr['hf'] = list()
self.fr['mf'] = list()
self.fr['useol'] = list()
self.fr['fator'] = list()
# listas referentes ao dicionário FC
self.fc['mne'] = list()
self.fc['ind'] = list()
self.fc['di'] = list()
self.fc['hi'] = list()
self.fc['mi'] = list()
self.fc['df'] = list()
self.fc['hf'] = list()
self.fc['mf'] = list()
self.fc['demanda'] = list()
self.fc['fator'] = list()
# listas referentes ao dicionário AC
self.ac['mne'] = list()
self.ac['usi'] = list()
self.ac['mneumonico'] = list()
self.ac['ind'] = list()
self.ac['valor'] = list()
# listas referentes ao dicionário DA
self.da['mne'] = list()
self.da['ind'] = list()
self.da['di'] = list()
self.da['hi'] = list()
self.da['mi'] = list()
self.da['df'] = list()
self.da['hf'] = list()
self.da['mf'] = list()
self.da['taxa'] = list()
self.da['obs'] = list()
# listas referentes ao dicionário FP
self.fp['mne'] = list()
self.fp['usi'] = list()
self.fp['f'] = list()
self.fp['nptQ'] = list()
self.fp['nptV'] = list()
self.fp['concavidade'] = list()
self.fp['min_quadraticos'] = list()
self.fp['deltaV'] = list()
self.fp['tr'] = list()
# listas referentes ao dicionário EZ
self.ez['mne'] = list()
self.ez['usi'] = list()
self.ez['perc_vol'] = list()
# listas referentes ao dicionário AG
self.ag['mne'] = list()
self.ag['num_estagios'] = list()
# listas referentes ao dicionário MH
self.mh['mne'] = list()
self.mh['num'] = list()
self.mh['gr'] = list()
self.mh['id'] = list()
self.mh['di'] = list()
self.mh['hi'] = list()
self.mh['mi'] = list()
self.mh['df'] = list()
self.mh['hf'] = list()
self.mh['mf'] = list()
self.mh['f'] = list()
# listas referentes ao dicionário MT
self.mt['mne'] = list()
self.mt['ute'] = list()
self.mt['ug'] = list()
self.mt['di'] = list()
self.mt['hi'] = list()
self.mt['mi'] = list()
self.mt['df'] = list()
self.mt['hf'] = list()
self.mt['mf'] = list()
self.mt['f'] = list()
# listas referentes ao dicionário TX
self.tx['mne'] = list()
self.tx['taxa_fcf'] = list()
# listas referentes ao dicionário PQ
self.pq['mne'] = list()
self.pq['ind'] = list()
self.pq['nome'] = list()
self.pq['ss/b'] = list()
self.pq['di'] = list()
self.pq['hi'] = list()
self.pq['mi'] = list()
self.pq['df'] = list()
self.pq['hf'] = list()
self.pq['mf'] = list()
self.pq['geracao'] = list()
# listas referentes ao dicionário SECR
self.secr['mne'] = list()
self.secr['num'] = list()
self.secr['nome'] = list()
self.secr['usi_1'] = list()
self.secr['fator_1'] = list()
self.secr['usi_2'] = list()
self.secr['fator_2'] = list()
self.secr['usi_3'] = list()
self.secr['fator_3'] = list()
self.secr['usi_4'] = list()
self.secr['fator_4'] = list()
self.secr['usi_5'] = list()
self.secr['fator_5'] = list()
# listas referentes ao dicionário CR
self.cr['mne'] = list()
self.cr['num'] = list()
self.cr['nome'] = list()
self.cr['gr'] = list()
self.cr['A0'] = list()
self.cr['A1'] = list()
self.cr['A2'] = list()
self.cr['A3'] = list()
self.cr['A4'] = list()
self.cr['A5'] = list()
self.cr['A6'] = list()
# listas referentes ao dicionário R11
self.r11['mne'] = list()
self.r11['di'] = list()
self.r11['hi'] = list()
self.r11['mi'] = list()
self.r11['df'] = list()
self.r11['hf'] = list()
self.r11['mf'] = list()
self.r11['cotaIni'] = list()
self.r11['varhora'] = list()
self.r11['vardia'] = list()
self.r11['coef'] = list()
# listas referentes ao dicionário VR
self.vr['mne'] = list()
self.vr['dia'] = list()
self.vr['mneumo_verao'] = list()
# listas referentes ao dicionário PD
self.pd['mne'] = list()
self.pd['tol_perc'] = list()
self.pd['tol_MW'] = list()
# listas referentes ao dicionário VM
self.vm['mne'] = list()
self.vm['ind'] = list()
self.vm['di'] = list()
self.vm['hi'] = list()
self.vm['mi'] = list()
self.vm['df'] = list()
self.vm['hf'] = list()
self.vm['mf'] = list()
self.vm['taxa_enchimento'] = list()
# listas referentes ao dicionário DF
self.df['mne'] = list()
self.df['ind'] = list()
self.df['di'] = list()
self.df['hi'] = list()
self.df['mi'] = list()
self.df['df'] = list()
self.df['hf'] = list()
self.df['mf'] = list()
self.df['taxa_descarga'] = list()
# listas referentes ao dicionário ME
self.me['mne'] = list()
self.me['ind'] = list()
self.me['di'] = list()
self.me['hi'] = list()
self.me['mi'] = list()
self.me['df'] = list()
self.me['hf'] = list()
self.me['mf'] = list()
self.me['fator'] = list()
# listas referentes ao dicionário META CJSIST
self.meta_cjsist['mneumo'] = list()
self.meta_cjsist['ind'] = list()
self.meta_cjsist['nome'] = list()
# listas referentes ao dicionário META SIST
self.meta_sist['mne'] = list()
self.meta_sist['ind'] = list()
self.meta_sist['tp'] = list()
self.meta_sist['num'] = list()
self.meta_sist['meta'] = list()
self.meta_sist['tol_MW'] = list()
self.meta_sist['tol_perc'] = list()
# listas referentes ao dicionário META USIT
self.meta_usit['mne'] = list()
self.meta_usit['ind'] = list()
self.meta_usit['tp'] = list()
self.meta_usit['num'] = list()
self.meta_usit['meta'] = list()
self.meta_usit['tol_MW'] = list()
self.meta_usit['tol_perc'] = list()
# listas referentes ao dicionário SH
self.sh['mne'] = list()
self.sh['flag_simul'] = list()
self.sh['flag_pl'] = list()
self.sh['num_min'] = list()
self.sh['num_max'] = list()
self.sh['flag_quebra'] = list()
self.sh['ind_1'] = list()
self.sh['ind_2'] = list()
self.sh['ind_3'] = list()
self.sh['ind_4'] = list()
self.sh['ind_5'] = list()
# listas referentes ao dicionário TF
self.tf['mne'] = list()
self.tf['custo'] = list()
# listas referentes ao dicionário RS
self.rs['mne'] = list()
self.rs['cod'] = list()
self.rs['ind'] = list()
self.rs['subs'] = list()
self.rs['tp'] = list()
self.rs['comentario'] = list()
# listas referentes ao dicionário SP
self.sp['mne'] = list()
self.sp['flag'] = list()
# listas referentes ao dicionário PS
self.ps['mne'] = list()
self.ps['flag'] = list()
# listas referentes ao dicionário PP
self.pp['mne'] = list()
self.pp['flag'] = list()
self.pp['iteracoes'] = list()
self.pp['num'] = list()
self.pp['tp'] = list()
try:
with open(file_name, 'r', encoding='latin-1') as f: # type: IO[str]
continua = True
while continua:
self.next_line(f)
linha = self.linha
if linha[0] == COMENTARIO:
self.comentarios.append(linha)
self.entdados.append(linha)
continue
mne = linha[:6].strip().lower()
mne_sigla = linha[:3].strip().lower()
mneumo = linha[:13].strip().lower()
self.entdados.append(linha[:6])
                    # Read the record fields according to the corresponding mnemonic
if mne_sigla == 'tm':
self.tm['mne'].append(self.linha[:2])
self.tm['dd'].append(self.linha[4:6])
self.tm['hr'].append(self.linha[9:11])
self.tm['mh'].append(self.linha[14:15])
self.tm['durac'].append(self.linha[19:24])
self.tm['rede'].append(self.linha[29:30])
self.tm['patamar'].append(self.linha[33:39])
continue
if mne == 'sist':
self.sist['mne'].append(self.linha[:6])
self.sist['num'].append(self.linha[7:9])
self.sist['mne_iden'].append(self.linha[10:12])
self.sist['flag'].append(self.linha[13:15])
self.sist['nome'].append(self.linha[16:26])
continue
if mne == 'ree':
self.ree['mne'].append(self.linha[:3])
self.ree['num_ree'].append(self.linha[6:8])
self.ree['num_sub'].append(self.linha[9:11])
self.ree['nome'].append(self.linha[12:22])
continue
if mne_sigla == 'uh':
self.uh['mne'].append(self.linha[:2])
self.uh['ind'].append(self.linha[4:7])
self.uh['nome'].append(self.linha[9:21])
self.uh['ss'].append(self.linha[24:26])
self.uh['vinic'].append(self.linha[29:39])
self.uh['evap'].append(self.linha[39:40])
self.uh['di'].append(self.linha[41:43])
self.uh['hi'].append(self.linha[44:46])
self.uh['m'].append(self.linha[47:48])
self.uh['vmor'].append(self.linha[49:59])
self.uh['prod'].append(self.linha[64:65])
self.uh['rest'].append(self.linha[69:70])
continue
if mne == 'tviag':
self.tviag['mne'].append(self.linha[:6])
self.tviag['mont'].append(self.linha[6:9])
self.tviag['jus'].append(self.linha[10:13])
self.tviag['tp'].append(self.linha[14:15])
self.tviag['hr'].append(self.linha[19:22])
self.tviag['tpTviag'].append(self.linha[24:25])
continue
if mne_sigla == 'ut':
self.ut['mne'].append(self.linha[:2])
self.ut['num'].append(self.linha[4:7])
self.ut['nome'].append(self.linha[9:21])
self.ut['ss'].append(self.linha[22:24])
self.ut['flag'].append(self.linha[25:26])
self.ut['di'].append(self.linha[27:29])
self.ut['hi'].append(self.linha[30:32])
self.ut['mi'].append(self.linha[33:34])
self.ut['df'].append(self.linha[35:37])
self.ut['hf'].append(self.linha[38:40])
self.ut['mf'].append(self.linha[41:42])
self.ut['rest'].append(self.linha[46:47])
self.ut['gmin'].append(self.linha[47:57])
self.ut['gmax'].append(self.linha[57:67])
self.ut['g_anterior'].append(self.linha[67:77])
continue
if mne == 'usie':
self.usie['mne'].append(self.linha[:4])
self.usie['num'].append(self.linha[5:8])
self.usie['ss'].append(self.linha[9:11])
self.usie['nome'].append(self.linha[14:26])
self.usie['mont'].append(self.linha[29:32])
self.usie['jus'].append(self.linha[34:37])
self.usie['qmin'].append(self.linha[39:49])
self.usie['qmax'].append(self.linha[49:59])
self.usie['taxa_consumo'].append(self.linha[59:69])
continue
if mne_sigla == 'dp':
self.dp['mne'].append(self.linha[:2])
self.dp['ss'].append(self.linha[4:6])
self.dp['di'].append(self.linha[8:10])
self.dp['hi'].append(self.linha[11:13])
self.dp['mi'].append(self.linha[14:15])
self.dp['df'].append(self.linha[16:18])
self.dp['hf'].append(self.linha[19:21])
self.dp['mf'].append(self.linha[22:23])
self.dp['demanda'].append(self.linha[24:34])
continue
if mne_sigla == 'de':
self.de['mne'].append(self.linha[:2])
self.de['nde'].append(self.linha[4:7])
self.de['di'].append(self.linha[8:10])
self.de['hi'].append(self.linha[11:13])
self.de['mi'].append(self.linha[14:15])
self.de['df'].append(self.linha[16:18])
self.de['hf'].append(self.linha[19:21])
self.de['mf'].append(self.linha[22:23])
self.de['demanda'].append(self.linha[24:34])
self.de['justific'].append(self.linha[35:45])
continue
if mne_sigla == 'cd':
self.cd['mne'].append(self.linha[:2])
self.cd['is'].append(self.linha[3:5])
self.cd['cd'].append(self.linha[6:8])
self.cd['di'].append(self.linha[9:11])
self.cd['hi'].append(self.linha[12:14])
self.cd['mi'].append(self.linha[15:16])
self.cd['df'].append(self.linha[17:19])
self.cd['hf'].append(self.linha[20:22])
self.cd['mf'].append(self.linha[23:24])
self.cd['custo'].append(self.linha[25:35])
self.cd['limsup'].append(self.linha[35:45])
continue
if mne_sigla == 'ri':
self.ri['mne'].append(self.linha[:2])
self.ri['di'].append(self.linha[8:10])
self.ri['hi'].append(self.linha[11:13])
self.ri['mi'].append(self.linha[14:15])
self.ri['df'].append(self.linha[16:18])
self.ri['hf'].append(self.linha[19:21])
self.ri['mf'].append(self.linha[22:23])
self.ri['gh50min'].append(self.linha[26:36])
self.ri['gh50max'].append(self.linha[36:46])
self.ri['gh60min'].append(self.linha[46:56])
self.ri['gh60max'].append(self.linha[56:66])
self.ri['ande'].append(self.linha[66:76])
continue
if mne_sigla == 'ia':
self.ia['mne'].append(self.linha[:2])
self.ia['ss1'].append(self.linha[4:6])
self.ia['ss2'].append(self.linha[9:11])
self.ia['di'].append(self.linha[13:15])
self.ia['hi'].append(self.linha[16:18])
self.ia['mi'].append(self.linha[19:20])
self.ia['df'].append(self.linha[21:23])
self.ia['hf'].append(self.linha[24:26])
self.ia['mf'].append(self.linha[27:28])
self.ia['ss1_ss2'].append(self.linha[29:39])
self.ia['ss2_ss1'].append(self.linha[39:49])
continue
if mne_sigla == 'rd':
self.rd['mne'].append(self.linha[:2])
self.rd['flag_fol'].append(self.linha[4:5])
self.rd['ncirc'].append(self.linha[8:12])
self.rd['dbar'].append(self.linha[14:15])
self.rd['lim'].append(self.linha[16:17])
self.rd['dlin'].append(self.linha[18:19])
self.rd['perd'].append(self.linha[20:21])
self.rd['formato'].append(self.linha[22:23])
continue
if mne == 'rivar':
self.rivar['mne'].append(self.linha[:5])
self.rivar['num'].append(self.linha[7:10])
self.rivar['ss'].append(self.linha[11:14])
self.rivar['cod'].append(self.linha[15:17])
self.rivar['penalidade'].append(self.linha[19:29])
continue
if mne_sigla == 'it':
self.it['mne'].append(self.linha[:2])
self.it['num'].append(self.linha[4:6])
self.it['coef'].append(self.linha[9:84])
continue
if mne_sigla == 'gp':
self.gp['mne'].append(self.linha[:2])
self.gp['tol_conv'].append(self.linha[4:14])
self.gp['tol_prob'].append(self.linha[15:25])
continue
if mne_sigla == 'ni':
self.ni['mne'].append(self.linha[:2])
self.ni['flag'].append(self.linha[4:5])
self.ni['nmax'].append(self.linha[9:12])
continue
if mne_sigla == 've':
self.ve['mne'].append(self.linha[:2])
self.ve['ind'].append(self.linha[4:7])
self.ve['di'].append(self.linha[8:10])
self.ve['hi'].append(self.linha[11:13])
self.ve['mi'].append(self.linha[14:15])
self.ve['df'].append(self.linha[16:18])
self.ve['hf'].append(self.linha[19:21])
self.ve['mf'].append(self.linha[22:23])
self.ve['vol'].append(self.linha[24:34])
continue
if mne_sigla == 'ci' or mne_sigla == 'ce':
self.ci_ce['mne'].append(self.linha[:2])
self.ci_ce['num'].append(self.linha[3:6])
self.ci_ce['nome'].append(self.linha[7:17])
self.ci_ce['ss_busf'].append(self.linha[18:23])
self.ci_ce['flag'].append(self.linha[23:24])
self.ci_ce['di'].append(self.linha[25:27])
self.ci_ce['hi'].append(self.linha[28:30])
self.ci_ce['mi'].append(self.linha[31:32])
self.ci_ce['df'].append(self.linha[33:35])
self.ci_ce['hf'].append(self.linha[36:38])
self.ci_ce['mf'].append(self.linha[39:40])
self.ci_ce['unid'].append(self.linha[41:42])
self.ci_ce['linf'].append(self.linha[43:53])
self.ci_ce['lsup'].append(self.linha[53:63])
self.ci_ce['custo'].append(self.linha[63:73])
self.ci_ce['energia'].append(self.linha[73:83])
continue
if mne_sigla == 're':
self.re['mne'].append(self.linha[:2])
self.re['ind'].append(self.linha[4:7])
self.re['di'].append(self.linha[9:11])
self.re['hi'].append(self.linha[12:14])
self.re['mi'].append(self.linha[15:16])
self.re['df'].append(self.linha[17:19])
self.re['hf'].append(self.linha[20:22])
self.re['mf'].append(self.linha[23:24])
continue
if mne_sigla == 'lu':
self.lu['mne'].append(self.linha[:2])
self.lu['ind'].append(self.linha[4:7])
self.lu['di'].append(self.linha[8:10])
self.lu['hi'].append(self.linha[11:13])
self.lu['mi'].append(self.linha[14:15])
self.lu['df'].append(self.linha[16:18])
self.lu['hf'].append(self.linha[19:21])
self.lu['mf'].append(self.linha[22:23])
self.lu['linf'].append(self.linha[24:34])
self.lu['lsup'].append(self.linha[34:44])
continue
if mne_sigla == 'fh':
self.fh['mne'].append(self.linha[:2])
self.fh['ind'].append(self.linha[4:7])
self.fh['di'].append(self.linha[8:10])
self.fh['hi'].append(self.linha[11:13])
self.fh['mi'].append(self.linha[14:15])
self.fh['df'].append(self.linha[16:18])
self.fh['hf'].append(self.linha[19:21])
self.fh['mf'].append(self.linha[22:23])
self.fh['ush'].append(self.linha[24:27])
self.fh['unh'].append(self.linha[27:29])
self.fh['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'ft':
self.ft['mne'].append(self.linha[:2])
self.ft['ind'].append(self.linha[4:7])
self.ft['di'].append(self.linha[8:10])
self.ft['hi'].append(self.linha[11:13])
self.ft['mi'].append(self.linha[14:15])
self.ft['df'].append(self.linha[16:18])
self.ft['hf'].append(self.linha[19:21])
self.ft['mf'].append(self.linha[22:23])
self.ft['ust'].append(self.linha[24:27])
self.ft['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'fi':
self.fi['mne'].append(self.linha[:2])
self.fi['ind'].append(self.linha[4:7])
self.fi['di'].append(self.linha[8:10])
self.fi['hi'].append(self.linha[11:13])
self.fi['mi'].append(self.linha[14:15])
self.fi['df'].append(self.linha[16:18])
self.fi['hf'].append(self.linha[19:21])
self.fi['mf'].append(self.linha[22:23])
self.fi['ss1'].append(self.linha[24:26])
self.fi['ss2'].append(self.linha[29:31])
self.fi['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'fe':
self.fe['mne'].append(self.linha[:2])
self.fe['ind'].append(self.linha[4:7])
self.fe['di'].append(self.linha[8:10])
self.fe['hi'].append(self.linha[11:13])
self.fe['mi'].append(self.linha[14:15])
self.fe['df'].append(self.linha[16:18])
self.fe['hf'].append(self.linha[19:21])
self.fe['mf'].append(self.linha[22:23])
self.fe['num_contrato'].append(self.linha[24:27])
self.fe['fator'].append(self.linha[34:44])
continue
if mne_sigla == 'fr':
self.fr['mne'].append(self.linha[:2])
self.fr['ind'].append(self.linha[4:9])
self.fr['di'].append(self.linha[10:12])
self.fr['hi'].append(self.linha[13:15])
self.fr['mi'].append(self.linha[16:17])
self.fr['df'].append(self.linha[18:20])
self.fr['hf'].append(self.linha[21:23])
self.fr['mf'].append(self.linha[24:25])
self.fr['useol'].append(self.linha[26:31])
self.fr['fator'].append(self.linha[36:46])
continue
if mne_sigla == 'fc':
self.fc['mne'].append(self.linha[:2])
self.fc['ind'].append(self.linha[4:7])
self.fc['di'].append(self.linha[10:12])
self.fc['hi'].append(self.linha[13:15])
self.fc['mi'].append(self.linha[16:17])
self.fc['df'].append(self.linha[18:20])
self.fc['hf'].append(self.linha[21:23])
self.fc['mf'].append(self.linha[24:25])
self.fc['demanda'].append(self.linha[26:29])
self.fc['fator'].append(self.linha[36:46])
continue
if mne_sigla == 'ac':
self.ac['mne'].append(self.linha[:2])
self.ac['usi'].append(self.linha[4:7])
self.ac['mneumonico'].append(self.linha[9:15])
self.ac['ind'].append(self.linha[15:19])
self.ac['valor'].append(self.linha[19:])
continue
if mne_sigla == 'da':
self.da['mne'].append(self.linha[:2])
self.da['ind'].append(self.linha[4:7])
self.da['di'].append(self.linha[8:10])
self.da['hi'].append(self.linha[11:13])
self.da['mi'].append(self.linha[14:15])
self.da['df'].append(self.linha[16:18])
self.da['hf'].append(self.linha[19:21])
self.da['mf'].append(self.linha[22:23])
self.da['taxa'].append(self.linha[24:34])
self.da['obs'].append(self.linha[35:47])
continue
if mne_sigla == 'fp':
self.fp['mne'].append(self.linha[:2])
self.fp['usi'].append(self.linha[3:6])
self.fp['f'].append(self.linha[7:8])
self.fp['nptQ'].append(self.linha[10:13])
self.fp['nptV'].append(self.linha[15:18])
self.fp['concavidade'].append(self.linha[20:21])
self.fp['min_quadraticos'].append(self.linha[24:25])
self.fp['deltaV'].append(self.linha[29:39])
self.fp['tr'].append(self.linha[39:49])
continue
if mne_sigla == 'ez':
self.ez['mne'].append(self.linha[:2])
self.ez['usi'].append(self.linha[4:7])
self.ez['perc_vol'].append(self.linha[9:14])
continue
if mne_sigla == 'ag':
self.ag['mne'].append(self.linha[:2])
self.ag['num_estagios'].append(self.linha[3:6])
continue
if mne_sigla == 'mh':
self.mh['mne'].append(self.linha[:2])
self.mh['num'].append(self.linha[4:7])
self.mh['gr'].append(self.linha[9:11])
self.mh['id'].append(self.linha[12:14])
self.mh['di'].append(self.linha[14:16])
self.mh['hi'].append(self.linha[17:19])
self.mh['mi'].append(self.linha[20:21])
self.mh['df'].append(self.linha[22:24])
self.mh['hf'].append(self.linha[25:27])
self.mh['mf'].append(self.linha[28:29])
self.mh['f'].append(self.linha[30:31])
continue
if mne_sigla == 'mt':
self.mt['mne'].append(self.linha[:2])
self.mt['ute'].append(self.linha[4:7])
self.mt['ug'].append(self.linha[8:11])
self.mt['di'].append(self.linha[13:15])
self.mt['hi'].append(self.linha[16:18])
self.mt['mi'].append(self.linha[19:20])
self.mt['df'].append(self.linha[21:23])
self.mt['hf'].append(self.linha[24:26])
self.mt['mf'].append(self.linha[27:28])
self.mt['f'].append(self.linha[29:30])
continue
if mne_sigla == 'tx':
self.tx['mne'].append(self.linha[:2])
self.tx['taxa_fcf'].append(self.linha[4:14])
continue
if mne_sigla == 'pq':
self.pq['mne'].append(self.linha[:2])
self.pq['ind'].append(self.linha[4:7])
self.pq['nome'].append(self.linha[9:19])
self.pq['ss/b'].append(self.linha[19:24])
self.pq['di'].append(self.linha[24:26])
self.pq['hi'].append(self.linha[27:29])
self.pq['mi'].append(self.linha[30:31])
self.pq['df'].append(self.linha[32:34])
self.pq['hf'].append(self.linha[35:37])
self.pq['mf'].append(self.linha[38:39])
self.pq['geracao'].append(self.linha[40:50])
continue
if mne == 'secr':
self.secr['mne'].append(self.linha[:4])
self.secr['num'].append(self.linha[5:8])
self.secr['nome'].append(self.linha[9:21])
self.secr['usi_1'].append(self.linha[24:27])
self.secr['fator_1'].append(self.linha[28:33])
self.secr['usi_2'].append(self.linha[34:37])
self.secr['fator_2'].append(self.linha[38:43])
self.secr['usi_3'].append(self.linha[44:47])
self.secr['fator_3'].append(self.linha[48:53])
self.secr['usi_4'].append(self.linha[54:57])
self.secr['fator_4'].append(self.linha[58:63])
self.secr['usi_5'].append(self.linha[64:67])
self.secr['fator_5'].append(self.linha[68:73])
continue
if mne_sigla == 'cr':
self.cr['mne'].append(self.linha[:2])
self.cr['num'].append(self.linha[4:7])
self.cr['nome'].append(self.linha[9:21])
self.cr['gr'].append(self.linha[24:26])
self.cr['A0'].append(self.linha[27:42])
self.cr['A1'].append(self.linha[43:58])
self.cr['A2'].append(self.linha[59:74])
self.cr['A3'].append(self.linha[75:90])
self.cr['A4'].append(self.linha[91:106])
self.cr['A5'].append(self.linha[107:122])
self.cr['A6'].append(self.linha[123:138])
continue
if mne_sigla == 'r11':
self.r11['mne'].append(self.linha[:3])
self.r11['di'].append(self.linha[4:6])
self.r11['hi'].append(self.linha[7:9])
self.r11['mi'].append(self.linha[10:11])
self.r11['df'].append(self.linha[12:14])
self.r11['hf'].append(self.linha[15:17])
self.r11['mf'].append(self.linha[18:19])
self.r11['cotaIni'].append(self.linha[20:30])
self.r11['varhora'].append(self.linha[30:40])
self.r11['vardia'].append(self.linha[40:50])
self.r11['coef'].append(self.linha[59:164])
continue
if mne_sigla == 'vr':
self.vr['mne'].append(self.linha[:2])
self.vr['dia'].append(self.linha[4:6])
self.vr['mneumo_verao'].append(self.linha[9:12])
continue
if mne_sigla == 'pd':
self.pd['mne'].append(self.linha[:2])
self.pd['tol_perc'].append(self.linha[3:9])
self.pd['tol_MW'].append(self.linha[12:22])
continue
if mne_sigla == 'vm':
self.vm['mne'].append(self.linha[:2])
self.vm['ind'].append(self.linha[4:7])
self.vm['di'].append(self.linha[8:10])
self.vm['hi'].append(self.linha[11:13])
self.vm['mi'].append(self.linha[14:15])
self.vm['df'].append(self.linha[16:18])
self.vm['hf'].append(self.linha[19:21])
self.vm['mf'].append(self.linha[22:23])
self.vm['taxa_enchimento'].append(self.linha[24:34])
continue
if mne_sigla == 'df':
self.df['mne'].append(self.linha[:2])
self.df['ind'].append(self.linha[4:7])
self.df['di'].append(self.linha[8:10])
self.df['hi'].append(self.linha[11:13])
self.df['mi'].append(self.linha[14:15])
self.df['df'].append(self.linha[16:18])
self.df['hf'].append(self.linha[19:21])
self.df['mf'].append(self.linha[22:23])
self.df['taxa_descarga'].append(self.linha[24:34])
continue
if mne_sigla == 'me':
self.me['mne'].append(self.linha[:2])
self.me['ind'].append(self.linha[4:7])
self.me['di'].append(self.linha[8:10])
self.me['hi'].append(self.linha[11:13])
self.me['mi'].append(self.linha[14:15])
self.me['df'].append(self.linha[16:18])
self.me['hf'].append(self.linha[19:21])
self.me['mf'].append(self.linha[22:23])
self.me['fator'].append(self.linha[24:34])
continue
if mneumo == 'meta cjsist':
self.meta_cjsist['mneumo'].append(self.linha[:13])
self.meta_cjsist['ind'].append(self.linha[14:17])
self.meta_cjsist['nome'].append(self.linha[18:20])
continue
if mneumo == 'meta receb':
self.meta_sist['mne'].append(self.linha[:13])
self.meta_sist['ind'].append(self.linha[14:17])
self.meta_sist['tp'].append(self.linha[19:21])
self.meta_sist['num'].append(self.linha[22:23])
self.meta_sist['meta'].append(self.linha[24:34])
self.meta_sist['tol_MW'].append(self.linha[34:44])
self.meta_sist['tol_perc'].append(self.linha[44:54])
continue
if mneumo == 'meta gter':
self.meta_usit['mne'].append(self.linha[:13])
self.meta_usit['ind'].append(self.linha[14:17])
self.meta_usit['tp'].append(self.linha[19:21])
self.meta_usit['num'].append(self.linha[22:23])
self.meta_usit['meta'].append(self.linha[24:34])
self.meta_usit['tol_MW'].append(self.linha[34:44])
self.meta_usit['tol_perc'].append(self.linha[44:54])
continue
if mne_sigla == 'sh':
self.sh['mne'].append(self.linha[:2])
self.sh['flag_simul'].append(self.linha[4:5])
self.sh['flag_pl'].append(self.linha[9:10])
self.sh['num_min'].append(self.linha[14:17])
self.sh['num_max'].append(self.linha[19:22])
self.sh['flag_quebra'].append(self.linha[24:25])
self.sh['ind_1'].append(self.linha[29:32])
self.sh['ind_2'].append(self.linha[34:37])
self.sh['ind_3'].append(self.linha[39:42])
self.sh['ind_4'].append(self.linha[44:47])
self.sh['ind_5'].append(self.linha[49:52])
continue
if mne_sigla == 'tf':
self.tf['mne'].append(self.linha[:2])
self.tf['custo'].append(self.linha[4:14])
continue
if mne_sigla == 'rs':
self.rs['mne'].append(self.linha[:2])
self.rs['cod'].append(self.linha[3:6])
self.rs['ind'].append(self.linha[7:11])
self.rs['subs'].append(self.linha[12:16])
self.rs['tp'].append(self.linha[22:26])
self.rs['comentario'].append(self.linha[27:39])
continue
if mne_sigla == 'sp':
self.sp['mne'].append(self.linha[:2])
self.sp['flag'].append(self.linha[4:5])
continue
if mne_sigla == 'ps':
self.ps['mne'].append(self.linha[:2])
self.ps['flag'].append(self.linha[4:5])
continue
if mne_sigla == 'pp':
self.pp['mne'].append(self.linha[:2])
self.pp['flag'].append(self.linha[3:4])
self.pp['iteracoes'].append(self.linha[5:8])
self.pp['num'].append(self.linha[9:12])
self.pp['tp'].append(self.linha[13:14])
continue
except Exception as err:
if isinstance(err, StopIteration):
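# End of file reached (StopIteration): materialise each block's accumulated column
# lists into a DataFrame stored under that block's 'df' key.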
self.bloco_tm['df'] = pd.DataFrame(self.tm)
self.bloco_sist['df'] = pd.DataFrame(self.sist)
self.bloco_ree['df'] = pd.DataFrame(self.ree)
self.bloco_uh['df'] = pd.DataFrame(self.uh)
self.bloco_tviag['df'] = pd.DataFrame(self.tviag)
self.bloco_ut['df'] = pd.DataFrame(self.ut)
self.bloco_usie['df'] = pd.DataFrame(self.usie)
self.bloco_dp['df'] = pd.DataFrame(self.dp)
self.bloco_de['df'] = pd.DataFrame(self.de)
self.bloco_cd['df'] = pd.DataFrame(self.cd)
self.bloco_ri['df'] = pd.DataFrame(self.ri)
self.bloco_ia['df'] = pd.DataFrame(self.ia)
self.bloco_rd['df'] = pd.DataFrame(self.rd)
self.bloco_rivar['df'] = pd.DataFrame(self.rivar)
self.bloco_it['df'] = pd.DataFrame(self.it)
self.bloco_gp['df'] = pd.DataFrame(self.gp)
self.bloco_ni['df'] = pd.DataFrame(self.ni)
self.bloco_ve['df'] = pd.DataFrame(self.ve)
self.bloco_ci_ce['df'] = pd.DataFrame(self.ci_ce)
self.bloco_re['df'] = pd.DataFrame(self.re)
self.bloco_lu['df'] = pd.DataFrame(self.lu)
self.bloco_fh['df'] = pd.DataFrame(self.fh)
self.bloco_ft['df'] = pd.DataFrame(self.ft)
self.bloco_fi['df'] = pd.DataFrame(self.fi)
self.bloco_fe['df'] = pd.DataFrame(self.fe)
self.bloco_fr['df'] = pd.DataFrame(self.fr)
self.bloco_fc['df'] = pd.DataFrame(self.fc)
self.bloco_ac['df'] = pd.DataFrame(self.ac)
self.bloco_da['df'] = pd.DataFrame(self.da)
self.bloco_fp['df'] = pd.DataFrame(self.fp)
self.bloco_ez['df'] = pd.DataFrame(self.ez)
self.bloco_ag['df'] = pd.DataFrame(self.ag)
self.bloco_mh['df'] = pd.DataFrame(self.mh)
self.bloco_mt['df'] = pd.DataFrame(self.mt)
self.bloco_tx['df'] = | pd.DataFrame(self.tx) | pandas.DataFrame |
import numpy as np
import pandas as pd
import streamlit as st
import os
from datetime import datetime
from brother_ql.conversion import convert
from brother_ql.backends.helpers import send
from brother_ql.raster import BrotherQLRaster
from PIL import Image, ImageFont, ImageDraw
def search_families(df_people, lastname):
""" Return a DataFrame of all families with 'lastname'.
"""
# Get all people with lastname
lastnames = df_people[df_people['Last Name'].str.lower() == lastname.lower()]
# Get unique families
unique_families = lastnames['Family Members'].drop_duplicates()
return [
df_people[df_people['Family Members'] == unique_family]
for unique_family in unique_families
]
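# Illustrative usage (hypothetical data, not part of the original script): assuming
# df_people has 'Last Name' and 'Family Members' columns,
#   families = search_families(df_people, "smith")
#   len(families)  # number of distinct families sharing that last name
# would return one DataFrame per distinct 'Family Members' group.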
def display_family(family, df_people):
""" For a given family, list each each member as well as check in box.
When check in box clicked, record in df_people and show print button.
When print clicked, print label.
"""
for person in family['First Name']:
left, centre, right = st.columns(3)
left.write(person)
member_id = family[family['First Name']==person]['Member ID']
member_row = member_id.index[0]
checked_in_already = | pd.to_datetime(df_people.loc[member_row, "Checked In"]) | pandas.to_datetime |
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd
companies = ['AAPL', 'GOOG', 'GOOGL', 'AMZN', 'TSLA', 'MSFT']
DATA_DIR = "./data/regression"
# TODO feature engineering
for ticker in companies:
print(f"regression: {ticker}")
data = | pd.read_csv(f"{DATA_DIR}/public-sentiment-{ticker}.csv") | pandas.read_csv |
"""
"""
from lists_and_dicts import *
from methods_and_classes import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import ticker
import seaborn as sns
# global parameters
pd.options.display.float_format = '{:,.2f}'.format
plt.style.use('my_style')
stock = pd.read_csv('data/stock_transactions.csv')
new = stock['alt_product_group & cc'].str.split(n=0, expand=True)
stock['cc'] = new[0]
stock['alternate_product_group'] = new[1]
stock['cost_value'] = stock['unit_cost'] * stock['quantity']
stock = stock.drop(columns=['alt_product_group & cc'])
num_invalid_apg = pd.isnull(stock['alternate_product_group']).sum()
print(f'After preprocessing, {num_invalid_apg} records have an invalid "alternate_product_group"')
num_invalid_cc = pd.isnull(pd.to_numeric(stock['cc'], errors='coerce')).sum()
print(f'After preprocessing, {num_invalid_cc} records have an invalid "cc"')
# transaction type 1: debit raw materials
chemical_sum = summarise_stock(stock, 1, 48, '50', 'quantity', np.sum)
oil_sum = summarise_stock(stock, 1, 40, '55', 'quantity', np.sum)
plastics_sum = summarise_stock(stock, 1, 60, '60', 'quantity', np.sum)
paper_sum = summarise_stock(stock, 1, 41, '80', 'quantity', np.sum)
tuffa_sum = summarise_stock(stock, 1, 51, '85', 'quantity', np.sum)
# concatenate
raw_mats_one = pd.DataFrame()
raw_mats_one = pd.concat([raw_mats_one, chemical_sum], axis=1).rename(columns={'value': 'chemical'})
raw_mats_one = pd.concat([raw_mats_one, oil_sum], axis=1).rename(columns={'value': 'oil'})
raw_mats_one = pd.concat([raw_mats_one, plastics_sum], axis=1).rename(columns={'value': 'plastics'})
raw_mats_one = pd.concat([raw_mats_one, paper_sum], axis=1).rename(columns={'value': 'paper'})
raw_mats_one = pd.concat([raw_mats_one, tuffa_sum], axis=1).rename(columns={'value': 'tuffa'})
# plot
ax = raw_mats_one.plot(kind='bar', stacked=True, title='Factory Raw Material Use', xlabel='Months', ylabel='Units')
# set the y ticks to display correctly
ax.get_yaxis().set_major_formatter(ticker.StrMethodFormatter('{x:,.0f}'))
plt.savefig('plots/all_factories_raw_material_use.png')
plt.show()
# todo: invert, legend problem, value or volume?
# there are a couple of odd values but in general it passes the common sense test.
# the spike in chemical in april 20 is an error in the data related to the product 'blue era'
# transaction type 0: credit finished goods
chemical_sum = summarise_stock(stock, 0, 48, '50', 'quantity', np.sum)
oil_sum = summarise_stock(stock, 0, 40, '55', 'quantity', np.sum)
plastics_sum = summarise_stock(stock, 0, 60, '60', 'quantity', np.sum)
paper_sum = summarise_stock(stock, 0, 41, '80', 'quantity', np.sum)
tuffa_sum = summarise_stock(stock, 0, 51, '85', 'quantity', np.sum)
# concatenate
finished_goods = pd.DataFrame()
finished_goods = pd.concat([finished_goods, chemical_sum], axis=1).rename(columns={'value': 'chemical'})
finished_goods = | pd.concat([finished_goods, oil_sum], axis=1) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
# Question 1
def filter_employment(df, mask_ls):
'''
INPUT
df - a dataframe holding the Employment column
mask_ls - a list holding the emplyment values to filter for
OUTPUT
df - a dataframe with the filtered rows as per the values defined in the mask
'''
mask = df['Employment'].isin(mask_ls)
return df[mask]
def expand_cat_column(df, column):
'''
INPUT
df - a dataframe holding at least one categorical column
column - a string holding the name of the categorical column to expand
OUTPUT
df - the original dataframe with the expanded categorical column
df_cat - the dataframe containing only the expanded columns
'''
df[column] = df[column].str.split(';')
df_cat = pd.get_dummies(df[column].apply(pd.Series).stack()).sum(level=0)
df = pd.concat([df.drop(column, axis=1), df_cat], axis=1)
return df, df_cat
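# Rough sketch of the intended behaviour (hypothetical values): a row whose column holds
# "Python;SQL" is split on ';' and stacked, so df_cat gains indicator columns Python and SQL
# (both 1 for that row), and df is returned with the original column replaced by those dummies.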
def map_salary(df_salary, df_ones):
'''
INPUT
df_salary - a dataframe containing a column with salary data
df_ones - a column with 0 or 1s, indicating whether salary information shall be mapped or not in the given row
OUTPUT
df - a dataframe with the mapped salary values
'''
mul = lambda col: col * df_salary
df = df_ones.apply(mul)
return df
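# In effect each 0/1 indicator column in df_ones is multiplied element-wise by the salary
# series, so a 1 becomes that respondent's salary and a 0 stays 0.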
def convert_to_fulltime(df_workhours, df_filtered_cols):
'''
INPUT
df_workhours - a dataframe with the column WorkWeekHrs
df_filtered_cols - a dataframe containing only columns with salary data
OUTPUT
df - a dataframe with the converted salary to 40hrs/week
'''
df = 40 *df_filtered_cols.div(df_workhours, axis=0)
return df
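# Worked example (hypothetical numbers): a salary earned over a 20-hour week is scaled
# by 40/20, i.e. doubled, to obtain its 40-hours-per-week equivalent.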
def create_df_with_agg_values(df):
'''
INPUT
df - a dataframe with columns containing salary values
OUTPUT
df_agg - a new dataframe containing the median, standard deviation and count of non-zero values for each column of the input dataframe
'''
mask = df != 0
df_agg_mean = df[mask].median()
df_agg_std = df[mask].std()
df_agg_counts = df[mask].count()
df_agg = | pd.concat([df_agg_mean, df_agg_std, df_agg_counts], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 14:38:59 2020
This script plots the co-occurence rates
@author: acn980
"""
import calendar
import pandas as pd
from datetime import date
import matplotlib.pyplot as plt
import numpy as np
import os,sys,glob
sys.path.insert(0,r'E:\github\seasonality_risk\Functions')
from Functions_HCMC import detrend_fft, remove_NaN_skew, ax_joint_mm, extract_MM, joint_mm_all_cooc, plot_cooc_CI
np.set_printoptions(precision=15)
#%%
save = False
fn_trunk = 'E:/surfdrive/Documents'
fn = os.path.join(fn_trunk, r'Master2019\Thomas\data\matlab_csv')
fn_files = 'Master2019/Thomas/data'
fn2 = os.path.join(fn_trunk,fn_files)
lag_joint = 0 #days
#%% We import the monthly data AND the dates
fn_tide = os.path.join(fn,'Tide_WACC_VungTau_Cleaned_Detrended_Strict_sel_const.csv')
date_parser = lambda x: pd.datetime.strptime(x, "%d-%m-%Y %H:%M:%S")
tide = pd.read_csv(fn_tide, parse_dates = True, date_parser= date_parser, index_col = 'Date')
tide.rename(columns = {tide.columns[0]:'tide'}, inplace = True)
all_tide = tide.resample('M').max()
tide_day = tide.resample('D').max()
# Importing monthly data - rainfall
allfiles = glob.glob(os.path.join(fn2, r'NewRain\TRENDS\MONTH_CORRECTED', 'Thiessen_*.csv'))
all_rain = | pd.DataFrame(data=None) | pandas.DataFrame |
import datetime as dt
import os
import shutil
import unittest
import numpy as np
import pandas as pd
import sys
sys.path.insert(0, '/home/jost/DHC/devicely')
import devicely
class ShimmerTestCase(unittest.TestCase):
READ_PATH = 'tests/Shimmer_test_data/read_test_data.csv'
WRITE_PATH = 'tests/Shimmer_test_data/write_test_data.csv'
def setUp(self):
self.expected_delimiter = ';'
self.expected_head = pd.DataFrame(
{"Shimmer_40AC_Accel_LN_X_CAL": [-1.434782608695652, -1.4021739130434783, -1.434782608695652, -1.4130434782608696, -1.4456521739130435],
"Shimmer_40AC_Accel_LN_Y_CAL": [10.0, 10.0, 10.0, 10.0, 10.0],
"Shimmer_40AC_Accel_LN_Z_CAL": [0.5543478260869565, 0.5543478260869565, 0.5543478260869565, 0.5217391304347826, 0.5108695652173912],
"Shimmer_40AC_Accel_WR_X_CAL": [-3.9305804907241173, -3.9233991621783364, -3.897067624177139, -3.932974266906044, -3.944943147815679],
"Shimmer_40AC_Accel_WR_Y_CAL": [8.42130460801915, 8.442848593656493, 8.42848593656493, 8.42130460801915, 8.42848593656493],
"Shimmer_40AC_Accel_WR_Z_CAL": [-1.620586475164572, -1.5990424895272293, -1.6038300418910831, -1.589467384799521, -1.6612806702573308],
"Shimmer_40AC_Battery_CAL": [4139.194139194139, 4137.728937728937, 4111.355311355311, 4140.65934065934, 4134.798534798534],
"Shimmer_40AC_Ext_Exp_A15_CAL": [1684.9816849816848, 1673.260073260073, 1901.098901098901, 1722.3443223443223, 1678.3882783882782],
"Shimmer_40AC_GSR_Range_CAL": [2.0, 2.0, 2.0, 2.0, 2.0],
"Shimmer_40AC_GSR_Skin_Conductance_CAL": [2.493040293040293, 2.4915750915750916, 2.49010989010989, 2.4886446886446887, 2.4886446886446887],
"Shimmer_40AC_GSR_Skin_Resistance_CAL": [401.1166617690273, 401.3525433695972, 401.58870255957635, 401.8251398292611, 401.8251398292611],
"Shimmer_40AC_Gyro_X_CAL": [0.13740458015267176, 0.183206106870229, 0.2748091603053435, 0.22900763358778625, 0.13740458015267176],
"Shimmer_40AC_Gyro_Y_CAL": [1.8778625954198473, 1.3282442748091603, 1.282442748091603, 1.450381679389313, 1.5114503816793892],
"Shimmer_40AC_Gyro_Z_CAL": [-0.183206106870229, -0.41221374045801523, -0.1984732824427481, -0.12213740458015267, -0.47328244274809156],
"Shimmer_40AC_Int_Exp_A12_CAL": [1680.5860805860805, 1703.296703296703, 1687.179487179487, 1650.5494505494505, 1701.8315018315018],
"Shimmer_40AC_Mag_X_CAL": [-0.11244377811094453, -0.10944527736131934, -0.10794602698650674, -0.10644677661169415, -0.11394302848575712],
"Shimmer_40AC_Mag_Y_CAL": [-0.9160419790104948, -0.9130434782608695, -0.9100449775112444, -0.9010494752623688, -0.9040479760119939],
"Shimmer_40AC_Mag_Z_CAL": [-0.047976011994003, -0.047976011994003, -0.04947526236881559, -0.050974512743628186, -0.037481259370314844],
"Shimmer_40AC_Pressure_BMP280_CAL": [100.43537920858897, 100.4297311006671, 100.44102732749235, 100.44102732749235, 100.43820326666797],
"Shimmer_40AC_Temperature_BMP280_CAL": [33.365877509029815, 33.365877509029815, 33.365877509029815, 33.365877509029815, 33.365877509029815],
'acc_mag': [10.1176036 , 10.11303086, 10.1176036 , 10.11280889, 10.11686206]},
index=pd.to_datetime(["2020-07-28 10:56:50.034", "2020-07-28 10:56:50.057", "2020-07-28 10:56:50.074", "2020-07-28 10:56:50.099", "2020-07-28 10:56:50.111"])
)
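# Note: 'acc_mag' appears to be the Euclidean norm of the three low-noise accelerometer
# axes, e.g. sqrt((-1.4348)**2 + 10.0**2 + 0.5543**2) ~= 10.1176 for the first row.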
self.expected_head.index.name = 'time'
self.expected_units = pd.Series(
{'Shimmer_40AC_Timestamp_Unix_CAL': 'ms',
'Shimmer_40AC_Accel_LN_X_CAL': 'm/(s^2)',
'Shimmer_40AC_Accel_LN_Y_CAL': 'm/(s^2)',
'Shimmer_40AC_Accel_LN_Z_CAL': 'm/(s^2)',
'Shimmer_40AC_Accel_WR_X_CAL': 'm/(s^2)',
'Shimmer_40AC_Accel_WR_Y_CAL': 'm/(s^2)',
'Shimmer_40AC_Accel_WR_Z_CAL': 'm/(s^2)',
'Shimmer_40AC_Battery_CAL': 'mV',
'Shimmer_40AC_Ext_Exp_A15_CAL': 'mV',
'Shimmer_40AC_GSR_Range_CAL': 'no_units',
'Shimmer_40AC_GSR_Skin_Conductance_CAL': 'uS',
'Shimmer_40AC_GSR_Skin_Resistance_CAL': 'kOhms',
'Shimmer_40AC_Gyro_X_CAL': 'deg/s',
'Shimmer_40AC_Gyro_Y_CAL': 'deg/s',
'Shimmer_40AC_Gyro_Z_CAL': 'deg/s',
'Shimmer_40AC_Int_Exp_A12_CAL': 'mV',
'Shimmer_40AC_Mag_X_CAL': 'local_flux',
'Shimmer_40AC_Mag_Y_CAL': 'local_flux',
'Shimmer_40AC_Mag_Z_CAL': 'local_flux',
'Shimmer_40AC_Pressure_BMP280_CAL': 'kPa',
'Shimmer_40AC_Temperature_BMP280_CAL': 'Degrees Celsius',
'acc_mag': 'm/(s^2)'}
)
self.reader = devicely.ShimmerPlusReader(self.READ_PATH)
def test_basic_read(self):
self.assertEqual(self.reader.delimiter, self.expected_delimiter)
pd.testing.assert_frame_equal(self.reader.data.head().reset_index(drop=True),
self.expected_head.reset_index(drop=True))
pd.testing.assert_index_equal(self.reader.data.head().index.round('ms'),
self.expected_head.index.round('ms'))
| pd.testing.assert_series_equal(self.reader.units, self.expected_units, check_names=False) | pandas.testing.assert_series_equal |
import numpy as np
from moviepy.editor import VideoFileClip
from ImageSequenceClip import ImageSequenceClip
import utils
from timeit import default_timer as timer
from SrtTextHandler import SrtTextHandler
from DatEmotionHandler import DatEmotionHandler
from collections import defaultdict
from scipy.io import wavfile
import pandas as pd
from video2splice import splice_video, splice_audio
import glob
import csv
"""
DEAP scale: valence, arousal (continuous 1-9) - Russell's scale ( <NAME>, “A circumplex model of affect,” Journal of Personality and Social Psychology, vol. 39, no. 6, pp. 1161–1178, 1980)
"Arousal can range from inactive (e.g. uninterested, bored) to active (e.g. alert, excited), whereas valence ranges from unpleasant (e.g. sad, stressed) to pleasant (e.g. happy, elated)"
Source: http://www.eecs.qmul.ac.uk/mmv/datasets/deap/doc/tac_special_issue_2011.pdf
A dummy text csv is also generated here but it's not used (generated so we can use splices2npz.py)
Splice video into 3 seconds each -> video, emotion (1D), audio
Start splicing video from the beginning (if more than 1 time is shown, it means we ran the code more than once)
Test: Vid 1-16, 19-40 (3 splices each)
Em 1: 28 * 3 -> 84
Em 0: 10 * 3 -> 30
Program ran for 221.39 seconds
Train (highlights): Vid 1-16, 19-40 (20 splices each)
Em 1: 1-7, 9-13, 15, 20, 22-25, 27, 29-32, 34-38 (7+5+2+4+1+4+5=28 -> 560 splices)
Em 0: 8, 14, 16, 19, 21, 26, 28, 33, 39, 40 (10 -> 200 splices)
Program ran for 1209.07 seconds
Train (raw videos): same emotion as above, but number of splices per video is different:
Em 1 (total=2,069): Vid 1 (46), 2 (67), 3 (77), 4 (92), 5 (40), 6 (70), 7 (48), 9 (90), -> 530
10 (83), 11 (76), 12 (72), 13 (61), 15 (23), 20 (78), 22 (78), -> 471
23 (78), 24 (79), 25 (89), 27 (112), 29 (90), 30 (69), 31 (95), -> 612
32 (75), 34 (79), 35 (59), 36 (66), 37 (104), 38 (73) -> 456
Em 0 (total=781): Vid 8 (70), 14 (73), 16 (74), 19 (55), 21 (94), -> 366
26 (82), 28 (63), 33 (60), 39 (124), 40 (86) -> 415
P.S.: Raw 21, 23 have compromised frames and splicing doesn't work.
Solved by converting to .mov and back to .mp4
"""
__author__ = "<NAME>"
is_test = False
if is_test:
ROOT = utils.project_dir_name() + 'data/deap/test_data/'
else:
ROOT = utils.project_dir_name() + 'data/deap_raw/mp4/'
params = {
'root': ROOT,
'emotion_root': utils.project_dir_name() + 'data/deap_raw/',
'sr': 16000,
'seconds': 3,
'num_samples': -1 # default = -1 for all samples. Use different to test code
}
def get_num_video_splices(video_splices_dir):
return len(glob.glob(video_splices_dir+'*.mp4'))
def splice_emotion(video_name_int, csv_filename='participant_ratings_1D.csv'):
n_splices = get_num_video_splices(params['root'] + 'video_splices_{}secs/'.format(params['seconds']))
# Extract emotion
with open(csv_filename, newline='') as csvfile:
reader = csv.DictReader(csvfile)
dict = {'video_id': []}
for row in reader:
dict['video_id'].append(row['emotion'])
# DEAP dataset is annotated for each video, not per time in video.
# This means that the emotion score will be the same for every splice in 'video_name'.
emotion_csv_data = defaultdict(lambda: [])
text_csv_data = defaultdict(lambda: [])
emotion_counter = defaultdict(lambda: 0)
emotion = dict['video_id'][video_name_int-1]
for e in range(0, n_splices):
emotion_csv_data['splice'].append(e)
emotion_csv_data['emotion'] = emotion
text_csv_data['splice'].append(e)
text_csv_data['text'].append("")
emotion_counter[emotion] += 1
# Save emotion splices in csv file
df = | pd.DataFrame(emotion_csv_data, columns=['splice', 'emotion']) | pandas.DataFrame |
# Some utilities to cluster and name vectors
import logging
from collections import Counter
from itertools import chain, combinations
import altair as alt
import networkx as nx
import numpy as np
import pandas as pd
from community import community_louvain
from scipy.spatial.distance import cityblock, cosine
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
from eurito_indicators.pipeline.processing_utils import clean_table_names, make_lq
def build_cluster_graph(
vectors: pd.DataFrame,
clustering_algorithms: list,
n_runs: int = 10,
sample: int = None,
):
"""Builds a cluster network based on observation co-occurrences in a clustering output
Args:
vectors: vectors to cluster
clustering_algorithms: a list of pairs where the first element is a clustering
algorithm and the second is a (parameter name, parameter values) tuple
n_runs: number of times to run a clustering algorithm
sample: size of the vector to sample.
Returns:
A network whose nodes are observations and whose edge weights count
how often two observations co-occur in the same cluster
"""
clustering_edges = []
index_to_id_lookup = {n: ind for n, ind in enumerate(vectors.index)}
logging.info("Running cluster ensemble")
for cl in clustering_algorithms:
logging.info(cl[0])
algo = cl[0]
parametres = [{cl[1][0]: v} for v in cl[1][1]]
for par in parametres:
logging.info(f"running {par}")
for _ in range(n_runs):
cl_assignments = algo(**par).fit_predict(vectors)
index_cluster_pair = {n: c for n, c in enumerate(cl_assignments)}
indices = range(0, len(cl_assignments))
pairs = combinations(indices, 2)
for p in pairs:
if index_cluster_pair[p[0]] == index_cluster_pair[p[1]]:
clustering_edges.append(frozenset(p))
edges_weighted = Counter(clustering_edges)
logging.info("Building cluster graph")
edge_list = [(list(fs)[0], list(fs)[1]) for fs in edges_weighted.keys()]
cluster_graph = nx.Graph()
cluster_graph.add_edges_from(edge_list)
for ed in cluster_graph.edges():
cluster_graph[ed[0]][ed[1]]["weight"] = edges_weighted[
frozenset([ed[0], ed[1]])
]
return cluster_graph, index_to_id_lookup
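# Minimal usage sketch (assumed, not from the original module): each entry pairs an algorithm
# with the name and grid of the parameter to vary, matching how cl[0] and cl[1] are unpacked above:
#   from sklearn.cluster import KMeans
#   algos = [(KMeans, ("n_clusters", [20, 30, 40]))]
#   graph, lookup = build_cluster_graph(vectors_df, algos, n_runs=5)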
def name_categories_tokenised(table, category, tokenised, top_terms=3000, top_words=10):
"""Analyses a tokenised variable to extract salient terms"""
docterm_matrix = (
table.groupby(category)[tokenised]
.apply(lambda x: pd.Series(chain(*list(x))).value_counts())
.unstack(level=1)
.fillna(0)
)
pop_words = (
docterm_matrix.sum().sort_values(ascending=False)[:top_terms].index.tolist()
)
docterm_popular = docterm_matrix[pop_words]
tf = TfidfTransformer()
tfidf_df = pd.DataFrame(
tf.fit_transform(docterm_popular).todense(),
columns=docterm_popular.columns,
index=docterm_popular.index,
)
names = {}
for _id, row in tfidf_df.iterrows():
salient = "_".join(
tfidf_df.loc[_id].sort_values(ascending=False)[:top_words].index.tolist()
)
names[_id] = salient
return names
def extract_name_communities(
cluster_graph: nx.Graph,
resolution: float,
index_lookup: dict,
text_table: pd.DataFrame,
doc_id: str = "project_id",
doc_text: str = "title",
) -> list:
"""Extracts community from the cluster graph and names them
Args:
cluster_graph: network object
resolution: resolution for community detection
index_lookup: lookup between integer indices and project ids
text_table: table with variable names
doc_id: document id in the text table
doc_text: text variable in the text table
Returns:
a lookup between communities and the projects that belong to them
a lookup between community indices and names
"""
logging.info("Extracting communities")
comms = community_louvain.best_partition(cluster_graph, resolution=resolution)
ids_to_comms = {index_lookup[k]: v for k, v in comms.items()}
comm_assignments = {
comm: [index_lookup[k] for k, v in comms.items() if v == comm]
for comm in set(comms.values())
}
logging.info("Naming communities")
comm_names = name_category(
(text_table.assign(community=lambda df: df[doc_id].map(ids_to_comms))),
text_var=doc_text,
)
return comm_assignments, comm_names
def name_category(
text_table: pd.DataFrame,
cat_var: str = "community",
text_var: str = "title",
top_words: int = 15,
max_features: int = 5000,
max_df: float = 0.8,
) -> dict:
"""Names the clusters with their highest tfidf tokens
Args:
text_table: a dataframe with category to name
cat_var: category to name
text_var: text to use in naming
top_words: top words to keep in name
Returns:
lookup between category indices and names
"""
grouped_names = text_table.groupby(cat_var)[text_var].apply(
lambda x: " ".join(list(x))
)
tfidf = TfidfVectorizer(
stop_words="english",
ngram_range=[2, 3],
max_features=max_features,
max_df=max_df,
)
tfidf_fit = tfidf.fit_transform(grouped_names)
tfidf_df = pd.DataFrame(tfidf_fit.todense(), columns=tfidf.vocabulary_)
cluster_salient = {}
for ind, _ in tfidf_df.iterrows():
salient_words = tfidf_df.loc[ind].sort_values(ascending=False).index[:top_words]
cluster_salient[ind] = "_".join(salient_words)
return cluster_salient
def make_distance_to_clusters(
vectors_df: pd.DataFrame,
distance,
cluster_ids: dict,
) -> pd.DataFrame:
"""Calculates distances between all vectors in a table and the centroids of identified clusters
Args:
vectors_df: table with all vectors
cluster_id: lookup between cluster indices and vector indices
Returns:
A table with distance between each vector and the cluster categories
"""
logging.info("Calculating cluster centroids")
cluster_centroids_df = pd.concat(
[
vectors_df.loc[vectors_df.index.isin(set(v))].median()
for v in cluster_ids.values()
],
axis=1,
).T
cluster_centroids_df.index = cluster_ids.keys()
cluster_centroids_array = np.array(cluster_centroids_df)
vectors_array = np.array(vectors_df)
dists = []
logging.info("Calculating vector distances to clusters")
for v in vectors_array:
vect_dist = []
for cl_centr in cluster_centroids_array:
vect_dist.append(distance(v, cl_centr))
dists.append(vect_dist)
dist_df = pd.DataFrame(
dists,
index=vectors_df.index,
columns=sorted(set(cluster_ids.keys()))
# columns=[cluster_names[comm] for comm in sorted(set(cluster_ids.keys()))],
)
return dist_df
def check_community_consensus(doc_ids: list, communities: list) -> pd.DataFrame:
"""Checks if project ids are allocated to the same communities by our approach
Args:
doc_ids: list of doc ids
communities: list of community assignments
Returns:
A dataframe with pairs of projects and the number of times they are assigned together
"""
id_consensus = []
doc_pairs = combinations(doc_ids, 2)
for c in doc_pairs:
match = [1 if comm[c[0]] == comm[c[1]] else 0 for comm in communities]
matches_id = pd.Series(
list(c) + match, index=["doc1", "doc2"] + list(range(len(communities)))
)
id_consensus.append(matches_id)
id_consensus_df = pd.DataFrame(id_consensus).set_index(["doc1", "doc2"])
return id_consensus_df
def make_doc_comm_lookup(comm_doc_lookup: dict, method: str = "single") -> dict:
"""Creates doc to comm lookup from a comm to list of docs lookup
Args:
comm_doc_lookup: lookup between clusters and their related vectors
method: if single, assign one element to each cluster. If multiple,
many clusters can be assigned to the same document
"""
if method == "single":
return {el: k for k, v in comm_doc_lookup.items() for el in v}
elif method == "multiple":
assign_dict = dict()
for cl, assignments in comm_doc_lookup.items():
for _id in assignments:
if _id not in assign_dict.keys():
assign_dict[_id] = []
assign_dict[_id].append(cl)
else:
assign_dict[_id].append(cl)
return assign_dict
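# Small example of the two modes (hypothetical ids): with {0: ["p1", "p2"], 1: ["p2"]},
# method="single" yields {"p1": 0, "p2": 1} (the last assignment wins), while
# method="multiple" yields {"p1": [0], "p2": [0, 1]}.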
def get_closest_documents(
dist_df: pd.DataFrame,
cluster: int,
cluster_assignments: dict,
sd_scale: float = 2,
exclude_in_cluster: bool = True,
) -> list:
"""Returns closest documents to a cluster
Args:
dist_df: dataframe with distances
cluster: cluster we are interested in
cluster_assignments: lookup between cluster ids and project ids
sd_scale: standard deviation to define proximity
exclude_in_cluster: whether we want to exclude docs already in a cluster from the results
Returns:
A list of project ids
"""
dist_copy = dist_df.copy()
if exclude_in_cluster is True:
dist_copy = dist_copy.loc[~dist_copy.index.isin(cluster_assignments[cluster])]
sd = np.std(dist_copy[cluster])
selected = (
dist_copy.sort_values(cluster, ascending=True)
.loc[dist_copy[cluster] < (np.mean(dist_copy[cluster]) - sd_scale * sd)]
.index.tolist()
)
logging.info(cluster)
logging.info(len(selected))
return selected
def tag_clusters(docs, cluster_assignment, cluster_name="cluster_covid"):
"""Tag clusters with an assignment"""
docs_copy = docs.copy()
docs_copy[cluster_name] = docs_copy["project_id"].map(cluster_assignment)
docs_copy[cluster_name] = docs_copy[cluster_name].fillna("non_covid")
return docs_copy
def make_activity(
table: pd.DataFrame,
reg_var: str = "coordinator_country",
cat_var: str = "cluster_covid",
funding_var: str = "ec_max_contribution",
) -> pd.DataFrame:
"""Counts activity, funding and LQs in a project activity table"""
if reg_var == "participant_country":
table = process_participant(table)
table_wide = table.groupby([reg_var, cat_var]).size().unstack().fillna(0)
table_funding = (
table.groupby([reg_var, cat_var])[funding_var].sum().unstack().fillna(0)
)
table_lq, table_funding_lq = [make_lq(t) for t in [table_wide, table_funding]]
stacked_tables = [
t.stack().reset_index(name="value").assign(variable=v)
for t, v in zip(
[table_wide, table_lq, table_funding, table_funding_lq],
["project_total", "project_lq", "funding_total", "funding_lq"],
)
]
return pd.concat(stacked_tables, axis=0)
def process_participant(table):
"""Creates a participant table (requires exploding a combined variable
in the projects table)
"""
docs = table.copy()
docs["participant_country"] = docs["participant_countries"].apply(
lambda x: x.split(";") if type(x) == str else np.nan
)
docs_expl = docs.dropna(axis=0, subset=["participant_country"]).explode(
"participant_country"
)
return docs_expl
def specialisation_robust(
doc_distances,
sd_thres,
projects,
cluster_assignments,
pre_covid_date="2019/01/01",
reg_var="coordinator_country",
):
"""Extract activity levels based on different thresholds of "distance"
from a cluster centroid
"""
docs = projects.copy().query(f"start_date<'{pre_covid_date}'")
# Get project lists under different threshold
close_to_clusters = [
make_doc_comm_lookup(
{
cl: get_closest_documents(
doc_distances, cl, cluster_assignments, sd_scale=sd
)
for cl in doc_distances.columns
},
method="multiple",
)
for sd in sd_thres
]
# Assign projects to categories
results = []
for thres, assign in zip(["low", "high"], close_to_clusters):
docs_ = docs.copy()
docs_["cluster_covid"] = docs_["project_id"].map(assign)
docs_["cluster_covid"] = docs_["cluster_covid"].fillna("non_covid")
docs_expl = docs_.explode("cluster_covid")
if reg_var == "participant_country":
docs_part = process_participant(docs_expl)
act = make_activity(docs_part, reg_var="participant_country")
else:
act = make_activity(docs_expl)
act = act.rename(columns={"value": f"value_{str(thres)}"})
results.append(act)
if len(results) > 1:
merged = results[0].merge(results[1], on=[reg_var, "cluster_covid", "variable"])
return merged
else:
return results[0]
def plot_specialisation_robust(
reg_var,
activity_var,
dist_to_clusters,
cluster_assignments,
projs,
focus_countries,
clean_var_lookup,
thres=[1.5, 2.5],
):
"""Plot level of preparedness in a covid cluster"""
specialisation_related = specialisation_robust(
dist_to_clusters,
thres,
projects=projs,
reg_var=reg_var,
cluster_assignments=cluster_assignments,
)
specialisation_long = (
specialisation_related.loc[
specialisation_related[reg_var].isin(focus_countries)
]
.query(f"variable=='{activity_var}'")
.reset_index(drop=True)
.melt(id_vars=[reg_var, "variable", "cluster_covid"], var_name="position")
.reset_index(drop=True)
)
specialisation_long_clean = clean_table_names(
specialisation_long, [reg_var, "cluster_covid"], clean_var_lookup
).query("cluster_covid!='non_covid'")
spec_comp = (
alt.Chart()
.mark_line()
.encode(
x=alt.X("value", title=activity_var),
detail="cluster_covid",
y=alt.Y(
"cluster_covid",
title=None,
axis=alt.Axis(ticks=False, labels=False),
sort="descending",
),
color="cluster_covid_clean:N",
)
).properties(height=20, width=200)
ruler = (
alt.Chart()
.transform_calculate(v="1")
.mark_rule(color="black", strokeDash=[1, 1])
.encode(x="v:Q")
)
comp_chart = alt.layer(spec_comp, ruler, data=specialisation_long_clean).facet(
row=alt.Row(
f"{reg_var}_clean",
sort=alt.EncodingSortField("value", "median", order="descending"),
header=alt.Header(labelAngle=360, labelAlign="left"),
)
)
return comp_chart
def make_pre_post_table(
projs,
cluster_groups,
distance_to_clusters,
reg_var="coordinator_country",
sd=1.5,
):
"""Extracts activity levels before and after covid-19"""
# NB there is some confusion between the two types of lookups we use
# one that maps one project id to one cluster
# one that maps one cluster to a list of project ids
# Should find a clearer way to organise this
response = make_activity(
projs.query("start_date>'2019/01/01'").assign(
cluster_covid=lambda df: df["project_id"].map(
make_doc_comm_lookup(cluster_groups)
)
),
reg_var=reg_var,
)
print(response.head())
sp_related = specialisation_robust(
distance_to_clusters,
[sd],
projects=projs,
reg_var=reg_var,
cluster_assignments=cluster_groups,
).rename(columns={"value_low": "value_pre"})
print(sp_related.head())
combi = response.merge(sp_related, on=[reg_var, "cluster_covid", "variable"])
logging.info(
combi.groupby(["variable", "cluster_covid"]).apply(
lambda df: df[["value", "value_pre"]].corr(method="spearman").iloc[0, 1]
)
)
return combi
def filter_pre_post_table(
combi_table,
focus_countries,
reg_var="coordinator_country",
focus_var="project_lq",
volume_var="project_total",
):
"""Filters an activity table to focus on particular variables and countries"""
combined_table_focus = combi_table.loc[
combi_table[reg_var].isin(focus_countries)
].query(f"variable=='{focus_var}'")
size_lookup = combi_table.query(f"variable=='{volume_var}'")[
[reg_var, "cluster_covid", "value"]
].rename(columns={"value": "volume"})
combined_table_focus = combined_table_focus.merge(
size_lookup, on=[reg_var, "cluster_covid"]
)
return combined_table_focus
def plot_preparedness_response(data, clean_var_lookup, reg_var):
"""Plots a comparison between preparedness and response"""
data_clean = clean_table_names(data, [reg_var, "cluster_covid"], clean_var_lookup)
clean_country_name = reg_var + "_clean"
data_clean["cluster_covid_clean"] = [x for x in data_clean["cluster_covid_clean"]]
comp_ch = (
alt.Chart()
.mark_point(filled=True, stroke="black", strokeWidth=0.5)
.encode(
x=alt.X(
"value_pre",
title="Related activity pre 2020",
scale=alt.Scale(zero=False),
),
size=alt.Size("volume",title='Number of projects'),
y=alt.Y("value", title="Activity post-2020", scale=alt.Scale(zero=False)),
color=alt.Color(clean_country_name, scale=alt.Scale(scheme="tableau20"),
title='Coordinator country'),
tooltip=[clean_country_name, "volume"],
)
).properties(height=200, width=300)
hor = (
alt.Chart()
.transform_calculate(y="1")
.mark_rule(strokeDash=[2, 2])
.encode(y="y:Q")
)
vert = (
alt.Chart()
.transform_calculate(x="1")
.mark_rule(strokeDash=[2, 2])
.encode(x="x:Q")
)
lay = alt.layer(comp_ch, hor, vert, data=data_clean).facet(
facet=alt.Facet("cluster_covid_clean", title="Covid research cluster"), columns=3)
return lay
def calculate_pairwise_distances(df_1: pd.DataFrame, df_2: pd.DataFrame):
"""Calculate pairwise distances between two groups of vectors
Args:
df_1: first set of vectors
df_2: second set of vectors
"""
df_1_array = np.array(df_1)
df_2_array = np.array(df_2)
distances = []
for v_1 in df_1_array:
vect_dist = []
for v_2 in df_2_array:
vect_dist.append(cityblock(v_1, v_2))
distances.append(vect_dist)
dist_df = pd.DataFrame(distances, index=df_1.index, columns=df_2.index)
return dist_df
def rank_org_distances(org_cluster_dist, cluster, orgs_in_cluster, cluster_labels):
"""Ranks distances between organisations a covid-related corpus of research
and adds extra variables to aggregate results later
Args:
org_cluster_dist: semantic distance between an organisation and a
cluster
cluster: cluster we are focusing on
orgs_in_cluster: lookup between cluster and organisations that
participated in its projects
cluster_labels: clean cluster names
"""
dist_copy = org_cluster_dist.copy()
dist_copy["ranking"] = pd.qcut(
dist_copy[cluster], q=np.arange(0, 1.1, 0.1), labels=False
)
dist_copy["is_participant"] = dist_copy.index.get_level_values(level=0).isin(
orgs_in_cluster[cluster]
)
dist_copy["cluster"] = cluster_labels[cluster]
return dist_copy.rename(columns={cluster: "distance"})[
["cluster", "ranking", "is_participant", "country"]
]
def plot_participation_distance(org_distances, focus_countries, clean_variable_lookup):
"""Plots level of participation by country and cluster at different levels of distance"""
org_distances_mean = (
org_distances.groupby(["country", "cluster", "ranking"])["is_participant"]
.mean()
.loc[focus_countries]
.reset_index(drop=False)
.rename(columns={"is_participant": "share"})
)
org_distances_sum = (
org_distances.groupby(["country", "cluster", "ranking"])["is_participant"]
.sum()
.loc[focus_countries]
.reset_index(drop=False)
.rename(columns={"is_participant": "total"})
)
org_distances_aggregate = pd.concat(
[org_distances_mean, org_distances_sum["total"]], axis=1
)
org_distances_aggregate['country_clean'] = org_distances_aggregate['country'].map(clean_variable_lookup)
distances_chart = (
alt.Chart(org_distances_aggregate)
.mark_point(filled=True, stroke="black", strokeWidth=0.5)
.encode(
y=alt.Y(
"country_clean",title='Country',
sort=alt.EncodingSortField("share", op="median", order="descending"),
),
x=alt.X("share", axis=alt.Axis(format="%"), title="Share of participation"),
tooltip=["country_clean", "share", "total", "ranking"],
size=alt.Size("total", title="Volume of participation"),
color=alt.Color(
"ranking:O",
scale=alt.Scale(scheme="redblue"),
title="Distance to cluster",
),
facet=alt.Facet("cluster", columns=3, title='Covid research cluster'),
)
)
return distances_chart
def k_check_clusters(vectors, cluster_assignments, n_clust=25, sample_size=1000):
"""Checks the robustness of a cluster assignment using a Kmeans baseline
Args:
vectors: docs to assess
cluster_assignments: lookup between clusters and document lists that we are validating
n_clust: number of clusters to use in the test
sample_size: sample size for comparing pairs
Returns:
a df with number of cluster co-occurrences and a df with cluster distribution co-occurrences
"""
doc_cluster_lookup = make_doc_comm_lookup(cluster_assignments)
logging.info("fitting cluster")
k = KMeans(n_clusters=n_clust)
fit = k.fit_predict(vectors)
k_labs = {i: k for i, k in zip(vectors.index, fit)}
samp = vectors.sample(n=sample_size).index.tolist()
pairs = combinations(samp, 2)
matches = []
logging.info("Calculating co-occurrences")
for p in pairs:
if k_labs[p[0]] == k_labs[p[1]]:
matches.append([doc_cluster_lookup[p[0]], doc_cluster_lookup[p[1]]])
co_occ_df = (
pd.DataFrame(matches, columns=["c1", "c2"])
.pivot_table(index="c1", columns="c2", aggfunc="size")
.fillna(0)
.stack()
)
logging.info("Calculating share similarities")
reclassify = {
clust: pd.Series(
[v for k, v in k_labs.items() if k in cluster_assignments[clust]]
).value_counts(normalize=True)
for clust in cluster_assignments.keys()
}
corrs = []
for p in combinations(reclassify.keys(), 2):
comb = | pd.concat([reclassify[p[0]], reclassify[p[1]]], axis=1) | pandas.concat |
# pylint: disable-msg=W0612,E1101,W0141
import nose
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notnull, isnull
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, cPickle,
product as cart_product, zip)
import pandas as pd
import pandas.index as _index
class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]],
names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'],
inplace=True)
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
self.assertNotIsInstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
tm.assert_isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
assert_series_equal(result, expected)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(
level='month').transform(np.sum)
expected = op(self.ymd['A'], broadcasted)
assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
pickled = cPickle.dumps(frame)
unpickled = cPickle.loads(pickled)
assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
expected = self.frame.ix[[0, 3]]
assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
self.assertIs(chunk.index, new_index)
chunk = self.ymd.ix[new_index]
self.assertIs(chunk.index, new_index)
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
self.assertIs(chunk.columns, new_index)
chunk = ymdT.ix[:, new_index]
self.assertIs(chunk.columns, new_index)
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
self.assertEquals(result.index.names, self.frame.index.names)
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
self.assert_(lines[2].startswith('a 0 foo'))
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
assert_almost_equal(col.values, df.values[:, 0])
self.assertRaises(KeyError, df.__getitem__, ('foo', 'four'))
self.assertRaises(KeyError, df.__getitem__, 'foobar')
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
result2 = s.ix[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
self.assertEquals(result, expected)
# fancy
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
expected = s.reindex(s.index[49:51])
assert_series_equal(result, expected)
# key error
self.assertRaises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
self.assertRaises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
self.assert_(isnull(s.values[42:65]).all())
self.assert_(notnull(s.values[:42]).all())
self.assert_(notnull(s.values[65:]).all())
s[2000, 3, 10] = np.nan
self.assert_(isnull(s[49]))
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
with assertRaisesRegexp(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.ix[:4]
expected = self.frame[:4]
assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.ix[:4] = 0
self.assert_((cp.values[:4] == 0).all())
self.assert_((cp.values[4:] != 0).all())
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.ix[:, 'value']
assert_series_equal(df['value'], result)
result = df.ix[1:3, 'value']
assert_series_equal(df['value'][1:3], result)
result = df.ix[:, :]
assert_frame_equal(df, result)
result = df
df.ix[:, 'value'] = 10
result['value'] = 10
assert_frame_equal(df, result)
df.ix[:, :] = 10
assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'],
'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'],
['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
assert_frame_equal(cp['a'], cp['b'])
#----------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
self.assertTrue((df['A'].values == 0).all())
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
assert_series_equal(df['A', '1'], df['B', '1'])
assert_series_equal(df['A', '2'], df['B', '1'])
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.ix[(0, 0), :]
expected = idf.ix[0, 0]
expected2 = idf.xs((0, 0))
assert_series_equal(result, expected)
assert_series_equal(result, expected2)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.ix[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.ix[2000, 1, 6][['A', 'B', 'C']]
assert_series_equal(result, expected)
def test_getitem_multilevel_index_tuple_unsorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.ix[query_index, "data"]
xp = Series(['x'], index=MultiIndex.from_tuples([(0, 1, 0)]))
assert_series_equal(rs, xp)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.ix[('bar', 'two')]
assert_series_equal(xs, xs2)
assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
# missing values in returned index should be preserved
acc = [
('a','abcde',1),
('b','bbcde',2),
('y','yzcde',25),
('z','xbcde',24),
('z',None,26),
('z','zbcde',25),
('z','ybcde',26),
]
df = DataFrame(acc, columns=['a1','a2','cnt']).set_index(['a1','a2'])
expected = DataFrame({ 'cnt' : [24,26,25,26] }, index=Index(['xbcde',np.nan,'zbcde','ybcde'],name='a2'))
result = df.xs('z',level='a1')
assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.ix['foo']
expected = self.frame.T['foo'].T
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.ix[2000, 4]
assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.ix['foo', 'one']
assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'),
('p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
self.assertRaises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.ix[20111201, :]
assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
self.assertEqual(len(result), 2)
assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.ix[2000, 5]['A']
assert_series_equal(result, expected)
# not implementing this for now
self.assertRaises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
result = df['bar']
result2 = df.ix[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2],
[0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.ix[1:2]
exp = frame.reindex(frame.index[2:])
assert_frame_equal(res, exp)
frame.ix[1:2] = 7
self.assert_((frame.ix[1:2] == 7).values.all())
series = Series(np.random.randn(len(index)), index=index)
res = series.ix[1:2]
exp = series.reindex(series.index[2:])
assert_series_equal(res, exp)
series.ix[1:2] = 7
self.assert_((series.ix[1:2] == 7).values.all())
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.ix[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
# raises exception
self.assertRaises(KeyError, frame.ix.__getitem__, 3)
# however this will work
result = self.frame.ix[2]
expected = self.frame.xs(self.frame.index[2])
assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
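# build the expected frame by keeping the columns whose second index level has label code 1 (i.e. month 2)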
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
assert_frame_equal(result, expected)
def test_getitem_slice_not_sorted(self):
df = self.frame.sortlevel(1).T
# buglet with int typechecking
result = df.ix[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
assert_series_equal(dft['foo', 'two'], s > s.median())
# tm.assert_isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.ix[('bar', 'two'), 'B'] = 5
self.assertEquals(self.frame.ix[('bar', 'two'), 'B'], 5)
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
self.assertEquals(df.ix[('bar', 'two'), 1], 7)
def test_fancy_slice_partial(self):
result = self.frame.ix['bar':'baz']
expected = self.frame[3:7]
assert_frame_equal(result, expected)
result = self.ymd.ix[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.ix[('a', 'y'), :]
expected = df.ix[('a', 'y')]
assert_frame_equal(result, expected)
result = df.ix[('a', 'y'), [1, 0]]
expected = df.ix[('a', 'y')][[1, 0]]
assert_frame_equal(result, expected)
self.assertRaises(KeyError, df.ix.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_sortlevel(self):
df = self.frame.copy()
df.index = np.arange(len(df))
assertRaisesRegexp(TypeError, 'hierarchical index', df.sortlevel, 0)
# axis=1
# series
a_sorted = self.frame['A'].sortlevel(0)
with assertRaisesRegexp(TypeError, 'hierarchical index'):
self.frame.reset_index()['A'].sortlevel()
# preserve names
self.assertEquals(a_sorted.index.names, self.frame.index.names)
# inplace
rs = self.frame.copy()
rs.sortlevel(0, inplace=True)
assert_frame_equal(rs, self.frame.sortlevel(0))
def test_sortlevel_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int64)
# it works!
result = df.sortlevel(0)
self.assertTrue(result.index.lexsort_depth == 3)
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)]*3)
df = DataFrame(np.random.randn(4000), index=index, dtype = np.int32)
# it works!
result = df.sortlevel(0)
self.assert_((result.dtypes.values == df.dtypes.values).all() == True)
self.assertTrue(result.index.lexsort_depth == 3)
def test_delevel_infer_dtype(self):
tuples = [tuple for tuple in cart_product(['foo', 'bar'],
[10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples,
names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
self.assert_(com.is_integer_dtype(deleveled['prm1']))
self.assert_(com.is_float_dtype(deleveled['prm2']))
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
self.assertEquals(len(deleveled.columns), len(self.ymd.columns))
deleveled = self.series.reset_index()
tm.assert_isinstance(deleveled, DataFrame)
self.assertEqual(len(deleveled.columns),
len(self.series.index.levels) + 1)
deleveled = self.series.reset_index(drop=True)
tm.assert_isinstance(deleveled, Series)
def test_sortlevel_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sortlevel(level='second')
expected = self.frame.sortlevel(level=1)
assert_frame_equal(result, expected)
def test_sortlevel_mixed(self):
sorted_before = self.frame.sortlevel(1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sortlevel(1)
assert_frame_equal(sorted_before, sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sortlevel(1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sortlevel(1, axis=1)
assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count(axis=axis)
expected = expected.reindex_like(result).astype('i8')
assert_frame_equal(result, expected)
self.frame.ix[1, [1, 2]] = np.nan
self.frame.ix[7, [0, 1]] = np.nan
self.ymd.ix[1, [1, 2]] = np.nan
self.ymd.ix[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
assertRaisesRegexp(TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
assert_almost_equal(result.columns, ['A', 'B', 'C'])
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'],
['one', 'two', 'three', 'four']],
labels=[[0, 0, 0, 2, 2],
[2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
assert_series_equal(result.astype('f8'),
expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0])
assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
assert_frame_equal(result, expected)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked2 = unstacked.unstack()
# test that ints work
unstacked = self.ymd.astype(int).unstack()
# test that int32 work
unstacked = self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0),
(1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sortlevel(2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
assert_frame_equal(restacked.sortlevel(0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort(axis=1, ascending=False)
restacked = unstacked.stack()
assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).ix[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
assert_frame_equal(result, expected)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sortlevel(1, axis=1)
stacked = df.stack()
assert_series_equal(stacked['foo'], df['foo'].stack())
self.assertEqual(stacked['bar'].dtype, np.float_)
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive',
'activ', 'activ', 'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
assert_series_equal(restacked,
result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
self.assertEquals(unstacked.index.name, 'first')
self.assertEquals(unstacked.columns.names, ['exp', 'second'])
restacked = unstacked.stack()
self.assertEquals(restacked.index.names, self.frame.index.names)
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
assert_frame_equal(unstacked, expected)
self.assertEquals(unstacked.columns.names,
expected.columns.names)
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sortlevel(0)
assert_frame_equal(restacked, self.ymd)
self.assertEquals(restacked.index.names, self.ymd.index.names)
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
assert_frame_equal(unstacked, expected.ix[:, unstacked.columns])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'], freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02', '2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10', '2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU')
rs = down.stack('ID')
xp = unst.ix[:, ['VAR1']].resample('W-THU').stack('ID')
xp.columns.name = 'Params'
assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'],
'B': ['b1', 'b2'],
'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
self.assertTrue(len(stacked) > len(stacked.dropna()))
stacked = df.unstack().stack(dropna=True)
assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
assert_series_equal(applied.reindex(expected.index), expected)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
self.assertEquals(len(result.columns), 4)
recons = result.stack()
assert_frame_equal(recons, df)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]], names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'),
('f2', 's1'), ('f2', 's2'),
('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
self.assert_((result.columns == ['f2', 'f3']).all())
def test_join(self):
a = self.frame.ix[:5, ['A']]
b = self.frame.ix[2:, ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
self.assert_(not np.isnan(joined.values).all())
assert_frame_equal(joined, expected, check_names=False) # TODO what should join do with names ?
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel(0, 1)
swapped2 = self.frame['A'].swaplevel('first', 'second')
self.assert_(not swapped.index.equals(self.frame.index))
assert_series_equal(swapped, swapped2)
back = swapped.swaplevel(0, 1)
back2 = swapped.swaplevel('second', 'first')
self.assert_(back.index.equals(self.frame.index))
assert_series_equal(back, back2)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
panel = Panel({'ItemA': self.frame,
'ItemB': self.frame * 2})
result = panel.swaplevel(0, 1, axis='major')
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
assert_frame_equal(result, expected)
with assertRaisesRegexp(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with assertRaisesRegexp(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
tm.assert_isinstance(df.columns, MultiIndex)
self.assert_((df[2000, 1, 10] == df[2000, 1, 7]).all())
def test_alignment(self):
x = Series(data=[1, 2, 3],
index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6],
index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
assert_series_equal(res, exp)
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 1, 2]])
self.assert_(index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 2, 1]])
self.assert_(not index.is_lexsorted())
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1],
[0, 1, 0, 2, 2, 1]])
self.assert_(not index.is_lexsorted())
self.assertEqual(index.lexsort_depth, 0)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
self.assert_((df['foo'].values == 0).all())
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sortlevel(0, axis=1)
# this will work, but will raise/warn as its chained assignment
def f():
df['foo']['one'] = 2
return df
self.assertRaises(com.SettingWithCopyError, f)
try:
df = f()
except Exception:
pass
self.assert_((df['foo', 'one'] == 0).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
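# split the column MultiIndex tuples into one array per level; arrays[0] (the top level) is used for masking below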
arrays = [np.array(x) for x in zip(*df.columns._tuple_index)]
result = df['foo']
result2 = df.ix[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.ix['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
from unittest import TestCase
import sklearn_pmml_model
from sklearn_pmml_model.tree import PMMLTreeClassifier
from sklearn2pmml.pipeline import PMMLPipeline
from sklearn2pmml import sklearn2pmml
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from io import StringIO
import pandas as pd
import numpy as np
from os import path, remove
from sklearn.datasets import load_digits
BASE_DIR = path.dirname(sklearn_pmml_model.__file__)
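# PMML fixtures referenced below are resolved relative to the installed sklearn_pmml_model package (e.g. ../models/decisionTree.pmml)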
class TestTree(TestCase):
def test_invalid_tree(self):
with self.assertRaises(Exception) as cm:
PMMLTreeClassifier(pmml=StringIO("""
<PMML xmlns="http://www.dmg.org/PMML-4_3" version="4.3">
<DataDictionary>
<DataField name="Class" optype="categorical" dataType="string">
<Value value="setosa"/>
<Value value="versicolor"/>
<Value value="virginica"/>
</DataField>
</DataDictionary>
<MiningSchema>
<MiningField name="Class" usageType="target"/>
</MiningSchema>
</PMML>
"""))
assert str(cm.exception) == 'PMML model does not contain TreeModel.'
def test_fit_exception(self):
with self.assertRaises(Exception) as cm:
pmml = path.join(BASE_DIR, '../models/decisionTree.pmml')
clf = PMMLTreeClassifier(pmml=pmml)
clf.fit(np.array([[]]), np.array([]))
assert str(cm.exception) == 'Not supported.'
def test_incomplete_node(self):
with self.assertRaises(Exception) as cm:
PMMLTreeClassifier(pmml=StringIO("""
<PMML xmlns="http://www.dmg.org/PMML-4_3" version="4.3">
<DataDictionary>
<DataField name="Class" optype="categorical" dataType="string">
<Value value="setosa"/>
<Value value="versicolor"/>
<Value value="virginica"/>
</DataField>
</DataDictionary>
<MiningSchema>
<MiningField name="Class" usageType="target"/>
</MiningSchema>
<TreeModel splitCharacteristic="binarySplit">
<Node id="1">
<True/>
<Node id="2"/>
</Node>
</TreeModel>
</PMML>
"""))
assert str(cm.exception) == 'Node has insufficient information to ' \
'determine output: recordCount or score ' \
'attributes expected'
def test_unsupported_predicate(self):
with self.assertRaises(Exception) as cm:
PMMLTreeClassifier(pmml=StringIO("""
<PMML xmlns="http://www.dmg.org/PMML-4_3" version="4.3">
<DataDictionary>
<DataField name="Class" optype="categorical" dataType="string">
<Value value="setosa"/>
<Value value="versicolor"/>
<Value value="virginica"/>
</DataField>
</DataDictionary>
<MiningSchema>
<MiningField name="Class" usageType="target"/>
</MiningSchema>
<TreeModel splitCharacteristic="binarySplit">
<Node id="1">
<True/>
<Node id="2" score="setosa">
<UnsupportedPredicate/>
</Node>
<Node id="3" score="versicolor">
<UnsupportedPredicate/>
</Node>
</Node>
</TreeModel>
</PMML>
"""))
assert str(cm.exception) == 'Unsupported tree format: unknown predicate' \
' structure in Node 2'
def test_tree_threshold_value(self):
clf = PMMLTreeClassifier(path.join(BASE_DIR, '../models/categorical.pmml'))
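# assumption: the nested list entry describes a categorical split's category set, and -2 mirrors scikit-learn's TREE_UNDEFINED marker for leaf nodes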
assert clf.tree_.threshold == [[0, 4], 25.18735, -2, 125.5, -2, -2, 20.02033, -2, -2]
class TestIrisTreeIntegration(TestCase):
def setUp(self):
pair = [0, 1]
data = load_iris()
X = pd.DataFrame(data.data[:, pair])
X.columns = np.array(data.feature_names)[pair]
y = pd.Series(np.array(data.target_names)[data.target])
y.name = "Class"
X, Xte, y, yte = train_test_split(X, y, test_size=0.33, random_state=123)
self.test = (Xte, yte)
self.train = (X, y)
pmml = path.join(BASE_DIR, '../models/decisionTree.pmml')
self.clf = PMMLTreeClassifier(pmml=pmml)
self.ref = DecisionTreeClassifier(random_state=1).fit(X, y)
def test_predict(self):
Xte, _ = self.test
assert np.array_equal(self.ref.predict(Xte), self.clf.predict(Xte))
def test_predict_proba(self):
Xte, _ = self.test
assert np.array_equal(
self.ref.predict_proba(Xte),
self.clf.predict_proba(Xte)
)
def test_score(self):
Xte, yte = self.test
assert self.ref.score(Xte, yte) == self.clf.score(Xte, yte)
def test_sklearn2pmml(self):
# Export to PMML
pipeline = PMMLPipeline([
("classifier", self.ref)
])
pipeline.fit(self.train[0], self.train[1])
sklearn2pmml(pipeline, "tree_sklearn2pmml.pmml", with_repr = True)
try:
# Import PMML
model = PMMLTreeClassifier(pmml='tree_sklearn2pmml.pmml')
# Verify classification
Xte, _ = self.test
assert np.array_equal(
self.ref.predict_proba(Xte),
model.predict_proba(Xte)
)
finally:
remove("tree_sklearn2pmml.pmml")
class TestDigitsTreeIntegration(TestCase):
def setUp(self):
data = load_digits()
self.columns = [2, 3, 4, 5, 6, 7, 9, 10, 13, 14, 17, 18, 19, 20, 21, 25, 26,
27, 28, 29, 30, 33, 34, 35, 36, 37, 38, 41, 42, 43, 45, 46,
50, 51, 52, 53, 54, 55, 57, 58, 59, 60, 61, 62, 63]
X = pd.DataFrame(data.data)
y = pd.Series(np.array(data.target_names)[data.target])
y.name = "Class"
X, Xte, y, yte = train_test_split(X, y, test_size=0.33, random_state=123)
self.test = (Xte, yte)
self.clf = PMMLTreeClassifier(path.join(BASE_DIR, '../models/digits.pmml'))
self.ref = DecisionTreeClassifier(random_state=1).fit(X, y)
def test_predict(self):
Xte, _ = self.test
assert np.array_equal(
self.ref.predict(Xte),
self.clf.predict(Xte[self.columns]).astype(np.int64)
)
def test_predict_proba(self):
Xte, _ = self.test
assert np.array_equal(
self.ref.predict_proba(Xte),
self.clf.predict_proba(Xte[self.columns])
)
def test_score(self):
Xte, yte = self.test
assert self.ref.score(Xte, yte) == self.clf.score(Xte[self.columns], yte)
class TestCategoricalTreeIntegration(TestCase):
def setUp(self):
self.clf = PMMLTreeClassifier(path.join(BASE_DIR, '../models/cat.pmml'))
def test_predict(self):
Xte = np.array([[0], [1], [2]])
assert np.array_equal(
np.array(['class1', 'class2', 'class3']),
self.clf.predict(Xte)
)
class TestCategoricalPimaTreeIntegration(TestCase):
def setUp(self):
df = pd.read_csv(path.join(BASE_DIR, '../models/categorical-test.csv'))
cats = np.unique(df['age'])
df['age'] = pd.Categorical(df['age'], categories=cats)
# Copyright (C) 2019-2022, Pyronear.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
import ipywidgets as widgets
import pathlib
import pandas as pd
from datetime import datetime
from IPython.display import display
def getImageWidget(imgFile, width=300):
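"""Return an ipywidgets Image widget for ``imgFile`` (displayed at ``width`` pixels); if the file is missing, print a notice and fall back to an empty HBox placeholder."""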
try:
with open(imgFile, 'rb') as file:
return widgets.Image(value=file.read(), width=width)
except FileNotFoundError:
print(f'Skipping {imgFile}')
return widgets.HBox() # FIXME: find a better solution ?
class AnnotationChecker:
"""
Check movie annotations in jupyter notebook
Display one accordion widget per movie file containing one tab per sequence with
fixed state (fire, confident, ...).
In each tab, the first and last frames of the sequence are shown together with
radio buttons containing the annotations. The user can change the annotations
and update them by clicking on the "Save" button that creates a new csv file
(checkedLabels.csv)
Args:
- inputdir: directory containing image files and csv files (*.labels.csv) with the
summary of movie annotations:
* fBase: the original movie file
* stateStart: the first frame of a sequence with fixed state
* stateEnd: the last frame of a sequence with fixed state
* state definitions: exploitable, fire, clf_confidence, loc_confidence
* imgFile: image file name
- labels: csv file with labels. By default uses all files in inputdir
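Example (hypothetical path; assumes the images and *.labels.csv files were extracted to ./annotations):
>>> checker = AnnotationChecker('annotations')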
"""
def __init__(self, inputdir, labels=None):
self.inputdir = pathlib.Path(inputdir)
assert self.inputdir.is_dir(), f'Invalid path {self.inputdir}'
self.outputfile = self.inputdir / 'checkedLabels.csv'
if labels is None:
csvFiles = self.inputdir.glob('*.labels.csv')
self.labels = pd.concat(map(pd.read_csv, csvFiles))
else:
self.labels = pd.read_csv(labels)
self.menus = pd.Series(index=self.labels.index)