prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
import wf_core_data.utils
import requests
import pandas as pd
from collections import OrderedDict
# import pickle
# import json
import datetime
import time
import logging
import os
logger = logging.getLogger(__name__)
DEFAULT_DELAY = 0.25
DEFAULT_MAX_REQUESTS = 50
DEFAULT_WRITE_CHUNK_SIZE = 10
SCHOOLS_BASE_ID = 'appJBT9a4f3b7hWQ2'
DATA_DICT_BASE_ID = 'appJBT9a4f3b7hWQ2'
# DATA_DICT_BASE_ID = 'appHMyIWgnHqVJymL'
class AirtableClient:
def __init__(
self,
api_key=None,
url_base='https://api.airtable.com/v0/'
):
self.api_key = api_key
self.url_base = url_base
if self.api_key is None:
self.api_key = os.getenv('AIRTABLE_API_KEY')
def fetch_tl_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching TL data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='TLs',
params=params
)
tl_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('teacher_id_at', record.get('id')),
('teacher_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('teacher_full_name_at', fields.get('Full Name')),
('teacher_first_name_at', fields.get('First Name')),
('teacher_middle_name_at', fields.get('Middle Name')),
('teacher_last_name_at', fields.get('Last Name')),
('teacher_title_at', fields.get('Title')),
('teacher_ethnicity_at', fields.get('Race & Ethnicity')),
('teacher_ethnicity_other_at', fields.get('Race & Ethnicity - Other')),
('teacher_income_background_at', fields.get('Income Background')),
('teacher_email_at', fields.get('Email')),
('teacher_email_2_at', fields.get('Email 2')),
('teacher_email_3_at', fields.get('Email 3')),
('teacher_phone_at', fields.get('Phone Number')),
('teacher_phone_2_at', fields.get('Phone Number 2')),
('teacher_employer_at', fields.get('Employer')),
('hub_at', fields.get('Hub')),
('pod_at', fields.get('Pod')),
('user_id_tc', fields.get('TC User ID'))
])
tl_data.append(datum)
if format == 'dataframe':
tl_data = convert_tl_data_to_df(tl_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return tl_data
def fetch_location_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching location data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Locations',
params=params
)
location_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('location_id_at', record.get('id')),
('location_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('location_address_at', fields.get('Address')),
('school_id_at', wf_core_data.utils.to_singleton(fields.get('School Name'))),
('school_location_start_at', wf_core_data.utils.to_date(fields.get('Start of time at location'))),
('school_location_end_at', wf_core_data.utils.to_date(fields.get('End of time at location')))
])
location_data.append(datum)
if format == 'dataframe':
location_data = convert_location_data_to_df(location_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return location_data
def fetch_teacher_school_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching teacher school association data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Teachers x Schools',
params=params
)
teacher_school_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('teacher_school_id_at', record.get('id')),
('teacher_school_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('teacher_id_at', fields.get('TL')),
('school_id_at', fields.get('School')),
('teacher_school_start_at', wf_core_data.utils.to_date(fields.get('Start Date'))),
('teacher_school_end_at', wf_core_data.utils.to_date(fields.get('End Date'))),
('teacher_school_active_at', wf_core_data.utils.to_boolean(fields.get('Currently Active')))
])
teacher_school_data.append(datum)
if format == 'dataframe':
teacher_school_data = convert_teacher_school_data_to_df(teacher_school_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return teacher_school_data
def fetch_school_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching school data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Schools',
params=params
)
school_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('school_id_at', record.get('id')),
('school_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('hub_id_at', fields.get('Hub')),
('pod_id_at', fields.get('Pod')),
('school_name_at', fields.get('Name')),
('school_short_name_at', fields.get('Short Name')),
('school_status_at', fields.get('School Status')),
('school_ssj_stage_at', fields.get('School Startup Stage')),
('school_governance_model_at', fields.get('Governance Model')),
('school_ages_served_at', fields.get('Ages served')),
('school_location_ids_at', fields.get('Locations')),
('school_id_tc', fields.get('TC school ID'))
])
school_data.append(datum)
if format == 'dataframe':
school_data = convert_school_data_to_df(school_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return school_data
def fetch_hub_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching hub data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Hubs',
params=params
)
hub_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('hub_id_at', record.get('id')),
('hub_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('hub_name_at', fields.get('Name'))
])
hub_data.append(datum)
if format == 'dataframe':
hub_data = convert_hub_data_to_df(hub_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return hub_data
def fetch_pod_data(
self,
pull_datetime=None,
params=None,
base_id=SCHOOLS_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching pod data from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Pods',
params=params
)
pod_data=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('pod_id_at', record.get('id')),
('pod_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('pod_name_at', fields.get('Name'))
])
pod_data.append(datum)
if format == 'dataframe':
pod_data = convert_pod_data_to_df(pod_data)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return pod_data
def fetch_ethnicity_lookup(self):
ethnicity_categories = self.fetch_ethnicity_categories()
ethnicity_mapping = self.fetch_ethnicity_mapping()
ethnicity_lookup = (
ethnicity_mapping
.join(
ethnicity_categories['ethnicity_category'],
how='left',
on='ethnicity_category_id_at'
)
.reindex(columns=[
'ethnicity_category'
])
.sort_index()
)
return ethnicity_lookup
def fetch_gender_lookup(self):
gender_categories = self.fetch_gender_categories()
gender_mapping = self.fetch_gender_mapping()
gender_lookup = (
gender_mapping
.join(
gender_categories['gender_category'],
how='left',
on='gender_category_id_at'
)
.reindex(columns=[
'gender_category'
])
.sort_index()
.sort_values('gender_category')
)
return gender_lookup
def fetch_household_income_lookup(self):
household_income_categories = self.fetch_household_income_categories()
household_income_mapping = self.fetch_household_income_mapping()
household_income_lookup = (
household_income_mapping
.join(
household_income_categories['household_income_category'],
how='left',
on='household_income_category_id_at'
)
.reindex(columns=[
'household_income_category'
])
.sort_index()
.sort_values('household_income_category')
)
return household_income_lookup
def fetch_nps_lookup(self):
nps_categories = self.fetch_nps_categories()
nps_mapping = self.fetch_nps_mapping()
nps_lookup = (
nps_mapping
.join(
nps_categories['nps_category'],
how='left',
on='nps_category_id_at'
)
.reindex(columns=[
'nps_category'
])
.sort_index()
)
return nps_lookup
def fetch_boolean_lookup(self):
boolean_categories = self.fetch_boolean_categories()
boolean_mapping = self.fetch_boolean_mapping()
boolean_lookup = (
boolean_mapping
.join(
boolean_categories['boolean_category'],
how='left',
on='boolean_category_id_at'
)
.reindex(columns=[
'boolean_category'
])
.sort_index()
.sort_values('boolean_category')
)
return boolean_lookup
def fetch_ethnicity_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching ethnicity categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Ethnicity categories',
params=params
)
ethnicity_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('ethnicity_category_id_at', record.get('id')),
('ethnicity_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('ethnicity_category', fields.get('ethnicity_category')),
('ethnicity_display_name_english', fields.get('ethnicity_display_name_english')),
('ethnicity_display_name_spanish', fields.get('ethnicity_display_name_spanish')) ])
ethnicity_categories.append(datum)
if format == 'dataframe':
ethnicity_categories = convert_ethnicity_categories_to_df(ethnicity_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return ethnicity_categories
def fetch_gender_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching gender categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Gender categories',
params=params
)
gender_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('gender_category_id_at', record.get('id')),
('gender_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('gender_category', fields.get('gender_category')),
('gender_display_name_english', fields.get('gender_display_name_english')),
('gender_display_name_spanish', fields.get('gender_display_name_spanish')) ])
gender_categories.append(datum)
if format == 'dataframe':
gender_categories = convert_gender_categories_to_df(gender_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return gender_categories
def fetch_household_income_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching household income categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Household income categories',
params=params
)
household_income_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('household_income_category_id_at', record.get('id')),
('household_income_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('household_income_category', fields.get('household_income_category')),
('household_income_display_name_english', fields.get('household_income_display_name_english')),
('household_income_display_name_spanish', fields.get('household_income_display_name_spanish')) ])
household_income_categories.append(datum)
if format == 'dataframe':
household_income_categories = convert_household_income_categories_to_df(household_income_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return household_income_categories
def fetch_nps_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching NPS categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='NPS categories',
params=params
)
nps_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('nps_category_id_at', record.get('id')),
('nps_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('nps_category', fields.get('nps_category')),
('nps_display_name_english', fields.get('nps_display_name_english')),
('nps_display_name_spanish', fields.get('nps_display_name_spanish')) ])
nps_categories.append(datum)
if format == 'dataframe':
nps_categories = convert_nps_categories_to_df(nps_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return nps_categories
def fetch_boolean_categories(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching boolean categories from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Boolean categories',
params=params
)
boolean_categories=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('boolean_category_id_at', record.get('id')),
('boolean_category_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('boolean_category', wf_core_data.utils.to_boolean(fields.get('boolean_category'))),
('boolean_display_name_english', fields.get('boolean_display_name_english')),
('boolean_display_name_spanish', fields.get('boolean_display_name_spanish')) ])
boolean_categories.append(datum)
if format == 'dataframe':
boolean_categories = convert_boolean_categories_to_df(boolean_categories)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return boolean_categories
def fetch_ethnicity_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching ethnicity mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Ethnicity mapping',
params=params
)
ethnicity_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('ethnicity_mapping_id_at', record.get('id')),
('ethnicity_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('ethnicity_response', fields.get('ethnicity_response')),
('ethnicity_category_id_at', fields.get('ethnicity_category'))
])
ethnicity_mapping.append(datum)
if format == 'dataframe':
ethnicity_mapping = convert_ethnicity_mapping_to_df(ethnicity_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return ethnicity_mapping
def fetch_gender_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching gender mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Gender mapping',
params=params
)
gender_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('gender_mapping_id_at', record.get('id')),
('gender_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('gender_response', fields.get('gender_response')),
('gender_category_id_at', fields.get('gender_category'))
])
gender_mapping.append(datum)
if format == 'dataframe':
gender_mapping = convert_gender_mapping_to_df(gender_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return gender_mapping
def fetch_household_income_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching household income mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Household income mapping',
params=params
)
household_income_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('household_income_mapping_id_at', record.get('id')),
('household_income_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('household_income_response', fields.get('household_income_response')),
('household_income_category_id_at', fields.get('household_income_category'))
])
household_income_mapping.append(datum)
if format == 'dataframe':
household_income_mapping = convert_household_income_mapping_to_df(household_income_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return household_income_mapping
def fetch_nps_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching NPS mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='NPS mapping',
params=params
)
nps_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('nps_mapping_id_at', record.get('id')),
('nps_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('nps_response', fields.get('nps_response')),
('nps_category_id_at', fields.get('nps_category'))
])
nps_mapping.append(datum)
if format == 'dataframe':
nps_mapping = convert_nps_mapping_to_df(nps_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return nps_mapping
def fetch_boolean_mapping(
self,
pull_datetime=None,
params=None,
base_id=DATA_DICT_BASE_ID,
format='dataframe',
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
pull_datetime = wf_core_data.utils.to_datetime(pull_datetime)
if pull_datetime is None:
pull_datetime = datetime.datetime.now(tz=datetime.timezone.utc)
logger.info('Fetching boolean mapping from Airtable')
records = self.bulk_get(
base_id=base_id,
endpoint='Boolean mapping',
params=params
)
boolean_mapping=list()
for record in records:
fields = record.get('fields', {})
datum = OrderedDict([
('boolean_mapping_id_at', record.get('id')),
('boolean_mapping_created_datetime_at', wf_core_data.utils.to_datetime(record.get('createdTime'))),
('pull_datetime', pull_datetime),
('boolean_response', fields.get('boolean_response')),
('boolean_category_id_at', fields.get('boolean_category'))
])
boolean_mapping.append(datum)
if format == 'dataframe':
boolean_mapping = convert_boolean_mapping_to_df(boolean_mapping)
elif format == 'list':
pass
else:
raise ValueError('Data format \'{}\' not recognized'.format(format))
return boolean_mapping
def write_dataframe(
self,
df,
base_id,
endpoint,
params=None,
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS,
write_chunk_size=DEFAULT_WRITE_CHUNK_SIZE
):
num_records = len(df)
num_chunks = (num_records // write_chunk_size) + 1
logger.info('Writing {} records in {} chunks'.format(
num_records,
num_chunks
))
for chunk_index in range(num_chunks):
start_row_index = chunk_index*write_chunk_size
end_row_index = min(
(chunk_index + 1)*write_chunk_size,
num_records
)
chunk_df = df.iloc[start_row_index:end_row_index]
chunk_list = chunk_df.to_dict(orient='records')
chunk_dict = {'records': [{'fields': row_dict} for row_dict in chunk_list]}
logger.info('Writing chunk {}: rows {} to {}'.format(
chunk_index,
start_row_index,
end_row_index
))
self.post(
base_id=base_id,
endpoint=endpoint,
data=chunk_dict
)
time.sleep(delay)
def bulk_get(
self,
base_id,
endpoint,
params=None,
delay=DEFAULT_DELAY,
max_requests=DEFAULT_MAX_REQUESTS
):
if params is None:
params = dict()
num_requests = 0
records = list()
while True:
data = self.get(
base_id=base_id,
endpoint=endpoint,
params=params
)
if 'records' in data.keys():
logging.info('Returned {} records'.format(len(data.get('records'))))
records.extend(data.get('records'))
num_requests += 1
if num_requests >= max_requests:
logger.warning('Reached maximum number of requests ({}). Terminating.'.format(
max_requests
))
break
offset = data.get('offset')
if offset is None:
break
params['offset'] = offset
time.sleep(delay)
return records
def post(
self,
base_id,
endpoint,
data
):
headers = dict()
if self.api_key is not None:
headers['Authorization'] = 'Bearer {}'.format(self.api_key)
r = requests.post(
'{}{}/{}'.format(
self.url_base,
base_id,
endpoint
),
headers=headers,
json=data
)
if r.status_code != 200:
error_message = 'Airtable POST request returned status code {}'.format(r.status_code)
r.raise_for_status()
return r.json()
def get(
self,
base_id,
endpoint,
params=None
):
headers = dict()
if self.api_key is not None:
headers['Authorization'] = 'Bearer {}'.format(self.api_key)
r = requests.get(
'{}{}/{}'.format(
self.url_base,
base_id,
endpoint
),
params=params,
headers=headers
)
if r.status_code != 200:
error_message = 'Airtable GET request returned status code {}'.format(r.status_code)
r.raise_for_status()
return r.json()
def convert_tl_data_to_df(tl_data):
if len(tl_data) == 0:
return pd.DataFrame()
tl_data_df = pd.DataFrame(
tl_data,
dtype='object'
)
tl_data_df['pull_datetime'] = pd.to_datetime(tl_data_df['pull_datetime'])
tl_data_df['teacher_created_datetime_at'] = pd.to_datetime(tl_data_df['teacher_created_datetime_at'])
# school_data_df['user_id_tc'] = pd.to_numeric(tl_data_df['user_id_tc']).astype('Int64')
tl_data_df = tl_data_df.astype({
'teacher_full_name_at': 'string',
'teacher_middle_name_at': 'string',
'teacher_last_name_at': 'string',
'teacher_title_at': 'string',
'teacher_ethnicity_at': 'string',
'teacher_ethnicity_other_at': 'string',
'teacher_income_background_at': 'string',
'teacher_email_at': 'string',
'teacher_email_2_at': 'string',
'teacher_email_3_at': 'string',
'teacher_phone_at': 'string',
'teacher_phone_2_at': 'string',
'teacher_employer_at': 'string',
'hub_at': 'string',
'pod_at': 'string',
'user_id_tc': 'string'
})
tl_data_df.set_index('teacher_id_at', inplace=True)
return tl_data_df
def convert_location_data_to_df(location_data):
if len(location_data) == 0:
return pd.DataFrame()
location_data_df = pd.DataFrame(
location_data,
dtype='object'
)
location_data_df['pull_datetime'] = pd.to_datetime(location_data_df['pull_datetime'])
location_data_df['location_created_datetime_at'] = pd.to_datetime(location_data_df['location_created_datetime_at'])
location_data_df = location_data_df.astype({
'location_id_at': 'string',
'location_address_at': 'string',
'school_id_at': 'string'
})
location_data_df.set_index('location_id_at', inplace=True)
return location_data_df
def convert_teacher_school_data_to_df(teacher_school_data):
if len(teacher_school_data) == 0:
return pd.DataFrame()
teacher_school_data_df = pd.DataFrame(
teacher_school_data,
dtype='object'
)
teacher_school_data_df['pull_datetime'] = pd.to_datetime(teacher_school_data_df['pull_datetime'])
teacher_school_data_df['teacher_school_created_datetime_at'] = pd.to_datetime(teacher_school_data_df['teacher_school_created_datetime_at'])
teacher_school_data_df = teacher_school_data_df.astype({
'teacher_school_active_at': 'bool'
})
teacher_school_data_df.set_index('teacher_school_id_at', inplace=True)
return teacher_school_data_df
def convert_school_data_to_df(school_data):
if len(school_data) == 0:
return pd.DataFrame()
school_data_df = pd.DataFrame(
school_data,
dtype='object'
)
school_data_df['pull_datetime'] = pd.to_datetime(school_data_df['pull_datetime'])
school_data_df['school_created_datetime_at'] = pd.to_datetime(school_data_df['school_created_datetime_at'])
school_data_df['hub_id_at'] = school_data_df['hub_id_at'].apply(wf_core_data.utils.to_singleton)
school_data_df['pod_id_at'] = school_data_df['pod_id_at'].apply(wf_core_data.utils.to_singleton)
school_data_df['school_id_tc'] = pd.to_numeric(school_data_df['school_id_tc']).astype('Int64')
school_data_df = school_data_df.astype({
'school_id_at': 'string',
'hub_id_at': 'string',
'pod_id_at': 'string',
'school_name_at': 'string',
'school_short_name_at': 'string',
'school_status_at': 'string',
'school_ssj_stage_at': 'string',
'school_governance_model_at': 'string',
})
school_data_df.set_index('school_id_at', inplace=True)
return school_data_df
def convert_hub_data_to_df(hub_data):
if len(hub_data) == 0:
return pd.DataFrame()
hub_data_df = pd.DataFrame(
hub_data,
dtype='object'
)
hub_data_df['pull_datetime'] = | pd.to_datetime(hub_data_df['pull_datetime']) | pandas.to_datetime |
"""
Tasks for the serving pipeline
"""
from pathlib import Path
import pickle
import pandas as pd
from sklearn import datasets
def get(product, sample):
"""Get input data to make predictions
"""
Path(str(product)).parent.mkdir(parents=True, exist_ok=True)
d = datasets.load_iris()
df = | pd.DataFrame(d['data']) | pandas.DataFrame |
from copy import copy
from pandas import DataFrame, concat, notnull, Series
from typing import List, Optional
from survey.attributes import RespondentAttribute
class AttributeContainerMixin(object):
_attributes: List[RespondentAttribute]
@property
def data(self) -> DataFrame:
"""
Return a DataFrame combining data from all the questions in the group.
"""
return concat([a.data for a in self._attributes], axis=1)
def attribute(self, name: str) -> Optional[RespondentAttribute]:
"""
Return the Attribute with the given name.
:param name: Name of the attribute to return.
"""
try:
return [a for a in self._attributes if a.name == name][0]
except IndexError:
return None
def to_list(self) -> List[RespondentAttribute]:
"""
Return all the Attributes asked in the Survey.
"""
return self._attributes
def merge(self, name: Optional[str] = '', **kwargs) -> RespondentAttribute:
"""
Return a new Question combining all the responses of the different
questions in the group.
N.B. assumes that there is a maximum of one response across all
questions for each respondent.
:param name: The name for the new merged Question.
:param kwargs: Attribute values to override in the new merged Question.
"""
if len(set([type(q) for q in self._attributes])) != 1:
raise TypeError(
'Questions must all be of the same type to merge answers.'
)
if self.data.notnull().sum(axis=1).max() > 1:
raise ValueError(
'Can only merge when there is a max of one response '
'across all questions per respondent.'
)
data = self.data.loc[self.data.notnull().sum(axis=1) == 1]
new_data = [row.loc[ | notnull(row) | pandas.notnull |
import requests
import os
import pandas as pd
from flask import Flask, render_template, request, redirect
#from bokeh.plotting import figure
#from bokeh.embed import components
from spotipy.oauth2 import SpotifyClientCredentials
import spotipy
import dill
import spotipy.util as util
import spotipy.oauth2 as oauth2
import configparser
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.neighbors import NearestNeighbors
app = Flask(__name__)
def save_pkl(df, filename):
with open('static/data/'+filename+'.pkl','wb') as fobj:
dill.dump(df,fobj)
def load_pkl(filename):
with open('static/data/'+filename+'.pkl','rb') as fobj:
df = dill.load(fobj)
return df
def load_credentials():
config = configparser.ConfigParser()
config.read('static/keys/tam_creds.nogit')
client_id = config.get('SPOTIFY', 'CLIENT_ID')
client_secret = config.get('SPOTIFY', 'CLIENT_SECRET')
# auth = oauth2.SpotifyClientCredentials(
# client_id=client_id,
# client_secret=client_secret
# )
return client_id, client_secret
def get_token(username,client_id,client_secret):
scope = 'playlist-modify-public'
token = util.prompt_for_user_token(username,scope,client_id=client_id,client_secret=client_secret,redirect_uri='theanythingmixtape://returnafterlogin')
return token
def create_spotify_playlist(token,username,playlist_name,playlist_description,playlist_ids):
if token:
sp = spotipy.Spotify(auth=token)
sp.trace = False
playlist = sp.user_playlist_create(username, playlist_name, public=True, description=playlist_description)
# pprint.pprint(playlist)
_ = sp.user_playlist_add_tracks(username, playlist['id'], playlist_ids)
# print(results)
return playlist['id']
else:
print("Can't get token for", username)
def spotify_from_trackids(username, playlist_name, playlist_description, playlist_ids):
client_id, client_secret = load_credentials()
token = get_token(username,client_id,client_secret)
playlist_id = create_spotify_playlist(token,username,playlist_name,playlist_description,playlist_ids)
return playlist_id
def classify_text(model, text):
print(text)
print(model.predict_proba(text))
best_genre = model.predict(text)
print(best_genre)
return best_genre
def create_playlist(genre, data, input_text, vocab, num_tracks=10):
genre_data = data.loc[data['genre']==genre[0]] #Select genre tracks
tfidf_matrix = vectorize_it(genre_data, input_text, vocab)
indices = get_closest_indices(tfidf_matrix,num_tracks)
track_list = fetch_track_ids(genre_data,indices)
return track_list
def vectorize_it(genre_data, input_text, vocab):
input_df = | pd.DataFrame(data=[['', input_text, '', '']], columns=['genre', 'lyrics', 'orig_index', 'track_id']) | pandas.DataFrame |
"""
Python module to do secondary preprocessing
Creates processed_train and processed_test .csv files
"""
import pandas as pd
import numpy as np
from datetime import datetime
from dateutil.parser import parse
import os
def feature_engineering(df):
"""
Function to calcualte debt-to-income
"""
df['dti'] = df['installment'] / (df['annual_inc'] / 12)
def convert_date_to_num(date_col):
"""
Function to convert date variables to number format
"""
return date_col.apply(lambda x: (parse(x) - datetime(1900, 1, 1)).days)
def data_preprocessing(df, ohe=False):
"""
Function for data pre-processing
The parameter ohe lets the user choose whether to do one-hot-encoding or transform those variables to categoricals
returns processed DataFrame
"""
df_new = df.copy()
feature_engineering(df_new)
# Columns to drop
cols_to_drop = ['emp_title', 'zip_code', 'application_type', 'desc', 'funded_amnt', 'funded_amnt_inv', 'grade',
'pymnt_plan', 'title', 'issue_d', ]
# Drop columns
df_new.drop(labels=cols_to_drop, axis=1, inplace=True)
# Transform date column to int
df_new['earliest_cr_line'] = convert_date_to_num(df_new['earliest_cr_line'])
# Clean employment length feature
df_new['emp_length'].replace('10+ years', '10 years', inplace=True)
df_new['emp_length'].replace('< 1 year', '0 years', inplace=True)
df_new['emp_length'].replace('n/a', np.nan, inplace=True)
df_new['emp_length'] = df_new['emp_length'].apply(lambda x: x if pd.isnull(x) else np.int8(x.split()[0]))
# Clean home ownership feature
df_new['home_ownership'].replace(to_replace=['NONE', 'ANY'], value='OTHER', inplace=True)
cat_cols = df_new.select_dtypes(include=['object']).columns
# Performs ohe or transforming to categoricals
if ohe:
dummies = | pd.get_dummies(df_new[cat_cols]) | pandas.get_dummies |
import pandas as pd
import numpy as np
import requests
from bs4 import BeautifulSoup
import re
import ast
import os
import sys
from urllib.request import urlopen
from datetime import datetime, timedelta, date
from traceback import format_exc
import json
import math
import urllib.error
from urllib.parse import quote
import time
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException,StaleElementReferenceException
import pandas as pd
import platform
js = """
<script language="javascript" type="text/javascript">
<!--
function MM_reloadPage(init) { //reloads the window if Nav4 resized
if (init==true) with (navigator) {if ((appName=="Netscape")&&(parseInt(appVersion)==4)) {
document.MM_pgW=innerWidth; document.MM_pgH=innerHeight; onresize=MM_reloadPage; }}
else if (innerWidth!=document.MM_pgW || innerHeight!=document.MM_pgH) location.reload();
}
MM_reloadPage(true);
//-->
</script>
<link href="/wbi.css" rel="stylesheet" type="text/css"/>
"""
caption = """
<caption="특별조사기일 style="display:inline !important; visibility:visible !important; width:1px; height:1px; font-size:0px; overflow:hidden; line-height:0; " 공고"="" 관계인집회기일="" 및="" 제2,3회="">
</caption="특별조사기일><table border="0" cellpadding="0" cellspacing="0" height="100%" width="100%">
"""
str1 = """<td height="33" style="padding-left:20px"><img alt="로고" src="/img/hpgonggo/logo_scourt.gif"/></td>"""
str2 = """<td height="27"><img alt="종료" border="0" onclick="window.close();" src="/img/hpgonggo/btn_close.gif" style="cursor:hand"/><img alt="공백" height="10" src="/img/hpgonggo/blank.gif" width="10"/></td>"""
class RescueCrawler:
def __init__(self, term=1):
self.start_date = datetime.today() - timedelta(1)
self.start_date = self.start_date.strftime("%Y.%m.%d")
term = -1 * term
self.end_date = date.today() + timedelta(weeks=term)
self.end_date = self.end_date.strftime("%Y.%m.%d")
self.path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
self.rescue_url = 'http://safind.scourt.go.kr/sf/hpbigonggo/whp_gonggo.jsp?org_bub_nm=&theme=#'
self.naver_news = 'https://search.naver.com/search.naver?sm=top_hty&fbm=1&ie=utf8&query='
self.naver_news_content = 'https://search.naver.com/search.naver?&where=news&query={}&start=1&sort=sim&field=0&pd=6'
self.options = webdriver.ChromeOptions()
self.options.add_argument('headless')
self.options.add_argument('window-size=1920x1080')
self.options.add_argument("disable-gpu")
self.options.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36")
if os.path.exists(self.path + '/task_module/backup/') == False:
print('backup 생성')
os.mkdir(self.path + '/task_module/backup')
if os.path.exists(self.path + '/task_module/backup/rescue/') == False:
os.mkdir(self.path + '/task_module/backup/rescue')
print("collect rescue case {} weeks ago".format(term), self.path)
def get_content (self, driver,area,start_date,end_date) :
final_result = pd.DataFrame()
#for area in area_list :
print(area)
driver.get(self.rescue_url)
driver.implicitly_wait(5)
select = Select(driver.find_element_by_xpath("//select[@id='sel_map']"))
select.select_by_visible_text('법인회생')
driver.implicitly_wait(3)
select = Select(driver.find_element_by_xpath("//select[@id='area']"))
select.select_by_visible_text(area)
driver.implicitly_wait(3)
driver.find_element_by_xpath('//*[@id="contants"]/div[2]/div[18]/a').click()
driver.implicitly_wait(5)
temp = self.get_info(driver,area,start_date,end_date)
print(len(temp))
final_result = final_result.append(temp, ignore_index=True)
return final_result
def get_info(self, driver,area,start_date,end_date):
area = area
last_date = start_date
info = []
i,j = 0,0
while last_date > end_date:
i = i+1
driver.implicitly_wait(3)
try:
driver.find_element_by_xpath('/html/body/div/div[4]/a['+str(i)+']').click()
j = j+1
if j == 11 :
i,j = 2,1
except NoSuchElementException:
last_date = end_date
else:
driver.implicitly_wait(3)
html = driver.page_source ## 페이지의 elements모두 가져오기
soup = BeautifulSoup(html, 'html.parser') ## BeautifulSoup사용하기
contents = soup.select('body > div > table > tbody > tr ')
k = 1
for content in contents:
date = content.find_all("td")[3].text
if date > start_date:
k = k+1
else:
case_num = content.find_all("td")[0].text
court = content.find_all("td")[1].text
company = content.find_all("td")[2].text
subject = content.find_all("td")[4].text
subject = re.sub('[\n\t]', '', subject).strip()
driver.find_element_by_xpath('/html/body/div/table/tbody/tr['+str(k)+']/td[6]/a').click()
driver.switch_to_window(driver.window_handles[1])
time.sleep(1)
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
sub_info = soup.select('font > p')
if len(sub_info) == 2 :
address = sub_info[0].text
# ceo = sub_info[1].text
elif len(sub_info) == 1:
address = sub_info[0].text
# ceo = 'none'
else :
address = 'none'
# ceo = 'none'
if(date < end_date):
last_date = date
break
else :
info.append({'area':area,'case_num' : case_num,'court' : court,'company' :company,\
'date':date ,'subject' :subject,'sub_info':sub_info,'html':soup, 'address':address})
driver.switch_to_window(driver.window_handles[0])
k = k+1
dataframe = pd.DataFrame(info)
#driver.close()
return dataframe
def get_news_content_keyword(self, final_result):
naver_news_address = []
for i in range(0,len(final_result)):
comapny = final_result['company'][i]
company_name = comapny.replace("주식회사","").replace("(주)","").replace("분할존속회사","").replace("분할신설회사","").strip()
search_keyword = company_name + ' 회생 ' + '"'+company_name+'"' + ' "회생"'
encode_search_keyword = urllib.parse.quote(search_keyword) ## 입력한 키워드를 url에 넣을 수 있도록 parsing 변환
url = self.naver_news_content.format(encode_search_keyword)
req = requests.get(url) ## HTTP GET Request
html = req.text
soup = BeautifulSoup(html, 'html.parser')
my_titles_sub = soup.select('dl > dt > a')
for title in my_titles_sub:
if title.get('href') != '#':
# html = '<a href = "{}" target="_blank" > {} </a> '.format(title.get('href'),title.get('title'))
news_title = title.get('title')
news_url = title.get('href')
naver_news_address.append({'company': final_result['company'][i],
'news_title': news_title, 'news_url':news_url}) #url 가져오기
if len(naver_news_address) > 0 :
break
return naver_news_address
def get_company_keyword(self, rescue):
name=[]
category=[]
detailed_info=[]
for i in range(len(rescue)):
encode_search_keyword = urllib.parse.quote(rescue['company'][i]) ## 입력한 키워드를 url에 넣을 수 있도록 parsing 변환
url = self.naver_news + encode_search_keyword
req = requests.get(url) ## HTTP GET Request
html = req.content
soup = BeautifulSoup(html, 'html.parser')
try:
if soup.find('div',class_ = 'sp_company sc _au_company_search _au_company_answer'):
item_list=[]
info=[]
temp1=soup.find('div',class_ = 'sp_company sc _au_company_search _au_company_answer')
category.append(temp1.find('div', class_ = 'company_intro _ellipsis _detail').text.strip())
temp3=temp1.find_all('span', class_ = 'tit ',text=True)
temp4=temp1.find_all('span', class_ = 'txt_info ')
for j in range(len(temp3)-1):
item_list.append(temp3[j].text.strip())
for j in range(len(temp3)-1):
info.append(temp4[j].text.strip())
detailed_info.append(dict(zip(item_list, info)))
else:
temp2=soup.find('div',class_ = 'sp_company sc _au_company_search _company_same_name')
d1=[]
d2=[]
if temp2.find('span', class_ = 'sub_tit'):
for k in range(len(temp2.find_all('span', class_ = 'sub_tit'))):
d1.append(temp2.find_all('span', class_ = 'sub_tit')[k].text.strip())
d2.append(temp2.find_all('div', class_ = 'item_txt')[k].text.strip())
df=pd.DataFrame({'기업분류':d1,'세부정보':d2})
df_new=df.loc[pd.Series(df['세부정보']).str.contains(rescue['ceo'][i])]['기업분류']
df_new=df_new.reset_index(drop=True)
category.append(df_new[0])
detailed_info.append('None')
else:
for kk in range(len(temp2.find_all('span', class_ = 'info_sub'))):
d1.append(temp2.find_all('span', class_ = 'info_sub')[kk].text.strip())
d2.append(temp2.find_all('span', class_ = 'info_txt')[kk].text.strip())
df=pd.DataFrame({'기업분류':d1,'세부정보':d2})
df_new=df.loc[ | pd.Series(df['세부정보']) | pandas.Series |
import tensorflow as tf
import pandas as pd
import pickle
def predict_model(crim, zn, indus, chas,
nox, rm, age, dis, rad,
tax, ptratio, black, lstat):
# Import variable
scaler_x = pickle.load(open('./saved_model/scaler_x.pickle', 'rb'))
scaler_y = pickle.load(open('./saved_model/scaler_y.pickle', 'rb'))
model = tf.keras.models.load_model('./saved_model/model.h5')
# Make data
data = {
'CRIM': [crim],
'ZN': [zn],
'INDUS': [indus],
'CHAS': [chas],
'NOX': [nox],
'RM': [rm],
'AGE': [age],
'DIS': [dis],
'RAD': [rad],
'TAX': [tax],
'PTRATIO': [ptratio],
'B': [black],
'LSTAT': [lstat]
}
predict_df = | pd.DataFrame(data=data) | pandas.DataFrame |
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss, f1_score, confusion_matrix, classification_report, roc_curve, auc
import xgboost as xgb
from pandas import DataFrame, concat
from preprocess_helper_functions import *
from sklearn.model_selection import StratifiedShuffleSplit
import os
def pr(y_i, y, x):
p = x[y==y_i].sum(0)
return (p+1) / ((y==y_i).sum()+1)
def get_model(x, y):
r = np.log(pr(1,y, x) / pr(0,y,x))
m = LogisticRegression(C=4, dual=True)
x_nb = x.multiply(r)
return m.fit(x_nb, y), r
def call_NB_SVM_algorithm(X_train, y_train, X_test, y_test):
m,r = get_model(X_train, y_train)
p_test = m.predict_proba(X_test.multiply(r))[:,1]
npround = np.vectorize(round)
p_test_ints = npround(p_test)
f1score = f1_score(y_test, p_test_ints)
logloss = log_loss(list(y_test), list(p_test_ints.astype(int)))
accuracy = accuracy_score(y_test, p_test_ints)
return DataFrame({'pred': p_test, 'truth': y_test})
#----------------------------------------------------------------------------------------
#Logistic Regression
def call_logreg_algorithm(X_train, y_train, X_test, y_test):
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
p_test = logreg.predict_proba(X_test)[:,1]
npround = np.vectorize(round)
p_test_ints = npround(p_test)
f1score = f1_score(y_test, p_test_ints)
#print("F1-score: ", f1score)
logloss = log_loss(list(y_test), list(p_test_ints.astype(int)))
#print("Log Loss: ", logloss)
accuracy = accuracy_score(y_test, p_test_ints)
#print("Accuracy: ", accuracy)
return DataFrame({'pred': p_test, 'truth': y_test})
#----------------------------------------------------------------------------------------
#XGBoost
def call_xgboost_algorithm(xgb, vectorizer, X_train, y_train, X_test, y_test):
#Training on XGBoost
d_train = xgb.DMatrix(X_train, label=y_train)
#Set our parameters for xgboost
params = {}
num_round = 500
params['objective'] = 'binary:logistic'
params['eval_metric'] = ['logloss']
params['eta'] = 0.01 #Learning rate
params['max_depth'] = 6 #Depth of the tree. Default is 6.
params['colsample_bytree'] = 0.8
bst = xgb.train(params, d_train, num_round, verbose_eval= True)
d_test = xgb.DMatrix(X_test, label=y_test)
p_test = bst.predict(d_test)
npround = np.vectorize(round)
p_test_ints = npround(p_test)
f1score = f1_score(y_test, p_test_ints)
#print("F1-score: ", f1score)
logloss = log_loss(list(y_test), list(p_test_ints.astype(int)))
#print("Log Loss: ", logloss)
accuracy = accuracy_score(y_test, p_test_ints)
#print("Accuracy: ", accuracy)
return | DataFrame({'pred': p_test, 'truth': y_test}) | pandas.DataFrame |
import numpy as np
import pandas as pd
# Crime data is collected into two separate csv files. The first contains
# 40 years of data by state, and 10 years (in 10 xls files) by city
# data in this csv contains estimates in instances of no reporting
df = pd.read_csv(
"http://s3-us-gov-west-1.amazonaws.com/cg-d4b776d0-d898-4153-90c8-8336f86bdfec/estimated_crimes_1979_2018.csv")
# replace null values with 'US'
df['state_abbr'] = df['state_abbr'].replace(np.nan, 'US')
# add violent crime rate (vcr) and property crime rate (pcr) to dataframe
df['vcr'] = df['violent_crime'] / df['population']
df['pcr'] = df['property_crime'] / df['population']
# initialize a new dataframe for exporting
sand = pd.DataFrame(index=None)
sand['state'] = df['state_abbr']
sand['year'] = df['year']
sand['vcr'] = df['vcr']
sand['pcr'] = df['pcr']
# export to csv
sand.to_csv('./crime_data/state_crime.csv', index=False)
# read in xls files, skipping the headers and footers
xl2018 = pd.read_excel(
'./crime_data/Table_8_Offenses_Known_to_Law_Enforcement_by_State_by_City_2018.xls',
skiprows=3,
skipfooter=10)
xl2017 = pd.read_excel(
'./crime_data/Table_8_Offenses_Known_to_Law_Enforcement_by_State_by_City_2017.xls',
skiprows=3,
skipfooter=10)
xl2016 = pd.read_excel(
'./crime_data/Table_6_Offenses_Known_to_Law_Enforcement_by_State_by_City_2016.xls',
skiprows=3,
skipfooter=11)
xl2015 = pd.read_excel(
'./crime_data/Table_8_Offenses_Known_to_Law_Enforcement_by_State_by_City_2015.xls',
skiprows=3,
skipfooter=10)
xl2014 = pd.read_excel('./crime_data/table-8.xls', skiprows=3, skipfooter=17)
xl2013 = pd.read_excel(
'./crime_data/Table_8_Offenses_Known_to_Law_Enforcement_by_State_by_City_2013.xls',
skiprows=3,
skipfooter=10)
xl2012 = pd.read_excel(
'./crime_data/Table_8_Offenses_Known_to_Law_Enforcement_by_State_by_City_2012.xls',
skiprows=3,
skipfooter=7)
xl2011 = pd.read_excel(
'./crime_data/table_8_offenses_known_to_law_enforcement_by_state_by_city_2011.xls',
skiprows=3,
skipfooter=7)
xl2010 = pd.read_excel('./crime_data/10tbl08.xls', skiprows=3, skipfooter=7)
xl2009 = pd.read_excel('./crime_data/09tbl08.xls', skiprows=3, skipfooter=7)
# build a function to automatically clean the results and add to a new DF for
# import to database
def cleaner(x, year):
"""
Takes a dataframe, changes state abbreviations, changes state NaNs,
calculates violent crime and property crime rate and returns it as
a new DataFrame (city_st, vcr, pcr) for the year passed in
"""
# create new dataframe
df = pd.DataFrame(columns=['city', 'vcr_' + year, 'pcr_' + year])
# clean numbers from state column and put into new df
df['city'] = x['State'].str.replace(r'\d+', '')
# clean numbers from city column
x['City'] = x['City'].str.replace(r'\d+', '')
# clean column names
if 'Violent\ncrime' in x.columns:
x = x.rename(columns={'Violent\ncrime': 'Violent crime',
'Property\ncrime': 'Property crime'})
# remove null values from column
if x['City'].isnull().sum() >= 1:
x['City'] = x['City'].replace(np.nan, 'None')
# replace states with abbreviations
df['city'] = df['city'].replace({"ALABAMA": "AL", "ALASKA": "AK", "ARIZONA": "AZ",
"ARKANSAS": "AK", "CALIFORNIA": "CA",
"COLORADO": "CO", "CONNECTICUT": "CT",
"DELAWARE": "DE", "DISTRICT OF COLUMBIA": "DC",
"FLORIDA": "FL", "GEORGIA": "GA", "HAWAII": "HI",
"IDAHO": "ID", "ILLINOIS": "IL", "INDIANA": "IN",
"IOWA": "IA", "KANSAS": "KS", "KENTUCKY": "KY",
"LOUISIANA": "LA", "MAINE": "ME", "MARYLAND": "MD",
"MASSACHUSETTS": "MA", "MICHIGAN": "MI",
"MINNESOTA": "MN", "MISSISSIPPI": "MS",
"MISSOURI": "MI", "MONTANA": "MT", "NEBRASKA": "NE",
"NEVADA": "NV", "NEW HAMPSHIRE": "NH",
"NEW JERSEY": "NJ", "NEW MEXICO": "NM",
"NEW YORK": "NY", "NORTH CAROLINA": "NC",
"NORTH DAKOTA": "ND", "OHIO": "OH",
"OKLAHOMA": "OK", "OREGON": "OR",
"PENNSYLVANIA": "PA", "RHODE ISLAND": "RI",
"SOUTH CAROLINA": "SC", "SOUTH DAKOTA": "SD",
"TENNESSEE": "TN", "TEXAS": "TX", "UTAH": "UT",
"VERMONT": "VT", "VIRGINIA": "VA",
"WASHINGTON": "WA", "WEST VIRGINIA": "WV",
"WISCONSIN": "WI", "WYOMING": "WY"})
# iterate through dataframe, replacing nan values with proper state abbr.
state = ""
for i in range(len(df)):
if pd.notnull(df.at[i, 'city']):
if df.at[i, 'city'] != state:
state = df.at[i, 'city']
elif pd.isnull(df.at[i, 'city']):
df.at[i, 'city'] = state
# populate city column 'city, ST'
for i in range(len(df['city'])):
df['city'][i] = x['City'][i] + ", " + df['city'][i]
# populate violent crime rate column
df['vcr_' + year][i] = x['Violent crime'][i] / x['Population'][i]
# populate property crime rate column
df['pcr_' + year][i] = x['Property crime'][i] / x['Population'][i]
# set the index for later concatenation
df.set_index('city')
return df
cl18 = cleaner(xl2018, '2018')
cl17 = cleaner(xl2017, '2017')
cl16 = cleaner(xl2016, '2016')
cl15 = cleaner(xl2015, '2015')
cl14 = cleaner(xl2014, '2014')
cl13 = cleaner(xl2013, '2013')
cl12 = cleaner(xl2012, '2012')
cl11 = cleaner(xl2011, '2011')
cl10 = cleaner(xl2010, '2010')
cl09 = cleaner(xl2009, '2009')
# merge the dataframes
masta = pd.merge(cl18, cl17, how='outer', on='city')
masta2 = pd.merge(cl16, cl15, how='outer', on='city')
masta3 = pd.merge(cl14, cl13, how='outer', on='city')
masta4 = pd.merge(cl12, cl11, how='outer', on='city')
masta5 = pd.merge(cl10, cl09, how='outer', on='city')
master = | pd.merge(masta, masta2, how='outer', on='city') | pandas.merge |
import scipy
import numpy
import pandas
import os
import isatools.isatab as isatab
import json
import inspect
import re
from ..enumerations import VariableType, DatasetLevel, SampleType, AssayRole
from ..utilities.generic import removeDuplicateColumns
from .._toolboxPath import toolboxPath
from datetime import datetime
import copy
from ..utilities import removeDuplicateColumns
from ..utilities import normalisation
from ..utilities.normalisation._normaliserABC import Normaliser
import warnings
class Dataset:
"""
Base class for nPYc dataset objects.
:param str sop: Load configuration parameters from specified SOP JSON file
:param sopPath: By default SOPs are loaded from the :file:`nPYc/StudyDesigns/SOP/` directory, if not ``None`` the directory specified in *sopPath=* will be searched before the builtin SOP directory.
"""
"""
Default timestamp format is :rfc:`3339`
"""
_timestampFormat = '%Y-%m-%dT%H:%M:%S'
def __init__(self, sop='Generic', sopPath=None, **kwargs):
"""
Bare constructor.
"""
from .. import __version__
self._intensityData = numpy.array(None)
self.featureMetadata = pandas.DataFrame(None, columns=['Feature Name'])
"""
:math:`m` × :math:`q` pandas dataframe of feature identifiers and metadata
The featureMetadata table can include any datatype that can be placed in a pandas cell, However the toolbox assumes certain prerequisites on the following columns in order to function:
================ ========================================= ============
Column dtype Usage
================ ========================================= ============
Feature Name str or float ID of the :term:`feature` measured in this column. Each 'Feature Name' must be unique in the table. If 'Feature Name' is numeric, the columns should be sorted in ascending or descending order.
================ ========================================= ============
"""
self.sampleMetadata = pandas.DataFrame(None,
columns=['Sample ID', 'AssayRole', 'SampleType', 'Sample File Name',
'Sample Base Name', 'Dilution', 'Batch', 'Correction Batch',
'Acquired Time', 'Run Order', 'Exclusion Details', 'Metadata Available'])
"""
:math:`n` × :math:`p` dataframe of sample identifiers and metadata.
The sampleMetadata table can include any datatype that can be placed in a pandas cell, However the toolbox assumes certain prerequisites on the following columns in order to function:
================== ========================================= ============
Column dtype Usage
================== ========================================= ============
Sample ID str ID of the :term:`sampling event` generating this sample
AssayRole :py:class:`~nPYc.enumerations.AssayRole` Defines the role of this assay
SampleType :py:class:`~nPYc.enumerations.SampleType` Defines the type of sample acquired
Sample File Name str :term:`Unique file name<Sample File Name>` for the analytical data
Sample Base Name str :term:`Common identifier<Sample Base Name>` that links analytical data to the *Sample ID*
Dilution float Where *AssayRole* is :py:attr:`~nPYc.enumerations.AssayRole.LinearityReference`, the expected abundance is indicated here
Batch int Acquisition batch
Correction Batch int When detecting and correcting for :term:`batch<Batch Effects>` and :term:`Run-Order<Run-Order Effects>` effects, run-order effects are characterised within samples sharing the same *Correction Batch*, while batch effects are detected between distinct values
Acquired Time datetime.datetime Date and time of acquisition of raw data
Run order int Order of sample acquisition
Exclusion Details str Details of reasoning if marked for exclusion
Metadata Available bool Records which samples had metadata provided with the .addSampleInfo() method
================== ========================================= ============
"""
self.featureMask = numpy.array(None, dtype=bool)
""":math:`m` element vector, with ``True`` representing features to be included in analysis, and ``False`` those to be excluded"""
self.sampleMask = numpy.array(None, dtype=bool)
""":math:`p` element vector, with ``True`` representing samples to be included in analysis, and ``False`` those to be excluded"""
self.Attributes = dict()
"""
Dictionary of object configuration attributes, including those loaded from :doc:`SOP files<configuration/builtinSOPs>`.
Defined attributes are as follows\:
================ ========================================= ============
Key dtype Usage
================ ========================================= ============
'dpi' positive int Raster resolution when plotting figures
'figureSize' positive (float, float) Size to plot figures
'figureFormat' str Format to save figures in
'histBins' positive int Number of bins to use when drawing histograms
'Feature Names' Column in :py:attr:`featureMetadata` ID of the primary feature name
================ ========================================= ============
"""
self.VariableType = None
self.AnalyticalPlatform = None
""":py:class:`~nPYc.enumerations.VariableType` enum specifying the type of data represented."""
self.Attributes['Log'] = list()
self.Attributes['Log'].append([datetime.now(), 'nPYc Toolbox version %s.' % (__version__)])
self._loadParameters(sop, sopPath)
self._Normalisation = normalisation.NullNormaliser()
# Allow SOP-loaded attributes to be overriden by kwargs
self.Attributes = {**self.Attributes, **kwargs}
self._name = self.__class__.__name__
@property
def intensityData(self):
"""
:math:`n` × :math:`m` numpy matrix of measurements
"""
return self.Normalisation.normalise(self._intensityData)
@intensityData.setter
def intensityData(self, X: numpy.ndarray):
self._intensityData = X
@property
def noSamples(self) -> int:
"""
:return: Number of samples in the dataset (*n*)
:rtype: int
"""
try:
(noSamples, noFeatures) = self._intensityData.shape
except:
noSamples = 0
return noSamples
@property
def noFeatures(self) -> int:
"""
:return: Number of features in the dataset (*m*)
:rtype: int
"""
try:
(noSamples, noFeatures) = self._intensityData.shape
except:
noFeatures = 0
return noFeatures
@property
def log(self) -> str:
"""
Return log entries as a string.
"""
output = ""
for (timestamp, item) in self.Attributes['Log']:
output = output + timestamp.strftime(self._timestampFormat)
output = output + "\t"
output = output + item
output = output + "\n"
return output
@property
def name(self) -> str:
"""
Returns or sets the name of the dataset. *name* must be a string
"""
return self._name
@name.setter
def name(self, value: str):
"""
Validates *value* is valid for filenames
"""
if not isinstance(value, str):
raise TypeError('Name must be a string.')
self._name = value.strip()
@property
def Normalisation(self):
"""
:py:class:`~nPYc.utilities.normalisation._normaliserABC.Normaliser` object that transforms the measurements in :py:attr:`intensityData`.
"""
return self._Normalisation
@Normalisation.setter
def Normalisation(self, normaliser):
if not isinstance(normaliser, Normaliser):
raise TypeError('Normalisation must implement the Normaliser ABC!')
else:
self._Normalisation = normaliser
def __repr__(self):
"""
Customise printing of instance description.
"""
return "<%s instance at %s, named %s, with %d samples, %d features>" % (
self.__class__.__name__, id(self), self.name, self.noSamples, self.noFeatures)
def validateObject(self, verbose=True, raiseError=False, raiseWarning=True):
"""
Checks that all the attributes specified in the class definition are present and of the required class and/or values.
Checks for attributes existence and type.
Check for mandatory columns existence, but does not check the column values (type or uniqueness).
If 'sampleMetadataExcluded', 'intensityDataExcluded', 'featureMetadataExcluded' or 'excludedFlag' exist, the existence and number of exclusions (based on 'sampleMetadataExcluded') is checked
:param verbose: if True the result of each check is printed (default True)
:type verbose: bool
:param raiseError: if True an error is raised when a check fails and the validation is interrupted (default False)
:type raiseError: bool
:param raiseWarning: if True a warning is raised when a check fails
:type raiseWarning: bool
:return: True if the Object conforms to basic :py:class:`Dataset`
:rtype: bool
:raises TypeError: if the Object class is wrong
:raises AttributeError: if self.Attributes does not exist
:raises TypeError: if self.Attributes is not a dict
:raises AttributeError: if self.Attributes['Log'] does not exist
:raises TypeError: if self.Attributes['Log'] is not a list
:raises AttributeError: if self.Attributes['dpi'] does not exist
:raises TypeError: if self.Attributes['dpi'] is not an int
:raises AttributeError: if self.Attributes['figureSize'] does not exist
:raises TypeError: if self.Attributes['figureSize'] is not a list
:raises ValueError: if self.Attributes['figureSize'] is not of length 2
:raises TypeError: if self.Attributes['figureSize'][0] is not a int or float
:raises TypeError: if self.Attributes['figureSize'][1] is not a int or float
:raises AttributeError: if self.Attributes['figureFormat'] does not exist
:raises TypeError: if self.Attributes['figureFormat'] is not a str
:raises AttributeError: if self.Attributes['histBins'] does not exist
:raises TypeError: if self.Attributes['histBins'] is not an int
:raises AttributeError: if self.Attributes['noFiles'] does not exist
:raises TypeError: if self.Attributes['noFiles'] is not an int
:raises AttributeError: if self.Attributes['quantiles'] does not exist
:raises TypeError: if self.Attributes['quantiles'] is not a list
:raises ValueError: if self.Attributes['quantiles'] is not of length 2
:raises TypeError: if self.Attributes['quantiles'][0] is not a int or float
:raises TypeError: if self.Attributes['quantiles'][1] is not a int or float
:raises AttributeError: if self.Attributes['sampleMetadataNotExported'] does not exist
:raises TypeError: if self.Attributes['sampleMetadataNotExported'] is not a list
:raises AttributeError: if self.Attributes['featureMetadataNotExported'] does not exist
:raises TypeError: if self.Attributes['featureMetadataNotExported'] is not a list
:raises AttributeError: if self.Attributes['analyticalMeasurements'] does not exist
:raises TypeError: if self.Attributes['analyticalMeasurements'] is not a dict
:raises AttributeError: if self.Attributes['excludeFromPlotting'] does not exist
:raises TypeError: if self.Attributes['excludeFromPlotting'] is not a list
:raises AttributeError: if self.VariableType does not exist
:raises AttributeError: if self._Normalisation does not exist
:raises TypeError: if self._Normalisation is not the Normaliser ABC
:raises AttributeError: if self._name does not exist
:raises TypeError: if self._name is not a str
:raises AttributeError: if self._intensityData does not exist
:raises TypeError: if self._intensityData is not a numpy.ndarray
:raises AttributeError: if self.sampleMetadata does not exist
:raises TypeError: if self.sampleMetadata is not a pandas.DataFrame
:raises LookupError: if self.sampleMetadata does not have a Sample File Name column
:raises LookupError: if self.sampleMetadata does not have an AssayRole column
:raises LookupError: if self.sampleMetadata does not have a SampleType column
:raises LookupError: if self.sampleMetadata does not have a Dilution column
:raises LookupError: if self.sampleMetadata does not have a Batch column
:raises LookupError: if self.sampleMetadata does not have a Correction Batch column
:raises LookupError: if self.sampleMetadata does not have a Run Order column
:raises LookupError: if self.sampleMetadata does not have a Sample ID column
:raises LookupError: if self.sampleMetadata does not have a Sample Base Name column
:raises LookupError: if self.sampleMetadata does not have an Acquired Time column
:raises LookupError: if self.sampleMetadata does not have an Exclusion Details column
:raises AttributeError: if self.featureMetadata does not exist
:raises TypeError: if self.featureMetadata is not a pandas.DataFrame
:raises LookupError: if self.featureMetadata does not have a Feature Name column
:raises AttributeError: if self.sampleMask does not exist
:raises TypeError: if self.sampleMask is not a numpy.ndarray
:raises ValueError: if self.sampleMask are not bool
:raises AttributeError: if self.featureMask does not exist
:raises TypeError: if self.featureMask is not a numpy.ndarray
:raises ValueError: if self.featureMask are not bool
:raises AttributeError: if self.sampleMetadataExcluded does not exist
:raises TypeError: if self.sampleMetadataExcluded is not a list
:raises AttributeError: if self.intensityDataExcluded does not exist
:raises TypeError: if self.intensityDataExcluded is not a list
:raises ValueError: if self.intensityDataExcluded does not have the same number of exclusions as self.sampleMetadataExcluded
:raises AttributeError: if self.featureMetadataExcluded does not exist
:raises TypeError: if self.featureMetadataExcluded is not a list
:raises ValueError: if self.featureMetadataExcluded does not have the same number of exclusions as self.sampleMetadataExcluded
:raises AttributeError: if self.excludedFlag does not exist
:raises TypeError: if self.excludedFlag is not a list
:raises ValueError: if self.excludedFlag does not have the same number of exclusions as self.sampleMetadataExcluded
"""
def conditionTest(successCond, successMsg, failureMsg, allFailures, verb, raiseErr, raiseWarn, exception):
if not successCond:
allFailures.append(failureMsg)
msg = failureMsg
if raiseWarn:
warnings.warn(msg)
if raiseErr:
raise exception
else:
msg = successMsg
if verb:
print(msg)
return (allFailures)
## init
failureList = []
# reference number of exclusions in list, from sampleMetadataExcluded
refNumExcluded = None
## Check object class
condition = isinstance(self, Dataset)
success = 'Check Object class:\tOK'
failure = 'Check Object class:\tFailure, not Dataset, but ' + str(type(self))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=TypeError(failure))
## self.Attributes
# exist
condition = hasattr(self, 'Attributes')
success = 'Check self.Attributes exists:\tOK'
failure = 'Check self.Attributes exists:\tFailure, no attribute \'self.Attributes\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a dict
condition = isinstance(self.Attributes, dict)
success = 'Check self.Attributes is a dict:\tOK'
failure = 'Check self.Attributes is a dict:\tFailure, \'self.Attributes\' is' + str(type(self.Attributes))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=TypeError(failure))
if condition:
## self.Attributes keys
## Log
# exist
condition = 'Log' in self.Attributes
success = 'Check self.Attributes[\'Log\'] exists:\tOK'
failure = 'Check self.Attributes[\'Log\'] exists:\tFailure, no attribute \'self.Attributes[\'Log\']\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a list
condition = isinstance(self.Attributes['Log'], list)
success = 'Check self.Attributes[\'Log\'] is a list:\tOK'
failure = 'Check self.Attributes[\'Log\'] is a list:\tFailure, \'self.Attributes[\'Log\']\' is ' + str(
type(self.Attributes['Log']))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=TypeError(failure))
# end self.Attributes['Log']
## dpi
# exist
condition = 'dpi' in self.Attributes
success = 'Check self.Attributes[\'dpi\'] exists:\tOK'
failure = 'Check self.Attributes[\'dpi\'] exists:\tFailure, no attribute \'self.Attributes[\'dpi\']\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is an int
condition = isinstance(self.Attributes['dpi'], (int, numpy.integer))
success = 'Check self.Attributes[\'dpi\'] is an int:\tOK'
failure = 'Check self.Attributes[\'dpi\'] is an int:\tFailure, \'self.Attributes[\'dpi\']\' is ' + str(
type(self.Attributes['dpi']))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=TypeError(failure))
# end self.Attributes['dpi']
## figureSize
# exist
condition = 'figureSize' in self.Attributes
success = 'Check self.Attributes[\'figureSize\'] exists:\tOK'
failure = 'Check self.Attributes[\'figureSize\'] exists:\tFailure, no attribute \'self.Attributes[\'figureSize\']\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a list
condition = isinstance(self.Attributes['figureSize'], list)
success = 'Check self.Attributes[\'figureSize\'] is a list:\tOK'
failure = 'Check self.Attributes[\'figureSize\'] is a list:\tFailure, \'self.Attributes[\'figureSize\']\' is ' + str(
type(self.Attributes['figureSize']))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=TypeError(failure))
if condition:
# is of length 2
condition = (len(self.Attributes['figureSize']) == 2)
success = 'Check self.Attributes[\'figureSize\'] is of length 2:\tOK'
failure = 'Check self.Attributes[\'figureSize\'] is of length 2:\tFailure, \'self.Attributes[\'figureSize\']\' is of length ' + str(
len(self.Attributes['figureSize']))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=ValueError(failure))
if condition:
# figureSize[] are int
for i in range(2):
condition = isinstance(self.Attributes['figureSize'][i],
(int, float, numpy.integer, numpy.floating))
success = 'Check self.Attributes[\'figureSize\'][' + str(i) + '] is int or float:\tOK'
failure = 'Check self.Attributes[\'figureSize\'][' + str(
i) + '] is int or float:\tFailure, \'self.Attributes[\'figureSize\'][' + str(
i) + '] is ' + str(type(self.Attributes['figureSize'][i]))
failureList = conditionTest(condition, success, failure, failureList, verbose,
raiseError, raiseWarning, exception=TypeError(failure))
# end self.Attributes['figureSize'] length 2
# end self.Attributes['figureSize] list
# end self.Attributes['figureSize']
## figureFormat
# exist
condition = 'figureFormat' in self.Attributes
success = 'Check self.Attributes[\'figureFormat\'] exists:\tOK'
failure = 'Check self.Attributes[\'figureFormat\'] exists:\tFailure, no attribute \'self.Attributes[\'figureFormat\']\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a str
condition = isinstance(self.Attributes['figureFormat'], str)
success = 'Check self.Attributes[\'figureFormat\'] is a str:\tOK'
failure = 'Check self.Attributes[\'figureFormat\'] is a str:\tFailure, \'self.Attributes[\'figureFormat\']\' is ' + str(
type(self.Attributes['figureFormat']))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=TypeError(failure))
# end self.Attributes['figureFormat']
## histBins
# exist
condition = 'histBins' in self.Attributes
success = 'Check self.Attributes[\'histBins\'] exists:\tOK'
failure = 'Check self.Attributes[\'histBins\'] exists:\tFailure, no attribute \'self.Attributes[\'histBins\']\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is an int
condition = isinstance(self.Attributes['histBins'], (int, numpy.integer))
success = 'Check self.Attributes[\'histBins\'] is an int:\tOK'
failure = 'Check self.Attributes[\'histBins\'] is an int:\tFailure, \'self.Attributes[\'histBins\']\' is ' + str(
type(self.Attributes['histBins']))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=TypeError(failure))
# end self.Attributes['histBins']
## noFiles
# exist
condition = 'noFiles' in self.Attributes
success = 'Check self.Attributes[\'noFiles\'] exists:\tOK'
failure = 'Check self.Attributes[\'noFiles\'] exists:\tFailure, no attribute \'self.Attributes[\'noFiles\']\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is an int
condition = isinstance(self.Attributes['noFiles'], (int, numpy.integer))
success = 'Check self.Attributes[\'noFiles\'] is an int:\tOK'
failure = 'Check self.Attributes[\'noFiles\'] is an int:\tFailure, \'self.Attributes[\'noFiles\']\' is ' + str(
type(self.Attributes['noFiles']))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=TypeError(failure))
# end self.Attributes['noFiles']
## quantiles
# exist
condition = 'quantiles' in self.Attributes
success = 'Check self.Attributes[\'quantiles\'] exists:\tOK'
failure = 'Check self.Attributes[\'quantiles\'] exists:\tFailure, no attribute \'self.Attributes[\'quantiles\']\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a list
condition = isinstance(self.Attributes['quantiles'], list)
success = 'Check self.Attributes[\'quantiles\'] is a list:\tOK'
failure = 'Check self.Attributes[\'quantiles\'] is a list:\tFailure, \'self.Attributes[\'quantiles\']\' is ' + str(
type(self.Attributes['quantiles']))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=TypeError(failure))
if condition:
# is of length 2
condition = (len(self.Attributes['quantiles']) == 2)
success = 'Check self.Attributes[\'quantiles\'] is of length 2:\tOK'
failure = 'Check self.Attributes[\'quantiles\'] is of length 2:\tFailure, \'self.Attributes[\'quantiles\']\' is of length ' + str(
len(self.Attributes['quantiles']))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=ValueError(failure))
if condition:
# quantiles[] are int
for i in range(2):
condition = isinstance(self.Attributes['quantiles'][i],
(int, float, numpy.integer, numpy.floating))
success = 'Check self.Attributes[\'quantiles\'][' + str(i) + '] is int or float:\tOK'
failure = 'Check self.Attributes[\'quantiles\'][' + str(
i) + '] is int or float:\tFailure, \'self.Attributes[\'quantiles\'][' + str(
i) + '] is ' + str(type(self.Attributes['quantiles'][i]))
failureList = conditionTest(condition, success, failure, failureList, verbose,
raiseError, raiseWarning, exception=TypeError(failure))
# end self.Attributes['quantiles'] length 2
# end self.Attributes['quantiles'] list
# end self.Attributes['quantiles']
## sampleMetadataNotExported
# exist
condition = 'sampleMetadataNotExported' in self.Attributes
success = 'Check self.Attributes[\'sampleMetadataNotExported\'] exists:\tOK'
failure = 'Check self.Attributes[\'sampleMetadataNotExported\'] exists:\tFailure, no attribute \'self.Attributes[\'sampleMetadataNotExported\']\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a list
condition = isinstance(self.Attributes['sampleMetadataNotExported'], list)
success = 'Check self.Attributes[\'sampleMetadataNotExported\'] is a list:\tOK'
failure = 'Check self.Attributes[\'sampleMetadataNotExported\'] is a list:\tFailure, \'self.Attributes[\'sampleMetadataNotExported\']\' is ' + str(
type(self.Attributes['sampleMetadataNotExported']))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=TypeError(failure))
# end self.Attributes['sampleMetadataNotExported']
## featureMetadataNotExported
# exist
condition = 'featureMetadataNotExported' in self.Attributes
success = 'Check self.Attributes[\'featureMetadataNotExported\'] exists:\tOK'
failure = 'Check self.Attributes[\'featureMetadataNotExported\'] exists:\tFailure, no attribute \'self.Attributes[\'featureMetadataNotExported\']\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a list
condition = isinstance(self.Attributes['featureMetadataNotExported'], list)
success = 'Check self.Attributes[\'featureMetadataNotExported\'] is a list:\tOK'
failure = 'Check self.Attributes[\'featureMetadataNotExported\'] is a list:\tFailure, \'self.Attributes[\'featureMetadataNotExported\']\' is ' + str(
type(self.Attributes['featureMetadataNotExported']))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=TypeError(failure))
# end self.Attributes['featureMetadataNotExported']
## analyticalMeasurements
# exist
condition = 'analyticalMeasurements' in self.Attributes
success = 'Check self.Attributes[\'analyticalMeasurements\'] exists:\tOK'
failure = 'Check self.Attributes[\'analyticalMeasurements\'] exists:\tFailure, no attribute \'self.Attributes[\'analyticalMeasurements\']\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a dict
condition = isinstance(self.Attributes['analyticalMeasurements'], dict)
success = 'Check self.Attributes[\'analyticalMeasurements\'] is a dict:\tOK'
failure = 'Check self.Attributes[\'analyticalMeasurements\'] is a dict:\tFailure, \'self.Attributes[\'analyticalMeasurements\']\' is ' + str(
type(self.Attributes['analyticalMeasurements']))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=TypeError(failure))
# end self.Attributes['analyticalMeasurements']
## excludeFromPlotting
# exist
condition = 'excludeFromPlotting' in self.Attributes
success = 'Check self.Attributes[\'excludeFromPlotting\'] exists:\tOK'
failure = 'Check self.Attributes[\'excludeFromPlotting\'] exists:\tFailure, no attribute \'self.Attributes[\'excludeFromPlotting\']\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a list
condition = isinstance(self.Attributes['excludeFromPlotting'], list)
success = 'Check self.Attributes[\'excludeFromPlotting\'] is a list:\tOK'
failure = 'Check self.Attributes[\'excludeFromPlotting\'] is a list:\tFailure, \'self.Attributes[\'excludeFromPlotting\']\' is ' + str(
type(self.Attributes['excludeFromPlotting']))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=TypeError(failure))
# end self.Attributes['excludeFromPlotting']
# end self.Attributes dictionary
# end self.Attributes
## self.VariableType
# exist
condition = hasattr(self, 'VariableType')
success = 'Check self.VariableType exists:\tOK'
failure = 'Check self.VariableType exists:\tFailure, no attribute \'self.VariableType\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
# end Variabletype
## self._Normalisation
# exist
condition = hasattr(self, '_Normalisation')
success = 'Check self._Normalisation exists:\tOK'
failure = 'Check self._Normalisation exists:\tFailure, no attribute \'self._Normalisation\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is Normaliser ABC
condition = isinstance(self._Normalisation, Normaliser)
success = 'Check self._Normalisation is Normaliser ABC:\tOK'
failure = 'Check self._Normalisation is Normaliser ABC:\tFailure, \'self._Normalisation\' is ' + str(
type(self._Normalisation))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=TypeError(failure))
# end self._Normalisation
## self._name
# exist
condition = hasattr(self, '_name')
success = 'Check self._name exists:\tOK'
failure = 'Check self._name exists:\tFailure, no attribute \'self._name\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a str
condition = isinstance(self._name, str)
success = 'Check self._name is a str:\tOK'
failure = 'Check self._name is a str:\tFailure, \'self._name\' is ' + str(type(self._name))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=TypeError(failure))
# end self._name
## self._intensityData
# exist
condition = hasattr(self, '_intensityData')
success = 'Check self._intensityData exists:\tOK'
failure = 'Check self._intensityData exists:\tFailure, no attribute \'self._intensityData\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a numpy.ndarray
condition = isinstance(self._intensityData, numpy.ndarray)
success = 'Check self._intensityData is a numpy.ndarray:\tOK'
failure = 'Check self._intensityData is a numpy.ndarray:\tFailure, \'self._intensityData\' is ' + str(
type(self._intensityData))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=TypeError(failure))
# end self._intensityData numpy.ndarray
# end self._intensityData
## self.sampleMetadata
# exist
condition = hasattr(self, 'sampleMetadata')
success = 'Check self.sampleMetadata exists:\tOK'
failure = 'Check self.sampleMetadata exists:\tFailure, no attribute \'self.sampleMetadata\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a pandas.DataFrame
condition = isinstance(self.sampleMetadata, pandas.DataFrame)
success = 'Check self.sampleMetadata is a pandas.DataFrame:\tOK'
failure = 'Check self.sampleMetadata is a pandas.DataFrame:\tFailure, \'self.sampleMetadata\' is ' + str(
type(self.sampleMetadata))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=TypeError(failure))
if condition:
# ['Sample File Name']
condition = ('Sample File Name' in self.sampleMetadata.columns)
success = 'Check self.sampleMetadata[\'Sample File Name\'] exists:\tOK'
failure = 'Check self.sampleMetadata[\'Sample File Name\'] exists:\tFailure, \'self.sampleMetadata\' lacks a \'Sample File Name\' column'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=LookupError(failure))
# ['AssayRole']
condition = ('AssayRole' in self.sampleMetadata.columns)
success = 'Check self.sampleMetadata[\'AssayRole\'] exists:\tOK'
failure = 'Check self.sampleMetadata[\'AssayRole\'] exists:\tFailure, \'self.sampleMetadata\' lacks an \'AssayRole\' column'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=LookupError(failure))
# ['SampleType']
condition = ('SampleType' in self.sampleMetadata.columns)
success = 'Check self.sampleMetadata[\'SampleType\'] exists:\tOK'
failure = 'Check self.sampleMetadata[\'SampleType\'] exists:\tFailure, \'self.sampleMetadata\' lacks an \'SampleType\' column'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=LookupError(failure))
# ['Dilution']
condition = ('Dilution' in self.sampleMetadata.columns)
success = 'Check self.sampleMetadata[\'Dilution\'] exists:\tOK'
failure = 'Check self.sampleMetadata[\'Dilution\'] exists:\tFailure, \'self.sampleMetadata\' lacks a \'Dilution\' column'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=LookupError(failure))
# ['Batch']
condition = ('Batch' in self.sampleMetadata.columns)
success = 'Check self.sampleMetadata[\'Batch\'] exists:\tOK'
failure = 'Check self.sampleMetadata[\'Batch\'] exists:\tFailure, \'self.sampleMetadata\' lacks a \'Batch\' column'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=LookupError(failure))
# ['Correction Batch']
condition = ('Correction Batch' in self.sampleMetadata.columns)
success = 'Check self.sampleMetadata[\'Correction Batch\'] exists:\tOK'
failure = 'Check self.sampleMetadata[\'Correction Batch\'] exists:\tFailure, \'self.sampleMetadata\' lacks a \'Correction Batch\' column'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=LookupError(failure))
# ['Run Order']
condition = ('Run Order' in self.sampleMetadata.columns)
success = 'Check self.sampleMetadata[\'Run Order\'] exists:\tOK'
failure = 'Check self.sampleMetadata[\'Run Order\'] exists:\tFailure, \'self.sampleMetadata\' lacks a \'Run Order\' column'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=LookupError(failure))
# ['Sample ID']
condition = ('Sample ID' in self.sampleMetadata.columns)
success = 'Check self.sampleMetadata[\'Sample ID\'] exists:\tOK'
failure = 'Check self.sampleMetadata[\'Sample ID\'] exists:\tFailure, \'self.sampleMetadata\' lacks a \'Sample ID\' column'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=LookupError(failure))
# ['Sample Base Name']
condition = ('Sample Base Name' in self.sampleMetadata.columns)
success = 'Check self.sampleMetadata[\'Sample Base Name\'] exists:\tOK'
failure = 'Check self.sampleMetadata[\'Sample Base Name\'] exists:\tFailure, \'self.sampleMetadata\' lacks a \'Sample Base Name\' column'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=LookupError(failure))
# ['Acquired Time']
condition = ('Acquired Time' in self.sampleMetadata.columns)
success = 'Check self.sampleMetadata[\'Acquired Time\'] exists:\tOK'
failure = 'Check self.sampleMetadata[\'Acquired Time\'] exists:\tFailure, \'self.sampleMetadata\' lacks a \'Acquired Time\' column'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=LookupError(failure))
# ['Exclusion Details']
condition = ('Exclusion Details' in self.sampleMetadata.columns)
success = 'Check self.sampleMetadata[\'Exclusion Details\'] exists:\tOK'
failure = 'Check self.sampleMetadata[\'Exclusion Details\'] exists:\tFailure, \'self.sampleMetadata\' lacks a \'Exclusion Details\' column'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=LookupError(failure))
# end self.sampleMetadata pandas.DataFrame
# end self.sampleMetadata
## self.featureMetadata
# exist
condition = hasattr(self, 'featureMetadata')
success = 'Check self.featureMetadata exists:\tOK'
failure = 'Check self.featureMetadata exists:\tFailure, no attribute \'self.featureMetadata\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a pandas.DataFrame
condition = isinstance(self.featureMetadata, pandas.DataFrame)
success = 'Check self.featureMetadata is a pandas.DataFrame:\tOK'
failure = 'Check self.featureMetadata is a pandas.DataFrame:\tFailure, \'self.featureMetadata\' is ' + str(
type(self.featureMetadata))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=TypeError(failure))
if condition:
# ['Feature Name']
condition = ('Feature Name' in self.featureMetadata.columns)
success = 'Check self.featureMetadata[\'Feature Name\'] exists:\tOK'
failure = 'Check self.featureMetadata[\'Feature Name\'] exists:\tFailure, \'self.featureMetadata\' lacks a \'Feature Name\' column'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=LookupError(failure))
# end self.featureMetadata['Feature Name']
# end self.featureMetadata pandas.DataFrame
# end self.featureMetadata
## self.sampleMask
# exist
condition = hasattr(self, 'sampleMask')
success = 'Check self.sampleMask exists:\tOK'
failure = 'Check self.sampleMask exists:\tFailure, no attribute \'self.sampleMask\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a numpy.ndarray
condition = isinstance(self.sampleMask, numpy.ndarray)
success = 'Check self.sampleMask is a numpy.ndarray:\tOK'
failure = 'Check self.sampleMask is a numpy.ndarray:\tFailure, \'self.sampleMask\' is ' + str(
type(self.sampleMask))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=TypeError(failure))
if condition:
# if (self.sampleMask.all() != numpy.array(False, dtype=bool)):
# self.sampleMask is bool
condition = (self.sampleMask.dtype == numpy.dtype(bool))
success = 'Check self.sampleMask is bool:\tOK'
failure = 'Check self.sampleMask is bool:\tFailure, \'self.sampleMask\' is ' + str(
self.sampleMask.dtype)
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=ValueError(failure))
# end self.samplemask numpy.ndarray
## end self.sampleMask
## self.featureMask
# exist
condition = hasattr(self, 'featureMask')
success = 'Check self.featureMask exists:\tOK'
failure = 'Check self.featureMask exists:\tFailure, no attribute \'self.featureMask\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a numpy.ndarray
condition = isinstance(self.featureMask, numpy.ndarray)
success = 'Check self.featureMask is a numpy.ndarray:\tOK'
failure = 'Check self.featureMask is a numpy.ndarray:\tFailure, \'self.featureMask\' is ' + str(
type(self.featureMask))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=TypeError(failure))
if condition:
# if (self.featureMask.all() != numpy.array(False, dtype=bool)):
# self.featureMask is bool
condition = (self.featureMask.dtype == numpy.dtype(bool))
success = 'Check self.featureMask is bool:\tOK'
failure = 'Check self.featureMask is bool:\tFailure, \'self.featureMask\' is ' + str(
self.featureMask.dtype)
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=ValueError(failure))
# end self.featureMask numpy.ndarray
## end self.featureMask
## Exclusion data
# If any exclusion exists
if (hasattr(self, 'sampleMetadataExcluded') | hasattr(self, 'intensityDataExcluded') | hasattr(self,
'featureMetadataExcluded') | hasattr(
self, 'excludedFlag')):
if verbose:
print('---- exclusion lists found, check exclusions ----')
## sampleMetadataExcluded
# exist
condition = hasattr(self, 'sampleMetadataExcluded')
success = 'Check self.sampleMetadataExcluded exists:\tOK'
failure = 'Check self.sampleMetadataExcluded exists:\tFailure, no attribute \'self.sampleMetadataExcluded\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a list
condition = isinstance(self.sampleMetadataExcluded, list)
success = 'Check self.sampleMetadataExcluded is a list:\tOK'
failure = 'Check self.sampleMetadataExcluded is a list:\tFailure, \'self.sampleMetadataExcluded\' is ' + str(
type(self.sampleMetadataExcluded))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=TypeError(failure))
if condition:
# Use sampleMetadataExcluded as reference number of exclusions
refNumExcluded = len(self.sampleMetadataExcluded)
if verbose:
print('---- self.sampleMetadataExcluded used as reference number of exclusions ----')
print('\t' + str(refNumExcluded) + ' exclusions')
# end sampleMetadataExcluded is a list
# end sampleMetadataExcluded
## intensityDataExcluded
# exist
condition = hasattr(self, 'intensityDataExcluded')
success = 'Check self.intensityDataExcluded exists:\tOK'
failure = 'Check self.intensityDataExcluded exists:\tFailure, no attribute \'self.intensityDataExcluded\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a list
condition = isinstance(self.intensityDataExcluded, list)
success = 'Check self.intensityDataExcluded is a list:\tOK'
failure = 'Check self.intensityDataExcluded is a list:\tFailure, \'self.intensityDataExcluded\' is ' + str(
type(self.intensityDataExcluded))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=TypeError(failure))
if condition:
# number of exclusions
condition = (len(self.intensityDataExcluded) == refNumExcluded)
success = 'Check self.intensityDataExcluded number of exclusions:\tOK'
failure = 'Check self.intensityDataExcluded number of exclusions:\tFailure, \'self.intensityDataExcluded\' has ' + str(
len(self.intensityDataExcluded)) + ' exclusions, ' + str(refNumExcluded) + ' expected'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=ValueError(failure))
# end intensityDataExcluded is a list
# end intensityDataExclude
## featureMetadataExcluded
# exist
condition = hasattr(self, 'featureMetadataExcluded')
success = 'Check self.featureMetadataExcluded exists:\tOK'
failure = 'Check self.featureMetadataExcluded exists:\tFailure, no attribute \'self.featureMetadataExcluded\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a list
condition = isinstance(self.featureMetadataExcluded, list)
success = 'Check self.featureMetadataExcluded is a list:\tOK'
failure = 'Check self.featureMetadataExcluded is a list:\tFailure, \'self.featureMetadataExcluded\' is ' + str(
type(self.featureMetadataExcluded))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=TypeError(failure))
if condition:
# number of exclusions
condition = (len(self.featureMetadataExcluded) == refNumExcluded)
success = 'Check self.featureMetadataExcluded number of exclusions:\tOK'
failure = 'Check self.featureMetadataExcluded number of exclusions:\tFailure, \'self.featureMetadataExcluded\' has ' + str(
len(self.featureMetadataExcluded)) + ' exclusions, ' + str(refNumExcluded) + ' expected'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=ValueError(failure))
# end featureMetadataExcluded is a list
# end featureMetadataExcluded
## excludedFlag
# exist
condition = hasattr(self, 'excludedFlag')
success = 'Check self.excludedFlag exists:\tOK'
failure = 'Check self.excludedFlag exists:\tFailure, no attribute \'self.excludedFlag\''
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=AttributeError(failure))
if condition:
# is a list
condition = isinstance(self.excludedFlag, list)
success = 'Check self.excludedFlag is a list:\tOK'
failure = 'Check self.excludedFlag is a list:\tFailure, \'self.excludedFlag\' is ' + str(
type(self.excludedFlag))
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError, raiseWarning,
exception=TypeError(failure))
if condition:
# number of exclusions
condition = (len(self.excludedFlag) == refNumExcluded)
success = 'Check self.excludedFlag number of exclusions:\tOK'
failure = 'Check self.excludedFlag number of exclusions:\tFailure, \'self.excludedFlag\' has ' + str(
len(self.excludedFlag)) + ' exclusions, ' + str(refNumExcluded) + ' expected'
failureList = conditionTest(condition, success, failure, failureList, verbose, raiseError,
raiseWarning, exception=ValueError(failure))
# end excludedFlag is a list
# end excludedFlag
# end exclusions are present
else:
if verbose:
print('---- no exclusion lists found, no check ----')
# end Exclusion Data
## List additional attributes (print + log)
expectedSet = set({'Attributes', 'VariableType', '_Normalisation', '_name', '_intensityData', 'sampleMetadata',
'featureMetadata', 'sampleMask', 'featureMask', 'sampleMetadataExcluded',
'intensityDataExcluded', 'featureMetadataExcluded', 'excludedFlag'})
objectSet = set(self.__dict__.keys())
additionalAttributes = objectSet - expectedSet
if len(additionalAttributes) > 0:
if verbose:
print('--------')
print(str(len(additionalAttributes)) + ' additional attributes in the object:')
print('\t' + str(list(additionalAttributes)))
else:
if verbose:
print('--------')
print('No additional attributes in the object')
## Log and final Output
# Basic failure might compromise logging, failure of QC compromises sample meta
if len(failureList) == 0:
# Log
self.Attributes['Log'].append([datetime.now(),
'Conforms to Dataset (0 errors), (%i samples and %i features), with %i additional attributes in the object: %s' % (
self.noSamples, self.noFeatures, len(additionalAttributes),
list(additionalAttributes))])
# print results
if verbose:
print('--------')
print('Conforms to Dataset:\t 0 errors found')
return True
# Try logging to something that might not have a log
else:
# try logging
try:
self.Attributes['Log'].append([datetime.now(),
'Failed Dataset validation, with the following %i issues: %s' % (
len(failureList), failureList)])
except (AttributeError, KeyError, TypeError):
if verbose:
print('--------')
print('Logging failed')
# print results
if verbose:
print('--------')
print('Does not conform to Dataset:\t %i errors found' % (len(failureList)))
# output
if raiseWarning:
warnings.warn('Does not conform to Dataset:\t %i errors found' % (len(failureList)))
return False
def _loadParameters(self, sop, sopPath):
"""
Load assay parameters from JSON SOP files located in sopPath.
SOP names should be unique (obviously), but this is not enforced. Duplicate SOP files may cause undefined behaviour.
:param sop: the SOP name
:type sop: string
:param sopPath: the path to sop
:type sopPath: string
"""
import json
from collections import ChainMap
from ..utilities.extractParams import buildFileList
import re
# Always load some generic values
with open(os.path.join(toolboxPath(), 'StudyDesigns', 'SOP', 'Generic.json')) as data_file:
attributes = json.load(data_file)
self.Attributes = {**self.Attributes, **attributes}
# But if SOP is Generic, skip
if sop == 'Generic':
return
def splitext(path):
return {os.path.splitext(os.path.basename(path))[0]: path}
pattern = re.compile('.+?\.json$')
builtinSOPS = os.path.join(toolboxPath(), 'StudyDesigns', 'SOP')
sopPaths = buildFileList(builtinSOPS, pattern)
if sopPath is not None:
if not os.path.isdir(sopPath):
raise ValueError("Path: %s must be a directory." % sopPath)
sopPaths.extend(buildFileList(sopPath, pattern))
# Remove empty entries from list
sopPathList = [x for x in sopPaths if x != []]
sopPaths = dict()
for sopPATH in sopPathList:
sopPaths.update(splitext(sopPATH))
if not sop in sopPaths:
raise ValueError("The SOP '%s' is not present in '%s', or '%s'." % (sop, builtinSOPS, sopPath))
with open(sopPaths[sop]) as data_file:
attributes = json.load(data_file)
self.Attributes = {**self.Attributes, **attributes}
self.Attributes['Log'].append([datetime.now(), 'SOP configuration %s loaded from %s.' % (sop, sopPaths[sop])])
def initialiseMasks(self):
"""
Re-initialise :py:attr:`featureMask` and :py:attr:`sampleMask` to match the current dimensions of :py:attr:`intensityData`, and include all samples.
"""
self.featureMask = numpy.squeeze(numpy.ones([self.noFeatures, 1], dtype=bool), axis=1)
"""*m* element vector, with ``True`` representing features to be included in analysis, and ``False`` those to be excluded"""
self.sampleMask = numpy.squeeze(numpy.ones([self.noSamples, 1], dtype=bool), axis=1)
"""*p* element vector, with ``True`` representing samples to be included in analysis, and ``False`` those to be excluded"""
self.Attributes['Log'].append([datetime.now(), "Masks Initialised to True.\n"])
def updateMasks(self, filterSamples=True, filterFeatures=True,
sampleTypes=list(SampleType),
assayRoles=list(AssayRole), **kwargs):
"""
Update :py:attr:`~Dataset.sampleMask` and :py:attr:`~Dataset.featureMask` according to parameters.
:py:meth:`updateMasks` sets :py:attr:`~Dataset.sampleMask` or :py:attr:`~Dataset.featureMask` to ``False`` for those items failing analytical criteria.
.. note:: To avoid reintroducing items manually excluded, this method only ever sets items to ``False``, therefore if you wish to move from more stringent criteria to a less stringent set, you will need to reset the mask to all ``True`` using :py:meth:`~Dataset.initialiseMasks`.
:param bool filterSamples: If ``False`` don't modify sampleMask
:param bool filterFeatures: If ``False`` don't modify featureMask
:param sampleTypes: List of types of samples to retain
:type sampleTypes: SampleType
:param AssayRole sampleRoles: List of assays roles to retain
"""
if not isinstance(sampleTypes, list):
raise TypeError('sampleTypes must be a list of SampleType enums')
if not isinstance(assayRoles, list):
raise TypeError('sampleTypes must be a list of AssayRole enums')
if not all(isinstance(item, SampleType) for item in sampleTypes):
raise TypeError('sampleTypes must be SampleType enums.')
if not all(isinstance(item, AssayRole) for item in assayRoles):
raise TypeError('assayRoles must be AssayRole enums.')
# Feature exclusions
if filterFeatures:
raise NotImplementedError
# Sample Exclusions
if filterSamples:
sampleMask = self.sampleMetadata['SampleType'].isin(sampleTypes)
assayMask = self.sampleMetadata['AssayRole'].isin(assayRoles)
sampleMask = numpy.logical_and(sampleMask, assayMask)
self.sampleMask = numpy.logical_and(sampleMask, self.sampleMask)
self.Attributes['Log'].append([datetime.now(),
"Dataset filtered with: filterSamples=%s, filterFeatures=%s, sampleClasses=%s, sampleRoles=%s, %s." % (
filterSamples,
filterFeatures,
sampleTypes,
assayRoles,
', '.join("{!s}={!r}".format(key, val) for (key, val) in kwargs.items()))])
def applyMasks(self):
"""
Permanently delete elements masked (those set to ``False``) in :py:attr:`sampleMask` and :py:attr:`featureMask`, from :py:attr:`featureMetadata`, :py:attr:`sampleMetadata`, and :py:attr:`intensityData`.
"""
# Only save to excluded if features or samples masked
if (sum(self.sampleMask == False) > 0) | (sum(self.featureMask == False) > 0):
# Instantiate lists if first application
if not hasattr(self, 'sampleMetadataExcluded'):
self.sampleMetadataExcluded = []
self.intensityDataExcluded = []
self.featureMetadataExcluded = []
self.excludedFlag = []
# Samples
if sum(self.sampleMask) != len(self.sampleMask):
# Account for if self.sampleMask is a pandas.series
try:
self.sampleMask = self.sampleMask.values
except:
pass
# Save excluded samples
self.sampleMetadataExcluded.append(self.sampleMetadata[:][self.sampleMask == False])
self.intensityDataExcluded.append(self._intensityData[self.sampleMask == False, :])
self.featureMetadataExcluded.append(self.featureMetadata)
self.excludedFlag.append('Samples')
# Delete excluded samples
self.sampleMetadata = self.sampleMetadata.loc[self.sampleMask]
self.sampleMetadata.reset_index(drop=True, inplace=True)
self._intensityData = self._intensityData[self.sampleMask, :]
if hasattr(self, 'fit'):
self.fit = self.fit[self.sampleMask, :]
# Features
if sum(self.featureMask) != len(self.featureMask):
# Save excluded features
# Save excluded features
self.featureMetadataExcluded.append(self.featureMetadata[:][self.featureMask == False])
self.intensityDataExcluded.append(self._intensityData[:, self.featureMask == False])
self.sampleMetadataExcluded.append(self.sampleMetadata)
self.excludedFlag.append('Features')
# Delete excluded features
self.featureMetadata = self.featureMetadata.loc[self.featureMask]
self.featureMetadata.reset_index(drop=True, inplace=True)
self._intensityData = self._intensityData[:, self.featureMask]
self.Attributes['Log'].append([datetime.now(), '%i samples and %i features removed from dataset.' % (
sum(self.sampleMask == False), sum(self.featureMask == False))])
# Build new masks
self.initialiseMasks()
def addSampleInfo(self, descriptionFormat=None, filePath=None, **kwargs):
"""
Load additional metadata and map it in to the :py:attr:`sampleMetadata` table.
Possible options:
* **'Basic CSV'** Joins the :py:attr:`sampleMetadata` table with the data in the ``csv`` file at *filePath=*, matching on the 'Sample File Name' column in both (see :doc:`samplemetadata`).
* **'Filenames'** Parses sample information out of the filenames, based on the named capture groups in the regex passed in *filenamespec*
* **'Raw Data'** Extract analytical parameters from raw data files
* **'ISATAB'** ISATAB study designs
:param str descriptionFormat: Format of metadata to be added
:param str filePath: Path to the additional data to be added
:raises NotImplementedError: if the descriptionFormat is not understood
"""
"""
Extra options for internal NPC use:
* **'NPC LIMS'** NPC LIMS files mapping files names of raw analytical data to sample IDs
* **'NPC Subject Info'** Map subject metadata from a NPC sample manifest file (format defined in 'PCSOP.082')
"""
if descriptionFormat == 'Basic CSV':
self._matchBasicCSV(filePath)
elif descriptionFormat == 'NPC LIMS':
self._matchDatasetToLIMS(filePath)
elif descriptionFormat == 'NPC Subject Info':
self._matchDatasetToSubjectInfo(filePath)
elif descriptionFormat == 'Raw Data':
self._getSampleMetadataFromRawData(filePath)
elif descriptionFormat == 'ISATAB':
self._matchDatasetToISATAB(filePath, **kwargs)
elif descriptionFormat == 'Filenames':
self._getSampleMetadataFromFilename(kwargs['filenameSpec'])
else:
raise NotImplementedError
def addFeatureInfo(self, filePath=None, descriptionFormat=None, featureId=None, **kwargs):
"""
Load additional metadata and map it in to the :py:attr:`featureMetadata` table.
Possible options:
* **'Reference Ranges'** JSON file specifying upper and lower reference ranges for a feature.
:param str filePath: Path to the additional data to be added
:param str descriptionFormat:
:param str featureId: Unique feature Id field in the metadata file provided to match with internal Feature Name
:raises NotImplementedError: if the descriptionFormat is not understood
"""
if descriptionFormat is None:
if featureId is None:
raise ValueError('Please provide a valid featureId')
# Read new data and copy the current state of featureMetadata
csvData = pandas.read_csv(filePath)
if not any(csvData[featureId].isin(self.featureMetadata['Feature Name'])):
raise ValueError('No matching features found in csv file provided.')
currentMetadata = self.featureMetadata.copy()
# Overwrite previously existing columns
columnsToRemove = csvData.columns
if 'Feature Name' in columnsToRemove:
columnsToRemove = columnsToRemove.drop(['Feature Name'])
for column in columnsToRemove:
if column in currentMetadata.columns:
currentMetadata.drop(column, axis=1, inplace=True)
currentMetadata = currentMetadata.merge(csvData, how='left', left_on='Feature Name',
right_on=featureId, sort=False)
# Avoid duplicating feature ID field
if featureId != 'Feature Name':
currentMetadata.drop(featureId, axis=1, inplace=True)
self.featureMetadata = currentMetadata
elif descriptionFormat.lower() == 'reference ranges':
from ..utilities._addReferenceRanges import addReferenceRanges
addReferenceRanges(self.featureMetadata, filePath)
def _matchBasicCSV(self, filePath):
"""
Do a basic join of the data in the csv file at filePath to the :py:attr:`sampleMetadata` dataframe on the 'Sample File Name'.
"""
csvData = pandas.read_csv(filePath, dtype={'Sample File Name':str, 'Sample ID': str})
currentMetadata = self.sampleMetadata.copy()
if 'Sample File Name' not in csvData.columns:
raise KeyError("No 'Sample File Name' column present, unable to join tables.")
# Check if there are any duplicates in the csv file
u_ids, u_counts = numpy.unique(csvData['Sample File Name'], return_counts=True)
if any(u_counts > 1):
warnings.warn('Check and remove duplicates in CSV file')
return
# Store previous AssayRole and SampleType in case they were parsed using from filename:
#
oldAssayRole = currentMetadata['AssayRole']
oldSampleType = currentMetadata['SampleType']
oldDilution = currentMetadata['Dilution']
##
# If colums exist in both csv data and dataset.sampleMetadata remove them from sampleMetadata
##
columnsToRemove = csvData.columns
columnsToRemove = columnsToRemove.drop(['Sample File Name'])
for column in columnsToRemove:
if column in currentMetadata.columns:
currentMetadata.drop(column, axis=1, inplace=True)
# If AssayRole or SampleType columns are present parse strings into enums
csvData['AssayRole'] = [(x.replace(" ", "")).lower() if type(x) is str else numpy.nan for x in csvData['AssayRole']]
csvData['SampleType'] = [(x.replace(" ", "")).lower() if type(x) is str else numpy.nan for x in csvData['SampleType']]
if 'AssayRole' in csvData.columns:
for role in AssayRole:
csvData.loc[csvData['AssayRole'].values == (str(role).replace(" ", "")).lower(), 'AssayRole'] = role
if 'SampleType' in csvData.columns:
for stype in SampleType:
csvData.loc[csvData['SampleType'].values == (str(stype).replace(" ", "")).lower(), 'SampleType'] = stype
# If Acquired Time column is in the CSV file, reformat data to allow operations on timestamps and timedeltas,
# which are used in some plotting functions
if 'Acquired Time' in csvData:
csv_datetime = pandas.to_datetime(csvData['Acquired Time'], errors='ignore')
# msData.sampleMetadata['Acquired Time'] = z
csv_datetime = csv_datetime.dt.strftime('%d-%b-%Y %H:%M:%S')
csvData['Acquired Time'] = csv_datetime.apply(lambda x: datetime.strptime(x, '%d-%b-%Y %H:%M:%S')).astype('O')
# Left join, without sort, so the intensityData matrix and the sample Masks are kept in order
# Preserve information about sample mask alongside merge even on the case of samples missing from CSV file.
# Is this required?? Masked field doesn't seem to be used anywhere else
currentMetadata['Masked'] = False
currentMetadata.loc[(self.sampleMask == False), 'Masked'] = True
joinedTable = pandas.merge(currentMetadata, csvData, how='left', left_on='Sample File Name',
right_on='Sample File Name', sort=False)
merged_samples = pandas.merge(currentMetadata, csvData, how='inner', left_on='Sample File Name',
right_on='Sample File Name', sort=False)
merged_samples = merged_samples['Sample File Name']
merged_indices = joinedTable[joinedTable['Sample File Name'].isin(merged_samples)].index
# Samples in the CSV file but not acquired will go for sampleAbsentMetadata, for consistency with NPC Lims import
csv_butnotacq = csvData.loc[csvData['Sample File Name'].isin(currentMetadata['Sample File Name']) == False, :]
if csv_butnotacq.shape[0] != 0:
sampleAbsentMetadata = csv_butnotacq.copy(deep=True)
# Removed normalised index columns
# Enum masks describing the data in each row
sampleAbsentMetadata.loc[:, 'SampleType'] = SampleType.StudySample
sampleAbsentMetadata.loc[sampleAbsentMetadata['SampleType'].str.match('StudyPool', na=False).astype(
bool), 'SampleType'] = SampleType.StudyPool
sampleAbsentMetadata.loc[sampleAbsentMetadata['SampleType'].str.match('ExternalReference', na=False).astype(
bool), 'SampleType'] = SampleType.ExternalReference
sampleAbsentMetadata.loc[:, 'AssayRole'] = AssayRole.Assay
sampleAbsentMetadata.loc[sampleAbsentMetadata['AssayRole'].str.match('PrecisionReference', na=False).astype(
bool), 'AssayRole'] = AssayRole.PrecisionReference
sampleAbsentMetadata.loc[sampleAbsentMetadata['AssayRole'].str.match('LinearityReference', na=False).astype(
bool), 'AssayRole'] = AssayRole.LinearityReference
# Remove duplicate columns (these will be appended with _x or _y)
cols = [c for c in sampleAbsentMetadata.columns if c[-2:] != '_y']
sampleAbsentMetadata = sampleAbsentMetadata[cols]
sampleAbsentMetadata.rename(columns=lambda x: x.replace('_x', ''), inplace=True)
self.sampleAbsentMetadata = sampleAbsentMetadata
# By default everything in the CSV has metadata available and samples mentioned there will not be masked
# unless Include Sample field was == False
joinedTable.loc[merged_indices, 'Metadata Available'] = True
# Samples in the folder and processed but not mentioned in the CSV.
acquired_butnotcsv = currentMetadata.loc[(currentMetadata['Sample File Name'].isin(csvData['Sample File Name']) == False), :]
# Ensure that acquired but no csv only counts samples which 1 are not in CSV and 2 - also have no other kind of
# AssayRole information provided (from parsing filenames for example)
if acquired_butnotcsv.shape[0] != 0:
noMetadataIndex = acquired_butnotcsv.index
# Find samples where metadata was there previously and is not on the new CSV
previousMetadataAvailable = currentMetadata.loc[(~oldSampleType.isnull()) & (~oldAssayRole.isnull())
& ((currentMetadata['Sample File Name'].isin(csvData['Sample File Name']) == False)), :].index
metadataNotAvailable = [x for x in noMetadataIndex if x not in previousMetadataAvailable]
# Keep old AssayRoles and SampleTypes for cases not mentioned in CSV for which this information was previously
# available
joinedTable.loc[previousMetadataAvailable, 'AssayRole'] = oldAssayRole[previousMetadataAvailable]
joinedTable.loc[previousMetadataAvailable, 'SampleType'] = oldSampleType[previousMetadataAvailable]
joinedTable.loc[previousMetadataAvailable, 'Dilution'] = oldDilution[previousMetadataAvailable]
# If not in the new CSV, but previously there, keep it and don't mask
if len(metadataNotAvailable) > 0:
joinedTable.loc[metadataNotAvailable, 'Metadata Available'] = False
# self.sampleMask[metadataNotAvailable] = False
# joinedTable.loc[metadataNotAvailable, 'Exclusion Details'] = 'No Metadata in CSV'
# 1) ACQ and in "include Sample" - drop and set mask to false
# Samples Not ACQ and in "include Sample" set to False - drop and ignore from the dataframe
# Remove acquired samples where Include sample column equals false - does not remove, just masks the sample
if 'Include Sample' in csvData.columns:
which_to_drop = joinedTable[joinedTable['Include Sample'] == False].index
#self.intensityData = numpy.delete(self.intensityData, which_to_drop, axis=0)
#self.sampleMask = numpy.delete(self.sampleMask, which_to_drop)
self.sampleMask[which_to_drop] = False
#joinedTable.drop(which_to_drop, axis=0, inplace=True)
joinedTable.drop('Include Sample', inplace=True, axis=1)
previously_masked = joinedTable[joinedTable['Masked'] == True].index
self.sampleMask[previously_masked] = False
joinedTable.drop('Masked', inplace=True, axis=1)
# Regenerate the dataframe index for joined table
joinedTable.reset_index(inplace=True, drop=True)
self.sampleMetadata = joinedTable
# Commented out as we shouldn't need this here after removing the LIMS, but lets keep it
# This should make it work - but its assuming the sample "NAME" is the same as File name as in LIMS.
self.sampleMetadata['Sample Base Name'] = self.sampleMetadata['Sample File Name']
# Ensure there is a batch column
if 'Batch' not in self.sampleMetadata:
self.sampleMetadata['Batch'] = 1
self.Attributes['Log'].append([datetime.now(), 'Basic CSV matched from %s' % (filePath)])
def _getSampleMetadataFromFilename(self, filenameSpec):
"""
Filename spec is not supported in the empty base class.
"""
raise NotImplementedError
def _getSampleMetadataFromRawData(self, rawDataPath):
"""
Pull metadata out of raw experiment files.
"""
raise NotImplementedError
def _matchDatasetToLIMS(self, pathToLIMSfile):
"""
Establish the `Sampling ID` by matching the `Sample Base Name` with the LIMS file information.
:param str pathToLIMSfile: Path to LIMS file for map Sampling ID
"""
# Read in LIMS file
self.limsFile = pandas.read_csv(pathToLIMSfile, converters={'Sample ID': str})
if any(self.limsFile.columns.str.match('Sampling ID')) and any(self.limsFile.columns.str.match('Sample ID')):
warnings.warn('The LIMS File contains both a Sample ID and Sampling ID Fields')
# rename 'sample ID' to 'sampling ID' to match sampleMetadata format
if any(self.limsFile.columns.str.match('Sampling ID')):
self.limsFile.rename(columns={'Sampling ID': 'Sample ID'}, inplace=True)
# Prepare data
# Create normalised columns
self.sampleMetadata.loc[:, 'Sample Base Name Normalised'] = self.sampleMetadata['Sample Base Name'].str.lower()
# if is float, make it a string with 'Assay data location'
if isinstance(self.limsFile.loc[0, 'Assay data name'], (int, float, numpy.integer, numpy.floating)):
self.limsFile.loc[:, 'Assay data name'] = self.limsFile.loc[:, 'Assay data location'].str.cat(
self.limsFile['Assay data name'].astype(str), sep='/')
self.limsFile.loc[:, 'Assay data name Normalised'] = self.limsFile['Assay data name'].str.lower()
# Match limsFile to sampleMetdata for samples with data PRESENT
# Remove already present columns
if 'Sampling ID' in self.sampleMetadata.columns: self.sampleMetadata.drop(['Sampling ID'], axis=1, inplace=True)
if 'Sample ID' in self.sampleMetadata.columns: self.sampleMetadata.drop(['Sample ID'], axis=1, inplace=True)
if 'Subject ID' in self.sampleMetadata.columns: self.sampleMetadata.drop(['Subject ID'], axis=1, inplace=True)
merged_samples = pandas.merge(self.sampleMetadata, self.limsFile, how='inner',left_on='Sample Base Name Normalised',
right_on='Assay data name Normalised', sort=False)
self.sampleMetadata = pandas.merge(self.sampleMetadata, self.limsFile, left_on='Sample Base Name Normalised',
right_on='Assay data name Normalised', how='left', sort=False)
merged_samples = merged_samples['Sample File Name']
merged_indices = self.sampleMetadata[self.sampleMetadata['Sample File Name'].isin(merged_samples)].index
# Complete/create set of boolean columns describing the data in each row for sampleMetadata
self.sampleMetadata.loc[:, 'Data Present'] = self.sampleMetadata['Sample File Name'].str.match('.+', na=False)
self.sampleMetadata.loc[:, 'LIMS Present'] = self.sampleMetadata['Assay data name'].str.match('.+', na=False,
case=False)
self.sampleMetadata.loc[:, 'LIMS Marked Missing'] = self.sampleMetadata['Status'].str.match('Missing', na=False)
# Remove duplicate columns (these will be appended with _x or _y)
cols = [c for c in self.sampleMetadata.columns if c[-2:] != '_y']
self.sampleMetadata = self.sampleMetadata[cols]
self.sampleMetadata.rename(columns=lambda x: x.replace('_x', ''), inplace=True)
# Find samples present in LIMS but not acquired
lims_butnotacq = self.limsFile.loc[self.limsFile['Assay data name Normalised'].isin(
self.sampleMetadata['Sample Base Name Normalised']) == False, :]
# Removed normalised index coloumns
self.sampleMetadata.drop(labels=['Sample Base Name Normalised', 'Assay data name Normalised'], axis=1,
inplace=True)
self.limsFile.drop(labels=['Assay data name Normalised'], axis=1, inplace=True)
# Enforce string type on matched data
self.sampleMetadata['Assay data name'] = self.sampleMetadata['Assay data name'].astype(str)
self.sampleMetadata['Assay data location'] = self.sampleMetadata['Assay data location'].astype(str)
self.sampleMetadata['Sample ID'] = self.sampleMetadata['Sample ID'].astype(str)
self.sampleMetadata['Status'] = self.sampleMetadata['Status'].astype(str)
if hasattr(self.sampleMetadata, 'Sample batch'):
self.sampleMetadata['Sample batch'] = self.sampleMetadata['Sample batch'].astype(str)
if hasattr(self.sampleMetadata, 'Assay protocol'):
self.sampleMetadata['Assay protocol'] = self.sampleMetadata['Assay protocol'].astype(str)
if hasattr(self.sampleMetadata, 'Sample position'):
self.sampleMetadata['Sample position'] = self.sampleMetadata['Sample position'].astype(str)
if lims_butnotacq.shape[0] != 0:
sampleAbsentMetadata = lims_butnotacq.copy(deep=True)
# Enum masks describing the data in each row
sampleAbsentMetadata.loc[:, 'SampleType'] = SampleType.StudySample
sampleAbsentMetadata.loc[sampleAbsentMetadata['Status'].str.match('Study Reference', na=False).astype(
bool), 'SampleType'] = SampleType.StudyPool
sampleAbsentMetadata.loc[sampleAbsentMetadata['Status'].str.match('Long Term Reference', na=False).astype(
bool), 'SampleType'] = SampleType.ExternalReference
sampleAbsentMetadata.loc[:, 'AssayRole'] = AssayRole.Assay
sampleAbsentMetadata.loc[sampleAbsentMetadata['Status'].str.match('Study Reference', na=False).astype(
bool), 'AssayRole'] = AssayRole.PrecisionReference
sampleAbsentMetadata.loc[sampleAbsentMetadata['Status'].str.match('Long Term Reference', na=False).astype(
bool), 'AssayRole'] = AssayRole.PrecisionReference
sampleAbsentMetadata.loc[:, 'LIMS Marked Missing'] = sampleAbsentMetadata['Status'].str.match('Missing',
na=False).astype(
bool)
# Remove duplicate columns (these will be appended with _x or _y)
cols = [c for c in sampleAbsentMetadata.columns if c[-2:] != '_y']
sampleAbsentMetadata = sampleAbsentMetadata[cols]
sampleAbsentMetadata.rename(columns=lambda x: x.replace('_x', ''), inplace=True)
self.sampleAbsentMetadata = sampleAbsentMetadata
# Rename values in Sample ID, special case for Study Pool, External Reference and Procedural Blank
if 'SampleType' in self.sampleMetadata.columns:
self.sampleMetadata.loc[(((self.sampleMetadata['Sample ID'] == 'nan') | (
self.sampleMetadata['Sample ID'] == '')) & (self.sampleMetadata[
'SampleType'] == SampleType.StudyPool)).tolist(), 'Sample ID'] = 'Study Pool Sample'
self.sampleMetadata.loc[(((self.sampleMetadata['Sample ID'] == 'nan') | (
self.sampleMetadata['Sample ID'] == '')) & (self.sampleMetadata[
'SampleType'] == SampleType.ExternalReference)).tolist(), 'Sample ID'] = 'External Reference Sample'
self.sampleMetadata.loc[(((self.sampleMetadata['Sample ID'] == 'nan') | (
self.sampleMetadata['Sample ID'] == '')) & (self.sampleMetadata[
'SampleType'] == SampleType.ProceduralBlank)).tolist(), 'Sample ID'] = 'Procedural Blank Sample'
self.sampleMetadata.loc[(self.sampleMetadata['Sample ID'] == 'nan').tolist(), 'Sample ID'] = 'Not specified'
self.sampleMetadata.loc[(self.sampleMetadata[
'Sample ID'] == '').tolist(), 'Sample ID'] = 'Present but undefined in the LIMS file'
# Metadata Available field is set to True
self.sampleMetadata.loc[merged_indices, 'Metadata Available'] = True
# Log
self.Attributes['Log'].append([datetime.now(), 'LIMS sample IDs matched from %s' % (pathToLIMSfile)])
def _matchDatasetToSubjectInfo(self, pathToSubjectInfoFile):
"""
Match the Sample IDs in :py:attr:`sampleMetadata` to the subject information mapped in the sample manifest file found at *subjectInfoFile*.
The column *Sample ID* in :py:attr:`sampleMetadata` is matched to *Sample ID* in the *Sampling Events* sheet
:param str pathToSubjectInfoFile: path to subject information file, an Excel file with sheets 'Subject Info' and 'Sampling Events'
"""
self.subjectInfo = pandas.read_excel(pathToSubjectInfoFile, sheet_name='Subject Info',
converters={'Subject ID': str})
cols = [c for c in self.subjectInfo.columns if c[:7] != 'Unnamed']
self.subjectInfo = self.subjectInfo[cols]
self.samplingEvents = pandas.read_excel(pathToSubjectInfoFile, sheet_name='Sampling Events',
converters={'Subject ID': str, 'Sampling ID': str})
cols = [c for c in self.samplingEvents.columns if c[:7] != 'Unnamed']
self.samplingEvents = self.samplingEvents[cols]
self.samplingEvents.rename(columns={'Sampling ID': 'Sample ID'}, inplace=True)
# Create one overall samplingInfo sheet - combine subjectInfo and samplingEvents for samples present in samplingEvents
self.samplingInfo = pandas.merge(self.samplingEvents, self.subjectInfo, left_on='Subject ID',
right_on='Subject ID', how='left', sort=False)
self.samplingInfo.rename(columns={'Sampling ID': 'Sample ID'}, inplace=True)
# Remove duplicate columns (these will be appended with _x or _y)
self.samplingInfo = removeDuplicateColumns(self.samplingInfo)
# Remove any rows which are just nans
self.samplingInfo = self.samplingInfo.loc[self.samplingInfo['Sample ID'].values != 'nan', :]
# Rename 'Sample Type' to 'Biofluid'
if hasattr(self.samplingInfo, 'Sample Type'):
self.samplingInfo.rename(columns={'Sample Type': 'Biofluid'}, inplace=True)
# Check no duplicates in sampleInfo
u_ids, u_counts = numpy.unique(self.samplingInfo['Sample ID'], return_counts=True)
if any(u_counts > 1):
warnings.warn('Check and remove (non-biofluid related) duplicates in sample manifest file')
# Match subjectInfo to sampleMetadata for samples with data ABSENT (i.e., samples in sampleAbsentMetadata)
if hasattr(self, 'sampleAbsentMetadata'):
self.sampleAbsentMetadata = pandas.merge(self.sampleAbsentMetadata, self.samplingInfo,
left_on='Sample ID', right_on='Sample ID', how='left',
sort=False)
# Remove duplicate columns (these will be appended with _x or _y)
cols = [c for c in self.sampleAbsentMetadata.columns if c[-2:] != '_y']
self.sampleAbsentMetadata = self.sampleAbsentMetadata[cols]
self.sampleAbsentMetadata.rename(columns=lambda x: x.replace('_x', ''), inplace=True)
self.sampleAbsentMetadata['SubjectInfoData'] = False
self.sampleAbsentMetadata.loc[self.sampleAbsentMetadata['Subject ID'].notnull(), 'SubjectInfoData'] = True
# Match subjectInfo to sampleMetdata for samples with data PRESENT
self.sampleMetadata = pandas.merge(self.sampleMetadata, self.samplingInfo, left_on='Sample ID',
right_on='Sample ID', how='left', sort=False)
# Remove duplicate columns (these will be appended with _x or _y)
cols = [c for c in self.sampleMetadata.columns if c[-2:] != '_y']
self.sampleMetadata = self.sampleMetadata[cols]
self.sampleMetadata.rename(columns=lambda x: x.replace('_x', ''), inplace=True)
self.sampleMetadata['SubjectInfoData'] = False
self.sampleMetadata.loc[self.sampleMetadata['Subject ID'].notnull(), 'SubjectInfoData'] = True
# Find samples present in sampleInfo but not in LIMS
info_butnotlims = self.samplingInfo.loc[
self.samplingInfo['Sample ID'].isin(self.limsFile['Sample ID']) == False, :]
if info_butnotlims.shape[0] != 0:
self.subjectAbsentMetadata = info_butnotlims.copy(deep=True)
self.Attributes['Log'].append([datetime.now(), 'Subject information matched from %s' % (pathToSubjectInfoFile)])
def __validateColumns(self, df, assay):
if (assay == 'NMR') and set(
['Sample Name', 'NMR Assay Name', 'Date', 'Comment[time]', 'Parameter Value[run order]',
'Parameter Value[sample batch]',
'Parameter Value[acquisition batch]', 'Parameter Value[instrument]']).issubset(df.columns):
return True
elif (assay == 'MS') and set(
['Sample Name', 'MS Assay Name', 'Date', 'Comment[time]', 'Parameter Value[run order]',
'Parameter Value[sample batch]',
'Parameter Value[instrument]']).issubset(df.columns):
return True
else:
return False
def _initialiseFromCSV(self, sampleMetadataPath):
"""
Initialise the object from the three csv outputs of :py:meth:`~nPYc.Dataset.exportDataset()`.
NOTE: This function assumes that the saved dataset was well formed with all the expected columns in the metadata tables.
:param str sampleMetadataPath: Path to the *Name_sampleMetadata.csv* table, the file names of the featureMetadata
and intensityData tables are inferred from the provided filename.
"""
##
# Determine object name and paths
##
(folderPath, fileName) = os.path.split(sampleMetadataPath)
objectName = re.match('(.*?)_sampleMetadata.csv', fileName).groups()[0]
intensityDataPath = os.path.join(folderPath, objectName + '_intensityData.csv')
featureMetadataPath = os.path.join(folderPath, objectName + '_featureMetadata.csv')
##
# Load tables
##
intensityData = numpy.loadtxt(intensityDataPath, dtype=float, delimiter=',')
featureMetadata = pandas.read_csv(featureMetadataPath, index_col=0)
sampleMetadata = pandas.read_csv(sampleMetadataPath, index_col=0)
##
# Fix up types
##
featureMetadata['Feature Name'] = featureMetadata['Feature Name'].astype(str)
sampleMetadata['Sample File Name'] = sampleMetadata['Sample File Name'].astype(str)
sampleMetadata['Acquired Time'] = sampleMetadata['Acquired Time'].apply(pandas.to_datetime)
sampleMetadata['Acquired Time'] = sampleMetadata['Acquired Time']
# If AssayRole or SampleType columns are present parse strings into enums
if 'AssayRole' in sampleMetadata.columns:
for role in AssayRole:
sampleMetadata.loc[sampleMetadata['AssayRole'].values == str(role), 'AssayRole'] = role
if 'SampleType' in sampleMetadata.columns:
for stype in SampleType:
sampleMetadata.loc[sampleMetadata['SampleType'].values == str(stype), 'SampleType'] = stype
return (objectName, intensityData, featureMetadata, sampleMetadata)
def _matchDatasetToISATAB(self, pathToISATABFile, filenameSpec=None, studyID = 1, assayID=1, assay='MS'):
"""
Match the Sample IDs in :py:attr:`sampleMetadata` to the subject and assay information in the ISATAB File.
The column *Sampling ID* in :py:attr:`sampleMetadata` is matched to *Sample Name* in the *ISATAB Study* sheet
:param str pathToISATABFile: path to the ISATAB File
:param int studyID: the Study index in the ISATAB File
:param int assayID: the Assay index in the ISATAB File
:param str assay: the assay type 'MS' or 'NMR'
"""
#if 'Dilution' in self.sampleMetadata.columns:
# self.sampleMetadata.drop(['Dilution'], axis=1, inplace=True)
if not (assay in ['MS','NMR']):
raise ValueError('assay should be either \'MS\' or \'NMR\'')
# Load ISATAB file
with open(os.path.join(pathToISATABFile,'i_Investigation.txt')) as fp:
isa_tab_record = isatab.load(fp)
# subject info === study
# limsFile info === assay
study1 = isa_tab_record.studies[studyID - 1] # get the 1st study
study_filename = study1.filename
subjectInfo = pandas.read_csv(os.path.join(pathToISATABFile, study_filename), sep='\t')
# We're only interested in these fields so keep them
subjectInfo = subjectInfo[
['Source Name', 'Characteristics[age]', 'Characteristics[gender]', 'Date', 'Comment[study name]',
'Characteristics[organism]', 'Characteristics[material role]', 'Characteristics[material type]',
'Sample Name']]
# rename Characteristics[material role] to Status
subjectInfo.rename(columns={'Characteristics[material role]': 'Status'}, inplace=True)
# rename these fields to something similar to excel sheet
subjectInfo.rename(columns={'Source Name': 'Subject ID'}, inplace=True)
subjectInfo.rename(columns={'Characteristics[age]': 'Age'}, inplace=True)
subjectInfo.rename(columns={'Characteristics[gender]': 'Gender'}, inplace=True)
subjectInfo.rename(columns={'Date': 'Sampling Date'}, inplace=True)
subjectInfo.rename(columns={'Characteristics[organism]': 'Organism'}, inplace=True)
subjectInfo.rename(columns={'Characteristics[material type]': 'Material Type'}, inplace=True)
subjectInfo.rename(columns={'Comment[study name]': 'Study'}, inplace=True)
# subjectInfo.rename(columns={'Characteristics[dilution factor]': 'Dilution'}, inplace=True)
# Enforce string type on Subject ID and Sampling ID columns
subjectInfo['Subject ID'] = subjectInfo['Subject ID'].astype('str')
subjectInfo['Sample Name'] = subjectInfo['Sample Name'].astype('str')
assay1 = study1.assays[assayID - 1] # get the assay
assay_filename = assay1.filename
# Read in ISATAB file
# The reason it's called limsFile is because this object is used in other modules such as reports
limsFile = pandas.read_csv(os.path.join(pathToISATABFile, assay_filename), sep='\t')
if not self.__validateColumns(limsFile, assay):
warnings.warn("One or more required fields are missing in your Assay table, results maybe unreliable!")
# rename Sample Name column to Sampling ID
if any(limsFile.columns.str.match('Sample Name')):
limsFile.rename(columns={'Sample Name': 'Sampling ID'}, inplace=True)
#rename fields according to assay type
if assay == 'NMR':
with open(os.path.join(toolboxPath(), 'StudyDesigns', 'ISATABFieldMappings','NMRFields.json')) as data_file:
nmrFieldDict = json.load(data_file)
limsFile.rename(columns = nmrFieldDict, inplace=True)
self.Attributes['Log'].append([datetime.now(), 'NMR Assay field names have been mapped into NPC field names' ])
else:
with open(os.path.join(toolboxPath(), 'StudyDesigns', 'ISATABFieldMappings','MSFields.json')) as data_file:
msFieldDict = json.load(data_file)
limsFile.rename(columns=msFieldDict, inplace=True)
self.Attributes['Log'].append([datetime.now(), 'MS Assay field names have been mapped into NPC field names' ])
#remove fields inserted by ISATAB and only keep the fields we're interested in
if assay == 'MS':
limsFile = limsFile[['Sampling ID','Assay data name','Dilution','Run Order','Acquisition Date','Acquisition Time','Instrument','Chromatography','Ionisation','Batch','Sample batch','Plate','Well','Correction Batch','Detector']]
else:
limsFile = limsFile[['Sampling ID','Assay data name','Run Order','Acquisition Date','Acquisition Time','Instrument','Batch','Sample batch']]
#self.Attributes['DataPath'] = limsFile['Data Path'][0]
#merge the two fields 'Acquisition Date','Acquisition Time' into one field 'Acquired Time'
#a few lines down we make sure 'Acquired Time' is a proper date/time field that pandas is happy with!
limsFile['Acquired Time'] = limsFile[['Acquisition Date', 'Acquisition Time']].apply(lambda x: ' '.join(x), axis=1)
limsFile.drop(['Acquisition Date', 'Acquisition Time'], axis=1, inplace=True)
self.Attributes['Log'].append([datetime.now(), '\'Acquisition Date\', \'Acquisition Time\' read from ISATAB have been merged into \'Acquired Time\' and removed' ])
# retrieve the material role or Sample Type or Status of each sample in the assay
# one way to do this is by merging the assay and study based on sample name!
self.limsFile = pandas.merge(limsFile, subjectInfo, left_on='Sampling ID', right_on='Sample Name')
# Remove duplicate columns (these will be appended with _x or _y)
self.limsFile = removeDuplicateColumns(self.limsFile)
# this is becuase when NMR metadata is read from raw data, it already contains a field 'Acquired Time'
# remove this field so we don't confuse it with the one read from isatab
# Log it!
if any(self.sampleMetadata.columns.str.match('Acquired Time')):
self.sampleMetadata.drop('Acquired Time', axis=1, inplace=True)
self.Attributes['Log'].append(
[datetime.now(), 'Acquired Time has been read from ISATAB instead of raw data'])
# this is becuase when NMR metadata is read from raw data, it already contains a field 'Run Order'
# remove this field so we don't confuse it with the one read from isatab
if any(self.sampleMetadata.columns.str.match('Run Order')):
self.sampleMetadata.drop('Run Order', axis=1, inplace=True)
self.Attributes['Log'].append([datetime.now(), 'Run Order has been read from ISATAB instead of raw data'])
# Prepare data
self.sampleMetadata.loc[:, 'Sample Base Name'] = self.sampleMetadata['Sample File Name']
self.sampleMetadata.loc[:, 'Sample Base Name Normalised'] = self.sampleMetadata['Sample Base Name'].str.lower()
# Enforce string type on 'Sampling ID'
sampleIDmask = pandas.isnull(self.limsFile['Sampling ID']) == False
self.limsFile.loc[sampleIDmask, 'Sampling ID'] = self.limsFile.loc[sampleIDmask, 'Sampling ID'].astype('str')
self.limsFile.loc[:, 'Assay data name Normalised'] = self.limsFile['Assay data name'].str.lower()
# Match limsFile to sampleMetdata for samples with data PRESENT
self.sampleMetadata = | pandas.merge(self.limsFile,self.sampleMetadata, left_on='Assay data name Normalised', right_on='Sample Base Name Normalised', how='right', sort=False) | pandas.merge |
from datetime import date
from pprint import pprint
from typing import List, Any, Union
import pandas as pd
from pandas import DataFrame
import Common.Measures.TradingDateTimes.PyDateTimes as PyDays
import Common.Readers.TickerNameList as PyTickers
import Common.Readers.YahooTicker as PyTicker
from Common.TimeSeries import AlphaVantageManager
from Common.WebScrappers import YahooScrapper, FmpScrapper, StockRowScrapper
from Common.Readers.YahooTicker import YahooTicker
from Common.Readers.Engine import YahooFinancialEngine
from Common.Readers.Engine.YahooFinanceEngine import YahooFinanceEngine
from Common.Readers.Engine.FinVizEngine import FinVizEngine
from Common.Readers import YahooPdrManager
from Common.TechnicalIndicators.MovingAverageConvergenceDivergenceManager import MovingAverageConvergenceDivergenceManager
from Common.TechnicalIndicators.AverageTrueRangeManager import AverageTrueRangeManager
from Common.TechnicalIndicators import BollingerBandsManager
from Common.TechnicalIndicators.RelativeStrengthIndexManager import RelativeStrengthIndexManager
from Common.TechnicalIndicators.OnBalanceVolumeManager import OnBalanceVolumeManager
from Common.TechnicalIndicators import SlopesManager
from Common.TechnicalIndicators.RenkoManager import RenkoManager
from Common.TechnicalIndicators.AverageDirectionalIndexManager import AverageDirectionalIndexManager
from Common.PerformanceIndicators import SortinoRatioIndicator
from Common.PerformanceIndicators.SharpeRatioIndicator import SharpeRatioIndicator
from Common.PerformanceIndicators import MaximumDrawDownManager
from Common.PerformanceIndicators.CumulativeAnnualGrowthRateIndicator import CumulativeAnnualGrowthRateIndicator
from Common.PerformanceIndicators.CalmarRatioIndicator import CalmarRatioIndicator
to_day: date = PyDays.DateTimeNow()
to_day_s: str = to_day.strftime('%Y-%m-%d')
ya_day: date = PyDays.DateTime52WeekAgo()
ya_day_s: str = ya_day.strftime('%Y-%m-%d')
alpha_key = "Data/alphavantage-key.txt"
# extracting stock data (historical close price) for the stocks identified
ticker_list = PyTickers.PortfolioDjiStocks()
# ticker_list = PyTickers.NameList50()
counter: int = 0
pdr_adjclose_df: DataFrame = pd.DataFrame()
financial_df: DataFrame = pd.DataFrame()
alpha_close_df: DataFrame = pd.DataFrame()
drop_list: List[Union[str, Any]] = []
py_tickers: List[YahooTicker] = []
new_tickers = ticker_list
yTicker = PyTicker.YahooTicker('NYSE', ticker_list[1], 26, 0)
financeManager: YahooFinanceEngine = YahooFinanceEngine(yTicker)
finVizManager: FinVizEngine = FinVizEngine(yTicker)
financialManager: YahooFinancialEngine = YahooFinancialEngine(yTicker)
yahooPdrManager: YahooPdrManager = YahooPdrManager(yTicker, ya_day, to_day)
print('MACD')
macdManager: MovingAverageConvergenceDivergenceManager = MovingAverageConvergenceDivergenceManager(yahooPdrManager.YahooData, ya_day, to_day)
pprint(macdManager.IndicatorDf.tail().iloc[:, -5:])
print('ATR')
atrManager: AverageTrueRangeManager = AverageTrueRangeManager(yahooPdrManager.YahooData, 20)
pprint(atrManager.IndicatorDf.tail().iloc[:, -5:])
print('BollingerBands')
babeManager: BollingerBandsManager = BollingerBandsManager(yahooPdrManager.YahooData)
pprint(babeManager.IndicatorDf.tail().iloc[:, -5:])
print('RSI')
rsiManager: RelativeStrengthIndexManager = RelativeStrengthIndexManager(yahooPdrManager.YahooData)
pprint(rsiManager.IndicatorDf.tail().iloc[:, -5:])
print('OBV')
obvManager: OnBalanceVolumeManager = OnBalanceVolumeManager(yahooPdrManager.YahooData)
pprint(obvManager.IndicatorDf.head().iloc[:, -5:])
print('Slopes')
slopesManager: SlopesManager = SlopesManager(yahooPdrManager.YahooData)
pprint(slopesManager.IndicatorDf.head().iloc[:, -5:])
print('Renko')
renkoManager: RenkoManager = RenkoManager(yahooPdrManager.YahooData)
pprint(renkoManager.IndicatorDf.head().iloc[:, -5:])
print('ADX')
adxManager: AverageDirectionalIndexManager = AverageDirectionalIndexManager(yahooPdrManager.YahooData)
pprint(adxManager.IndicatorDf.tail().iloc[:, -5:])
print('SortinoRatio')
sortinoRatioManage: SortinoRatioIndicator = SortinoRatioIndicator(yahooPdrManager.YahooData)
print(sortinoRatioManage.KPIdf)
print('SharpeRatio')
sharpeRatioManager: SharpeRatioIndicator = SharpeRatioIndicator(yahooPdrManager.YahooData)
print(sharpeRatioManager.KPIdf)
print('MaximumDrawDown')
mddManager: MaximumDrawDownManager = MaximumDrawDownManager(yahooPdrManager.YahooData)
print(mddManager.KPIdf)
print('CumulativeAnnualGrowthRate')
cagrManager: CumulativeAnnualGrowthRateIndicator = CumulativeAnnualGrowthRateIndicator(yahooPdrManager.YahooData)
print(cagrManager.KPIdf)
print('CalmarRatio')
crManager: CalmarRatioIndicator = CalmarRatioIndicator(yahooPdrManager.YahooData)
print(crManager.KPIdf)
avManager: AlphaVantageManager = AlphaVantageManager(alpha_key, yTicker)
yahooScrapper: YahooScrapper = YahooScrapper(yTicker)
pprint(yahooScrapper.BalanceSheetUrl)
pprint(yahooScrapper.CashFlowUrl)
pprint(yahooScrapper.FinancialUrl)
pprint(yahooScrapper.KeyStatsUrl)
fmpScrapper: FmpScrapper = FmpScrapper(yTicker)
pprint(fmpScrapper.BalanceSheetUrl)
pprint(fmpScrapper.CashFlowUrl)
pprint(fmpScrapper.FinancialUrl)
pprint(fmpScrapper.KeyStatsUrl)
pprint(fmpScrapper.IncomeUrl)
stockRowScrapper: StockRowScrapper = StockRowScrapper(yTicker)
pprint(stockRowScrapper.BalanceSheetUrl)
pprint(stockRowScrapper.BalanceSheetCsv)
# removing stocks whose data has been extracted from the ticker list
while len(new_tickers) != 0 and counter <= 5:
new_tickers = [j for j in new_tickers if
j not in drop_list]
for new_ticker in new_tickers:
try:
y_ticker = PyTicker.YahooTicker('NYSE', new_ticker, 52, 0)
# pdr
# pdr_df = get_data_yahoo(new_ticker, ya_day, to_day)
# pdr_df.dropna(inplace=True)
# pdr_df = getPdrDataFrame(new_ticker, ya_day, to_day)
# pdr_df = y_ticker.PdrDf
# pdr_adjclose_df[new_ticker] = pdr_df["Adj Close"]
pdr_adjclose_df[new_ticker] = y_ticker.PdrDf["Adj Close"]
# yahoo financial
# new_financial: YahooFinancials = YahooFinancials(new_ticker)
# new_financial: YahooFinancials = getYfDic(new_ticker)
# new_financial: YahooFinancials = y_ticker.FinancialDf
# new_dic = new_financial.get_historical_price_data(ya_day_s, to_day_s, "daily")[new_ticker]
# new_dic = y_ticker.FinancialDf.get_historical_price_data(ya_day_s, to_day_s, "daily")[new_ticker]
fm = YahooFinancialEngine(y_ticker)
new_dic_field = fm.GetDailyHistoricalDataPrices(ya_day, to_day)
new_data_frame = | pd.DataFrame(new_dic_field) | pandas.DataFrame |
from collections import OrderedDict
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import Index, MultiIndex, date_range
import pandas.util.testing as tm
def test_constructor_single_level():
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
codes=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels():
msg = "non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=[], codes=[])
msg = "Must pass both levels and codes"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=[])
with pytest.raises(TypeError, match=msg):
MultiIndex(codes=[])
def test_constructor_nonhashable_names():
# GH 20527
levels = [[1, 2], ['one', 'two']]
codes = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = (['foo'], ['bar'])
msg = r"MultiIndex\.name must be a hashable type"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=levels, codes=codes, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
with pytest.raises(TypeError, match=msg):
mi.rename(names=renamed)
# With .set_names()
with pytest.raises(TypeError, match=msg):
mi.set_names(names=renamed)
def test_constructor_mismatched_codes_levels(idx):
codes = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
msg = "Length of levels and codes must be the same"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=levels, codes=codes)
length_error = (r"On level 0, code max \(3\) >= length of level \(1\)\."
" NOTE: this index is in an inconsistent state")
label_error = r"Unequal code lengths: \[4, 2\]"
code_value_error = r"On level 0, code value \(-2\) < -1"
# important to check that it's looking at the right thing.
with pytest.raises(ValueError, match=length_error):
MultiIndex(levels=[['a'], ['b']],
codes=[[0, 1, 2, 3], [0, 3, 4, 1]])
with pytest.raises(ValueError, match=label_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, 0, 0, 0], [0, 0]])
# external API
with pytest.raises(ValueError, match=length_error):
idx.copy().set_levels([['a'], ['b']])
with pytest.raises(ValueError, match=label_error):
idx.copy().set_codes([[0, 0, 0, 0], [0, 0]])
# test set_codes with verify_integrity=False
# the setting should not raise any value error
idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]],
verify_integrity=False)
# code value smaller than -1
with pytest.raises(ValueError, match=code_value_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, -2], [0, 0]])
def test_na_levels():
# GH26408
# test if codes are re-assigned value -1 for levels
# with mising values (NaN, NaT, None)
result = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[-1, -1, -1, -1, 3, 4]])
tm.assert_index_equal(result, expected)
result = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[-1, -1, 1, -1, 3, -1]])
tm.assert_index_equal(result, expected)
# verify set_levels and set_codes
result = MultiIndex(
levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]]).set_levels(
[[np.nan, 's', pd.NaT, 128, None]])
tm.assert_index_equal(result, expected)
result = MultiIndex(
levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[1, 2, 2, 2, 2, 2]]).set_codes(
[[0, -1, 1, 2, 3, 4]])
tm.assert_index_equal(result, expected)
def test_labels_deprecated(idx):
# GH23752
with tm.assert_produces_warning(FutureWarning):
MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
with tm.assert_produces_warning(FutureWarning):
idx.labels
def test_copy_in_constructor():
levels = np.array(["a", "b", "c"])
codes = np.array([1, 1, 2, 0, 0, 1, 1])
val = codes[0]
mi = MultiIndex(levels=[levels, levels], codes=[codes, codes],
copy=True)
assert mi.codes[0][0] == val
codes[0] = 15
assert mi.codes[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
# ----------------------------------------------------------------------------
# from_arrays
# ----------------------------------------------------------------------------
def test_from_arrays(idx):
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(idx):
# GH 18434
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=idx.names)
tm.assert_index_equal(result, idx)
# invalid iterator input
msg = "Input must be a list / sequence of array-likes."
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(0)
def test_from_arrays_tuples(idx):
arrays = tuple(tuple(np.asarray(lev).take(level_codes))
for lev, level_codes in zip(idx.levels, idx.codes))
# tuple of tuples as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
def test_from_arrays_index_series_datetimetz():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta():
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period():
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
| tm.assert_index_equal(result, result2) | pandas.util.testing.assert_index_equal |
import pandas as pd
data = | pd.read_csv("2018_Central_Park_Squirrel_Census_-_Squirrel_Data.csv") | pandas.read_csv |
from myutils.utils import getConnection, cronlog
import pandas as pd
import numpy as np
import datetime
import requests
class TestRequest:
def __init__(self, url, method='GET', META=None, postdata=None):
self.method = method
u = url.split('?')
self.path_info = u[0]
self.META = META or {}
self.GET = {}
if len(u)>1:
for x in u[1].split('&'):
y = x.split('=')
if len(y)==1:
self.GET[x] = ''
else:
self.GET[y[0]] = y[1]
self.PUT = postdata
def get_full_path(self):
return url
conn, cur = getConnection()
if False:
s = """
DROP TABLE IF EXISTS price_function;
CREATE TABLE price_function (
id smallserial PRIMARY KEY
, date DATE NOT NULL
, slope FLOAT(8) NOT NULL
, intercept FLOAT(8) NOT NULL
, r FLOAT(8) NOT NULL
, created_on TIMESTAMP NOT NULL
);
"""
cur.execute(s)
conn.commit()
if False:
s = """
INSERT INTO price_function (date, slope, intercept, r, created_on)
VALUES
('2020-07-05', 3, 2.8, 0.9, CURRENT_TIMESTAMP),
('2020-07-04', 2., 2.9, 0.7, CURRENT_TIMESTAMP);
"""
cur.execute(s)
conn.commit()
s = 'select * from price_function;'
cur.execute(s)
list_tables = cur.fetchall()
print(list_tables)
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS price_forecast;
CREATE TABLE price_forecast (
id serial PRIMARY KEY
, datetime TIMESTAMP NOT NULL
, demand Float(8) NOT NULL
, solar Float(8) NOT NULL
, wind Float(8) NOT NULL
, price Float(4) NOT NULL
, created_on TIMESTAMP NOT NULL
);
"""
cur.execute(s)
conn.commit()
if False:
s = """
DROP TABLE IF EXISTS testing;
CREATE TABLE testing (
id serial PRIMARY KEY
, created_on TIMESTAMP NOT NULL
); """
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_periods;
CREATE TABLE sm_periods (
period_id serial PRIMARY KEY
, period Char(16) not null
, local_date Date not null
, local_time char(5) not null
, timezone_adj smallint not null
);
"""
cur.execute(s)
conn.commit()
df_idx = pd.date_range(datetime.datetime(2019,1,1), datetime.datetime(2020,10,1), freq='30min')
df_idx_local = df_idx.tz_localize('UTC').tz_convert('Europe/London')
df = pd.DataFrame(index=df_idx)
df['period'] = df_idx.strftime('%Y-%m-%d %H:%M')
df['local_date'] = df_idx_local.strftime('%Y-%m-%d')
df['local_time'] = df_idx_local.strftime('%H:%M')
df['timezone_adj'] = df_idx_local.strftime('%z').str[0:3].astype(int)
df.reset_index(inplace=True)
start = """
INSERT INTO sm_periods (period_id, period, local_date, local_time, timezone_adj)
VALUES
"""
s=""
for i, j in df.iterrows():
s+= "({},'{}', '{}', '{}', {}),".format(i, j['period'], j['local_date'],j['local_time'], j['timezone_adj'])
if (i+1)%1000==0:
print('done: {}'.format(i+1))
cur.execute(start + s[:-1] + ';')
conn.commit()
s=""
print('done: {}'.format(i+1))
cur.execute(start + s[:-1] + ';')
conn.commit()
s=""
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_accounts;
CREATE TABLE sm_accounts (
account_id serial PRIMARY KEY
, type_id smallint not null
, first_period varChar(16) not null
, last_period varChar(16) not null
, last_updated TIMESTAMP not null
, hash varChar(64) not null
, region varChar(1)
, source_id smallint not null
);
"""
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_quantity;
CREATE TABLE sm_quantity (
id serial PRIMARY KEY
, account_id integer not null
, period_id integer not null
, quantity float(8) not null
);
"""
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_hh_variables;
CREATE TABLE sm_hh_variables (
var_id serial PRIMARY KEY
, var_name varchar(32) not null
, var_type varchar(32));
"""
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_d_variables;
CREATE TABLE sm_d_variables (
var_id serial PRIMARY KEY
, var_name varchar(32) not null
, var_type varchar(32));
"""
cur.execute(s)
conn.commit()
if False: # Creates new hh tariff variables in sm_hh_variables and sm_tariffs
product = 'AGILE-OUTGOING-19-05-13'
type_id=2
s = f"""
delete from sm_hh_variables where var_name like '{product}%';
delete from sm_tariffs where product='{product}';
"""
cur.execute(s)
conn.commit()
for region in ['A','B','C','D','E','F','G','H','J','K','L','M','N','P']:
s = f"""
INSERT INTO sm_hh_variables (var_name) values ('{product}-{region}');
"""
cur.execute(s)
conn.commit()
s = f"select var_id from sm_hh_variables where var_name='{product}-{region}';"
cur.execute(s)
var_id = cur.fetchone()[0]
conn.commit()
s = f"""
INSERT INTO sm_tariffs (type_id, product, region, granularity_id, var_id) values
({type_id}, '{product}', '{region}', 0, {var_id});
"""
cur.execute(s)
conn.commit()
START='201901010000'
if False: #Inserts initial prices into hh tariff variables
import requests
idx = pd.date_range(START, '202101010000', freq='30T')
df = pd.DataFrame()
df['timestamp'] = idx
df = pd.DataFrame(idx, columns=['timestamp'])
for region in ['B','C','D','E','F','G','H','J','K','L','M','N','P']:
tariff = 'AGILE-OUTGOING-19-05-13'
url = ('https://api.octopus.energy/v1/products/{}/' +
'electricity-tariffs/E-1R-{}-{}/standard-unit-rates/' +
'?period_from={}Z&period_to={}Z&page_size=15000')
url = url.format(tariff, tariff, region,
df.timestamp.iloc[0].strftime('%Y-%m-%dT%H:%M'),
df.timestamp.iloc[-1].strftime('%Y-%m-%dT%H:%M'))
r = requests.get(url)
dfs = []
dfs.append(pd.DataFrame(r.json()['results'])[['valid_from','value_exc_vat']])
while r.json()['next'] is not None:
r = requests.get(r.json()['next'])
dfs.append(pd.DataFrame(r.json()['results'])[['valid_from','value_exc_vat']])
if len(dfs)>30:
raise Exception
dfs = pd.concat(dfs)
dfs['timestamp'] = pd.DatetimeIndex(dfs.valid_from.str[:-1])
dfs = df.merge(right=dfs, how='left', on='timestamp')
dfs = dfs[dfs.value_exc_vat.notna()]
s = f"select var_id from sm_hh_variables where var_name='{tariff}-{region}';"
cur.execute(s)
var_id = cur.fetchone()[0]
conn.commit()
print(f'{var_id} {tariff} {region}' )
s = """
delete from sm_hh_variable_vals where var_id={};
"""
s = s.format(var_id)
cur.execute(s)
conn.commit()
s = """
INSERT INTO sm_hh_variable_vals (var_id, period_id, value) values
"""
s = s.format(var_id)
for i, j in dfs.iterrows():
s+= " ({}, {}, {}),".format(var_id, i, j.value_exc_vat)
s = s[:-1] + ';'
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_hh_variable_vals;
CREATE TABLE sm_hh_variable_vals (
id serial primary key
, var_id integer not null
, period_id integer not null
, value float(8) not null);
"""
cur.execute(s)
conn.commit()
if False:
conn.commit()
s = """
DROP TABLE IF EXISTS sm_d_variable_vals;
CREATE TABLE sm_d_variable_vals (
id serial primary key
, var_id integer not null
, local_date date not null
, value float(8) not null);
"""
cur.execute(s)
conn.commit()
from myutils.utils import loadDataFromDb
if False: #Creates daily tracker variables
product = 'SILVER-2017-1'
for region in ['A','B','C','D','E','F','G','H','J','K','L','M','N','P']:
s = f"""
insert into sm_d_variables (var_name) values ('{product}-{region}') returning var_id; """
var_id = loadDataFromDb(s)[0][0]
print(var_id)
s = f"""
insert into sm_tariffs (product, region, var_id, type_id, granularity_id) values
('{product}', '{region}', {var_id}, 1, 1); """
loadDataFromDb(s)
if False:
product = 'SILVER-2017-1'
for region in ['A','B','C','D','E','F','G','H','J','K','L','M','N','P']:
s = f"select var_id from sm_variables where product='{product}' and region='{region}' ;"
var_id = loadDataFromDb(s)[0][0]
r = requests.get(f'https://octopus.energy/api/v1/tracker/G-1R-SILVER-2017-1-{region}/daily/past/540/1/')
dates = [x['date'] for x in r.json()['periods']]
prices = [x['unit_rate'] for x in r.json()['periods']]
d = pd.Series(prices, index=dates)
d = d[:datetime.date.today().strftime('%Y-%m-%d')]
d = d/1.05
d = d.round(2)
s = 'insert into sm_d_variable_vals (var_id, local_date, value) values '
for i, j in d.iteritems():
s+= f"({var_id}, '{i}', {j}),"
s = s[:-1]+';'
loadDataFromDb(s)
print(region)
if False:
conn.commit()
import requests
idx = pd.date_range(START, '202101010000', freq='30T')
df = pd.DataFrame()
df['timestamp'] = idx
df = | pd.DataFrame(idx, columns=['timestamp']) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# four representative days in each season
winter_day = '01-15'
spring_day = '04-15'
summer_day = '07-15'
fall_day = '10-15'
# define a function to plot household profile and battery storage level
def plot_4days(mode, tmy_code, utility, year, c_cost):
df = pd.read_csv('/Users/jiajiazheng/Box/Suh\'s lab/GSRs/Jiajia/3-Residential Solar-plus-storage/Results/'
'optimized/minCost_%(mode)s_%(year)s_cc_%(c_cost)s/'
'optimal_minCost_%(tmy_code)s_%(utility)s_%(year)s_cc_%(c_cost)s.csv'
% {'year': year, 'mode': mode, 'tmy_code': tmy_code, 'utility': utility, 'c_cost': c_cost},
index_col=0)
df.rename(columns={'Time': 'Hour'}, inplace=True)
s = df['Hour'].str.split()
df['Hour'] = pd.to_datetime(s.str[0], format="%m/%d") + pd.to_timedelta(s.str[1].str.split(':').str[0] + ' hours')
df['Hour'] = df['Hour'].dt.strftime('%m-%d %H:%M:%S')
df['Hour'] = str(year) + '-' + df['Hour']
df['Hour'] = pd.to_datetime(df['Hour'])
df['batt_char_disc'] = np.where(df['p_char'] == 0, df['p_disc'],
df['p_char'] + df['e_loss']) # curve of battery charging and discharging
year = str(year)
# subset the data frame to each of the four representative days
df_spring = df[(df['Hour'] >= pd.to_datetime(year + '-' + spring_day + ' ' + '00:00:00')) &
(df['Hour'] <= pd.to_datetime(year + '-' + spring_day + ' ' + '23:00:00'))]
df_summer = df[(df['Hour'] >= pd.to_datetime(year + '-' + summer_day + ' ' + '00:00:00')) &
(df['Hour'] <= | pd.to_datetime(year + '-' + summer_day + ' ' + '23:00:00') | pandas.to_datetime |
from numpy import dtype
import pandas as pd
import logging
import json
from nestshredder.pyshred_core import _shred_recursive, pad_dict_list
from nestshredder.pyshred_util import check_arguments
def shred_json(path_or_buf,target_folder_path,object_name,batch_ref=None,orient=None,dtype=None,convert_axes=None,convert_dates=True,keep_default_dates=True,precise_float=False,date_unit=None,encoding=None,encoding_errors='strict',lines=False,chunksize=None,compression='infer',nrows=None,storage_options=None,output_method=None):
check_arguments(target_folder_path=target_folder_path,object_name=object_name,batch_ref=batch_ref)
try:
json_df = pd.read_json(path_or_buf,orient=orient,dtype=dtype,convert_axes=convert_axes,convert_dates=convert_dates,keep_default_dates=keep_default_dates,precise_float=precise_float,date_unit=date_unit,encoding=encoding,encoding_errors=encoding_errors,lines=lines,chunksize=chunksize,compression=compression,nrows=nrows,storage_options=storage_options)
shred_outcome = _shred_recursive(json_df,target_folder_path,object_name,object_name,object_name,batch_ref)
except Exception as e:
if str(e) == 'If using all scalar values, you must pass an index':
new_list = []
try:
with open(path_or_buf) as json_file:
data = json.load(json_file)
new_list.append(data)
json_df = pd.DataFrame.from_dict(new_list)
shred_outcome = _shred_recursive(json_df,target_folder_path,object_name,object_name,object_name,batch_ref,output_method)
except Exception as e:
if str(e)[0:12] == 'expected str':
path_or_buf.seek(0)
data = json.loads(path_or_buf.read())
new_list.append(data)
json_df = pd.DataFrame.from_dict(new_list)
shred_outcome = _shred_recursive(json_df,target_folder_path,object_name,object_name,object_name,batch_ref,output_method)
else:
shred_outcome = str(e)
logging.error(shred_outcome)
return
elif str(e) == 'All arrays must be of the same length':
new_list = []
try:
with open(path_or_buf) as json_file:
data = json.load(json_file)
new_list.append(data)
padded_list = pad_dict_list(new_list,'n/a')
json_df = pd.DataFrame.from_dict(padded_list)
shred_outcome = _shred_recursive(json_df,target_folder_path,object_name,object_name,object_name,batch_ref,output_method)
except Exception as e:
# logging.info(str(e)[0:12])
if str(e)[0:12] == 'expected str':
path_or_buf.seek(0)
data = json.loads(path_or_buf.read())
new_list.append(data)
json_df = | pd.DataFrame.from_dict(new_list) | pandas.DataFrame.from_dict |
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo = pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
oc2['DOB'] = | pd.to_datetime(oc2['DOB'], format='%d/%m/%Y', errors='coerce') | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# <b>Python Scraping of Book Information</b>
# In[1]:
get_ipython().system('pip install bs4')
# In[2]:
get_ipython().system('pip install splinter')
# In[3]:
get_ipython().system('pip install webdriver_manager')
# In[1]:
# Setup splinter
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import time
import pandas as pd
import requests
# In[ ]:
# In[42]:
# executable_path = {'executable_path': ChromeDriverManager().install()}
# browser = Browser('chrome', **executable_path, headless=False)
# url = 'http://books.toscrape.com/'
# browser.visit(url)
# for x in range(50):
# html = browser.html
# soup = BeautifulSoup(html, 'html.parser')
# articles = soup.find_all('article', class_='product_pod')
# for article in articles:
# h3 = article.find("h3")
# link = h3.find("a")
# href = link["href"]
# title = link["title"]
# print("----------")
# print(title)
# url = "http://books.toscrape.com/" + href
# browser.visit(url)
# try:
# current_page = current_page + 1
# web_page_url = f"https://books.toscrape.com/catalogue/category/books_1/page-{current_page}.html"
# browser.visit(web_page_url)
# browser.links.find_by_partial_text("next").click()
# print('It worked')
# except:
# print("Scraping Complete")
# browser.quit()
# In[57]:
# executable_path = {'executable_path': ChromeDriverManager().install()}
# browser = Browser('chrome', **executable_path, headless=False)
# pageNumber= pageNumber + 1
# url = 'http://books.toscrape.com/'
# pageUrl = f'http://books.toscrape.com/catalogue/page-{pageNumber}.html'
# browser.visit(url)
# html = browser.html
# soup = BeautifulSoup(html, 'html.parser')
# for x in range(20):
# html = browser.html
# soup = BeautifulSoup(html, 'html.parser')
# articles = soup.find_all('article', class_='product_pod')
# for article in articles:
# h3 = article.find("h3")
# link = h3.find("a")
# href = link["href"]
# title = link["title"]
# print("----------")
# print(title)
# #time.sleep(1)
# url = "http://books.toscrape.com/" + href
# browser.visit(url)
# try:
# browser.visit(pageUrl)
# browser.links.find_by_partial_text("next").click()
# except:
# print("Scraping Complete")
# browser.quit()
# In[2]:
#Working through each book and page
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
pageUrl=""
for i in range(1,3):
if(i == 1):
pageUrl = f"https://books.toscrape.com/index.html"
else:
pageUrl = f'https://books.toscrape.com/catalogue/page-{i}.html'
print(pageUrl)
browser.visit(pageUrl)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
articles = soup.find_all('article', class_='product_pod')
for article in articles:
h3 = article.find("h3")
link = h3.find("a")
href = link["href"]
title = link["title"]
print("----------")
print(title)
#time.sleep(1)
url = "http://books.toscrape.com/" + href
browser.visit(url)
browser.quit()
# In[97]:
#Proof of concept using books.toscrape.com
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
pageUrl=""
for i in range(1,2):
if(i == 1):
pageUrl = f"https://books.toscrape.com/index.html"
else:
pageUrl = f'https://books.toscrape.com/catalogue/page-{i}.html'
print(pageUrl)
browser.visit(pageUrl)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
articles = soup.find_all('article', class_='product_pod')
for article in articles:
h3 = article.find("h3")
link = h3.find("a")
href = link["href"]
title = link["title"]
print("----------")
print(title)
#time.sleep(1)
url = "http://books.toscrape.com/" + href
browser.visit(url)
res=requests.get(url)
soup = BeautifulSoup(res.content,'lxml')
table = soup.find_all('table')[0]
df = pd.read_html(str(table))[0]
print(df)
browser.quit()
# In[20]:
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
pageUrl=""
table_of_tables = []
for i in list(50,75,100):
table_on_page = []
if(i == 25):
pageUrl = f"https://books.toscrape.com/index.html"
else:
pageUrl = f'https://books.toscrape.com/catalogue/page-{i}.html'
print(pageUrl)
browser.visit(pageUrl)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
articles = soup.find_all('article', class_='product_pod')
for article in articles:
h3 = article.find("h3")
link = h3.find("a")
href = link["href"]
title = link["title"]
print("----------")
print(title)
#time.sleep(1)
url = "http://books.toscrape.com/" + href
browser.visit(url)
res=requests.get(url)
soup = BeautifulSoup(res.content,'lxml')
table = soup.find_all('table')[0]
table_on_page.append(table)
# table_of_tables.append(table_on_page)
df = pd.read_html(str(table))[0]
print(df)
browser.quit()
# In[61]:
# In[48]:
df = pd.DataFrame(table_on_page)
df.to_csv('books2scrape.csv')
# In[52]:
df_to_clean=pd.read_csv('books2scrape.csv')
# In[64]:
df_columns_cleaned = df_to_clean.drop(columns=['Unnamed: 0','0','2','4','6','8','10','12','14'])
# In[71]:
df_columns_cleaned.columns
# In[66]:
df_columns_cleaned.head()
# In[78]:
html_chars = ["<tr>","\n","</th>","<th>","<td>","</td>",
"</tr>"]
for char in html_chars:
df_columns_cleaned['1'] = df_columns_cleaned['1'].str.replace(char, ' ')
df_columns_cleaned['3'] = df_columns_cleaned['3'].str.replace(char, ' ')
df_columns_cleaned['5'] = df_columns_cleaned['5'].str.replace(char, ' ')
df_columns_cleaned['7'] = df_columns_cleaned['7'].str.replace(char, ' ')
df_columns_cleaned['9'] = df_columns_cleaned['9'].str.replace(char, ' ')
df_columns_cleaned['11'] = df_columns_cleaned['11'].str.replace(char, ' ')
df_columns_cleaned['13'] = df_columns_cleaned['13'].str.replace(char, ' ')
# In[79]:
df_columns_cleaned
# In[290]:
# executable_path = {'executable_path': ChromeDriverManager().install()}
# browser = Browser('chrome', **executable_path, headless=False)
# pageUrl=""
# table_of_tables = []
# for i in range(1):
# table_on_page = []
# pageUrl = f'ttps://www.hpb.com/books/best-sellers/784-classics?&size=350&&&'
# print(pageUrl)
# browser.visit(pageUrl)
# html = browser.html
# soup = BeautifulSoup(html, 'html.parser')
# articles = soup.find_all('article', class_='product_pod')
# for article in articles:
# time.sleep(randint(1,3))
# section = article.find("section")
# link = section.find("a")
# href = link["href"]
# print(href)
# title = link["title"]
# print("----------")
# print(title)
# time.sleep(randint(1,3))
# url = href
# browser.visit(url)
# res=requests.get(url)
# time.sleep(randint(3,5))
# soup = BeautifulSoup(res.content,'lxml')
# table = soup.find_all('table')[0]
# table_on_page.append(table)
# # table_of_tables.append(table_on_page)
# df = pd.read_html(str(table))[0]
# print(df)
# browser.quit()
# In[198]:
#https://stackoverflow.com/questions/31064981/python3-error-initial-value-must-be-str-or-none-with-stringio
import io
# In[267]:
#grab data from https://citylights.com/greek-roman/
import random
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
table_of_data = []
pageUrl=""
for i in range(1,7):
data_on_page = []
if(i == 1):
pageUrl = f"https://citylights.com/greek-roman/"
else:
pageUrl = f'https://citylights.com/greek-roman/page/{i}/'
print(pageUrl)
time.sleep(1)
browser.visit(pageUrl)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
#https://stackoverflow.com/questions/52842778/find-partial-class-names-in-spans-with-beautiful-soup
articles = soup.find_all('li', attrs={'class': lambda e: e.startswith('product type-product post') if e else False})
for article in articles:
time.sleep(1)
link = article.find('a')
href = link["href"]
print("----------")
print(href)
url = href
browser.visit(url)
time.sleep(randint(1,2))
res=requests.get(url)
soup = BeautifulSoup(res.content,'lxml')
data = soup.find_all('div', attrs={'class': 'detail-text mb-50'})[0].get_text()
data_on_page.append(data)
table_of_data.append(data_on_page)
df = pd.DataFrame(table_of_data)[0]
print(data)
browser.quit()
# In[268]:
df.to_csv('greek-roman.csv')
# In[269]:
df_greek_roman_to_clean= | pd.read_csv('greek-roman.csv') | pandas.read_csv |
"""
===================================================================================
Train distributed CV search with a logistic regression on the breast cancer dataset
===================================================================================
In this example we optimize hyperparameters (C) for a logistic regression on the
breast cancer dataset. This is a binary target. We use both grid search and
randomized search.
Here the core difference between skdist and sklearn is to use the sparkContext
variable as an argument to the grid search and randomized search class
instantiation. Under the hood, skdist will then broadcast the training data out
to the executors for each param set, fit the estimator for each param set, return
the cross validation score to the driver for each fit, and finally refit the model
with the best param set back on the driver.
The final estimators are then nearly identical to a fitted sklearn GridSearchCV
or RandomizedSearchCV estimator as shown by looking at some of their methods
and attributes.
Finally, all spark objects are removed from the fitted skdist estimator objects
so that these objects are pickle-able as shown.
Here is a sample output run:
-- Grid Search --
Best Score: 0.9925297825837328
Best C: 1.0
param_C mean_test_score
0 0.001 0.973818
1 0.01 0.982880
2 0.1 0.989827
3 1 0.992530
4 10 0.992010
5 100 0.990754
DistGridSearchCV(estimator=LogisticRegression(C=1.0, class_weight=None,
dual=False, fit_intercept=True,
intercept_scaling=1,
l1_ratio=None, max_iter=100,
multi_class='warn', n_jobs=None,
penalty='l2', random_state=None,
solver='liblinear', tol=0.0001,
verbose=0, warm_start=False),
param_grid={'C': [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]},
partitions='auto', preds=False, sc=None)
-- Randomized Search --
Best Score: 0.9925297825837328
Best C: 1.0
param_C mean_test_score
3 0.01 0.982880
2 0.1 0.989827
4 1 0.992530
1 10 0.992010
0 100 0.990754
DistRandomizedSearchCV(estimator=LogisticRegression(C=1.0, class_weight=None,
dual=False,
fit_intercept=True,
intercept_scaling=1,
l1_ratio=None, max_iter=100,
multi_class='warn',
n_jobs=None, penalty='l2',
random_state=None,
solver='liblinear',
tol=0.0001, verbose=0,
warm_start=False),
param_distributions={'C': [0.001, 0.01, 0.1, 1.0, 10.0,
100.0]},
partitions='auto', preds=False, sc=None)
"""
print(__doc__)
import pickle
import pandas as pd
import numpy as np
from skdist.distribute.search import DistGridSearchCV, DistRandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import load_breast_cancer
from pyspark.sql import SparkSession
# spark session initialization
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
# sklearn variables
Cs = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]
cv = 5
n_iter = 5
scoring = "roc_auc"
solver = "liblinear"
# load sample data (binary target)
data = load_breast_cancer()
X = data["data"]
y = data["target"]
### distributed grid search
model = DistGridSearchCV(
LogisticRegression(solver=solver), dict(C=Cs), sc, cv=cv, scoring=scoring
)
# distributed fitting with spark
model.fit(X, y)
# predictions on the driver
preds = model.predict(X)
probs = model.predict_proba(X)
# results
print("-- Grid Search --")
print("Best Score: {0}".format(model.best_score_))
print("Best C: {0}".format(model.best_estimator_.C))
result_data = pd.DataFrame(model.cv_results_)[["param_C", "mean_test_score"]]
print(result_data.sort_values("param_C"))
print(pickle.loads(pickle.dumps(model)))
### distributed randomized search
param_dist = dict(C=[])
model = DistRandomizedSearchCV(
LogisticRegression(solver=solver),
dict(C=Cs),
sc,
cv=cv,
scoring=scoring,
n_iter=n_iter,
)
# distributed fitting with spark
model.fit(X, y)
# predictions on the driver
preds = model.predict(X)
probs = model.predict_proba(X)
# results
print("-- Randomized Search --")
print("Best Score: {0}".format(model.best_score_))
print("Best C: {0}".format(model.best_estimator_.C))
result_data = | pd.DataFrame(model.cv_results_) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Folium interact with GeoJSON data
Examples: overlay another GeoJSON zipcode map to the original map
Author: <NAME>
"""
import pandas as pd
import folium
def show_zipcode_map(zipcode_path, data, col):
"""
Interact zipcode GeoJSON data with other data set (house price or crime)
and generate another layer of zipcode map onto the original map
Parameters
----------
path : string
URL or File path to GeoJSON zipcode data
data : pandas dataframe
The other data set to interact with (house price or crime)
col : string
The column name in dataset to bound zipcode with
Return
----------
Save map as .html file
return folium map
"""
# Generate original map
zipcode = folium.Map(location=[data['lat'].mean(),
data['long'].mean()], zoom_start=10)
# Add zipcode map layer to orignial map
zipcode.choropleth(geo_path=zipcode_path, data=data,
columns=['zipcode', col],
key_on='feature.properties.ZCTA5CE10',
fill_color='OrRd', fill_opacity=0.5, line_opacity=0.2)
zipcode.save('zipcode_' + col + '.html')
return zipcode
if __name__ == "__main__":
"""
Example of using show_zipcode_map function
"""
# Load King County house price data
house_data = pd.read_csv("../Data/kc_house_data.csv",
parse_dates=['date'])
house_data['zipcode'] = house_data['zipcode'].astype(str)
# Group data by zipcode and calculate the mean value in each zipcode
zipcode_data = | pd.groupby(house_data, 'zipcode') | pandas.groupby |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# file: main.py
import re
import pandas as pd
from os.path import isfile
try:
from .remarkuple import helper as h
except:
from remarkuple import helper as h
from IPython.display import HTML
try:
from .isopsephy import greek_letters as letters
from .isopsephy import isopsephy, unicode_isopsephy, to_roman, find_cumulative_indices
except:
from isopsephy import greek_letters as letters
from isopsephy import isopsephy, unicode_isopsephy, to_roman, find_cumulative_indices
__version__ = "0.0.1"
booknames = {
"40N": "Matthew",
"41N": "Mark",
"42N": "Luke",
"43N": "John",
"44N": "Acts of the Apostles",
"45N": "Romans",
"46N": "1 Corinthians",
"47N": "2 Corinthians",
"48N": "Galatians",
"49N": "Ephesians",
"50N": "Philippians",
"51N": "Colossians",
"52N": "1 Thessalonians",
"53N": "2 Thessalonians",
"54N": "1 Timothy",
"55N": "2 Timothy",
"56N": "Titus",
"57N": "Philemon",
"58N": "Hebrews",
"59N": "James",
"60N": "1 Peter",
"61N": "2 Peter",
"62N": "1 John",
"63N": "2 John",
"64N": "3 John",
"65N": "Jude",
"66N": "Revelation"
}
# will be transformed to pandas DataFrame
textus_vocabulary = {}
sletters = ''.join(letters)
c = '([%s]+) ([^%s]+)' % (sletters, sletters)
#c = "([%s]+.*?.-.{3})" % sletters
regex_word_strong_morph = re.compile(c)
c = '([%s]+)' % sletters
regex_word_isopsephy = re.compile(c)
c = '{VAR1: ([%s0-9A-Z\- ]+)}' % sletters
regex_variation1 = re.compile(c)
c = '{VAR2: ([%s0-9A-Z\- ]+)}' % sletters
regex_variation2 = re.compile(c)
regex_word_strong_morph_brackets = re.compile('\[(.*)\]')
textus_receptus_original_dir = "data_original/textus_receptus/"
textus_receptus_processed_dir = "data_processed/textus_receptus/"
#letters = "αΑβΒγΓδΔεΕϛϚϜϝζΖηΗθΘιΙυϒYκΚϡϠͲͳλΛωΩμΜτΤνΝξΞοΟσΣϹϲςπΠχΧϙϘϞϟρΡψΨφΦ"
#c = '([%s]+) ([^%s]+)' % (letters, letters)
#c = "([αΑβΒγΓδΔεΕϛϚϜϝζΖηΗθΘιΙυϒYκΚϡϠͲͳλΛωΩμΜτΤνΝξΞοΟσΣϹϲςπΠχΧϙϘϞϟρΡψΨφΦ]+.*?.-.{3})"
#c = u'([Ͱ-ϡ]+) ([A-Z0-9-]+)(?: ([A-Z0-9-]+))? ([A-Z0-9-]+)(?=\\s|$)'
def load_dataframe(filename):
global textus_vocabulary
csvOriginalFileName = textus_receptus_original_dir+filename
csvProcessedFileName = textus_receptus_processed_dir+filename
if isfile(csvProcessedFileName + ".csv"):
print ("Retrieving data from local csv copy...")
textus_vocabulary = pd.read_csv(csvProcessedFileName + "_dict.csv")
df = | pd.read_csv(csvProcessedFileName + ".csv") | pandas.read_csv |
import numpy as np
import pandas as pd
# If you import here, you can use it.
from sklearn.linear_model import LogisticRegression, HuberRegressor, LinearRegression,Ridge,Perceptron
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier, GradientBoostingRegressor, AdaBoostRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.gaussian_process import GaussianProcessRegressor
class ComplementNa():
def _fillna_X_and_make_X_predict(self, data, col):
"""Supplement explanatory variables with missing values once with the average value of the column.
data : (pandas) input data
col : columns having NaN, and this column row predict other columns."""
data_columns = data.columns
y = data.loc[:,[col]]
X = data.drop(col, axis=1)
X.fillna(X.mean(), inplace=True)
data_fillna_X = pd.concat([X,y], axis=1)
data_fillna_X_for_training = data_fillna_X.loc[:, data_columns] # Sort in original column order
y_ = y.iloc[:,0] # reshape
# Extract rows with missing values in the objective variable from the newly complemented data
row_y_is_na = data_fillna_X[y_.isnull()]
X_fillna_for_predict = row_y_is_na.drop(col, axis=1)
return data_fillna_X_for_training, X_fillna_for_predict
def _X_y_split(self, data, col):
"""
Divide training data with missing values into explanatory variables and objective variables.
data : (pandas) input data
col: columns having NaN, and this column row predict other columns.
"""
y = data.loc[:, col] # The data in the specified column having NaN
data_dropna_having_y = data[~y.isnull()] # Data with deleted rows with missing values in specified column
X_train = data_dropna_having_y.drop(col, axis = 1) # Data excluding specified columns (candidate variables for training data)
y_train = data_dropna_having_y.loc[:, col] # Specified column (objective variable of training data)
return X_train, y_train.values # Training data
def _X_y_split_dropna_all(self, data, col):
"""
Of the original data that is not supplemented with missing values,
data with no missing values is divided into explanatory variables and objective variables as training data.
data : (pandas)input data
i : columns having NaN, and this column row predict other columns.
"""
data_dropna_all = data[~data.isnull().any(axis=1)] # Data with no missing values
X_train = data_dropna_all.drop(i, axis = 1) # Data excluding specified columns (candidate variables for training data)
y_train = data_dropna_all.iloc[:, i] # Specified column (objective variable of training data)
return X_train, y_train.values # Training data
def _algorithm_to_same_shape(self, algorithm):
"""
Regardless of what type of algorithm comes in, change to the pattern 4.
1: 'RandomForest'
2: ['RandomForest',{'n_estimators':1}]
3 : [['RandomForestRegressor'], ['RandomForestClassifier']]
4 : [['RandomForestRegressor',{'n_estimators':1}], ['RandomForestClassifier', {}]]
algorithm : (list or str) object of pattern 1~4
"""
# Judgment based on the type of np.array
if np.array(algorithm).shape==() and np.array(algorithm[1]).shape==():
# For pattern 1
# for making mistake
if 'Regressor' in algorithm:
algorithm = algorithm.replace('Regressor','')
elif 'Classifier' in algorithm:
algorithm = algorithm.replace('Classifier','')
algorithm_ = [[algorithm+'Regressor',{}],[algorithm+'Classifier',{}]]
elif np.array(algorithm).shape==(2,) and np.array(algorithm[1]).shape==():
# For pattern 2
if 'Regressor' in algorithm[0]:
algorithm[0] = algorithm[0].replace('Regressor','')
elif 'Classifier' in algorithm[0]:
algorithm[0] = algorithm[0].replace('Classifier','')
algorithm_ = [[algorithm[0]+'Regressor',algorithm[1]],[algorithm[0]+'Classifier',algorithm[1]]]
elif np.array(algorithm).shape==(2,1) and np.array(algorithm[1]).shape==(1,):
# For pattern 3
# for making mistake to reverse
if 'Regressor' in algorithm[1] and 'Classifier' in algorithm[0]:
copy_algorithm = algorithm.copy()
algorithm[0] = copy_algorithm[1]
algorithm[1] = copy_algorithm[0]
algorithm_ = [[algorithm[0], {}], algorithm[1],{}]
elif np.array(algorithm).shape==(2,2) and np.array(algorithm[1]).shape==(2,):
# For pattern 4
# for making mistake to reverse
if 'Regressor' in algorithm[1][0] and 'Classifier' in algorithm[0][0]:
copy_algorithm = algorithm.copy()
algorithm[0] = copy_algorithm[1]
algorithm[1] = copy_algorithm[0]
algorithm_ = algorithm
else:
raise ValueError("algorithm shape is incorrect")
return algorithm_
def _predict_na(self, X_train, y_train, X_predict, col, category_colnames, algorithm, scale=True):
"""
Data in nan columns is predicted other rows.
if i columns data is category variable, using Classification.
It is assumed that dummies variable is dropped all by preprocessing (if dummy variable is NaN, all variable dummied is 0).
X_train : explanation variable for training.
y_train : objective variable (column having nan).
X_predict : nan data raws
col : nan data (y) column number
category_colnames : All categorical variable number.
algorithm : (list) using algorithm shaped by _algorithm_to_same_shape.
scale : (bool) Whether StandardScaler is using.
"""
# Classification if there is an explanatory variable in category_colnames_number, otherwise regression
if col in category_colnames:
module = algorithm[1][0]
param = algorithm[1][1]
model = eval(module + "(**param)")
if scale:
steps = [('scale', StandardScaler()), ('est', model)]
model_c = Pipeline(steps=steps)
else:
model_c = model
model_c.fit(X_train, y_train)
predict_nan = model_c.predict(X_predict)
else: # regression
module = algorithm[0][0]
param = algorithm[0][1]
model = eval(module + "(**param)")
if scale:
steps = [('scale', StandardScaler()), ('est', model)]
model_r = Pipeline(steps=steps)
else:
model_r = model
model_r.fit(X_train, y_train)
predict_nan = model_r.predict(X_predict)
return predict_nan
def complena(self, data, corr=None, category_colnames= 'self', algorithm=['RandomForest',{'n_estimators':100}], scale=True,
decision_interpolation = True):
"""
data : (numpy or pandas) Input data
corr : (numpy or pandas) Correlation coefficient of input data
category_colnames: (list) Specify column name of categorical variable.
algorithm : (list or str) specified using ML algorithm as follows pattern. Specify regressor and classifier.
1: 'RandomForest'
2: ['RandomForest',{'n_estimators':100}]
3 : [['RandomForestRegressor'], ['RandomForestClassifier']]
4 : [['RandomForestRegressor',{'n_estimators':10}], ['RandomForestClassifier', {}]]
scale : (bool) True : pipeline of StandardScaler
decision_interpolation : (True) Complement all missing values of explanatory variables and use them for all training data
(False) Do not use data with missing values for explanatory variables, use only training data with no remaining missing values
"""
if category_colnames=='self' and hasattr(self, 'category_colnames'):
category_colnames= self.category_colnames
elif category_colnames=='self' and not hasattr(self, 'category_colnames'):
category_colnames= []
# algorithm to same shape
algorithm_ = self._algorithm_to_same_shape(algorithm)
data = pd.DataFrame(data).copy()
data_colnames = data.columns # Original data column order
# Missing values and numeric column names for each column
n_nan = data.isnull().sum(axis = 0)
# Sort by most missing values
number_of_nan = pd.DataFrame({"n_nan": n_nan}).T
plus_n_nan_data = | pd.concat([data, number_of_nan]) | pandas.concat |
# Changing the actions in self.actions should automatically change the script to function with the new number of moves.
# Developed and improved by past CG4002 TAs and students: <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
import os
import sys
import time
import traceback
import random
import socket
import threading
import base64
import tkinter as tk
from tkinter import ttk
from tkinter.constants import HORIZONTAL, VERTICAL
import pandas as pd
from Crypto.Cipher import AES
LOG_DIR = os.path.join(os.path.dirname(__file__), 'evaluation_logs')
MESSAGE_SIZE = 2
ACTIONS = ["shoot", "shield", "grenade", "reload"]
NUM_ACTION_REPEATS = 4
"""
Class that will generate randomized list of actions.
Actions will be displayed on the evaluation server UI for the
players to follow.
"""
class TurnGenerator():
def __init__(self):
self.cur_turn = 0
self.num_actions = len(ACTIONS)
# Generate random sequence of actions for Player 1
self.p1_actions = ACTIONS * NUM_ACTION_REPEATS
random.shuffle(self.p1_actions)
self.p1_actions.insert(0, "none")
self.p1_actions.append("logout")
print(self.p1_actions)
# Generate random sequence of actions for Player 2
self.p2_actions = ACTIONS * NUM_ACTION_REPEATS
random.shuffle(self.p2_actions)
self.p2_actions.insert(0, "none")
self.p2_actions.append("logout")
print(self.p2_actions)
"""
Called at the start of every turn to generate new values for player actions
"""
def iterate(self):
# Return True if we have finished going through all turns
if self.cur_turn + 1 >= len(self.p1_actions):
return True
self.cur_turn += 1
print(f"New P1 Action: {self.p1_actions[self.cur_turn]}")
print(f"New P2 Action: {self.p2_actions[self.cur_turn]}")
return False
"""
Return both player expected actions in tuple of tuples: (p1_action,p2_action)
"""
def get_correct_action(self):
return self.p1_actions[self.cur_turn], self.p2_actions[self.cur_turn]
class Server(threading.Thread):
def __init__(self, ip_addr, port_num, group_id):
super(Server, self).__init__()
# Setup logger
self.log_filename = 'group{}_logs.csv'.format(group_id)
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
self.log_filepath = os.path.join(LOG_DIR, self.log_filename)
self.columns = [
'timestamp',
'p1_action', 'gt_p1_action', 'p2_action', 'gt_p2_action',
'response_time',
'is_p1_action_correct', 'is_p2_action_correct'
]
self.df = | pd.DataFrame(columns=self.columns) | pandas.DataFrame |
import os
import gzip
import random
import pickle
import yaml
import pandas as pd
from base64 import b64encode
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sqlalchemy.orm import declarative_base, sessionmaker
from sqlalchemy import create_engine, Column, Integer, String, LargeBinary
Base = declarative_base()
CONFIG_PATH = ""
# Create a table for the models
class Models(Base):
__tablename__ = 'models'
model_id = Column(Integer, primary_key=True)
model_name = Column(String(30))
blob = Column(LargeBinary)
def load_config(config_name):
"""
Loads the base configurations for the model
* Configuration for model from config.yml
:param: config_name
:return: loaded config dictionary
"""
with open(os.path.join(CONFIG_PATH, config_name)) as file:
config = yaml.safe_load(file)
return config
def create_data(config):
"""
Generates data for linear regression
* Configuration dictionary
:param config: config dict for dataset configurations
:return: dataset as a pandas dataframe
"""
m = config["gradient"]
c = config["y_intercept"]
data = []
for x in range(config["x_start"], config["x_end"]):
# for x from x_start to x_end generate y using y=mx+c with random noise based on range specified in config
y = m * x + c + random.uniform(config["noise_range_start"], config["noise_range_end"])
data.append([x, y])
dataframe = | pd.DataFrame(data, columns=['x', 'y']) | pandas.DataFrame |
import os
import sys
sys.path.append("../..")
import datetime
import pymongo
from pandas.io.json import json_normalize
import pandas as pd
class Test():
"""
This page is used to create a Graph in Sqlgraph which include three kinds of node ----Document, Event and Knowledge.
"""
def __init__(self, coll):
host = '10.60.1.140'
port = 6080
usr = 'root'
pwd = '<PASSWORD>'
self.mongoclient = pymongo.MongoClient(host, port)
self.admin = self.mongoclient.admin
self.admin.authenticate(usr, pwd)
self.coll = coll
self.document = self.mongoclient["QBData"]["documents_task"]
def run(self):
doc_list = list(self.document.find({"task_id": self.coll}))
if not os.path.exists('./api/fake/fake_data/{}'.format(self.coll)):
os.makedirs('./api/fake/fake_data/{}'.format(self.coll))
relation_file = './api/fake/fake_data/{}/set_relation_{}.csv'.format(self.coll, self.coll)
document_file = './api/fake/fake_data/{}/set_document_{}.csv'.format(self.coll, self.coll)
# 这里存在问题,第一条数据不显示,从第二条数据开始
doc_frame = json_normalize(doc_list)
# Write document
document = pd.DataFrame(doc_frame, columns=["_id",
"raw_id",
"channel",
"site_name",
"title.en",
"publish_time",
"topic",
"entity_list",
"meta_type"])
document.rename(columns={"_id": "Entity_id"}, inplace=True)
document.rename(columns={"title.en": "title"}, inplace=True)
document["Entity_type"] = "document"
document.to_csv(document_file, index=False)
# Generate relation
relation_list = []
# entity 和 doc关系
# for 循环里面的Entity_id是什么意思,没明白,待弄懂
for index, row in doc_frame.iterrows():
entity_dict = []
document_id = row["_id"]
for sen in row["entity_list"]:
if "Entity_id" in sen:
sen["id"] = sen["Entity_id"]
if sen["id"] in entity_dict:
continue
else:
entity_dict.append(sen["id"])
relation_id1 = "{}-{}".format(document_id, sen["id"])
relation_list.append([document_id, sen["id"], relation_id1, "include_entity", "include_entity"])
relation_dataframe = | pd.DataFrame(relation_list, columns=["Head_id", "Tail", "id", "relation_id", "type"]) | pandas.DataFrame |
"""General data-related utilities."""
import functools
import operator
import pandas as pd
def cartesian(ranges, names=None):
"""Generates a data frame that is a cartesian product of ranges."""
if names is None:
names = range(len(ranges))
if not ranges:
return pd.DataFrame()
if len(ranges) == 1:
return | pd.DataFrame({names[0]: ranges[0]}) | pandas.DataFrame |
import numpy
import pyearth
import pandas as pd
from pyearth import Earth
pathToInputData = 'C:\\__DEMO1\\Memory.csv'
dateTimeFormat = '%d/%m/%Y %H:%M'
pathToOutputData = 'C:\\__DEMO1\\output.txt'
# Write array to file
def array_to_file(the_array, file_name):
the_file = open(file_name, 'w')
for item in the_array:
the_file.write('%s\n' % item)
def buildModel():
# Read our data
data = pd.read_csv(pathToInputData,index_col=0)
data.head()
data.index = | pd.to_datetime(data.index, format=dateTimeFormat) | pandas.to_datetime |
import pytest
from pigging.connectors import googleBigQueryConnector, googleSheetsConnector
import os
import warnings
import pandas as pd
### Credentials ###
CREDENTIALS_PATH = os.environ.get('CREDENTIALS_PATH')
### Google Big Query ###
SELECT_QUERY = os.environ.get('SELECT_QUERY')
PROEJCT_ID = os.environ.get('PROEJCT_ID')
TABLE_DESTINATION = os.environ.get('TABLE_DESTINATION')
gbq_connector = googleBigQueryConnector(CREDENTIALS_PATH)
class TestGoogleBigQueryConnector(object):
def test_connected(self, capsys):
gbq_connector = googleBigQueryConnector(CREDENTIALS_PATH)
captured = capsys.readouterr()
assert captured.out == "Successfully loaded Google BigQuery credentials\n"
def test_data_import_successfull(self):
df = gbq_connector.import_data(SELECT_QUERY, PROEJCT_ID)
assert len(df) > 0, "There should be a df"
def test_data_export_successful(self, capsys):
df = gbq_connector.import_data(SELECT_QUERY, PROEJCT_ID)
gbq_connector.export_data(
df, TABLE_DESTINATION, PROEJCT_ID, "EU", 'replace')
captured = capsys.readouterr()
assert captured.out == "Successfully exported data to Google BigQuery\n"
### Google Sheets ###
WORKSHEET_ID = os.environ.get('WORKSHEET_ID')
WORKSHEET_NAME = os.environ.get('WORKSHEET_NAME')
DF = | pd.DataFrame(["test value"], columns=['Test col']) | pandas.DataFrame |
# Copyright (C) 2021 ServiceNow, Inc.
import pytest
import pandas as pd
import numpy as np
from nrcan_p2.data_processing.utils import (
produce_updown_df,
decide_lang
)
def test_produce_updown_df():
df = pd.DataFrame({
'text': ['a', "b", "c", "d", "e"],
'mycol': [0,1,2,3,4],
'othercol': ['z','y', 'x', 'v', 'w']
}, index=[5,6,7,8,9])
expected = pd.DataFrame({
'text': ['a', "b", "c", "d", "e"],
'mycol': [0,1,2,3,4],
'othercol': ['z','y', 'x', 'v', 'w'],
#'index_up': [None,5,6,7,8],
'mycol_up': [None,0,1,2,3],
#'index_down': [6,7,8,9,None],
'mycol_down': [1,2,3,4,None]
}, index=[5,6,7,8,9]).fillna(np.nan)
expected['mycol'] = expected.mycol.astype('float')
expected['mycol_up'] = expected.mycol_up.astype('float')
expected['mycol_down'] = expected.mycol_down.astype('float')
expected.index = expected.index.astype('float')
output = produce_updown_df(df,
col='mycol',
)
#ol_set=['text', 'mycol', 'othercol'])
print(output)
print(expected)
| pd.testing.assert_frame_equal(output, expected) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 3 16:27:12 2017
@author: xinruyue
"""
import pandas as pd
import numpy as np
import xlrd
import pickle
import os
def get_country():
f = open('country.txt','r')
country = []
for line in f:
line = line.strip('\n')
country.append(line)
return country
#get F matrix
def get_f(df,country):
size = len(country)
f_matrix = np.zeros((size,size))
for index,row in df.iterrows():
imp = row[2]
exp = row[4]
value = row[8]
i = country.index(imp)
j = country.index(exp)
f_matrix[i][j] = value
return f_matrix
def processing(file1,y):
# get all data
df = pd.DataFrame()
book = xlrd.open_workbook(file1)
for sheet in book.sheets():
f = | pd.read_excel(file1, sheetname=sheet.name) | pandas.read_excel |
import sys, os
sys.path.insert(0, os.path.abspath('..'))
import re
from library.utils import StatisticResult, statistic_test
from collections import defaultdict
import pandas as pd
from typing import final
from library.RecRunner import NameType, RecRunner
from library.constants import METRICS_PRETTY, RECS_PRETTY, experiment_constants, CITIES_PRETTY,DATA,UTIL
import argparse
import app_utils
LATEX_HEADER = r"""\documentclass{article}
\usepackage{graphicx}
\usepackage[utf8]{inputenc}
\usepackage{xcolor}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{underscore}
\usepackage[margin=0.5in]{geometry}
\usepackage{booktabs}
\begin{document}
"""
LATEX_FOOT = r"""
\end{document}"""
argparser =argparse.ArgumentParser()
app_utils.add_cities_arg(argparser)
app_utils.add_base_recs_arg(argparser)
app_utils.add_final_recs_arg(argparser)
args = argparser.parse_args()
# cities = ['lasvegas', 'phoenix']
# base_recs = [ 'usg','geosoca','geomf',]
# final_recs = ['geodiv']
final_rec_list_size = experiment_constants.K
rr=RecRunner(args.base_recs[0],args.final_recs[0],args.cities[0],experiment_constants.N,final_rec_list_size,DATA)
metrics_k = experiment_constants.METRICS_K
final_recs_metrics= defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict())))
base_recs_metrics= defaultdict(lambda: defaultdict(lambda: defaultdict()))
latex_table = ""
main_metrics = ['precision','recall','gc','ild','pr','epc']
def get_metrics_renamed_order(METRICS_PRETTY_k):
METRICS_PRETTY_k = [METRICS_PRETTY_k[v] for v in main_metrics]
return METRICS_PRETTY_k
def get_metrics_pretty_k(metric_k:int) -> dict:
return {k: v+f'@{metric_k}' for k, v in METRICS_PRETTY.items()}
def df_format(df_unf:pd.DataFrame,metric_k):
# METRICS_PRETTY_k = {k: v+f'@{metric_k}' for k, v in METRICS_PRETTY.items()}
METRICS_PRETTY_k = get_metrics_pretty_k(metric_k)
df_unf=df_unf[main_metrics].rename(columns=METRICS_PRETTY_k)
# print(df_unf)
return df_unf
def get_base_name(base_name):
return RECS_PRETTY[base_name]
def get_final_name(base_name,final_name):
return get_base_name(base_name)+'+'+RECS_PRETTY[final_name]
for city in args.cities:
for base_rec in args.base_recs:
rr.city = city
rr.base_rec = base_rec
metrics = rr.load_metrics(
base=True, name_type=NameType.PRETTY, METRICS_KS=metrics_k)
for metric_k in metrics_k:
base_recs_metrics[city][base_rec][metric_k] = pd.DataFrame(
metrics[metric_k])
base_recs_metrics[city][base_rec][metric_k]=df_format(base_recs_metrics[city][base_rec][metric_k],metric_k)
for final_rec in args.final_recs:
rr.final_rec = final_rec
metrics = rr.load_metrics(
base=False, name_type=NameType.PRETTY, METRICS_KS=metrics_k)
for metric_k in metrics_k:
final_recs_metrics[city][base_rec][final_rec][metric_k] = pd.DataFrame(
metrics[metric_k])
final_recs_metrics[city][base_rec][final_rec][metric_k] =df_format(final_recs_metrics[city][base_rec][final_rec][metric_k],metric_k)
num_metrics = 6
num_columns= num_metrics+1
latex_table_header= """
\\begin{{tabular}}{{{}}}
""".format('l'*(num_columns))
latex_table_footer= r"""
\end{tabular}
"""
top_count = defaultdict(lambda:defaultdict(int))
for count1, city in enumerate(args.cities):
if count1 == 0:
latex_table += '\\toprule\n'
latex_table += '\\multicolumn{{{}}}{{l}}{{{}}}\\\\\n'.format((num_columns),CITIES_PRETTY[city])
latex_table += '\\bottomrule\n'
for metric_k in metrics_k:
dfs = []
names_recs_in_order = []
for base_rec in args.base_recs:
current_metrics = {}
current_metrics[get_base_name(base_rec)] = base_recs_metrics[city][base_rec][metric_k]
names_recs_in_order.append(get_base_name(base_rec))
for final_rec in args.final_recs:
current_metrics[get_final_name(base_rec,final_rec)] = final_recs_metrics[city][base_rec][final_rec][metric_k]
names_recs_in_order.append(get_final_name(base_rec,final_rec))
df = pd.concat(current_metrics, axis=1)
# print(df)
dfs.append(df)
df = | pd.concat(dfs,axis=1) | pandas.concat |
import multiprocessing
import pandas as pd
import numpy as np
from tqdm import tqdm
from gensim.models import Doc2Vec
from sklearn import utils
from gensim.models.doc2vec import TaggedDocument
import re
import nltk
from gensim.test.test_doc2vec import ConcatenatedDoc2Vec
nltk.download('punkt')
def tokenize_text(text):
tokens = []
for sent in nltk.sent_tokenize(text):
for word in nltk.word_tokenize(sent):
if len(word) < 2:
continue
tokens.append(word.lower())
return tokens
def get_vectors(model, tagged_docs):
sents = tagged_docs.values
targets, regressors = zip(*[(doc.tags[0], model.infer_vector(doc.words, epochs=20)) for doc in sents])
return targets, regressors
cores = multiprocessing.cpu_count()
training_set = pd.read_json('../processed_data/train_set.json')
test_set = pd.read_json('../processed_data/test_set.json')
train_tagged_summary = training_set.apply(
lambda r: TaggedDocument(words=tokenize_text(r['summary']), tags=[r.label]), axis=1)
train_tagged_document = training_set.apply(
lambda r: TaggedDocument(words=tokenize_text(r['document']), tags=[r.label]), axis=1)
model_dbow = Doc2Vec(dm=0, vector_size=300, negative=5, hs=0, min_count=2, sample = 0, workers=cores)
print("\n> Building model vocab")
model_dbow.build_vocab([x for x in tqdm(train_tagged_summary.values)] + [x for x in tqdm(train_tagged_document.values)])
print("\n> Training bag of words model")
for epoch in range(30):
model_dbow.train(utils.shuffle([x for x in tqdm(train_tagged_summary.values, desc = 'epoch {}'.format(epoch))]), total_examples=len(train_tagged_summary.values), epochs=30)
model_dbow.alpha -= 0.002
model_dbow.min_alpha = model_dbow.alpha
model_dmm = Doc2Vec(dm=1, dm_mean=1, vector_size=300, window=10, negative=5, min_count=1, workers=cores, alpha=0.065, min_alpha=0.065)
print("\n> Building model vocab")
model_dmm.build_vocab([x for x in tqdm(train_tagged_summary.values)] + [x for x in tqdm(train_tagged_document.values)])
print("\n> Training Distributed memory model")
for epoch in range(30):
model_dmm.train(utils.shuffle([x for x in tqdm(train_tagged_summary.values, desc = 'epoch {}'.format(epoch))]), total_examples=len(train_tagged_summary.values), epochs=30)
model_dmm.alpha -= 0.002
model_dmm.min_alpha = model_dmm.alpha
new_model = ConcatenatedDoc2Vec([model_dbow, model_dmm])
test_tagged_summary = test_set.apply(
lambda r: TaggedDocument(words=tokenize_text(r['summary']), tags=[1]), axis=1)
test_tagged_document = test_set.apply(
lambda r: TaggedDocument(words=tokenize_text(r['document']), tags=[1]), axis=1)
print('\n> Generating final vectors')
y_train, X_train = get_vectors(new_model, train_tagged_summary)
y_test, X_test = get_vectors(new_model, test_tagged_summary)
y_train, X_train_document = get_vectors(new_model, train_tagged_document)
y_test, X_test_document = get_vectors(new_model, test_tagged_document)
print('> Saving result')
training_result = pd.DataFrame()
training_result.insert(0, "summary_embedding", X_train)
training_result.insert(0, "document_embedding", X_train_document)
training_result.insert(2, "labels", training_set['label'])
testing_result = | pd.DataFrame() | pandas.DataFrame |
"""Data Profiling
This script runs the routine of applying data profiling metrics
using the pydeequ library.
github: (https://github.com/awslabs/python-deequ)
This function receives configuration parameters,
process the analyses and saves the results in a BigQuery table.
An way to call this module would be:
gcloud dataproc jobs submit pyspark # submit job to dataproc
data_quality_pyspark.py # this module
--project my-gcp-project # gcp project id
--cluster my-spark-cluster # cluster
--region us-central1 # region
-- gcs # source type (bq | gcs)
gs://project-bucket/path/to/file.csv | .parquet
DATA_PROFILING.ANALYSIS_RESULTS # Bigquery table where will be saved the results
"col1 = 'a' or|and col2 = 'b'" # bq table filter optional, like a sql where clause
"""
import datetime
import logging
import os
import sys
import pandas as pd
from pydeequ import analyzers as A
from pydeequ import profiles as P
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
def main(argv):
"""Run Data Profiling on table or file using Deequ library
and save results to a bigquery table.
Parameters
----------
source_type : str
The type of location of the table to be analyzed ('bq' or 'gcs')
source_path : str
The path to a gcs (cloud storage eg.: gs://path/to/file.parquet|csv) file
or a bq (bigquery eg.: bigquery-public-data:samples.shakespeare) table.
destination : str
The bigquery table where the result will be saved
staging_bucket : str
The GCS bucket used by BigQuery connector for staging files
app_name : str
Spark app name
table_filter : str, optional
The filter applied on bq table (eg.: 'col1 = 'a' and|or col2 = 'b''). (deafult is
'' that means no filter)
Returns
-------
There's no return for this function
"""
"""Configurations"""
# Configuration sys.argv parameters
logging.info("Configuration sys.argv parameters")
source_type = argv[0] # 'bq' or 'gcs'
source_path = argv[1] # bq table ('dataset.table') or gcs file (gs://path/to/file.parquet)
destination = argv[2] # table in bq
staging_bucket = argv[3]
app_name = argv[4]
table_filter = ""
if len(argv) > 5:
table_filter = argv[5] # [optional] filter table, e.g.: 'col1 = a and|or col2 = b'
# Config Spark Session
logging.info("Config Spark Session")
spark = SparkSession.builder.master("yarn").appName(app_name).getOrCreate()
# Config Temp GCS Bucket
logging.info("Config Temp GCS Bucket")
spark.conf.set("temporaryGcsBucket", staging_bucket)
"""Read GCS File or Bigquery Table"""
# Getting file extension
file_extension = os.path.splitext(source_path)[1]
# Read File or Table
logging.info("Read File or Table")
df = None
if source_type == "gcs":
logging.info(f"Reading from gcs file: {source_path}")
if file_extension == ".parquet":
df = spark.read.load(source_path)
elif file_extension == ".csv":
df = spark.read.options(
inferSchema="True", header=True, delimiter="\t"
).csv(source_path)
elif source_type == "bq":
logging.info(f"Reading from bq table: {source_path}")
if table_filter == "":
df = (
# Load data from BigQuery.
spark.read.format("bigquery")
.option('table', source_path)
.load()
.select("*")
)
else:
df = (
# Load data from BigQuery.
spark.read.format("bigquery")
.option('table', source_path)
.load()
.select("*")
.filter(table_filter)
)
else:
logging.info(f"Unknown value {argv[0]} for source_type parameter")
sys.exit(1)
df = df.drop("METADATA")
# Executing Profile to get Datatype for each column
logging.info("Executing Profile to get Datatype for each column")
result = P.ColumnProfilerRunner(spark).onData(df).run()
# Getting columns names and types
d = []
for col, profile in result.profiles.items():
d.append({"instance": col, "datatype": profile.dataType})
df_column_types = spark.createDataFrame( | pd.DataFrame(d) | pandas.DataFrame |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
(UInt64Dtype(), "UInt64Dtype()"),
],
)
def test_repr_dtype(dtype, expected):
assert repr(dtype) == expected
def test_repr_array():
result = repr(integer_array([1, None, 3]))
expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
assert result == expected
def test_repr_array_long():
data = integer_array([1, 2, None] * 1000)
expected = (
"<IntegerArray>\n"
"[ 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>, 1,\n"
" ...\n"
" <NA>, 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>]\n"
"Length: 3000, dtype: Int64"
)
result = repr(data)
assert result == expected
class TestConstructors:
def test_uses_pandas_na(self):
a = pd.array([1, None], dtype=pd.Int64Dtype())
assert a[1] is pd.NA
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
# from float
expected = pd.Series(data)
result = pd.Series(
data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)
)
tm.assert_series_equal(result, expected)
# from int / list
expected = pd.Series(data)
result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
msg = r"can only perform ops with 1-d structures"
with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
expected = pd.array([1, 1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** 1
expected = pd.array([-1, 0, 1, None, 2], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
expected = pd.array([None, None, 1, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
expected = np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = 1 ** a
expected = pd.array([1, 1, 1, 1], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = pd.NA ** a
expected = pd.array([1, None, None, None], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = np.nan ** a
expected = np.array([1, np.nan, np.nan, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
def test_pow_array(self):
a = integer_array([0, 0, 0, 1, 1, 1, None, None, None])
b = integer_array([0, 1, None, 0, 1, None, 0, 1, None])
result = a ** b
expected = integer_array([1, 0, None, 1, 1, 1, 1, None, None])
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na(self):
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = integer_array([np.nan, np.nan])
result = np.array([1.0, 2.0]) ** arr
expected = np.array([1.0, np.nan])
tm.assert_numpy_array_equal(result, expected)
class TestComparisonOps(BaseOpsUtil):
def _compare_other(self, data, op_name, other):
op = self.get_op_from_name(op_name)
# array
result = pd.Series(op(data, other))
expected = pd.Series(op(data._data, other), dtype="boolean")
# fill the nan locations
expected[data._mask] = pd.NA
tm.assert_series_equal(result, expected)
# series
s = pd.Series(data)
result = op(s, other)
expected = op(pd.Series(data._data), other)
# fill the nan locations
expected[data._mask] = pd.NA
expected = expected.astype("boolean")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1])
def test_scalar(self, other, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([1, 0, None], dtype="Int64")
result = op(a, other)
if other is pd.NA:
expected = pd.array([None, None, None], dtype="boolean")
else:
values = op(a._data, other)
expected = pd.arrays.BooleanArray(values, a._mask, copy=True)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(a, pd.array([1, 0, None], dtype="Int64"))
def test_array(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([0, 1, 2, None, None, None], dtype="Int64")
b = pd.array([0, 1, None, 0, 1, None], dtype="Int64")
result = op(a, b)
values = op(a._data, b._data)
mask = a._mask | b._mask
expected = pd.arrays.BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(
a, pd.array([0, 1, 2, None, None, None], dtype="Int64")
)
tm.assert_extension_array_equal(
b, pd.array([0, 1, None, 0, 1, None], dtype="Int64")
)
def test_compare_with_booleanarray(self, all_compare_operators):
op = self.get_op_from_name(all_compare_operators)
a = pd.array([True, False, None] * 3, dtype="boolean")
b = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype="Int64")
other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")
expected = op(a, other)
result = op(a, b)
tm.assert_extension_array_equal(result, expected)
def test_no_shared_mask(self, data):
result = data + 1
assert np.shares_memory(result._mask, data._mask) is False
def test_compare_to_string(self, any_nullable_int_dtype):
# GH 28930
s = pd.Series([1, None], dtype=any_nullable_int_dtype)
result = s == "a"
expected = pd.Series([False, pd.NA], dtype="boolean")
self.assert_series_equal(result, expected)
def test_compare_to_int(self, any_nullable_int_dtype, all_compare_operators):
# GH 28930
s1 = pd.Series([1, None, 3], dtype=any_nullable_int_dtype)
s2 = pd.Series([1, None, 3], dtype="float")
method = getattr(s1, all_compare_operators)
result = method(2)
method = getattr(s2, all_compare_operators)
expected = method(2).astype("boolean")
expected[s2.isna()] = pd.NA
self.assert_series_equal(result, expected)
class TestCasting:
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(self, all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(integer_array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(self, all_data, dropna):
# as an int/uint index to Index
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
dtype = all_data.dtype
idx = pd.Index(np.array(other))
assert isinstance(idx, ABCIndexClass)
result = idx.astype(dtype)
expected = idx.astype(object).astype(dtype)
tm.assert_index_equal(result, expected)
def test_astype(self, all_data):
all_data = all_data[:10]
ints = all_data[~all_data.isna()]
mixed = all_data
dtype = Int8Dtype()
# coerce to same type - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype)
expected = pd.Series(ints)
tm.assert_series_equal(result, expected)
# coerce to same other - ints
s = pd.Series(ints)
result = s.astype(dtype)
expected = pd.Series(ints, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype.numpy_dtype)
expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
tm.assert_series_equal(result, expected)
# coerce to same type - mixed
s = pd.Series(mixed)
result = s.astype(all_data.dtype)
expected = pd.Series(mixed)
tm.assert_series_equal(result, expected)
# coerce to same other - mixed
s = pd.Series(mixed)
result = s.astype(dtype)
expected = pd.Series(mixed, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - mixed
s = | pd.Series(mixed) | pandas.Series |
import unittest
import tempfile
import json
import numpy as np
import pandas as pd
from supervised.preprocessing.label_encoder import LabelEncoder
class LabelEncoderTest(unittest.TestCase):
def test_fit(self):
# training data
d = {"col1": ["a", "a", "c"], "col2": ["w", "e", "d"]}
df = pd.DataFrame(data=d)
le = LabelEncoder()
# check first column
le.fit(df["col1"])
data_json = le.to_json()
# values from column should be in data json
self.assertTrue("a" in data_json)
self.assertTrue("c" in data_json)
self.assertTrue("b" not in data_json)
# there is alphabetical order for values
self.assertEqual(0, data_json["a"])
self.assertEqual(1, data_json["c"])
# check next column
le.fit(df["col2"])
data_json = le.to_json()
self.assertEqual(0, data_json["d"])
self.assertEqual(1, data_json["e"])
self.assertEqual(2, data_json["w"])
def test_transform(self):
# training data
d = {"col1": ["a", "a", "c"]}
df = pd.DataFrame(data=d)
# fit encoder
le = LabelEncoder()
le.fit(df["col1"])
# test data
d_test = {"col2": ["c", "c", "a"]}
df_test = pd.DataFrame(data=d_test)
# transform
y = le.transform(df_test["col2"])
self.assertEqual(y[0], 1)
self.assertEqual(y[1], 1)
self.assertEqual(y[2], 0)
def test_transform_with_new_values(self):
# training data
d = {"col1": ["a", "a", "c"]}
df = | pd.DataFrame(data=d) | pandas.DataFrame |
# -*- coding:utf-8 -*-
__author__ = 'boredbird'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from datetime import datetime
from sklearn.svm import l1_min_c
from woe.eval import compute_ks
import pickle
import time
"""
Search for optimal hyper parametric C in LogisticRegression
"""
def grid_search_lr_c(X_train,y_train,cs,df_coef_path=False
,pic_coefpath_title='Logistic Regression Path',pic_coefpath=False
,pic_performance_title='Logistic Regression Performance',pic_performance=False):
"""
grid search optimal hyper parameters c with the best ks performance
:param X_train: features dataframe
:param y_train: target
:param cs: list of regularization parameter c
:param df_coef_path: the file path for logistic regression coefficient dataframe
:param pic_coefpath_title: the pic title for coefficient path picture
:param pic_coefpath: the file path for coefficient path picture
:param pic_performance_title: the pic title for ks performance picture
:param pic_performance: the file path for ks performance picture
:return: a tuple of c and ks value with the best ks performance
"""
# init a LogisticRegression model
clf_l1_LR = LogisticRegression(C=0.1, penalty='l1', tol=0.01,class_weight='balanced')
# cs = l1_min_c(X_train, y_train, loss='log') * np.logspace(0, 9,200)
print("Computing regularization path ...")
start = datetime.now()
print(start)
coefs_ = []
ks = []
for c in cs:
clf_l1_LR.set_params(C=c)
clf_l1_LR.fit(X_train, y_train)
coefs_.append(clf_l1_LR.coef_.ravel().copy())
proba = clf_l1_LR.predict_proba(X_train)[:,1]
ks.append(compute_ks(proba,y_train))
end = datetime.now()
print(end)
print("This took ", end - start)
coef_cv_df = pd.DataFrame(coefs_,columns=X_train.columns)
coef_cv_df['ks'] = ks
coef_cv_df['c'] = cs
if df_coef_path:
file_name = df_coef_path if isinstance(df_coef_path, str) else None
coef_cv_df.to_csv(file_name)
coefs_ = np.array(coefs_)
fig1 = plt.figure('fig1')
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title(pic_coefpath_title)
plt.axis('tight')
if pic_coefpath:
file_name = pic_coefpath if isinstance(pic_coefpath, str) else None
plt.savefig(file_name)
else:
plt.show()
fig2 = plt.figure('fig2')
plt.plot(np.log10(cs), ks)
plt.xlabel('log(C)')
plt.ylabel('ks score')
plt.title(pic_performance_title)
plt.axis('tight')
if pic_performance:
file_name = pic_performance if isinstance(pic_performance, str) else None
plt.savefig(file_name)
else:
plt.show()
flag = coefs_<0
idx = np.array(ks)[flag.sum(axis=1) == 0].argmax()
return (cs[idx],ks[idx])
def grid_search_lr_c_validation(X_train,y_train,validation_dataset_list,cs=[0.01],df_coef_path=False
,pic_coefpath_title='Logistic Regression Path',pic_coefpath=False
,pic_performance_title='Logistic Regression Performance',pic_performance=False):
"""
grid search optimal hyper parameters c with the best ks performance
:param X_train: features dataframe
:param y_train: target
:param cs: list of c value
:param df_coef_path: the file path for logistic regression coefficient dataframe
:param pic_coefpath_title: the pic title for coefficient path picture
:param pic_coefpath: the file path for coefficient path picture
:param pic_performance_title: the pic title for ks performance picture
:param pic_performance: the file path for ks performance picture
:return: a tuple of c and ks value with the best ks performance
"""
# init a LogisticRegression model
clf_l1_LR = LogisticRegression(C=0.1, penalty='l1', tol=0.01,class_weight='balanced')
print("Computing regularization path ...")
start = datetime.now()
print(start)
coefs_ = []
ks = []
ks_validation1 = []
ks_validation2 = []
counter = 0
for c in cs:
print('time: ',time.asctime(time.localtime(time.time())),'counter: ',counter, ' c: ',c)
clf_l1_LR.set_params(C=c)
clf_l1_LR.fit(X_train, y_train)
coefs_.append(clf_l1_LR.coef_.ravel().copy())
proba = clf_l1_LR.predict_proba(X_train)[:,1]
validation_proba1 = clf_l1_LR.predict_proba(validation_dataset_list[0][X_train.columns])[:,1]
ks.append(compute_ks(proba,y_train))
ks_validation1.append(compute_ks(validation_proba1,validation_dataset_list[0]['target']))
print('ks:\t',ks[-1],'ks_validation1:\t',ks_validation1[-1])
counter += 1
end = datetime.now()
print(end)
print("This took ", end - start)
coef_cv_df = pd.DataFrame(coefs_,columns=X_train.columns)
coef_cv_df['ks'] = ks
coef_cv_df['ks_validation1'] = ks_validation1
coef_cv_df['c'] = cs
if df_coef_path:
file_name = df_coef_path if isinstance(df_coef_path, str) else None
coef_cv_df.to_csv(file_name)
coefs_ = np.array(coefs_)
fig1 = plt.figure('fig1')
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title(pic_coefpath_title)
plt.axis('tight')
if pic_coefpath:
file_name = pic_coefpath if isinstance(pic_coefpath, str) else None
plt.savefig(file_name)
plt.close()
else:
pass
# plt.show()
# plt.close()
fig2 = plt.figure('fig2')
plt.plot(np.log10(cs), ks)
plt.xlabel('log(C)')
plt.ylabel('ks score')
plt.title(pic_performance_title)
plt.axis('tight')
if pic_performance:
file_name = pic_performance if isinstance(pic_performance, str) else None
plt.savefig(file_name)
plt.close()
else:
pass
# plt.show()
# plt.close()
flag = coefs_<0
if np.array(ks)[flag.sum(axis=1) == 0].__len__()>0:
idx = np.array(ks)[flag.sum(axis=1) == 0].argmax()
else:
idx = np.array(ks).argmax()
return (cs[idx],ks[idx])
def grid_search_lr_c_main(params):
print('run into grid_search_lr_c_main:')
dataset_path = params['dataset_path']
validation_path = params['validation_path']
config_path = params['config_path']
df_coef_path = params['df_coef_path']
pic_coefpath = params['pic_coefpath']
pic_performance = params['pic_performance']
pic_coefpath_title = params['pic_coefpath_title']
pic_performance_title = params['pic_performance_title']
dataset_train = pd.read_csv(dataset_path)
cfg = | pd.read_csv(config_path) | pandas.read_csv |
from pathlib import Path
import sklearn
import numpy as np
import pandas as pd
from scipy.stats import pearsonr, spearmanr
def calc_preds(model, x, y, mltype):
""" Calc predictions. """
if mltype == 'cls':
def get_pred_fn(model):
if hasattr(model, 'predict_proba'):
return model.predict_proba
if hasattr(model, 'predict'):
return model.predict
pred_fn = get_pred_fn(model)
if (y.ndim > 1) and (y.shape[1] > 1):
y_pred = pred_fn(x)
y_pred = np.argmax(y_pred, axis=1)
y_true = np.argmax(ydata, axis=1)
else:
y_pred = pred_fn(x)
y_true = y
elif mltype == 'reg':
y_pred = np.squeeze(model.predict(x))
y_true = np.squeeze(y)
return y_pred, y_true
def dump_preds(y_true, y_pred, meta=None, outpath='./preds.csv'):
""" Dump prediction and true values, with optional with metadata. """
y_true = pd.Series(y_true, name='y_true')
y_pred = | pd.Series(y_pred, name='y_pred') | pandas.Series |
from __future__ import absolute_import, division, print_function
from pandas import DataFrame, Series
from numpy import zeros
from pennies.trading.assets import Swap, Annuity, IborLeg, FixedLeg, VanillaSwap
from pennies.market.market import RatesTermStructure
from pennies.market.curves import ConstantDiscountRateCurve
from multipledispatch import dispatch
@dispatch(Annuity, RatesTermStructure, str)
def present_value(contract, market, reporting_ccy):
"""Present Value as sum of discount cash flows.
This assumes that one has already computed the rates.
For fixed rate annuities, this will be done during construction.
For floating rate annuities, this will have to be pre-computed,
typically via psuedo-discount factors of other curves."""
a = contract.frame
discount_factors = market.discount_factor(a.pay, currency=contract.currency)
alive = a.pay >= market.dt_valuation
if not alive.any():
return 0.0
pv = (a.rate * a.period * discount_factors * a.notional).loc[alive].sum()
if contract.notl_exchange:
pv += a.notional.iloc[-1] * discount_factors.iloc[-1]
if reporting_ccy != contract.currency:
pv *= market.fx(reporting_ccy, contract.currency)
return pv
@dispatch(Swap, RatesTermStructure, str)
def present_value(contract, market, reporting_ccy):
"""Present Value of a generic Swap."""
return (present_value(contract.leg_receive, market, reporting_ccy) +
present_value(contract.leg_pay, market, reporting_ccy))
@dispatch(IborLeg, RatesTermStructure, str)
def present_value(contract, market, reporting_ccy):
"""Present Value as sum of discounted IBOR cash flows.
Forward LIBOR rates are calculated, and inserted into contract.frame.rate,
for all fixing dates after dt_valuation. For fixing dates in the past,
this assumes that contract.frame.rate is populated, and meaningful.
"""
a = contract.frame
forwards = ibor_rate(contract, market)
# replace rate with forwards for any fixing date after valuation date
a.rate = a.rate.where(a.fixing < market.dt_valuation, forwards)
# do not sum past cash flows
discount_factors = market.discount_factor(a.pay, currency=contract.currency)
alive = a.pay >= market.dt_valuation
if not alive.any():
return 0.0
pv = (a.rate * a.period * discount_factors * a.notional).loc[alive].sum()
if contract.notl_exchange:
pv += a.notional.iloc[-1] * discount_factors.iloc[-1]
if reporting_ccy != contract.currency:
pv *= market.fx(reporting_ccy, contract.currency)
return pv
@dispatch(VanillaSwap, RatesTermStructure)
def par_rate(contract, market):
df_fixed = contract.leg_fixed.frame.copy()
df_fixed.rate = 1.0
df_fixed.notional *= -1
annuity = FixedLeg.from_frame(df_fixed, notl_exchange=False)
df_float = contract.leg_float.frame.copy()
floating_no_xch = IborLeg.from_frame(df_float, notl_exchange=False)
ccy = contract.leg_fixed.currency
assert ccy == contract.leg_float.currency
pv_fix = present_value(annuity, market, ccy)
pv_flt = present_value(floating_no_xch, market, ccy)
return pv_flt / pv_fix
def ibor_rate(contract, market):
"""ALL the natural (L)IBOR rates implied by the start and end schedules.
Returns
-------
Series
Pandas Series containing Forward IBOR rates
This assumes that there is no convexity caused by lags between accrual dates
and fixing and payment dates.
"""
assert isinstance(contract, IborLeg)
assert isinstance(market, RatesTermStructure)
crv_fwd, key = market.curve(contract.currency, contract.frequency)
zcb_pay = crv_fwd.discount_factor(contract.frame.pay)
zcb_fix = crv_fwd.discount_factor(contract.frame.fixing)
return (zcb_fix / zcb_pay - 1.0) / contract.frame.period
def d_price_d_rate(crv):
"""First derivative of each node in a discount curve to it's discount rates.
The crv holds zero coupon bond prices in the form: z_i = exp(-r_i * ttm_i)
"""
return -1 * crv.sched_maturity * crv.discount_factor(crv.sched_maturity)
@dispatch(Annuity, RatesTermStructure, object)
def sens_to_zero_price(contract, market, curve_key):
"""Return Series of sensitivities to the natural ZeroCouponBond prices.
By natural, we mean each of the payment dates of the Annuity.
Sensitivities are only to the curve specified in the RatesTermStructure.
"""
if curve_key == 'discount':
df = contract.frame
alive = df.pay >= market.dt_valuation
sens = (df.rate * df.period).loc[alive]
if contract.notl_exchange and alive.any():
sens.iloc[-1] += df.notional.iloc[-1]
else:
return 0
@dispatch(VanillaSwap, RatesTermStructure, object)
def sens_to_zero_price(contract, market, curve_key):
"""Return Series of sensitivities to the natural ZeroCouponBond prices.
By natural, we mean each of the payment dates of the Annuity.
Sensitivities are only to the curve specified in the RatesTermStructure.
"""
raise NotImplementedError('For Swaps, call each leg separately.')
@dispatch(Annuity, RatesTermStructure, str, object, str)
def sens_to_zero_rates(contract, market, curve_ccy, curve_key, reporting_ccy):
"""Sensitivity of each cashflow to the curve specified by currency and key
The fixed rate annuity is only sensitive to the discount curve
of the currency in which the cash flows (coupons) are paid.
If curve_ccy does not match contract.currency,
and curve_key is not 'discount' an empty DataFrame is returned.
"""
df_sens = DataFrame(columns=['ttm', 'sens', 'ccy', 'curve'])
if curve_ccy == contract.currency:
if curve_key == 'discount':
a = contract.frame
alive = a.pay >= market.dt_valuation
crv = market.discount_curve(curve_ccy)
pay_dates = a.pay[alive]
ttm = crv.daycount_fn(market.dt_valuation, pay_dates)
zcb = market.discount_factor(pay_dates, currency=contract.currency)
sens = -ttm * zcb * (a.rate * a.period * a.notional).loc[alive]
if contract.notl_exchange and alive.any():
sens.iloc[-1] += a.notional.iloc[-1]
if reporting_ccy != contract.currency:
sens *= market.fx(reporting_ccy, contract.currency)
df_sens = DataFrame({'ttm': ttm, 'sens': sens,
'ccy': curve_ccy, 'curve': curve_key})
return df_sens
@dispatch(IborLeg, RatesTermStructure, str, object, str)
def sens_to_zero_rates(contract, market, curve_ccy, rate_key, reporting_ccy):
"""Sensitivity of each cashflow to the curve specified by currency and key
A leg that pays IBOR is sensitive to both the discount and tenor curve
of the currency in which the cash flows (coupons) are paid.
"""
df_sens = DataFrame(columns=['ttm', 'sens', 'ccy', 'curve'])
if curve_ccy == contract.currency:
forwards = ibor_rate(contract, market)
# replace rate with forwards for any fixing date after valuation date
a = contract.frame
a.rate = a.rate.where(a.fixing < market.dt_valuation, forwards)
zcb_pay = market.discount_factor(a.pay, currency=contract.currency)
if rate_key == 'discount':
unpaid = a.pay >= market.dt_valuation
crv = market.discount_curve(curve_ccy)
pay_dates = a.pay[unpaid]
ttm_pay = crv.daycount_fn(market.dt_valuation, pay_dates)
sens = -ttm_pay * (zcb_pay * a.notional * a.rate * a.period).loc[unpaid]
if contract.notl_exchange and unpaid.any():
sens.iloc[-1] += a.notional.iloc[-1]
if reporting_ccy != contract.currency:
sens *= market.fx(reporting_ccy, contract.currency)
df_sens = DataFrame({'ttm': ttm_pay, 'sens': sens,
'ccy': curve_ccy, 'curve': 'discount'})
elif rate_key == contract.frequency: # TODO - Review and add comments
crv, crv_key = market.curve(contract.currency, contract.frequency)
unfixed = a.fixing >= market.dt_valuation
pay_dates = a.pay.loc[unfixed]
ttm_pay = crv.daycount_fn(market.dt_valuation, pay_dates)
zcbi_pay = crv.discount_factor(pay_dates)
fix_dates = a.fixing.loc[unfixed]
ttm_fix = crv.daycount_fn(market.dt_valuation, fix_dates)
zcbi_fix = crv.discount_factor(contract.frame.fixing)
scale_factor = zcbi_fix / zcbi_pay * (a.notional * zcb_pay).loc[unfixed]
sens_pay = ttm_pay * scale_factor
sens_fix = -ttm_fix * scale_factor
if reporting_ccy != contract.currency:
fx = market.fx(reporting_ccy, contract.currency)
sens_pay *= fx
sens_fix *= fx
df_pay = DataFrame({'ttm': ttm_pay, 'sens': sens_pay}).set_index('ttm')
df_fix = DataFrame({'ttm': ttm_fix, 'sens': sens_fix}).set_index('ttm')
df_sens = df_pay.add(df_fix, fill_value=0)
df_sens['ttm'] = df_sens.index
df_sens['ccy'] = curve_ccy
df_sens['curve'] = crv_key
return df_sens
@dispatch(Annuity, RatesTermStructure, str)
def sens_to_market_rates(contract, market, reporting_ccy):
"""Compute sensitivity of contract to each node in the market's curves."""
# 1. Sensitivity of the CONTRACT PV to CONTRACT RATES: dV/dR_k
# i.e. rates at contract dates, such as fixing, and maturity
# ==> Only sensitive to discount curve
ccy = contract.currency
df_pv_sens = sens_to_zero_rates(contract, market, ccy, 'discount', reporting_ccy)
dv_drk = df_pv_sens.sens.values
ttm_k = df_pv_sens.ttm
# 2. Sensitivity of CONTRACT RATES to MARKET RATES: dR_k / dR_j
# This is a function of the curve's interpolator
drk_drj_disc = market.rate_sensitivity(ttm_k, ccy, 'discount')
# 3. Sensitivity of the CONTRACT PV to MARKET RATES, dV / dR_j
# Multiple 1 and 2, and sum over contract dates
dv_drj = zeros(len(market.nodes))
mask_disc = ((market.nodes.ccy == contract.currency) &
(market.nodes.curve == 'discount')).values
dv_drj[mask_disc] = drk_drj_disc.T.dot(dv_drk) # TODO NEED TO EXAMINE. Should this be: dv_drk.dot(drk_drj_disc) ?
# 1d-array of sensitivities to each of the market's nodes. Lots of 0's
return dv_drj
@dispatch(IborLeg, RatesTermStructure, str)
def sens_to_market_rates(contract, market, reporting_ccy):
"""Compute sensitivity of contract to each node in the market's curves."""
# 1. Sensitivity of the CONTRACT PV to CONTRACT RATES: dV/dR_k
# i.e. rates at contract dates, such as fixing, and maturity
ccy = contract.currency
# 1a. discount curve
df_pv_sens = sens_to_zero_rates(contract, market, ccy, 'discount', reporting_ccy)
dv_drk_disc = df_pv_sens.sens.values
ttm_k_disc = df_pv_sens.ttm
# 1b. ibor curve
ibor_key = contract.frequency # TODO Rate and frequency should be separate
df_pv_sens = sens_to_zero_rates(contract, market, ccy, ibor_key, reporting_ccy)
ibor_key = df_pv_sens.curve.iat[0] # May be 'discount', not frequency
dv_drk_ibor = df_pv_sens.sens.values
ttm_k_ibor = df_pv_sens.ttm
# 2. Sensitivity of CONTRACT RATES to MARKET RATES: dR_k / dR_j
# This is a function of the curve's interpolator
# Sensitivity to the discount curve
drk_drj_disc = market.rate_sensitivity(ttm_k_disc, ccy, 'discount')
# Sensitivity to the ibor curve
drk_drj_ibor = market.rate_sensitivity(ttm_k_ibor, ccy, ibor_key)
# 3. Sensitivity of the CONTRACT PV to MARKET RATES
# For each curve, multiply 1 and 2, and sum over contract dates
dv_drj = zeros(len(market.nodes))
mask_disc = ((market.nodes.ccy == contract.currency) &
(market.nodes.curve == 'discount')).values
dv_drj[mask_disc] = dv_drj[mask_disc] + drk_drj_disc.T.dot(dv_drk_disc) # TODO NEED TO EXAMINE
mask_ibor = ((market.nodes.ccy == contract.currency) &
(market.nodes.curve == ibor_key)).values
dv_drj[mask_ibor] = dv_drj[mask_ibor] + drk_drj_ibor.T.dot(dv_drk_ibor) # TODO NEED TO EXAMINE !!!!!!!!!!!!!!
# 1d-array of sensitivities to each of the market's nodes.
# May contain many 0's
return dv_drj
@dispatch(Swap, RatesTermStructure, str)
def sens_to_market_rates(contract, market, reporting_ccy):
"""Compute sensitivity of contract to each node in the market's curves."""
return (sens_to_market_rates(contract.leg_receive, market, reporting_ccy) +
sens_to_market_rates(contract.leg_pay, market, reporting_ccy))
if __name__ == '__main__':
import pandas as pd
dt_val = pd.to_datetime('today')
dt_settle = dt_val - | pd.Timedelta(days=200) | pandas.Timedelta |
import pandas as pd
from expenses_report.config import config
from itertools import product
class DataProvider(object):
_transactions = list()
_columns = None
def __init__(self, transactions):
self._transactions = transactions
self._columns = list(config.import_mapping.keys()) + [config.CATEGORY_MAIN_COL, config.CATEGORY_SUB_COL]
@staticmethod
def load(transactions):
instance = DataProvider(transactions)
instance._rebuild_dataframes()
return instance
def _rebuild_dataframes(self):
# creates DataFrames from imported transactions
ta_tuples = list(map(lambda ta: ta.as_tuple(), self._transactions))
self._df_all = pd.DataFrame.from_records(data=ta_tuples, columns=self._columns, index=config.DATE_COL)
self._df_all[config.ABSAMOUNT_COL] = self._df_all.amount.apply(abs)
self._df_all[config.LABEL] = self._df_all[config.PAYMENT_REASON_COL] + '<br>' + self._df_all[config.RECIPIENT_COL]
self._df_in = self._df_all[self._df_all.main_category == config.INCOME_CATEGORY]
self._df_out = self._df_all[self._df_all.main_category != config.INCOME_CATEGORY]
def get_all_transactions(self):
if self._df_all is None:
self._rebuild_dataframes()
return self._df_all
def get_in_transactions(self):
if self._df_in is None:
self._rebuild_dataframes()
return self._df_in
def get_out_transactions(self):
if self._df_out is None:
self._rebuild_dataframes()
return self._df_out
def get_full_date_range(self, aggregation_period='MS'):
"""
Builds a DataFrame containing the full date range of the specified period
:param aggregation_period: {'MS' for month start, 'YS' for year start, ... }, default 'MS'
see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
:return:
"""
period = DataProvider.as_period(aggregation_period)
df_all = self.get_all_transactions()
df_all_dates = pd.date_range(df_all.index.min().to_period(period).to_timestamp(), # sets to first day of period to include it in range
df_all.index.max(),
freq=aggregation_period,
normalize=True).to_period().to_frame(name=config.DATE_COL)
return df_all_dates
def aggregate_by_category_as_tuple(self, df, aggregation_period, category_column, category_root=None):
df_agg = self.aggregate_by_category(df, aggregation_period, category_column, category_root)
return self.expand_by_categories(df_agg, category_column)
def aggregate_by_category(self, df, aggregation_period, category_column, category_root=None) -> pd.DataFrame:
df_all_dates = self.get_full_date_range(aggregation_period)
categories = self._get_categories_for_level(category_root) # df[category_column].unique()
df_prod = pd.DataFrame(list(product(df_all_dates[config.DATE_COL].unique(), categories)),
columns=[config.DATE_COL, category_column])
period = DataProvider.as_period(aggregation_period)
df_agg = df.groupby([df.index.to_period(period), category_column])[config.ABSAMOUNT_COL].sum().reset_index()
df_agg_full_range = df_prod.merge(df_agg, how='left').fillna(0)
return df_agg_full_range.set_index(config.DATE_COL)
def expand_by_categories(self, df, category_column):
x_axis = list(df.index.unique().to_timestamp())
values = dict()
categories = df[category_column].unique()
for category_name in categories:
values[category_name] = df.loc[df[category_column] == category_name, config.ABSAMOUNT_COL].values
return x_axis, values
def _get_categories_for_level(self, root):
categories = None
if root is None:
categories = list(config.categories.keys())
elif root in config.categories.keys():
sub_categories = config.categories[root]
if type(sub_categories) is dict:
categories = list(sub_categories.keys())
return categories or [config.MISC_CATEGORY]
@staticmethod
def build_hierarchical_dataframe(df, root_label, levels, value_column, color_map):
"""
Build a hierarchy of levels for Sunburst or Treemap charts.
Levels are given starting from the bottom to the top of the hierarchy,
ie the last level corresponds to the root.
"""
columns = ['id', 'parent', 'value', 'color']
df_all_trees = pd.DataFrame(columns=columns)
for i, level in enumerate(levels):
df_tree = | pd.DataFrame(columns=columns) | pandas.DataFrame |
import datetime
import numpy as np
import pandas as pd
from poor_trader import chart
from poor_trader import utils
TRADE_DAYS_PER_YEAR = 244
def SQN(df_trades):
"""
System Quality Number = (Expectancy / Standard Deviation R) * sqrt(Number of Trades)
:param df_trades:
:return:
"""
try:
sqn = (df_trades.LastRMultiple.mean() / df_trades.LastRMultiple.std()) * np.sqrt(len(df_trades.index.values))
return np.round(sqn, 2)
except:
return 0
def drawdown(equities):
return -np.round(equities.max() - equities[-1], 4)
def drawdown_pct(equities):
dd = equities[-1] - equities.max()
dd_pct = 100 * dd / equities.max()
return np.round(dd_pct, 2)
def exposure(open_trades, portfolio_equity):
return open_trades['LastValue'].apply(lambda last_value: 100 * last_value / portfolio_equity)
def exposure_pct(df_trades, df_backtest, starting_capital):
df = pd.DataFrame()
def calc(row):
date = row.name
cur_trades = backtest.update_open_trades_last_value(df_trades[df_trades['StartDate'] <= date], date=date)
portfolio_equity = starting_capital + cur_trades['LastPnL'].sum()
open_trades = cur_trades[pd.isnull(cur_trades['EndDate'])]
return open_trades['LastPnL'].sum() / portfolio_equity
df['Exposure'] = df_backtest.apply(calc, axis=1)
return df['Exposure']
def avg_expectancy(df_trades):
return df_trades['LastPnL'].mean()
def avg_expectancy_pct(df_trades):
expectancy_pct = 100 * df_trades['LastPnL'] / df_trades['BuyValue']
return expectancy_pct.mean()
def avg_bars_held(df_backtest, df_trades):
bars_held = df_trades.apply(lambda trade: len(df_backtest.loc[pd.to_datetime(trade['StartDate']):pd.to_datetime(trade['LastRecordDate'])].index.values), axis=1)
bars_held = bars_held.dropna()
if bars_held.empty:
return 0
return np.round(bars_held.mean(), 2)
def max_drawdown(df_backtest):
return df_backtest['Equity'].expanding().apply(drawdown).min()
def max_pct_drawdown(df_backtest):
return df_backtest['Equity'].expanding().apply(drawdown_pct).min()
def ulcer_index(df_backtest):
df_dd = df_backtest['Equity'].expanding().apply(drawdown_pct)
squared_dd = df_dd * df_dd
return np.sqrt(squared_dd.sum()) / squared_dd.count()
def performance_data(starting_capital, df_backtest, df_trades, index='Performance'):
df = pd.DataFrame()
equities = df_backtest['Equity'].values
years = len(equities) / TRADE_DAYS_PER_YEAR
ending_capital = df_backtest['Equity'].values[-1]
net_profit = ending_capital - starting_capital
net_profit_pct = 100 * net_profit / starting_capital
annualized_gain = ((ending_capital/starting_capital)**(1/years) - 1)
max_system_dd = max_drawdown(df_backtest)
max_system_pct_dd = max_pct_drawdown(df_backtest)
max_peak = df_backtest.Equity.max()
df_winning_trades = df_trades[df_trades['LastPnL'] > 0]
df_losing_trades = df_trades[df_trades['LastPnL'] <= 0]
ui = ulcer_index(df_backtest)
avg_bars_held_value = avg_bars_held(df_backtest, df_trades)
avg_expectancy_pct_value = avg_expectancy_pct(df_trades)
risk_free_rate = 0.01
df.loc[index, 'Number of Trading Days'] = df_backtest.Equity.count()
df.loc[index, 'Starting Capital'] = starting_capital
df.loc[index, 'Ending Capital'] = ending_capital
df.loc[index, 'Net Profit'] = net_profit
df.loc[index, 'Net Profit %'] = net_profit_pct
df.loc[index, 'SQN'] = SQN(df_trades)
df.loc[index, 'Annualized Gain'] = annualized_gain
df.loc[index, 'Max Profit'] = df_trades.LastPnL.max()
df.loc[index, 'Max Loss'] = df_trades.LastPnL.min()
df.loc[index, 'Number of Trades'] = len(df_trades.index.values)
df.loc[index, 'Winning Trades'] = len(df_winning_trades.index.values)
df.loc[index, 'Losing Trades'] = len(df_losing_trades.index.values)
try:
df.loc[index, 'Winning Trades %'] = np.round(100 * (len(df_winning_trades.index.values) / len(df_trades.index.values)), 2)
except:
df.loc[index, 'Winning Trades %'] = 0
df.loc[index, 'Avg Profit/Loss'] = avg_expectancy(df_trades)
df.loc[index, 'Avg Profit'] = avg_expectancy(df_winning_trades)
df.loc[index, 'Avg Loss'] = avg_expectancy(df_losing_trades)
df.loc[index, 'Avg Profit/Loss %'] = avg_expectancy_pct_value
df.loc[index, 'Avg Profit %'] = avg_expectancy_pct(df_winning_trades)
df.loc[index, 'Avg Loss %'] = avg_expectancy_pct(df_losing_trades)
df.loc[index, 'Avg Bars Held'] = avg_bars_held_value
df.loc[index, 'Avg Winning Bars Held'] = avg_bars_held(df_backtest, df_winning_trades)
df.loc[index, 'Avg Losing Bars Held'] = avg_bars_held(df_backtest, df_losing_trades)
df.loc[index, 'Max System Drawdown'] = max_system_dd
df.loc[index, 'Max System % Drawdown'] = max_system_pct_dd
df.loc[index, 'Max Peak'] = max_peak
df.loc[index, 'Recovery Factor'] = net_profit / abs(max_system_pct_dd)
try:
df.loc[index, 'Profit Factor'] = df_winning_trades['LastPnL'].sum() / abs(df_losing_trades['LastPnL'].sum())
except:
df.loc[index, 'Profit Factor'] = 0.0
df.loc[index, 'Payoff Ratio'] = df_winning_trades['LastPnL'].mean() / abs(df_losing_trades['LastPnL'].mean())
return utils.round_df(df, places=2)
def generate_equity_curve(df_trades, starting_balance, historical_data, selling_fees_method=None, start_date=None, end_date=None):
df_trades['StartDate'] = pd.to_datetime(df_trades['StartDate'])
df_trades['EndDate'] = pd.to_datetime(df_trades['EndDate'])
df_trades['LastRecordDate'] = pd.to_datetime(df_trades['LastRecordDate'])
if start_date is None:
start_date = df_trades.StartDate.min()
if end_date is None:
end_date = df_trades.LastRecordDate.max()
start_date = pd.to_datetime(start_date)
df_quotes = historical_data.copy()
if start_date:
df_quotes = df_quotes.loc[start_date:]
if end_date:
df_quotes = df_quotes.loc[:end_date]
df = | pd.DataFrame() | pandas.DataFrame |
from __future__ import division
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import locale
import os
import re
from shutil import rmtree
import string
import subprocess
import sys
import tempfile
import traceback
import warnings
import numpy as np
from numpy.random import rand, randn
from pandas._libs import testing as _testing
import pandas.compat as compat
from pandas.compat import (
PY2, PY3, Counter, StringIO, callable, filter, httplib, lmap, lrange, lzip,
map, raise_with_traceback, range, string_types, u, unichr, zip)
from pandas.core.dtypes.common import (
is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_datetimelike_v_numeric, is_datetimelike_v_object,
is_extension_array_dtype, is_interval_dtype, is_list_like, is_number,
is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index,
IntervalIndex, MultiIndex, Panel, PeriodIndex, RangeIndex, Series,
bdate_range)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArrayMixin as DatetimeArray, ExtensionArray, IntervalArray,
PeriodArray, TimedeltaArrayMixin as TimedeltaArray, period_array)
import pandas.core.common as com
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
"""
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.open(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError('ZIP file {} error. Only one file per ZIP.'
.format(path))
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def assert_almost_equal(left, right, check_dtype="equiv",
check_less_precise=False, **kwargs):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".format(exception=e))
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.format(prefix=prefix))
found = pattern.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if com._all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError,
locale.Error): # horrible name for a Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
@contextmanager
def set_defaultencoding(encoding):
"""
Set default encoding (as given by sys.getdefaultencoding()) to the given
encoding; restore on exit.
Parameters
----------
encoding : str
"""
if not PY2:
raise ValueError("set_defaultencoding context is only available "
"in Python 2.")
orig = sys.getdefaultencoding()
reload(sys) # noqa:F821
sys.setdefaultencoding(encoding)
try:
yield
finally:
sys.setdefaultencoding(orig)
def capture_stdout(f):
r"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
r"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except Exception:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception:
print("Couldn't close file descriptor: {fdesc} (file: {fname})"
.format(fdesc=fd, fname=filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: {error}".format(error=e))
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agrees to remove on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix='')
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except Exception:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.codes[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = '{obj} levels are different'.format(obj=obj)
msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)
msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = '{obj} length are different'.format(obj=obj)
msg2 = '{length}, {left}'.format(length=len(left), left=left)
msg3 = '{length}, {right}'.format(length=len(right), right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{level}]'.format(level=level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {'Int64Index', 'RangeIndex'}):
msg = '{obj} classes are not equivalent'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{obj} classes are different'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = 'Attribute "{attr}" are different'.format(attr=attr)
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ("one of 'objs' is not a matplotlib Axes instance, type "
"encountered {name!r}").format(name=el.__class__.__name__)
assert isinstance(el, (plt.Axes, dict)), msg
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), (
'objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {name!r}'
.format(name=objs.__class__.__name__))
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
check_category_order=True, obj='Categorical'):
"""Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{obj}.categories'.format(obj=obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{obj}.codes'.format(obj=obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{obj}.categories'.format(obj=obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('ordered', left, right, obj=obj)
def assert_interval_array_equal(left, right, exact='equiv',
obj='IntervalArray'):
"""Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact,
obj='{obj}.left'.format(obj=obj))
assert_index_equal(left.right, right.right, exact=exact,
obj='{obj}.left'.format(obj=obj))
assert_attr_equal('closed', left, right, obj=obj)
def assert_period_array_equal(left, right, obj='PeriodArray'):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj='DatetimeArray'):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
assert_attr_equal('tz', left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj='TimedeltaArray'):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if PY2 and isinstance(left, string_types):
# left needs to be printable in native text type in python2
left = left.encode('utf-8')
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
if PY2 and isinstance(right, string_types):
# right needs to be printable in native text type in python2
right = right.encode('utf-8')
msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(obj=obj, message=message, left=left, right=right)
if diff is not None:
msg += "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
check_same=None, obj='numpy array'):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype: bool, default True
check dtype if both a and b are np.ndarray
err_msg : str, default None
If provided, used as assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "{left!r} is not {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "{left!r} is {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{obj} shapes are different'
.format(obj=obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
def assert_extension_array_equal(left, right, check_dtype=True,
check_less_precise=False,
check_exact=False):
"""Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default False
Whether to compare number exactly.
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray), 'left is not an ExtensionArray'
assert isinstance(right, ExtensionArray), 'right is not an ExtensionArray'
if check_dtype:
assert_attr_equal('dtype', left, right, obj='ExtensionArray')
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(left_na, right_na, obj='ExtensionArray NA mask')
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(left_valid, right_valid, obj='ExtensionArray')
else:
_testing.assert_almost_equal(left_valid, right_valid,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj='ExtensionArray')
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = '{len}, {left}'.format(len=len(left), left=left.index)
msg2 = '{len}, {right}'.format(len=len(right), right=right.index)
raise_assert_detail(obj, 'Series length are different', msg1, msg2)
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (is_categorical_dtype(left) and is_categorical_dtype(right) and
not check_categorical):
pass
else:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj),)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if (is_datetimelike_v_numeric(left, right) or
is_datetimelike_v_object(left, right) or
needs_i8_conversion(left) or
needs_i8_conversion(right)):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals( | Index(right.values) | pandas.Index |
import logging
import math
import pandas
import numpy
from statsmodels.formula.api import OLS
from statsmodels.tools import add_constant
from fls import FlexibleLeastSquare
_LOGGER = logging.getLogger('regression')
class RegressionModelFLS(object):
def __init__(self, securities, delta, with_constant_term=True):
self.with_constant_term = with_constant_term
size = len(securities) - 1
if self.with_constant_term:
size += 1
initial_state_mean = numpy.zeros(size)
initial_state_covariance = numpy.ones((size, size))
observation_covariance = 5E-5
trans_cov = delta / (1. - delta) * numpy.eye(size)
self.result = None
self.fls = FlexibleLeastSquare(initial_state_mean, initial_state_covariance, observation_covariance, trans_cov)
def compute_regression(self, y_value, x_values):
independent_values = x_values
if self.with_constant_term:
independent_values += [1.]
self.result = self.fls.estimate(y_value, independent_values)
def get_residual_error(self):
return math.sqrt(self.result.var_output_error)
def get_factors(self):
return self.result.beta
def get_estimate(self):
return self.result.estimated_output
def get_weights(self):
weights = self.get_factors()
if self.with_constant_term:
weights = weights[:-1]
return numpy.array([-1.] + weights)
def get_residual(self):
return self.result.error
class RegressionModelOLS(object):
def __init__(self, securities, with_constant_term=True, lookback_period=200):
self._lookback = lookback_period
self.current_x = None
self.current_y = None
self._with_constant_term= with_constant_term
self._counter = 0
self._y_values = list()
self._x_values = [list() for item in securities[1:]]
self.securities = securities
self.result = None
def add_samples(self, y_value, x_values):
self._counter += 1
self._y_values.append(y_value)
if self._counter > self._lookback:
self._y_values.pop(0)
for target_lists in self._x_values:
target_lists.pop(0)
for target_list, new_item in zip(self._x_values, x_values):
target_list.append(new_item)
independent_values = x_values
if self._with_constant_term:
independent_values += [1.]
self.current_x = independent_values
self.current_y = y_value
def compute_regression(self):
if len(self._y_values) < len(self.securities) - 1:
# not enough values for regression
_LOGGER.error('not enough values for regression')
dependent = | pandas.DataFrame({self.securities[0]: self._y_values}) | pandas.DataFrame |
import operator
from enum import Enum
from typing import Union, Any, Optional, Hashable
import numpy as np
import pandas as pd
import pandas_flavor as pf
from pandas.core.construction import extract_array
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_numeric_dtype,
is_string_dtype,
)
from pandas.core.reshape.merge import _MergeOperation
from janitor.utils import check, check_column
@pf.register_dataframe_method
def conditional_join(
df: pd.DataFrame,
right: Union[pd.DataFrame, pd.Series],
*conditions,
how: str = "inner",
sort_by_appearance: bool = False,
df_columns: Optional[Any] = None,
right_columns: Optional[Any] = None,
) -> pd.DataFrame:
"""
This is a convenience function that operates similarly to `pd.merge`,
but allows joins on inequality operators,
or a combination of equi and non-equi joins.
Join solely on equality are not supported.
If the join is solely on equality, `pd.merge` function
covers that; if you are interested in nearest joins, or rolling joins,
or the first match (lowest or highest) - `pd.merge_asof` covers that.
There is also the IntervalIndex, which is usually more efficient
for range joins, especially if the intervals do not overlap.
Column selection in `df_columns` and `right_columns` is possible using the
[`select_columns`][janitor.functions.select_columns.select_columns] syntax.
This function returns rows, if any, where values from `df` meet the
condition(s) for values from `right`. The conditions are passed in
as a variable argument of tuples, where the tuple is of
the form `(left_on, right_on, op)`; `left_on` is the column
label from `df`, `right_on` is the column label from `right`,
while `op` is the operator. For multiple conditions, the and(`&`)
operator is used to combine the results of the individual conditions.
The operator can be any of `==`, `!=`, `<=`, `<`, `>=`, `>`.
A binary search is used to get the relevant rows for non-equi joins;
this avoids a cartesian join, and makes the process less memory intensive.
For equi-joins, Pandas internal merge function is used.
The join is done only on the columns.
MultiIndex columns are not supported.
For non-equi joins, only numeric and date columns are supported.
Only `inner`, `left`, and `right` joins are supported.
If the columns from `df` and `right` have nothing in common,
a single index column is returned; else, a MultiIndex column
is returned.
Example:
>>> import pandas as pd
>>> import janitor
>>> df1 = pd.DataFrame({"value_1": [2, 5, 7, 1, 3, 4]})
>>> df2 = pd.DataFrame({"value_2A": [0, 3, 7, 12, 0, 2, 3, 1],
... "value_2B": [1, 5, 9, 15, 1, 4, 6, 3],
... })
>>> df1
value_1
0 2
1 5
2 7
3 1
4 3
5 4
>>> df2
value_2A value_2B
0 0 1
1 3 5
2 7 9
3 12 15
4 0 1
5 2 4
6 3 6
7 1 3
>>> df1.conditional_join(
... df2,
... ("value_1", "value_2A", ">="),
... ("value_1", "value_2B", "<=")
... )
value_1 value_2A value_2B
0 2 1 3
1 2 2 4
2 5 3 5
3 5 3 6
4 7 7 9
5 1 0 1
6 1 0 1
7 1 1 3
8 3 1 3
9 3 2 4
10 3 3 5
11 3 3 6
12 4 2 4
13 4 3 5
14 4 3 6
:param df: A pandas DataFrame.
:param right: Named Series or DataFrame to join to.
:param conditions: Variable argument of tuple(s) of the form
`(left_on, right_on, op)`, where `left_on` is the column
label from `df`, `right_on` is the column label from `right`,
while `op` is the operator. The operator can be any of
`==`, `!=`, `<=`, `<`, `>=`, `>`. For multiple conditions,
the and(`&`) operator is used to combine the results
of the individual conditions.
:param how: Indicates the type of join to be performed.
It can be one of `inner`, `left`, `right`.
Full join is not supported. Defaults to `inner`.
:param sort_by_appearance: Default is `False`.
This is useful for strictly non-equi joins,
where the user wants the original order maintained.
If True, values from `df` and `right`
that meet the join condition will be returned
in the final dataframe in the same order
that they were before the join.
:param df_columns: Columns to select from `df`.
It can be a single column or a list of columns.
It is also possible to rename the output columns via a dictionary.
:param right_columns: Columns to select from `right`.
It can be a single column or a list of columns.
It is also possible to rename the output columns via a dictionary.
:returns: A pandas DataFrame of the two merged Pandas objects.
"""
return _conditional_join_compute(
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
)
class _JoinOperator(Enum):
"""
List of operators used in conditional_join.
"""
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUAL = ">="
LESS_THAN_OR_EQUAL = "<="
STRICTLY_EQUAL = "=="
NOT_EQUAL = "!="
class _JoinTypes(Enum):
"""
List of join types for conditional_join.
"""
INNER = "inner"
LEFT = "left"
RIGHT = "right"
operator_map = {
_JoinOperator.STRICTLY_EQUAL.value: operator.eq,
_JoinOperator.LESS_THAN.value: operator.lt,
_JoinOperator.LESS_THAN_OR_EQUAL.value: operator.le,
_JoinOperator.GREATER_THAN.value: operator.gt,
_JoinOperator.GREATER_THAN_OR_EQUAL.value: operator.ge,
_JoinOperator.NOT_EQUAL.value: operator.ne,
}
less_than_join_types = {
_JoinOperator.LESS_THAN.value,
_JoinOperator.LESS_THAN_OR_EQUAL.value,
}
greater_than_join_types = {
_JoinOperator.GREATER_THAN.value,
_JoinOperator.GREATER_THAN_OR_EQUAL.value,
}
def _check_operator(op: str):
"""
Check that operator is one of
`>`, `>=`, `==`, `!=`, `<`, `<=`.
Used in `conditional_join`.
"""
sequence_of_operators = {op.value for op in _JoinOperator}
if op not in sequence_of_operators:
raise ValueError(
"The conditional join operator "
f"should be one of {sequence_of_operators}"
)
def _conditional_join_preliminary_checks(
df: pd.DataFrame,
right: Union[pd.DataFrame, pd.Series],
conditions: tuple,
how: str,
sort_by_appearance: bool,
df_columns: Any,
right_columns: Any,
) -> tuple:
"""
Preliminary checks for conditional_join are conducted here.
Checks include differences in number of column levels,
length of conditions, existence of columns in dataframe, etc.
"""
check("right", right, [pd.DataFrame, pd.Series])
df = df.copy()
right = right.copy()
if isinstance(right, pd.Series):
if not right.name:
raise ValueError(
"Unnamed Series are not supported for conditional_join."
)
right = right.to_frame()
if df.columns.nlevels != right.columns.nlevels:
raise ValueError(
"The number of column levels "
"from the left and right frames must match. "
"The number of column levels from the left dataframe "
f"is {df.columns.nlevels}, while the number of column levels "
f"from the right dataframe is {right.columns.nlevels}."
)
if not conditions:
raise ValueError("Kindly provide at least one join condition.")
for condition in conditions:
check("condition", condition, [tuple])
len_condition = len(condition)
if len_condition != 3:
raise ValueError(
"condition should have only three elements; "
f"{condition} however is of length {len_condition}."
)
for left_on, right_on, op in conditions:
check("left_on", left_on, [Hashable])
check("right_on", right_on, [Hashable])
check("operator", op, [str])
check_column(df, [left_on])
check_column(right, [right_on])
_check_operator(op)
if all(
(op == _JoinOperator.STRICTLY_EQUAL.value for *_, op in conditions)
):
raise ValueError("Equality only joins are not supported.")
check("how", how, [str])
checker = {jointype.value for jointype in _JoinTypes}
if how not in checker:
raise ValueError(f"'how' should be one of {checker}.")
check("sort_by_appearance", sort_by_appearance, [bool])
if (df.columns.nlevels > 1) and (
isinstance(df_columns, dict) or isinstance(right_columns, dict)
):
raise ValueError(
"Column renaming with a dictionary is not supported "
"for MultiIndex columns."
)
return (
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
)
def _conditional_join_type_check(
left_column: pd.Series, right_column: pd.Series, op: str
) -> None:
"""
Raise error if column type is not any of numeric or datetime or string.
"""
permitted_types = {
is_datetime64_dtype,
is_numeric_dtype,
is_string_dtype,
is_categorical_dtype,
}
for func in permitted_types:
if func(left_column):
break
else:
raise ValueError(
"conditional_join only supports "
"string, category, numeric, or date dtypes (without timezone) - "
f"'{left_column.name} is of type {left_column.dtype}."
)
lk_is_cat = is_categorical_dtype(left_column)
rk_is_cat = is_categorical_dtype(right_column)
if lk_is_cat & rk_is_cat:
if not left_column.array._categories_match_up_to_permutation(
right_column.array
):
raise ValueError(
f"'{left_column.name}' and '{right_column.name}' "
"should have the same categories, and the same order."
)
elif not is_dtype_equal(left_column, right_column):
raise ValueError(
f"Both columns should have the same type - "
f"'{left_column.name}' has {left_column.dtype} type;"
f"'{right_column.name}' has {right_column.dtype} type."
)
if (op in less_than_join_types.union(greater_than_join_types)) & (
(is_string_dtype(left_column) | is_categorical_dtype(left_column))
):
raise ValueError(
"non-equi joins are supported "
"only for datetime and numeric dtypes. "
f"{left_column.name} in condition "
f"({left_column.name}, {right_column.name}, {op}) "
f"has a dtype {left_column.dtype}."
)
return None
def _conditional_join_compute(
df: pd.DataFrame,
right: pd.DataFrame,
conditions: list,
how: str,
sort_by_appearance: bool,
df_columns: Any,
right_columns: Any,
) -> pd.DataFrame:
"""
This is where the actual computation
for the conditional join takes place.
A pandas DataFrame is returned.
"""
(
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
) = _conditional_join_preliminary_checks(
df,
right,
conditions,
how,
sort_by_appearance,
df_columns,
right_columns,
)
eq_check = False
le_lt_check = False
for condition in conditions:
left_on, right_on, op = condition
_conditional_join_type_check(df[left_on], right[right_on], op)
if op == _JoinOperator.STRICTLY_EQUAL.value:
eq_check = True
elif op in less_than_join_types.union(greater_than_join_types):
le_lt_check = True
df.index = range(len(df))
right.index = range(len(right))
multiple_conditions = len(conditions) > 1
if not multiple_conditions:
left_on, right_on, op = conditions[0]
result = _generic_func_cond_join(
df[left_on], right[right_on], op, multiple_conditions
)
if result is None:
return _create_conditional_join_empty_frame(
df, right, how, df_columns, right_columns
)
return _create_conditional_join_frame(
df,
right,
*result,
how,
sort_by_appearance,
df_columns,
right_columns,
)
if eq_check:
result = _multiple_conditional_join_eq(df, right, conditions)
elif le_lt_check:
result = _multiple_conditional_join_le_lt(df, right, conditions)
else:
result = _multiple_conditional_join_ne(df, right, conditions)
if result is None:
return _create_conditional_join_empty_frame(
df, right, how, df_columns, right_columns
)
return _create_conditional_join_frame(
df, right, *result, how, sort_by_appearance, df_columns, right_columns
)
def _less_than_indices(
left_c: pd.Series,
right_c: pd.Series,
strict: bool,
) -> tuple:
"""
Use binary search to get indices where left_c
is less than or equal to right_c.
If strict is True, then only indices
where `left_c` is less than
(but not equal to) `right_c` are returned.
A tuple of integer indexes
for left_c and right_c is returned.
"""
# no point going through all the hassle
if left_c.min() > right_c.max():
return None
any_nulls = pd.isna(right_c)
if any_nulls.any():
right_c = right_c[~any_nulls]
if right_c.empty:
return None
any_nulls = pd.isna(left_c)
if any_nulls.any():
left_c = left_c[~any_nulls]
if left_c.empty:
return None
any_nulls = None
if not right_c.is_monotonic_increasing:
right_c = right_c.sort_values(kind="stable")
left_index = left_c.index.to_numpy(dtype=int, copy=False)
left_c = extract_array(left_c, extract_numpy=True)
right_index = right_c.index.to_numpy(dtype=int, copy=False)
right_c = extract_array(right_c, extract_numpy=True)
search_indices = right_c.searchsorted(left_c, side="left")
# if any of the positions in `search_indices`
# is equal to the length of `right_keys`
# that means the respective position in `left_c`
# has no values from `right_c` that are less than
# or equal, and should therefore be discarded
len_right = right_c.size
rows_equal = search_indices == len_right
if rows_equal.any():
left_c = left_c[~rows_equal]
left_index = left_index[~rows_equal]
search_indices = search_indices[~rows_equal]
# the idea here is that if there are any equal values
# shift to the right to the immediate next position
# that is not equal
if strict:
rows_equal = right_c[search_indices]
rows_equal = left_c == rows_equal
# replace positions where rows are equal
# with positions from searchsorted('right')
# positions from searchsorted('right') will never
# be equal and will be the furthermost in terms of position
# example : right_c -> [2, 2, 2, 3], and we need
# positions where values are not equal for 2;
# the furthermost will be 3, and searchsorted('right')
# will return position 3.
if rows_equal.any():
replacements = right_c.searchsorted(left_c, side="right")
# now we can safely replace values
# with strictly less than positions
search_indices = np.where(rows_equal, replacements, search_indices)
# check again if any of the values
# have become equal to length of right_c
# and get rid of them
rows_equal = search_indices == len_right
if rows_equal.any():
left_c = left_c[~rows_equal]
left_index = left_index[~rows_equal]
search_indices = search_indices[~rows_equal]
if not search_indices.size:
return None
right_c = [right_index[ind:len_right] for ind in search_indices]
right_c = np.concatenate(right_c)
left_c = np.repeat(left_index, len_right - search_indices)
return left_c, right_c
def _greater_than_indices(
left_c: pd.Series,
right_c: pd.Series,
strict: bool,
multiple_conditions: bool,
) -> tuple:
"""
Use binary search to get indices where left_c
is greater than or equal to right_c.
If strict is True, then only indices
where `left_c` is greater than
(but not equal to) `right_c` are returned.
if multiple_conditions is False, a tuple of integer indexes
for left_c and right_c is returned;
else a tuple of the index for left_c, right_c, as well
as the positions of left_c in right_c is returned.
"""
# quick break, avoiding the hassle
if left_c.max() < right_c.min():
return None
any_nulls = pd.isna(right_c)
if any_nulls.any():
right_c = right_c[~any_nulls]
if right_c.empty:
return None
any_nulls = pd.isna(left_c)
if any_nulls.any():
left_c = left_c[~any_nulls]
if left_c.empty:
return None
any_nulls = None
if not right_c.is_monotonic_increasing:
right_c = right_c.sort_values(kind="stable")
left_index = left_c.index.to_numpy(dtype=int, copy=False)
left_c = extract_array(left_c, extract_numpy=True)
right_index = right_c.index.to_numpy(dtype=int, copy=False)
right_c = extract_array(right_c, extract_numpy=True)
search_indices = right_c.searchsorted(left_c, side="right")
# if any of the positions in `search_indices`
# is equal to 0 (less than 1), it implies that
# left_c[position] is not greater than any value
# in right_c
rows_equal = search_indices < 1
if rows_equal.any():
left_c = left_c[~rows_equal]
left_index = left_index[~rows_equal]
search_indices = search_indices[~rows_equal]
# the idea here is that if there are any equal values
# shift downwards to the immediate next position
# that is not equal
if strict:
rows_equal = right_c[search_indices - 1]
rows_equal = left_c == rows_equal
# replace positions where rows are equal with
# searchsorted('left');
# however there can be scenarios where positions
# from searchsorted('left') would still be equal;
# in that case, we shift down by 1
if rows_equal.any():
replacements = right_c.searchsorted(left_c, side="left")
# return replacements
# `left` might result in values equal to len right_c
replacements = np.where(
replacements == right_c.size, replacements - 1, replacements
)
# now we can safely replace values
# with strictly greater than positions
search_indices = np.where(rows_equal, replacements, search_indices)
# any value less than 1 should be discarded
# since the lowest value for binary search
# with side='right' should be 1
rows_equal = search_indices < 1
if rows_equal.any():
left_c = left_c[~rows_equal]
left_index = left_index[~rows_equal]
search_indices = search_indices[~rows_equal]
if not search_indices.size:
return None
if multiple_conditions:
return left_index, right_index, search_indices
right_c = [right_index[:ind] for ind in search_indices]
right_c = np.concatenate(right_c)
left_c = np.repeat(left_index, search_indices)
return left_c, right_c
def _not_equal_indices(left_c: pd.Series, right_c: pd.Series) -> tuple:
"""
Use binary search to get indices where
`left_c` is exactly not equal to `right_c`.
It is a combination of strictly less than
and strictly greater than indices.
A tuple of integer indexes for left_c and right_c
is returned.
"""
dummy = np.array([], dtype=int)
# deal with nulls
l1_nulls = dummy
r1_nulls = dummy
l2_nulls = dummy
r2_nulls = dummy
any_left_nulls = left_c.isna()
any_right_nulls = right_c.isna()
if any_left_nulls.any():
l1_nulls = left_c.index[any_left_nulls.array]
l1_nulls = l1_nulls.to_numpy(copy=False)
r1_nulls = right_c.index
# avoid NAN duplicates
if any_right_nulls.any():
r1_nulls = r1_nulls[~any_right_nulls.array]
r1_nulls = r1_nulls.to_numpy(copy=False)
nulls_count = l1_nulls.size
# blow up nulls to match length of right
l1_nulls = np.tile(l1_nulls, r1_nulls.size)
# ensure length of right matches left
if nulls_count > 1:
r1_nulls = np.repeat(r1_nulls, nulls_count)
if any_right_nulls.any():
r2_nulls = right_c.index[any_right_nulls.array]
r2_nulls = r2_nulls.to_numpy(copy=False)
l2_nulls = left_c.index
nulls_count = r2_nulls.size
# blow up nulls to match length of left
r2_nulls = np.tile(r2_nulls, l2_nulls.size)
# ensure length of left matches right
if nulls_count > 1:
l2_nulls = np.repeat(l2_nulls, nulls_count)
l1_nulls = np.concatenate([l1_nulls, l2_nulls])
r1_nulls = np.concatenate([r1_nulls, r2_nulls])
outcome = _less_than_indices(left_c, right_c, strict=True)
if outcome is None:
lt_left = dummy
lt_right = dummy
else:
lt_left, lt_right = outcome
outcome = _greater_than_indices(
left_c, right_c, strict=True, multiple_conditions=False
)
if outcome is None:
gt_left = dummy
gt_right = dummy
else:
gt_left, gt_right = outcome
left_c = np.concatenate([lt_left, gt_left, l1_nulls])
right_c = np.concatenate([lt_right, gt_right, r1_nulls])
if (not left_c.size) & (not right_c.size):
return None
return left_c, right_c
def _eq_indices(
left_c: pd.Series,
right_c: pd.Series,
) -> tuple:
"""
Use binary search to get indices where left_c
is equal to right_c.
Returns a tuple of the left_index, right_index,
lower_boundary and upper_boundary.
"""
# no point going through all the hassle
if left_c.min() > right_c.max():
return None
if left_c.max() < right_c.min():
return None
any_nulls = pd.isna(right_c)
if any_nulls.any():
right_c = right_c[~any_nulls]
if right_c.empty:
return None
any_nulls = pd.isna(left_c)
if any_nulls.any():
left_c = left_c[~any_nulls]
if left_c.empty:
return None
any_nulls = None
if not right_c.is_monotonic_increasing:
right_c = right_c.sort_values(kind="stable")
left_index = left_c.index.to_numpy(dtype=int, copy=False)
left_c = extract_array(left_c, extract_numpy=True)
right_index = right_c.index.to_numpy(dtype=int, copy=False)
right_c = extract_array(right_c, extract_numpy=True)
lower_boundary = right_c.searchsorted(left_c, side="left")
upper_boundary = right_c.searchsorted(left_c, side="right")
keep_rows = lower_boundary < upper_boundary
if not keep_rows.any():
return None
if not keep_rows.all():
left_index = left_index[keep_rows]
lower_boundary = lower_boundary[keep_rows]
upper_boundary = upper_boundary[keep_rows]
return left_index, right_index, lower_boundary, upper_boundary
def _generic_func_cond_join(
left_c: pd.Series,
right_c: pd.Series,
op: str,
multiple_conditions: bool,
) -> tuple:
"""
Generic function to call any of the individual functions
(_less_than_indices, _greater_than_indices,
or _not_equal_indices).
"""
strict = False
if op in {
_JoinOperator.GREATER_THAN.value,
_JoinOperator.LESS_THAN.value,
_JoinOperator.NOT_EQUAL.value,
}:
strict = True
if op in less_than_join_types:
return _less_than_indices(left_c, right_c, strict)
elif op in greater_than_join_types:
return _greater_than_indices(
left_c, right_c, strict, multiple_conditions
)
elif op == _JoinOperator.NOT_EQUAL.value:
return _not_equal_indices(left_c, right_c)
def _generate_indices(
left_index: np.ndarray, right_index: np.ndarray, conditions: list
) -> tuple:
"""
Run a for loop to get the final indices.
This iteratively goes through each condition,
builds a boolean array,
and gets indices for rows that meet the condition requirements.
`conditions` is a list of tuples, where a tuple is of the form:
`(Series from df, Series from right, operator)`.
"""
for condition in conditions:
left_c, right_c, op = condition
left_c = | extract_array(left_c, extract_numpy=True) | pandas.core.construction.extract_array |
import os
import shutil
from deepsense import neptune
import pandas as pd
import math
from .pipeline_config import DESIRED_CLASS_SUBSET, ID_COLUMN, SEED, SOLUTION_CONFIG
from .pipelines import PIPELINES
from .utils import competition_metric_evaluation, generate_list_chunks, get_img_ids_from_folder, \
init_logger, reduce_number_of_classes, set_seed, submission_formatting, add_missing_image_ids, read_params
LOGGER = init_logger()
CTX = neptune.Context()
PARAMS = read_params(CTX)
set_seed(SEED)
class PipelineManager:
def train(self, pipeline_name, dev_mode):
train(pipeline_name, dev_mode)
def evaluate(self, pipeline_name, dev_mode, chunk_size):
evaluate(pipeline_name, dev_mode, chunk_size)
def predict(self, pipeline_name, dev_mode, submit_predictions, chunk_size):
predict(pipeline_name, dev_mode, submit_predictions, chunk_size)
def make_submission(self, submission_filepath):
make_submission(submission_filepath)
def train(pipeline_name, dev_mode):
LOGGER.info('training')
if PARAMS.clone_experiment_dir_from != '':
if os.path.exists(PARAMS.experiment_dir):
shutil.rmtree(PARAMS.experiment_dir)
shutil.copytree(PARAMS.clone_experiment_dir_from, PARAMS.experiment_dir)
if bool(PARAMS.clean_experiment_directory_before_training) and os.path.isdir(PARAMS.experiment_dir):
shutil.rmtree(PARAMS.experiment_dir)
annotations = pd.read_csv(PARAMS.annotations_filepath)
annotations_human_labels = | pd.read_csv(PARAMS.annotations_human_labels_filepath) | pandas.read_csv |
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import tinkoff_data as td
import edhec_risk_kit as erk
import csv
#l=[]
#l=["TIPO", "TGLD", "TUSD", "TSPX", "TBIO", "TECH"]
l=["FXUS","FXRW","FXWO","FXKZ","FXCN","FXIT","FXDE","FXRL","FXRB","FXRU","FXGD","FXMM","FXTB"]
pddf = td.getTinkoffETFsLYPrices(l)
pddf.index = | pd.to_datetime(pddf.index) | pandas.to_datetime |
"""
Date: Nov 2018
Author: <NAME>
Retrieves sample counts to help select train, validation and testing subsets.
We have already created sync samples using script "create_sync_samples".
This script gets the numbers of samples for each datasets, speakers, and sessions.
These counts are used to select training, validation and test subsets.
These are chosen randomly based only on the relative sizes of subsets.
We end up with ~80% training, 10% validation and 10% testing (in terms of number of samples, which are 200 ms each).
"""
import pandas as pd
from synchronisation.create_experiment_data_utils import get_sync_file_names, split_name
def main():
PATH = "/disk/scratch_big/../SyncDataSmallSil/"
output_file_names = "/disk/scratch_big/../SyncDataSmallSil/docs/df_file_names.csv"
output_file_info = "/disk/scratch_big/../SyncDataSmallSil/docs/df_file_info.csv"
output_speakers = "/disk/scratch_big/../SyncDataSmallSil/docs/df_speakers.csv"
print("finding .npz files in ", PATH)
files = get_sync_file_names(PATH)
files.sort()
# file names
df_files = pd.DataFrame(columns={"filename"}, data=files)
| pd.DataFrame.to_csv(df_files, output_file_names, index=False) | pandas.DataFrame.to_csv |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/24 15:02
Desc: 东方财富网-数据中心-新股数据-打新收益率
东方财富网-数据中心-新股数据-打新收益率
http://data.eastmoney.com/xg/xg/dxsyl.html
东方财富网-数据中心-新股数据-新股申购与中签查询
http://data.eastmoney.com/xg/xg/default_2.html
"""
import pandas as pd
import requests
from tqdm import tqdm
from akshare.utils import demjson
def _get_page_num_dxsyl() -> int:
"""
东方财富网-数据中心-新股数据-打新收益率-总页数
http://data.eastmoney.com/xg/xg/dxsyl.html
:return: 总页数
:rtype: int
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"st": "16",
"sr": "-1",
"ps": "500",
"p": '1',
"type": "NS",
"sty": "NSDXSYL",
"js": "({data:[(x)],pages:(pc)})",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
total_page = data_json["pages"]
return total_page
def stock_dxsyl_em() -> pd.DataFrame:
"""
东方财富网-数据中心-新股数据-打新收益率
http://data.eastmoney.com/xg/xg/dxsyl.html
:return: 指定市场的打新收益率数据
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
page_num = _get_page_num_dxsyl()
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num + 1), leave=False):
params = {
"st": "16",
"sr": "-1",
"ps": "500",
"p": str(page),
"type": "NS",
"sty": "NSDXSYL",
"js": "({data:[(x)],pages:(pc)})",
}
res = requests.get(url, params=params)
data_text = res.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json["data"]])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"发行价",
"最新价",
"网上-发行中签率",
"网上-有效申购股数",
"网上-有效申购户数",
"网上-超额认购倍数",
"网下-配售中签率",
"网下-有效申购股数",
"网下-有效申购户数",
"网下-配售认购倍数",
"总发行数量",
"开盘溢价",
"首日涨幅",
"打新收益",
"上市日期",
"-",
]
big_df = big_df[[
"序号",
"股票代码",
"股票简称",
"发行价",
"最新价",
"网上-发行中签率",
"网上-有效申购股数",
"网上-有效申购户数",
"网上-超额认购倍数",
"网下-配售中签率",
"网下-有效申购股数",
"网下-有效申购户数",
"网下-配售认购倍数",
"总发行数量",
"开盘溢价",
"首日涨幅",
"打新收益",
"上市日期",
]]
big_df["发行价"] = pd.to_numeric(big_df["发行价"], errors='coerce')
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["网上-发行中签率"] = pd.to_numeric(big_df["网上-发行中签率"])
big_df["网上-有效申购股数"] = pd.to_numeric(big_df["网上-有效申购股数"])
big_df["网上-有效申购户数"] = pd.to_numeric(big_df["网上-有效申购户数"])
big_df["网上-超额认购倍数"] = pd.to_numeric(big_df["网上-超额认购倍数"])
big_df["网下-配售中签率"] = pd.to_numeric(big_df["网下-配售中签率"])
big_df["网下-有效申购股数"] = pd.to_numeric(big_df["网下-有效申购股数"])
big_df["网下-有效申购户数"] = pd.to_numeric(big_df["网下-有效申购户数"])
big_df["网下-配售认购倍数"] = pd.to_numeric(big_df["网下-配售认购倍数"])
big_df["总发行数量"] = pd.to_numeric(big_df["总发行数量"])
big_df["开盘溢价"] = pd.to_numeric(big_df["开盘溢价"])
big_df["首日涨幅"] = pd.to_numeric(big_df["首日涨幅"])
big_df["打新收益"] = pd.to_numeric(big_df["打新收益"])
return big_df
def stock_xgsglb_em(symbol: str = "京市A股") -> pd.DataFrame:
"""
新股申购与中签查询
http://data.eastmoney.com/xg/xg/default_2.html
:param symbol: choice of {"全部股票", "沪市A股", "科创板", "深市A股", "创业板", "京市A股"}
:type symbol: str
:return: 新股申购与中签数据
:rtype: pandas.DataFrame
"""
market_map = {
"全部股票": """(APPLY_DATE>'2010-01-01')""",
"沪市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE in ("069001001001","069001001003","069001001006"))""",
"科创板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE in ("058001001","058001008"))(TRADE_MARKET_CODE="069001001006")""",
"深市A股": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE in ("069001002001","069001002002","069001002003","069001002005"))""",
"创业板": """(APPLY_DATE>'2010-01-01')(SECURITY_TYPE_CODE="058001001")(TRADE_MARKET_CODE="069001002002")""",
}
url = "http://datacenter-web.eastmoney.com/api/data/v1/get"
if symbol == "京市A股":
params = {
'sortColumns': 'APPLY_DATE',
'sortTypes': '-1',
'pageSize': '500',
'pageNumber': '1',
'columns': 'ALL',
'reportName': 'RPT_NEEQ_ISSUEINFO_LIST',
'quoteColumns': 'f14~01~SECURITY_CODE~SECURITY_NAME_ABBR',
'source': 'NEEQSELECT',
'client': 'WEB',
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']['pages']
big_df = pd.DataFrame()
for page in tqdm(range(1, 1+int(total_page)), leave=False):
params.update({
'pageNumber': page
})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
'序号',
'-',
'代码',
'-',
'简称',
'申购代码',
'发行总数',
'-',
'发行价格',
'发行市盈率',
'申购日',
'发行结果公告日',
'上市日',
'网上发行数量',
'顶格申购所需资金',
'申购上限',
'网上申购缴款日',
'网上申购退款日',
'-',
'网上获配比例',
'最新价',
'首日收盘价',
'网下有效申购倍数',
'每百股获利',
'-',
'-',
'-',
'-',
'-',
'-',
]
big_df = big_df[[
'序号',
'代码',
'简称',
'申购代码',
'发行总数',
'网上发行数量',
'顶格申购所需资金',
'申购上限',
'发行价格',
'最新价',
'首日收盘价',
'申购日',
'网上申购缴款日',
'网上申购退款日',
'上市日',
'发行结果公告日',
'发行市盈率',
'网上获配比例',
'网下有效申购倍数',
'每百股获利',
]]
big_df['发行总数'] = pd.to_numeric(big_df['发行总数'])
big_df['网上发行数量'] = pd.to_numeric(big_df['网上发行数量'])
big_df['顶格申购所需资金'] = pd.to_numeric(big_df['顶格申购所需资金'])
big_df['申购上限'] = pd.to_numeric(big_df['申购上限'])
big_df['发行价格'] = pd.to_nu | meric(big_df['发行价格']) | pandas.to_numeric |
import pandas as pd
import numpy as np
try:
from paraview.vtk.numpy_interface import dataset_adapter as dsa
from paraview.vtk.numpy_interface import algorithms as algs
from paraview import servermanager as sm
from paraview.simple import *
except:
pass
from vtk.util.numpy_support import vtk_to_numpy
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
class BaseFilter:
"""
Class used as a basecase class for the different Paraview filters
"""
filter_type: str = "VTK_reader"
counter: int = 0
filter: object
vector_keys: List = ["x", "y", "z"]
x_min: float
x_max: float
y_min: float
y_max: float
z_min: float
z_max: float
def __init__(self, name):
self.name = name
@property
def cell_keys(self):
vtk_object = sm.Fetch(self.filter)
vtk_object = dsa.WrapDataObject(vtk_object)
return vtk_object.CellData.keys()
@property
def point_keys(self):
vtk_object = sm.Fetch(self.filter)
vtk_object = dsa.WrapDataObject(vtk_object)
return vtk_object.PointData.keys()
@property
def field_keys(self):
vtk_object = sm.Fetch(self.filter)
vtk_object = dsa.WrapDataObject(vtk_object)
return vtk_object.FieldData.keys()
@property
def cell_data(self) -> pd.DataFrame:
vtk_object = sm.Fetch(self.filter)
vtk_object = dsa.WrapDataObject(vtk_object)
pd_df = pd.DataFrame()
for key in self.cell_keys:
temp_dataset = np.array(vtk_object.CellData[key]).transpose()
if len(temp_dataset.shape) != 1:
# The dataset is a vector:
for idx, vector_element in enumerate(temp_dataset):
new_key = f"{key}{self.vector_keys[idx]}"
pd_df[new_key] = vector_element
else:
pd_df[key] = temp_dataset
return pd_df
@property
def point_data(self) -> pd.DataFrame:
vtk_object = sm.Fetch(self.filter)
vtk_object = dsa.WrapDataObject(vtk_object)
pd_df = | pd.DataFrame() | pandas.DataFrame |
from __future__ import annotations
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import gamma, exponnorm
sns.set()
BASE_PATH = Path('..', 'data', 'experimental')
INPUT_PATHS = [
BASE_PATH / 'control.csv',
BASE_PATH / 'tmz.csv',
]
CURVE_PARAMETERS = { # estimated from running "clovars fit" on "control.csv" and "tmz.csv"
'control_division_parameters': {
'type': gamma,
'loc': 16.23,
'scale': 2.84,
'a': 3.32,
},
'tmz_division_parameters': {
'type': exponnorm,
'loc': 12.72,
'scale': 8.50,
'K': 2.87,
},
'tmz_death_parameters': {
'type': exponnorm,
'loc': 55.09,
'scale': 23.75,
'K': 2.93,
},
}
def main(
input_paths: list[Path],
curve_parameters: dict[str, dict[str, float]],
) -> None:
"""Main function of this script."""
dfs = []
for path in input_paths:
df = pd.melt( | pd.read_csv(path, index_col=None) | pandas.read_csv |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from numpy import mean, var
from scipy import stats
from matplotlib import rc
from lifelines import KaplanMeierFitter
# python program to plot the OS difference between M2 HOXA9 low and M2 high HOXA9
def find_gene_index(gene_list,gene):
j = [i for i,x in enumerate(gene_list) if x == gene]
return j
def find_patients_index(patients, p):
j = [i for i,x in enumerate(patients) if x == p]
return j[0]
filename = "log_modified_LAML_TPM.csv"
filename2 = "laml_tcga_clinical_data.tsv" # from David download - cbioPortal
data = pd.read_csv(filename)
patient_description = pd.read_csv(filename2,sep='\t')
gene_list = data['Hybridization REF']
# find the index of HOXA9 in the data
i_HOXA9 = find_gene_index(gene_list, "HOXA9")
HOXA9_exp = data.iloc[i_HOXA9,2:]
# select patients that have HOXA9 expression in the peaks
peak1_indexes = [i+2 for i,x in enumerate(HOXA9_exp.values[0]) if x <= 1 and x >= 0.005] # +1 due to the first gene columns we removed +1 due to index shift
peak2_indexes = [i+2 for i,x in enumerate(HOXA9_exp.values[0]) if x <= 5.5 and x >= 4]
# 32 patients for low and 80 for high
peak1_patients = data.iloc[:,peak1_indexes].columns
peak2_patients = data.iloc[:,peak2_indexes] .columns
# only keep the patient number
peak1_patients = [item.split('-')[2] for item in peak1_patients]
peak2_patients = [item.split('-')[2] for item in peak2_patients]
patient2 = patient_description['Patient ID']
patient2 = [item.split('-')[2] for item in patient2]
M2_low_indexes = [i for i,x in enumerate(patient2) if x in peak1_patients and patient_description['FAB'][i] == 'M2']
M2_high_indexes = [i for i,x in enumerate(patient2) if x in peak2_patients and patient_description['FAB'][i] == 'M2']
M4_low_indexes = [i for i,x in enumerate(patient2) if x in peak1_patients and patient_description['FAB'][i] == 'M4']
M4_high_indexes = [i for i,x in enumerate(patient2) if x in peak2_patients and patient_description['FAB'][i] == 'M4']
M2_low_vital = patient_description["Patient's Vital Status"][M2_low_indexes]
M2_high_vital = patient_description["Patient's Vital Status"][M2_high_indexes ]
M4_low_vital = patient_description["Patient's Vital Status"][M4_low_indexes]
M4_high_vital = patient_description["Patient's Vital Status"][M4_high_indexes ]
M2_low_vital2 = [0 if item == "Alive" else 1 for item in M2_low_vital]
M2_high_vital2 = [0 if item == "Alive" else 1 for item in M2_high_vital]
M4_low_vital2 = [0 if item == "Alive" else 1 for item in M4_low_vital]
M4_high_vital2 = [0 if item == "Alive" else 1 for item in M4_high_vital]
M2_low_OS = patient_description["Overall Survival (Months)"][M2_low_indexes]
M2_high_OS = patient_description["Overall Survival (Months)"][M2_high_indexes]
M4_low_OS = patient_description["Overall Survival (Months)"][M4_low_indexes]
M4_high_OS = patient_description["Overall Survival (Months)"][M4_high_indexes]
M2_low_tab = {'OS':M2_low_OS, 'vital':M2_low_vital, 'vital2':M2_low_vital2}
M2_high_tab = {'OS':M2_high_OS, 'vital':M2_high_vital, 'vital2':M2_high_vital2}
M4_low_tab = {'OS':M4_low_OS, 'vital':M4_low_vital, 'vital2':M4_low_vital2}
M4_high_tab = {'OS':M4_high_OS, 'vital':M4_high_vital, 'vital2':M4_high_vital2}
M2_low_tab = | pd.DataFrame(data=M2_low_tab) | pandas.DataFrame |
#
# Collective Knowledge ()
#
#
#
#
# Developer:
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
import os
import sys
import json
import re
import pandas as pd
import numpy as np
# Local settings
selector=[
]
selector2=[
{'name':'Device', 'key':'_platform'},
{'name':'Team', 'key':'_team'},
{'name':'Minimizer', 'key':'_minimizer_method'}
]
selector3=[
]
metrics_selector_s=[
{'name':'Fun Key', 'key':'__fun_key', 'values':['fun','fun_exact','fun_validated']},
{'name':'Time Key', 'key':'__time_key', 'values':['total_q_shots','total_q_seconds','total_seconds']},
{'name':'Delta', 'key':'__delta', 'config':{'type':'number','min':0,'step':0.05}},
{'name':'Prob', 'key':'__prob', 'config':{'type':'number','min':0,'max':1,'step':0.1}}
]
dimensions=[
{"key":"experiment", "name":"Experiment number", "view_key":"__number"},
{"key":"__energies", "name":"Energy convergence", "view_key":"__energies"},
{"key":"__fevs", "name":"Function evaluation", "view_key":"__fevs"},
{"key":"_point", "name":"Point", "view_key":"_point"},
{"key":"fun", "name":"fun", "view_key":"fun"},
{"key":"fun_exact", "name":"fun_exact", "view_key":"fun_exact"},
{"key":"fun_validated", "name":"fun_validated", "view_key":"fun_validated"},
{"key":"total_q_seconds", "name":"total_q_seconds", "view_key":"total_q_seconds"},
{"key":"total_q_shots", "name":"total_q_shots", "view_key":"total_q_shots"},
{"key":"total_seconds", "name":"total_seconds", "view_key":"total_seconds"},
]
metrics_dimensions=[
{"key":"experiment", "name":"Experiment number", "view_key":"__number"},
{"key":"_point", "name":"Point", "view_key":"_point"},
{"key":"__energies", "name":"Energy", "view_key":"__energies"},
{"key":"T_ave", "name":"T_ave", "view_key":"T_ave"},
{"key":"_sample_number", "name":"Sample number", "view_key":"_sample_number"},
{"key":"__times", "name":"Time", "view_key":"__times"},
{"key":"t_ave", "name":"t_ave", "view_key":"t_ave"},
]
view_cache=[
]
table_view=[
{"key":"_platform", "name":"Device"},
{"key":"_team", "name":"Team"},
{"key":"_minimizer_method", "name":"Minimizer"},
{"key":"_sample_number", "name":"Sample number"},
{"key":"_max_iterations", "name":"Max iterations"},
{"key":"_point", "name":"Point"},
{"key":"_repetition_id", "name":"Repetition ID"},
{"key":"fun", "name":"fun", "format":"%.3f"},
{"key":"fun_exact", "name":"fun_exact", "format":"%.3f"},
{"key":"fun_validated", "name":"fun_validated", "format":"%.3f"},
{"key":"nfev", "name":"nfev"},
{"key":"nit", "name":"nit"},
{"key":"success", "name":"success"},
{"key":"total_q_seconds", "name":"total_q_seconds", "format":"%.3f"},
{"key":"total_q_shots", "name":"total_q_shots"},
{"key":"total_seconds", "name":"total_seconds", "format":"%.3f"},
{"key":"_minimizer_src", "name":"Minimizer Source"},
]
metrics_table_view=[
{"key":"_platform", "name":"Device"},
{"key":"_team", "name":"Team"},
{"key":"_minimizer_method", "name":"Minimizer"},
{"key":"_sample_number", "name":"Sample number"},
{"key":"_max_iterations", "name":"Max iterations"},
{"key":"_point", "name":"Point"},
{"key":"T_ave", "name":"T_ave", "format":"%.3f"},
{"key":"T_err", "name":"T_err", "format":"%.3f"},
{"key":"num_repetitions", "name":"num_repetitions"},
{"key":"s", "name":"s", "format":"%.3f"},
{"key":"s_err", "name":"s_err", "format":"%.3f"},
{"key":"t_ave", "name":"t_ave", "format":"%.3f"},
{"key":"t_err", "name":"t_err", "format":"%.3f"},
]
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# TBD: action description
def detect(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
ck.out('TBD: action description')
ck.out('')
ck.out('Command line: ')
ck.out('')
import json
cmd=json.dumps(i, indent=2)
ck.out(cmd)
return {'return':0}
##############################################################################
# get raw data for repo-widget
def get_raw_data(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
repo_uoa = 'ck-quantum-hackathon-20180615'
def get_experimental_results(repo_uoa, tags='qck', module_uoa='experiment'):
r = ck.access({'action':'search', 'repo_uoa':repo_uoa, 'module_uoa':module_uoa, 'tags':tags})
if r['return']>0:
print('Error: %s' % r['error'])
exit(1)
experiments = r['lst']
dfs = []
for experiment in experiments:
data_uoa = experiment['data_uoa']
r = ck.access({'action':'list_points', 'repo_uoa':repo_uoa, 'module_uoa':module_uoa, 'data_uoa':data_uoa})
if r['return']>0:
print('Error: %s' % r['error'])
exit(1)
tags = r['dict']['tags']
skip = False
# Get team name (final data) or email (submission data).
team_tags = [ tag for tag in tags if tag.startswith('team-') ]
email_tags = [ tag for tag in tags if tag.find('@')!=-1 ]
if len(team_tags) > 0:
team = team_tags[0][0:7]
elif len(email_tags) > 0:
team = email_tags[0]
else:
print('[Warning] Cannot determine team name for experiment in: \'%s\'' % r['path'])
team = 'team-default'
if skip:
print('[Warning] Skipping experiment with bad tags:')
print(tags)
continue
# For each point.
for point in r['points']:
point_file_path = os.path.join(r['path'], 'ckp-%s.0001.json' % point)
with open(point_file_path) as point_file:
point_data_raw = json.load(point_file)
characteristics_list = point_data_raw['characteristics_list']
num_repetitions = len(characteristics_list)
data = [
{
# features
'platform': characteristics['run'].get('vqe_input', {}).get('q_device_name', 'unknown').lower(),
# choices
'minimizer_method': characteristics['run'].get('vqe_input', {}).get('minimizer_method', 'n/a'),
'minimizer_options': characteristics['run'].get('vqe_input', {}).get('minimizer_options', {'maxfev':-1}),
'minimizer_src': characteristics['run'].get('vqe_input', {}).get('minimizer_src', ''),
'sample_number': characteristics['run'].get('vqe_input', {}).get('sample_number','n/a'),
# statistical repetition
'repetition_id': repetition_id,
# runtime characteristics
'run': characteristics['run'],
'report': characteristics['run'].get('report', {}),
'vqe_output': characteristics['run'].get('vqe_output', {}),
}
for (repetition_id, characteristics) in zip(range(num_repetitions), characteristics_list)
if len(characteristics['run']) > 0
]
index = [
'platform', 'team', 'minimizer_method', 'sample_number', 'max_iterations', 'point', 'repetition_id'
]
for datum in data:
datum['team'] = team
datum['point'] = point
datum['success'] = datum.get('vqe_output',{}).get('success',False)
datum['nfev'] = np.int64(datum.get('vqe_output',{}).get('nfev',-1))
datum['nit'] = np.int64(datum.get('vqe_output',{}).get('nit',-1))
datum['fun'] = np.float64(datum.get('vqe_output',{}).get('fun',0))
datum['fun_validated'] = np.float64(datum.get('vqe_output',{}).get('fun_validated',0))
datum['fun_exact'] = np.float64(datum.get('vqe_output',{}).get('fun_exact',0))
datum['total_seconds'] = np.float64(datum.get('report',{}).get('total_seconds',0))
datum['total_q_seconds'] = np.float64(datum.get('report',{}).get('total_q_seconds',0))
datum['total_q_shots'] = np.int64(datum.get('report',{}).get('total_q_shots',0))
tmp_max_iterations = list(datum.get('minimizer_options',{'maxfev':-1}).values())
datum['max_iterations'] = tmp_max_iterations[0] if len(tmp_max_iterations)>0 else -1
for key in index:
datum['_' + key] = datum[key]
datum['_minimizer_src'] = datum['minimizer_src']
# Construct a DataFrame.
df = pd.DataFrame(data)
df = df.set_index(index)
# Append to the list of similarly constructed DataFrames.
dfs.append(df)
if dfs:
# Concatenate all thus constructed DataFrames (i.e. stack on top of each other).
result = pd.concat(dfs)
result.sort_index(ascending=True, inplace=True)
else:
# Construct a dummy DataFrame the success status of which can be safely checked.
result = pd.DataFrame(columns=['success'])
return result
# Merge experimental results from the same team with the same parameters
# (minimizer_method, sample_number, max_iterations) and minimizer source.
def merge_experimental_results(df):
dfs = []
df_prev = None
for index, row in df.iterrows():
# Construct a DataFrame.
df_curr = pd.DataFrame(row).T
# Check if this row is similar to the previous row.
if df_prev is not None: # if not the very first row
if df_prev.index.levels[:-2]==df_curr.index.levels[:-2]: # if the indices match for all but the last two levels
if df_prev.index.levels[-2]!=df_curr.index.levels[-2]: # if the experiments are different
if df_prev['minimizer_src'].values==df_curr['minimizer_src'].values: # if the minimizer source is the same
print('[Info] Merging experiment:')
print(df_curr.index.levels)
print('[Info] into:')
print(df_prev.index.levels)
print('[Info] as:')
# df_curr.index = df_prev.index.copy() # TODO: increment repetition_id
df_curr.index = | pd.MultiIndex.from_tuples([(x[0],x[1],x[2],x[3],x[4],x[5],x[6]+1) for x in df_prev.index]) | pandas.MultiIndex.from_tuples |
# -*- coding: utf-8 -*-
# Copyright © 2021 by <NAME>. All rights reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Set of operations to transform pandas DataFrames given user specified config.
These operations are ordered according to `ndj_pipeline.model.run_model_training`.
"""
import logging
from pathlib import Path
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split as tts
from ndj_pipeline import utils
def load_data_and_key(model_config: Dict[str, Any]) -> pd.DataFrame:
"""Uses config to load data and assign key.
Args:
model_config: Loaded model experiment config, specifically for
data path and index column(s)
Returns:
Pandas dataframe with optionally assigned index
"""
input_path = Path(*model_config["data_file"])
logging.info(f"Loading parquet from {input_path}")
data = | pd.read_parquet(input_path) | pandas.read_parquet |
"""
Original work Copyright 2017 <NAME>
Modified work Copyright 2018 IBM Corporation
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import numpy as np
import pandas as pd
from cvxpy import Problem, Minimize, Variable, sum_entries,\
mul_elemwise, norm
from logging import info, debug, warn
class OptTools():
'''
Class that implements the optimization for optimized pre-processing. Based on:
http://papers.nips.cc/paper/6988-optimized-pre-processing-for-discrimination-prevention
and
https://github.com/fair-preprocessing/nips2017
The particular formulation implemented here is:
1. l1 distance between input and transformed distributions
2. "Excess distortion constraint" - eqn 5 in paper.
3. Discrimination constraints for all combinations of groups specified (there is no
distinction between protected and unprotected groups). The constraints are given in
eqn 2, 3 in the paper. We use a single epsilon value for all combinations of
y and d values
See section 4.3 in supplementary material of the paper for an example
Attributes:
features (list): All features
df (DataFrame): Input data
dfJoint (DataFrame): Empirical joint distribution
D_features (list): protected attribute names
X_features (list): feature names for input data
Y_features (list): feature names for binary label
X_values (list): Values that features can take
Y_values (list): Values that the label can take
D_values (list): Values that protected attributes can take
XY_features (list): Combination of X, and Y features
DXY_features (list): Combination of D, X, and Y features
XY_values (list): Combination of X, and Y values
DXY_values (list): Combination of D, X, and Y values
y_index (Int64Index): Indices for the Y values
XY_index (MultiIndex): Indices for the combination of X, and Y values
DXY_index (MultiIndex): Indices for the combination of D, X, and Y values
YD_features_index (MultiIndex): Indices for the combination of Y, and D values
clist (list): Distance thresholds for individual distortion
CMlist (list): List of constraint matrices corresponding to each threshold
in clist
dfD (DataFrame): distortion matrix with indices and columns
dlist (list): Probability bounds given in eq. 5 of the paper for
each threshold in clist
epsilon (float): epsilon value used in discrimination constraint
dfD_to_Y_address (Dataframe): matrix for p_yd, with y varying in the columns
dfMask_Pxyd_to_Pyd (DataFrame): Mask to transform P_XYD to P_YD
dfMask_Pxyd_to_Pxy (DataFrame): Mask to convert from P_XYD to P_XY
dfPxyd (DataFrame): Representation of only frequencies from dfJoint
dfMask_Pxyd_to_Py (DataFrame): Mask to convert from P_XYD to P_Y
dfMask_Pxy_to_Py (DataFrame): Mask to convert from P_XY to P_Y
dfMask_Pxyd_to_Pd (DataFrame): Mask to convert from P_XYD to P_D
dfP (DataFrame): Mapping transformation learned from the data
'''
def __init__(self, df=None, features=None):
""" Initialize the problem. Not all attributes are initialized when
creating the object.
Args:
df (DataFrame): Input dataframe
features (list): Optional features to subset the dataframe
"""
self.df = df.copy()
if not isinstance(df, pd.DataFrame):
raise TypeError("`df` must be a pandas DataFrame")
if not features:
self.features = list(df)
else:
self.features = features
# build joint distribution
self.dfJoint = self.df.groupby(self.features).size().reset_index()
self.dfJoint.rename(columns={0:'Count'},inplace=True)
self.dfJoint['Frequency'] = self.dfJoint['Count'].apply(lambda x : x/float(len(self.df)))
# initialize the features that will be used for optimization
self.D_features = [] # discriminatory features
self.Y_features = [] # binary decision variable
self.X_features = [] # variables used for decision making
# values that each feature can assume
self.D_values = []
self.Y_values = []
# place holder for mapping dataframe
self.dfP = pd.DataFrame() # this will hold the conditional mappings
# place holder for the distortion mapping
self.dfD = pd.DataFrame()
# excess distortion constraint placeholder
self.clist = []
# excess distortion matrices
self.CMlist = []
def get_mask(self, dfRef):
""" Create a mask assuming the multindex column is a
subset of the multindex rows. This mask will be used for
marginalizing distributions.
Args:
dfRef (DataFrame): Reference data frame
"""
# generates a mask assuming the multindex column is a subset of the multindex rows
target_ix = list(dfRef.columns.names)
dfRows = pd.DataFrame(index = dfRef.index).reset_index()[target_ix].values
dfCols = pd.DataFrame(index = dfRef.columns).reset_index()[target_ix].values
for i in range(dfRef.shape[0]):
val1 = dfRows[i,:]
for j in range(dfRef.shape[1]):
val2 = dfCols[j,:]
if np.all(val1 == val2):
dfRef.iat[i,j] = 1.0
return dfRef
# method for setting the features
def set_features(self,D = [], X = [], Y = []):
""" Set many features for the class
Args:
D (list): names of D features
X (list): names of X features
Y (list): names of Y features
"""
self.D_features = D
self.Y_features = Y
self.X_features = X
# Get values for Pandas multindex
self.D_values = [self.dfJoint[feature].unique().tolist() for feature in self.D_features]
self.Y_values = [self.dfJoint[feature].unique().tolist() for feature in self.Y_features]
self.X_values = [self.dfJoint[feature].unique().tolist() for feature in self.X_features]
# Create multindex for mapping dataframe
self.DXY_features = self.D_features+self.X_features+self.Y_features
self.DXY_values = self.D_values+self.X_values+self.Y_values
self.DXY_index = pd.MultiIndex.from_product(self.DXY_values, names = self.DXY_features)
# Create multindex for distortion dataframe
self.XY_features = self.X_features+self.Y_features
self.XY_values = self.X_values+self.Y_values
self.XY_index = pd.MultiIndex.from_product(self.XY_values, names = self.XY_features)
# Initialize mapping dataframe
self.dfP = pd.DataFrame(np.zeros((len(self.DXY_index),len(self.XY_index))), \
index=self.DXY_index, columns = self.XY_index)
# Initialize distortion dataframe
self.dfD = pd.DataFrame(np.zeros((len(self.XY_index),len(self.XY_index))), \
index=self.XY_index.copy(), columns = self.XY_index.copy())
###
# Generate masks for recovering marginals
###
self.dfPxyd = | pd.DataFrame(index=self.dfP.index,columns=['Frequency']) | pandas.DataFrame |
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import random
import math
import pandas as pd
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from mlos.Optimizers.RegressionModels.Prediction import Prediction
from mlos.Optimizers.RegressionModels.LassoCrossValidatedConfigStore import lasso_cross_validated_config_store
from mlos.Optimizers.RegressionModels.LassoCrossValidatedRegressionModel import LassoCrossValidatedRegressionModel
from mlos.Spaces import SimpleHypergrid, ContinuousDimension, CategoricalDimension
from mlos.Spaces.HypergridAdapters.ContinuousToPolynomialBasisHypergridAdapter import ContinuousToPolynomialBasisHypergridAdapter
from mlos.OptimizerEvaluationTools.ObjectiveFunctionFactory import ObjectiveFunctionFactory, objective_function_config_store
import mlos.global_values as global_values
class TestLassoCrossValidatedRegressionModel:
@classmethod
def setup_class(cls):
global_values.declare_singletons()
def setup_method(self, method):
self.model_config = lasso_cross_validated_config_store.default
self.max_basis_function_degree = 2
self.test_case_globals = {
'2d_X_deg2_poly_input_space': SimpleHypergrid(
name="2d_X_deg2_poly_search_domain",
dimensions=[
ContinuousDimension(name="1", min=0.0, max=5.0),
ContinuousDimension(name="x1", min=0.0, max=5.0),
ContinuousDimension(name="x2", min=0.0, max=5.0),
ContinuousDimension(name="x1**2", min=0.0, max=25.0),
ContinuousDimension(name="x1*x2", min=0.0, max=25.0),
ContinuousDimension(name="x2**2", min=0.0, max=25.0)
]
),
'categorical_deg2_poly_input_space': SimpleHypergrid(
name="categorical_search_domain",
dimensions=[
CategoricalDimension(name='x0', values=['a', 'b', 'c']),
ContinuousDimension(name="1", min=0.0, max=5.0),
ContinuousDimension(name="x1", min=0.0, max=5.0),
ContinuousDimension(name="x2", min=0.0, max=5.0),
ContinuousDimension(name="x1**2", min=0.0, max=25.0),
ContinuousDimension(name="x1*x2", min=0.0, max=25.0),
ContinuousDimension(name="x2**2", min=0.0, max=25.0),
CategoricalDimension(name='i0', values=['-5', '5'])
]
),
'degree2_output_space': SimpleHypergrid(
name="degree2_polynomial",
dimensions=[
ContinuousDimension(name="degree2_polynomial_y", min=-10 ** 15, max=10 ** 15)
]
)
}
@staticmethod
def n_choose_k(n, k):
return math.factorial(n) / math.factorial(k) / math.factorial(n - k)
@staticmethod
def get_simple_quadratic_coefficients():
return np.array([1, -3, -4, -0.5, 0.0, -2.0])
@staticmethod
def generate_points_simple_quadratic(num_points, num_features):
x = np.random.uniform(0, 5, [num_points, num_features])
# y = 1 -3*X_1 -4*X_2 -0.5*X_1**2 -2*X_2**2
y_coef_true = TestLassoCrossValidatedRegressionModel.get_simple_quadratic_coefficients()
poly_reg = PolynomialFeatures(degree=2)
poly_terms_x = poly_reg.fit_transform(x)
y = np.matmul(poly_terms_x, y_coef_true)
y_df = pd.DataFrame(y, columns=['degree2_polynomial_y'])
x_df = | pd.DataFrame(poly_terms_x, columns=['1', 'x1', 'x2', 'x1**2', 'x1*x2', 'x2**2']) | pandas.DataFrame |
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
from pandas.tests.test_base import Ops
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
| tm.assert_numpy_array_equal(result, expected) | pandas.util.testing.assert_numpy_array_equal |
import sys
import pandas as pd
from sqlalchemy import *
def load_data(messages_filepath, categories_filepath):
'''
load the data set from the csv file and convert it to pandas
dataframe and combine the two data frame
Argument :
messages_filepath - path of the csv file disaster_messages.csv
categories_filepath - path of the csv file disaster_categories.csv
return :
df - uncleaned data frame
'''
# load messages and categories datasets
messages = | pd.read_csv('disaster_messages.csv') | pandas.read_csv |
import os.path
import logging
import pandas as pd
from common.constants import *
from common.base_parser import BaseParser
PATHWAY_FILE = 'pathway.tsv'
KO_FILE = 'ko.tsv'
GENE_FILE = 'gene.tsv'
GENOME_FILE = 'genome.tsv'
KO2PATHWAY_FILE = 'ko2pathway.tsv'
GENOME2PATHWAY_FILE = 'genome2pathway.tsv'
GENE2KO_FILE = 'gene2ko.tsv'
class KeggParser(BaseParser):
def __init__(self, prefix: str, basedir=None):
BaseParser.__init__(self, prefix, DB_KEGG.lower(), basedir)
# self.logger = logging.getLogger(__name__)
def parse_pathway_file(self):
file = os.path.join(self.download_dir, 'pathway', 'pathway.list')
df = pd.read_csv(file, sep='\t', names=[PROP_ID, PROP_NAME])
df = df[~df[PROP_ID].str.startswith('#')]
return df
def parse_ko_file(self):
ENTRY = 'ENTRY'
NAME = PROP_NAME.upper()
DEF = PROP_DEF.upper()
file = os.path.join(self.download_dir, 'genes', 'ko', 'ko')
entries = []
entry = {}
with open(file, 'r') as f:
for line in f:
if line.startswith(ENTRY):
entry = dict()
entries.append(entry)
val = self.get_attr_value(ENTRY, line).split(' ')[0]
entry[PROP_ID] = val
elif line.startswith(NAME):
entry[PROP_NAME] = self.get_attr_value(NAME, line)
elif line.startswith(DEF):
entry[PROP_DEF] = self.get_attr_value(DEF, line)
return pd.DataFrame(entries, columns=[PROP_ID, PROP_NAME, PROP_DEF])
def parse_pathway2ko_file(self):
file = os.path.join(self.download_dir, 'pathway', 'links', 'pathway_ko.list')
df = pd.read_csv(file, sep='\t', header=None, names=['pathway', 'ko'])
# print(len(df))
df = df[df.pathway.str.contains('map')]
df['pathway'] = df['pathway'].str.replace('path:map', '')
df['ko'] = df['ko'].str.replace('ko:', '')
return df
def parse_pathway2genome_file(self):
file = os.path.join(self.download_dir, 'pathway', 'links', 'pathway_genome.list')
df = pd.read_csv(file, sep='\t', header=None, names=['pathway', 'genome'])
# print(len(df))
df['pathway'] = df.pathway.str.replace('[\D:]+', '', regex=True)
df['genome'] = df.genome.str.replace('gn:', '')
df_genome = df[['genome']].copy().drop_duplicates()
df_genome.columns = [PROP_ID]
print(df_genome.head())
return df, df_genome
@classmethod
def get_attr_value(self, attr_name, line):
if line.startswith(attr_name):
val = line[len(attr_name):].strip()
return val
return ''
def parse_and_write_data_files(self):
df_pathway = self.parse_pathway_file()
logging.info('kegg pathways: ' + str(len(df_pathway)))
df_pathway.to_csv(os.path.join(self.output_dir, self.file_prefix + PATHWAY_FILE), sep='\t', index=False)
df_ko = self.parse_ko_file()
logging.info('kegg ko: ' + str(len(df_ko)))
df_ko.to_csv(os.path.join(self.output_dir, self.file_prefix + KO_FILE), sep='\t', index=False)
# Write gene data file
outfile = os.path.join(self.output_dir, self.file_prefix + GENE_FILE)
infile = os.path.join(self.download_dir, 'genes', 'genes_ncbi-geneid.list')
header = True
chunks = | pd.read_csv(infile, sep='\t', chunksize=3000, header=None, names=[PROP_ID, 'gene_id']) | pandas.read_csv |
import glob
import os
import pathlib
import tempfile
import warnings
import logging
from abc import ABC
from pathlib import Path
from shutil import copy
from tempfile import mkstemp
from typing import Union, Dict
from zipfile import ZipFile
import numpy as np
import pandas as pd
from flask import send_from_directory, jsonify
from shapely.geometry import GeometryCollection, mapping
from openeo.metadata import CollectionMetadata
from openeo_driver.datacube import DriverDataCube
from openeo_driver.errors import OpenEOApiException
from openeo_driver.utils import replace_nan_values
_log = logging.getLogger(__name__)
class SaveResult(ABC):
"""
A class that generates a Flask response.
"""
def __init__(self, format: str = None, options: dict = None):
self.format = format and format.lower()
self.options = options or {}
def is_format(self, *args):
return self.format.lower() in {f.lower() for f in args}
def set_format(self, format: str, options: dict = None):
self.format = format.lower()
self.options = options or {}
def create_flask_response(self):
"""
Returns a Flask compatible response.
:return: A response that can be handled by Flask
"""
pass
def get_mimetype(self, default="application/octet-stream"):
return {
"gtiff": "image/tiff; application=geotiff",
"cog": "image/tiff; application=geotiff; profile=cloud-optimized",
"netcdf": "application/x-netcdf",
"png": "image/png",
"json": "application/json",
"geojson": "application/geo+json",
"covjson": "application/json",
"csv": "text/csv",
# TODO: support more formats
}.get(self.format.lower(), default)
def get_temp_file(suffix="", prefix="openeo-pydrvr-"):
# TODO: make sure temp files are cleaned up when read
_, filename = tempfile.mkstemp(suffix=suffix, prefix=prefix)
return filename
class ImageCollectionResult(SaveResult):
def __init__(self, cube: DriverDataCube, format: str, options: dict):
super().__init__(format=format, options=options)
self.cube = cube
def save_result(self, filename: str) -> str:
return self.cube.save_result(filename=filename, format=self.format, format_options=self.options)
def write_assets(self, directory:str) -> Dict:
"""
Save generated assets into a directory, return asset metadata.
TODO: can an asset also be a full STAC item? In principle, one openEO job can either generate a full STAC collection, or one STAC item with multiple assets...
:return: STAC assets dictionary: https://github.com/radiantearth/stac-spec/blob/master/item-spec/item-spec.md#assets
"""
if "write_assets" in dir(self.cube):
return self.cube.write_assets(filename=directory, format=self.format, format_options=self.options)
else:
filename = self.cube.save_result(filename=directory, format=self.format, format_options=self.options)
return {filename:{"href":filename}}
def create_flask_response(self):
filename = get_temp_file(suffix=".save_result.{e}".format(e=self.format.lower()))
filename = self.save_result(filename)
mimetype = self.get_mimetype()
return send_from_directory(os.path.dirname(filename), os.path.basename(filename), mimetype=mimetype)
class JSONResult(SaveResult):
def __init__(self, data, format: str = "json", options: dict = None):
super().__init__(format=format, options=options)
self.data = data
def write_assets(self, path:str) -> Dict:
"""
Save generated assets into a directory, return asset metadata.
TODO: can an asset also be a full STAC item? In principle, one openEO job can either generate a full STAC collection, or one STAC item with multiple assets...
:return: STAC assets dictionary: https://github.com/radiantearth/stac-spec/blob/master/item-spec/item-spec.md#assets
"""
output_dir = Path(path).parent
output_file = output_dir / "result.json"
with open(output_file, 'w') as f:
import json
json.dump(self.prepare_for_json(), f)
return {"result.json":{
"href":str(output_file),
"roles": ["data"],
"type": "application/json",
"description": "json result generated by openEO"
}}
def get_data(self):
return self.data
def prepare_for_json(self):
return replace_nan_values(self.get_data())
def create_flask_response(self):
return jsonify(self.prepare_for_json())
class AggregatePolygonResult(JSONResult):
"""
Container for timeseries result of `aggregate_polygon` process (aka "zonal stats")
Expects internal representation of timeseries as nested structure:
dict mapping timestamp (str) to:
a list, one item per polygon:
a list, one float per band
"""
def __init__(self, timeseries: dict, regions: GeometryCollection, metadata:CollectionMetadata=None):
super().__init__(data=timeseries)
if not isinstance(regions, GeometryCollection):
# TODO: raise exception instead of warning?
warnings.warn("AggregatePolygonResult: GeometryCollection expected but got {t}".format(t=type(regions)))
self._regions = regions
self._metadata = metadata
def get_data(self):
if self.is_format('covjson', 'coveragejson'):
return self.to_covjson()
# By default, keep original (proprietary) result format
return self.data
def write_assets(self, directory:str) -> Dict:
"""
Save generated assets into a directory, return asset metadata.
TODO: can an asset also be a full STAC item? In principle, one openEO job can either generate a full STAC collection, or one STAC item with multiple assets...
:return: STAC assets dictionary: https://github.com/radiantearth/stac-spec/blob/master/item-spec/item-spec.md#assets
"""
directory = pathlib.Path(directory).parent
filename = str(Path(directory)/"timeseries.json")
asset = {
"roles": ["data"],
"type": "application/json"
}
if self.is_format('netcdf', 'ncdf'):
filename = str(Path(directory) / "timeseries.nc")
self.to_netcdf(filename)
asset["type"] = "application/x-netcdf"
elif self.is_format('csv'):
filename = str(Path(directory) / "timeseries.csv")
self.to_csv(filename)
asset["type"] = "text/csv"
else:
import json
with open(filename, 'w') as f:
json.dump(self.prepare_for_json(), f)
asset["href"] = filename
if self._metadata is not None and self._metadata.has_band_dimension():
bands = [b._asdict() for b in self._metadata.bands]
asset["bands"] = bands
return {str(Path(filename).name): asset}
def create_flask_response(self):
if self.is_format('netcdf', 'ncdf'):
filename = self.to_netcdf()
return send_from_directory(os.path.dirname(filename), os.path.basename(filename))
if self.is_format('csv'):
filename = self.to_csv()
return send_from_directory(os.path.dirname(filename), os.path.basename(filename))
return super().create_flask_response()
def create_point_timeseries_xarray(self, feature_ids, timestamps,lats,lons,averages_by_feature):
import xarray as xr
import pandas as pd
#xarray breaks with timezone aware dates: https://github.com/pydata/xarray/issues/1490
time_index = | pd.to_datetime(timestamps,utc=False) | pandas.to_datetime |
from difflib import SequenceMatcher
import functools
from typing import Optional
import pandas
__doc__ = """Get specialty codes and consolidate data from different sources in basic_data."""
COLUMNS = ['first_name', 'last_name', 'city', 'postal_code', 'state', 'specialty_code']
GENERIC_OPHTHALMOLOGY_CODE = '207W00000X'
GENERIC_ENDOCRINOLOGY_CODE = '207RE0101X'
def _convert_zip9_to_zip5(z: Optional[str]) -> Optional[str]:
if pandas.isnull(z):
return
return z.split('-')[0].zfill(5)
def clean_asoprs(in_df: pandas.DataFrame) -> pandas.DataFrame:
out_df: pandas.DataFrame = in_df.loc[:, ['Full Name_firstName', 'Full Name_lastName']]
all_address_columns = {col for col in in_df.columns if 'address' in col.lower()}
all_address_subfields = {col.split('_')[-1] for col in all_address_columns}
def get_first_address_subfield(row: pandas.Series):
out_dict = {i: None for i in all_address_subfields}
for col in set(row.index).intersection(all_address_columns):
subfield = col.split('_')[-1]
if not pandas.isnull(value := row[col]) and out_dict[subfield] is None:
out_dict[subfield] = row[col]
target_cols = ['city', 'zip', 'state']
out_dict = {i: out_dict[i] for i in target_cols}
return out_dict
address_data = in_df.apply(get_first_address_subfield, 1, False, 'expand')
out_df = | pandas.concat([out_df, address_data], 1) | pandas.concat |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: | pd.Timestamp("2013-05-19 00:00:00") | pandas.Timestamp |
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.api.indexers import (
BaseIndexer,
FixedForwardWindowIndexer,
)
from pandas.core.window.indexers import (
ExpandingIndexer,
FixedWindowIndexer,
VariableOffsetWindowIndexer,
)
from pandas.tseries.offsets import BusinessDay
def test_bad_get_window_bounds_signature():
class BadIndexer(BaseIndexer):
def get_window_bounds(self):
return None
indexer = BadIndexer()
with pytest.raises(ValueError, match="BadIndexer does not implement"):
Series(range(5)).rolling(indexer)
def test_expanding_indexer():
s = Series(range(10))
indexer = ExpandingIndexer()
result = s.rolling(indexer).mean()
expected = s.expanding().mean()
tm.assert_series_equal(result, expected)
def test_indexer_constructor_arg():
# Example found in computation.rst
use_expanding = [True, False, True, False, True]
df = DataFrame({"values": range(5)})
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
start = np.empty(num_values, dtype=np.int64)
end = np.empty(num_values, dtype=np.int64)
for i in range(num_values):
if self.use_expanding[i]:
start[i] = 0
end[i] = i + 1
else:
start[i] = i
end[i] = i + self.window_size
return start, end
indexer = CustomIndexer(window_size=1, use_expanding=use_expanding)
result = df.rolling(indexer).sum()
expected = DataFrame({"values": [0.0, 1.0, 3.0, 3.0, 10.0]})
tm.assert_frame_equal(result, expected)
def test_indexer_accepts_rolling_args():
df = DataFrame({"values": range(5)})
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
start = np.empty(num_values, dtype=np.int64)
end = np.empty(num_values, dtype=np.int64)
for i in range(num_values):
if center and min_periods == 1 and closed == "both" and i == 2:
start[i] = 0
end[i] = num_values
else:
start[i] = i
end[i] = i + self.window_size
return start, end
indexer = CustomIndexer(window_size=1)
result = df.rolling(indexer, center=True, min_periods=1, closed="both").sum()
expected = DataFrame({"values": [0.0, 1.0, 10.0, 3.0, 4.0]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("constructor", [Series, DataFrame])
@pytest.mark.parametrize(
"func,np_func,expected,np_kwargs",
[
("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, np.nan], {}),
("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {}),
(
"max",
np.max,
[2.0, 3.0, 4.0, 100.0, 100.0, 100.0, 8.0, 9.0, 9.0, np.nan],
{},
),
(
"std",
np.std,
[
1.0,
1.0,
1.0,
55.71654452,
54.85739087,
53.9845657,
1.0,
1.0,
0.70710678,
np.nan,
],
{"ddof": 1},
),
(
"var",
np.var,
[
1.0,
1.0,
1.0,
3104.333333,
3009.333333,
2914.333333,
1.0,
1.0,
0.500000,
np.nan,
],
{"ddof": 1},
),
(
"median",
np.median,
[1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 8.5, np.nan],
{},
),
],
)
@pytest.mark.filterwarnings("ignore:min_periods:FutureWarning")
def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs):
# GH 32865
values = np.arange(10.0)
values[5] = 100.0
indexer = FixedForwardWindowIndexer(window_size=3)
match = "Forward-looking windows can't have center=True"
with pytest.raises(ValueError, match=match):
rolling = constructor(values).rolling(window=indexer, center=True)
getattr(rolling, func)()
match = "Forward-looking windows don't support setting the closed argument"
with pytest.raises(ValueError, match=match):
rolling = constructor(values).rolling(window=indexer, closed="right")
getattr(rolling, func)()
rolling = constructor(values).rolling(window=indexer, min_periods=2)
result = getattr(rolling, func)()
# Check that the function output matches the explicitly provided array
expected = constructor(expected)
tm.assert_equal(result, expected)
# Check that the rolling function output matches applying an alternative
# function to the rolling window object
expected2 = constructor(rolling.apply(lambda x: np_func(x, **np_kwargs)))
tm.assert_equal(result, expected2)
# Check that the function output matches applying an alternative function
# if min_periods isn't specified
# GH 39604: After count-min_periods deprecation, apply(lambda x: len(x))
# is equivalent to count after setting min_periods=0
min_periods = 0 if func == "count" else None
rolling3 = constructor(values).rolling(window=indexer, min_periods=min_periods)
result3 = getattr(rolling3, func)()
expected3 = constructor(rolling3.apply(lambda x: np_func(x, **np_kwargs)))
tm.assert_equal(result3, expected3)
@pytest.mark.parametrize("constructor", [Series, DataFrame])
def test_rolling_forward_skewness(constructor):
values = np.arange(10.0)
values[5] = 100.0
indexer = FixedForwardWindowIndexer(window_size=5)
rolling = constructor(values).rolling(window=indexer, min_periods=3)
result = rolling.skew()
expected = constructor(
[
0.0,
2.232396,
2.229508,
2.228340,
2.229091,
2.231989,
0.0,
0.0,
np.nan,
np.nan,
]
)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"func,expected",
[
("cov", [2.0, 2.0, 2.0, 97.0, 2.0, -93.0, 2.0, 2.0, np.nan, np.nan]),
(
"corr",
[
1.0,
1.0,
1.0,
0.8704775290207161,
0.018229084250926637,
-0.861357304646493,
1.0,
1.0,
np.nan,
np.nan,
],
),
],
)
def test_rolling_forward_cov_corr(func, expected):
values1 = np.arange(10).reshape(-1, 1)
values2 = values1 * 2
values1[5, 0] = 100
values = np.concatenate([values1, values2], axis=1)
indexer = FixedForwardWindowIndexer(window_size=3)
rolling = DataFrame(values).rolling(window=indexer, min_periods=3)
# We are interested in checking only pairwise covariance / correlation
result = getattr(rolling, func)().loc[(slice(None), 1), 0]
result = result.reset_index(drop=True)
expected = Series(expected)
expected.name = result.name
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"closed,expected_data",
[
["right", [0.0, 1.0, 2.0, 3.0, 7.0, 12.0, 6.0, 7.0, 8.0, 9.0]],
["left", [0.0, 0.0, 1.0, 2.0, 5.0, 9.0, 5.0, 6.0, 7.0, 8.0]],
],
)
def test_non_fixed_variable_window_indexer(closed, expected_data):
index = date_range("2020", periods=10)
df = DataFrame(range(10), index=index)
offset = BusinessDay(1)
indexer = VariableOffsetWindowIndexer(index=index, offset=offset)
result = df.rolling(indexer, closed=closed).sum()
expected = DataFrame(expected_data, index=index)
tm.assert_frame_equal(result, expected)
def test_fixed_forward_indexer_count():
# GH: 35579
df = DataFrame({"b": [None, None, None, 7]})
indexer = FixedForwardWindowIndexer(window_size=2)
result = df.rolling(window=indexer, min_periods=0).count()
expected = DataFrame({"b": [0.0, 0.0, 1.0, 1.0]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
("end_value", "values"), [(1, [0.0, 1, 1, 3, 2]), (-1, [0.0, 1, 0, 3, 1])]
)
@pytest.mark.parametrize(("func", "args"), [("median", []), ("quantile", [0.5])])
def test_indexer_quantile_sum(end_value, values, func, args):
# GH 37153
class CustomIndexer(BaseIndexer):
def get_window_bounds(self, num_values, min_periods, center, closed):
start = np.empty(num_values, dtype=np.int64)
end = np.empty(num_values, dtype=np.int64)
for i in range(num_values):
if self.use_expanding[i]:
start[i] = 0
end[i] = max(i + end_value, 1)
else:
start[i] = i
end[i] = i + self.window_size
return start, end
use_expanding = [True, False, True, False, True]
df = DataFrame({"values": range(5)})
indexer = CustomIndexer(window_size=1, use_expanding=use_expanding)
result = getattr(df.rolling(indexer), func)(*args)
expected = DataFrame({"values": values})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"indexer_class", [FixedWindowIndexer, FixedForwardWindowIndexer, ExpandingIndexer]
)
@pytest.mark.parametrize("window_size", [1, 2, 12])
@pytest.mark.parametrize(
"df_data",
[
{"a": [1, 1], "b": [0, 1]},
{"a": [1, 2], "b": [0, 1]},
{"a": [1] * 16, "b": [np.nan, 1, 2, np.nan] + list(range(4, 16))},
],
)
def test_indexers_are_reusable_after_groupby_rolling(
indexer_class, window_size, df_data
):
# GH 43267
df = DataFrame(df_data)
num_trials = 3
indexer = indexer_class(window_size=window_size)
original_window_size = indexer.window_size
for i in range(num_trials):
df.groupby("a")["b"].rolling(window=indexer, min_periods=1).mean()
assert indexer.window_size == original_window_size
@pytest.mark.parametrize(
"window_size, num_values, expected_start, expected_end",
[
(1, 1, [0], [1]),
(1, 2, [0, 1], [1, 2]),
(2, 1, [0], [1]),
(2, 2, [0, 1], [2, 2]),
(5, 12, range(12), list(range(5, 12)) + [12] * 5),
(12, 5, range(5), [5] * 5),
(0, 0, np.array([]), np.array([])),
(1, 0, np.array([]), np.array([])),
(0, 1, [0], [0]),
],
)
def test_fixed_forward_indexer_bounds(
window_size, num_values, expected_start, expected_end
):
# GH 43267
indexer = FixedForwardWindowIndexer(window_size=window_size)
start, end = indexer.get_window_bounds(num_values=num_values)
tm.assert_numpy_array_equal(start, np.array(expected_start), check_dtype=False)
tm.assert_numpy_array_equal(end, np.array(expected_end), check_dtype=False)
assert len(start) == len(end)
@pytest.mark.parametrize(
"df, window_size, expected",
[
(
DataFrame({"b": [0, 1, 2], "a": [1, 2, 2]}),
2,
Series(
[0, 1.5, 2.0],
index=MultiIndex.from_arrays([[1, 2, 2], range(3)], names=["a", None]),
name="b",
dtype=np.float64,
),
),
(
DataFrame(
{
"b": [np.nan, 1, 2, np.nan] + list(range(4, 18)),
"a": [1] * 7 + [2] * 11,
"c": range(18),
}
),
12,
Series(
[
3.6,
3.6,
4.25,
5.0,
5.0,
5.5,
6.0,
12.0,
12.5,
13.0,
13.5,
14.0,
14.5,
15.0,
15.5,
16.0,
16.5,
17.0,
],
index=MultiIndex.from_arrays(
[[1] * 7 + [2] * 11, range(18)], names=["a", None]
),
name="b",
dtype=np.float64,
),
),
],
)
def test_rolling_groupby_with_fixed_forward_specific(df, window_size, expected):
# GH 43267
indexer = FixedForwardWindowIndexer(window_size=window_size)
result = df.groupby("a")["b"].rolling(window=indexer, min_periods=1).mean()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"group_keys",
[
(1,),
(1, 2),
(2, 1),
(1, 1, 2),
(1, 2, 1),
(1, 1, 2, 2),
(1, 2, 3, 2, 3),
(1, 1, 2) * 4,
(1, 2, 3) * 5,
],
)
@pytest.mark.parametrize("window_size", [1, 2, 3, 4, 5, 8, 20])
def test_rolling_groupby_with_fixed_forward_many(group_keys, window_size):
# GH 43267
df = DataFrame(
{
"a": np.array(list(group_keys)),
"b": np.arange(len(group_keys), dtype=np.float64) + 17,
"c": np.arange(len(group_keys), dtype=np.int64),
}
)
indexer = | FixedForwardWindowIndexer(window_size=window_size) | pandas.api.indexers.FixedForwardWindowIndexer |
import pandas as pd
import zipfile
import re
import collections
from lxml import etree
import pathlib
import utils
import random
docxFileName = "../resources/quicks/quick_section4.docx"
annFileName = "../resources/quicks/annotations.tsv"
### Issue a warning if either the docx Chronology or the annotations are not available:
if not pathlib.Path(docxFileName).is_file() or not pathlib.Path(annFileName).is_file():
print("\n***WARNING:***\n\nOne of the following files does not exist:")
print("* " + docxFileName)
print("* " + annFileName)
print("\nThis script will be skipped. Make sure you follow the instructions in\nhttps://github.com/Living-with-machines/station-to-station/blob/main/resources.md\nto make sure you have the files required to be able to run the linking experiments.")
print()
### Otherwise, process and parse the docx file.
else:
docxZip = zipfile.ZipFile(docxFileName)
documentXML = docxZip.read('word/document.xml')
et = etree.XML(documentXML)
ns = {'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'}
pathlib.Path('../resources/quicks/').mkdir(parents=True, exist_ok=True)
### Find main stations
mainstation = ""
lowerstation = ""
dText = dict()
counter = 0
for i, para in enumerate(et.xpath('//w:p', namespaces=ns)):
text = para.xpath('./w:r/w:t', namespaces=ns)
description = " ".join([t.text for t in text])
mainstation, counter = utils.is_mainst(para, mainstation, counter, ns)
description = description.lstrip('\x01').strip()
if description:
if (counter, mainstation) in dText:
dText[(counter, mainstation)].append(description)
else:
description = re.sub('^(' + re.escape(mainstation) + ')', '\1', description).lstrip('\x01').strip()
description = re.sub(r" +", " ", description).lstrip('\x01').strip()
if description:
dText[(counter, mainstation)] = [description]
### Index main stations
dStations = collections.OrderedDict(dText)
indices = []
stations = []
descriptions = []
for k in dStations:
indices.append(k[0])
stations.append(k[1])
descriptions.append(dStations[k])
stationdf = pd.DataFrame(columns=["Index", "Station", "Description"])
stationdf["Index"] = indices
stationdf["Station"] = stations
stationdf["Description"] = descriptions
stationdf = stationdf.set_index("Index")
### Detect substations
stations = pd.DataFrame(columns=['station','type','description'])
cols = ['MainId', 'MainStation', 'SubId', 'SubStation', 'Description']
lst = []
subInd = 0
for i, row in stationdf.iterrows():
main_station = row["Station"]
description = row["Description"]
dSubstations, subInd = utils.process_decription(main_station, description, subInd)
for ss in dSubstations:
lst.append([i, main_station, ss[0], ss[1], dSubstations[ss]])
subsdf = pd.DataFrame(lst, columns=cols)
### Renaming abbreviated substations
subsdf['SubStFormatted'] = subsdf.apply(lambda row: utils.subst_rename(row["MainStation"], row["SubStation"]), axis = 1)
subsdf = subsdf[["MainId", "SubId", "MainStation", "SubStation", "SubStFormatted", "Description"]]
subsdf.to_pickle('../resources/quicks/quicks_processed.pkl')
### Find disambiguators and companies
parsedf = subsdf.copy()
parsedf[['Disambiguator', 'Companies', 'FirstCompanyWkdt', 'AltCompaniesWkdt']] = parsedf.apply(lambda row: pd.Series(list(utils.detect_companies(row["Description"]))), axis = 1)
### Extract map information
parsedf[['LocsMaps', 'LocsMapsDescr']] = parsedf.apply(lambda row: pd.Series(list(utils.detect_mapsInfo(row["Description"]))), axis = 1)
### Extact alternate and referenced railway stations
parsedf[['Altnames', 'Referenced']] = parsedf.apply(lambda row: pd.Series(list(utils.detect_altnames(row["Description"], row["MainStation"], row["SubStFormatted"]))), axis = 1)
### Capture opening and closing dates
parsedf[['FirstOpening', 'LastClosing', 'Interrupted']] = parsedf.apply(lambda row: pd.Series(list(utils.capture_dates(row["Description"]))), axis = 1)
### Store resulting dataframe
parsedf.to_pickle('../resources/quicks/quicks_parsed.pkl')
### Drop description from dataframe for next steps:
parsedf = parsedf.drop(columns=["Description"])
### Create dev and test dataframes
# **Note:** You will need to have the `annotations.tsv` file in `resources`.
annotations = pd.read_csv(annFileName, sep='\t')
annotations = annotations[annotations["Final Wikidata ID"] != "cross_reference"]
annotations = annotations[annotations["Final Wikidata ID"] != "parsing_error"]
annotations = annotations[annotations["Final Wikidata ID"] != "unknown"]
annotations = annotations.sample(frac=1, random_state=42).reset_index(drop=True)
# Split into train and test:
queries = list(annotations.SubId.unique())
random.Random(42).shuffle(queries)
test_cutoff = int(len(queries)*.5)
train_q, test_q = queries[test_cutoff:],queries[:test_cutoff]
df_dev = annotations[annotations.SubId.isin(train_q)]
df_test = annotations[annotations.SubId.isin(test_q)]
df_test = | pd.merge(df_test, parsedf, on=["MainId", "SubId", "MainStation", "SubStation", "SubStFormatted"]) | pandas.merge |
#Library of functions called by SimpleBuildingEngine
import pandas as pd
import numpy as np
def WALLS(Btest=None):
#Building height
h_building = 2.7#[m]
h_m_building = h_building / 2
h_cl = 2.7# heigth of a storey
#number of walls
n_walls = 7
A_fl = 48
#WALLS CHARACTERISTICS
#Orientation
ori = pd.Series([('S'), ('W'), ('N'), ('E'), ('R'), ('F'), ('C')])
#Surface azimuth
surf_az = pd.Series([0, 90, 180 - 90, 0, 0, 0])
#Slopes (90:vertical; 0:horizontal)
slope = pd.Series([90, 90, 90, 90, 0, 0, 0])
#Masks
f_low_diff = pd.Series([1, 1, 1, 1, 1, 1, 1])
f_low_dir = pd.Series([1, 1, 1, 1, 1, 1, 1])
#U VALUES
U_hopw = pd.Series([0.5144, 0.5144, 0.5144, 0.5144, 0.3177, 0, 0])
U_lopw = pd.Series([3, 3, 3, 3, 3, 3, 3])
U_fr = pd.Series([2.4, 2.4, 2.4, 2.4, 2.4, 2.4, 2.4])
U_gl = pd.Series([3, 3, 3, 3, 3, 3, 3])
if (Btest == 195 or Btest == 395):
#SURFACES
#Heavy Opaque walls
A_hopw = pd.Series([21.6, 16.2, 21.6, 16.2, 48, 48, 48])
#Windows
A_wd = pd.Series([0, 0, 0, 0, 0, 0, 0])
#Frame
FWR = pd.Series([0, 0, 0, 0, 0, 0, 0])
A_fr = FWR * A_wd
#Glazing
A_gl = A_wd - A_fr
#Light Opaque walls
A_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
elif (Btest == 200 or Btest == 210 or Btest == 230 or Btest == 240 or Btest == 250 or Btest == 400 or Btest == 410
or Btest == 420 or Btest == 430 or Btest == 800):
#Heavy Opaque walls
A_hopw = pd.Series([9.6, 16.2, 21.6, 16.2, 48, 48, 48])
#Windows
A_wd = pd.Series([0, 0, 0, 0, 0, 0, 0])
#Frame
FWR = pd.Series([0, 0, 0, 0, 0, 0, 0])
A_fr = FWR * A_wd
#Glazing
A_gl = A_wd - A_fr
#Light Opaque walls
A_lopw = pd.Series([12, 0, 0, 0, 0, 0, 0])
elif (Btest == 270 or Btest == 320 or Btest == 600 or Btest == 640 or Btest == 650 or Btest == 810 or Btest == 900
or Btest == 940 or Btest == 950 or Btest == 6001 or Btest == 9001 or Btest == 6501 or Btest == 9501):
#Heavy Opaque walls
A_hopw = pd.Series([9.6, 16.2, 21.6, 16.2, 48, 48, 48])
#Windows
A_wd = pd.Series([12, 0, 0, 0, 0, 0, 0])
#Frame
FWR = pd.Series([0, 0, 0, 0, 0, 0, 0])
A_fr = FWR * A_wd
#Glazing
A_gl = A_wd - A_fr
#Light Opaque walls
A_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
elif (Btest == 300 or Btest == 620 or Btest == 920):
#Heavy Opaque walls
A_hopw = pd.Series([9.6, 16.2, 21.6, 16.2, 48, 48, 48])
#Windows
A_wd = pd.Series([0, 6, 0, 6, 0, 0, 0])
#Frame
FWR = pd.Series([0, 0, 0, 0, 0, 0, 0])
A_fr = FWR * A_wd
#Glazing
A_gl = A_wd - A_fr
#Light Opaque walls
A_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
#Total
A_hopw_t = A_hopw.sum()
A_wd_t = A_wd.sum()
A_fr_t = A_fr.sum()
A_lopw_t = A_lopw.sum()
A_gl_t = max(0, A_wd_t - A_fr_t)
A_t = A_hopw_t + A_lopw_t + A_wd_t + A_fr_t
#CAPACITIES
if (Btest == 800 or Btest == 900 or Btest == 920 or Btest == 940 or Btest == 950 or Btest == 9001 or Btest == 9501):
C_hopw = ([145154, 145154, 145154, 145154, 18170, 112121, 0])
C_lopw = ([0, 0, 0, 0, 0, 0, 0])
else:
C_hopw = ([14534, 14534, 14534, 14534, 18170, 19620, 0])
C_lopw = ([0, 0, 0, 0, 0, 0, 0])
C_m = sum((A_lopw * C_lopw + A_hopw * C_hopw))
#Effective mass area [m^2]
A_m = C_m ** 2 / sum((A_lopw * np.exp2(C_lopw) + A_hopw * np.exp2(C_hopw)))
return n_walls, f_low_diff, f_low_dir, ori, surf_az, slope, A_t, A_fl, A_lopw_t, A_hopw_t, A_gl_t, A_fr_t, A_lopw,\
A_hopw, A_gl, h_cl, C_m, A_m, U_hopw, U_lopw, U_fr, U_gl
def w_t_RH(p_atm=None, t=None, RH=None):
from math import exp
#Humidity ratio as function of drybulb temperature and humidity ratio
p_w_s = exp((17.438 * t / (239.78 + t)) + 6.4147)#partial pressure of saturated water vapor
p_w = RH * p_w_s
w = (p_w * 0.62198) / (p_atm - p_w)
return w
def ZENITHANG(Lat=None, Long=None, Long_st=None, n=None, h=None):
from math import pi,cos,sin,acos
from numpy import fix
#ZENITH ANGLE
#Ref: Duffie,J.A.,<NAME>. 1980. Solar engineering of thermal
#processes. 2nd Edition. <NAME> & Sons.
#OUTPUTS
# -h_sol: Solar time (in hours)
# -h_sol_per: Solar time (in hours per day)
# -phi: Latitude in radians
# -delta: Declination angle in radians
# -omega: Hour angle in radians
# -theta_z: Zenith angle in radians, i.e. angle of incidence of beam radiation on a horizontal surface
#INPUTS
# -Lat: Latitude of the location (north positive) -90<Lat<90
# -Long: Longitude of the location (west positive) 0<Long<180
# -Long_st: Longitude of the standard meridian of the time zone
# -n: day 1<n<365
# -h: hour 1<h<8760
#Angles in radians%
phi = Lat * pi / 180
#Summer time correction (Masy, 2008)
epsilon_summer = 1
#Equation of time (minutes)
B = (n - 1) * 360 / 365 * pi / 180
E = 229.2 * (0.000075 + 0.001868 * cos(B) - 0.032077 * sin(B) - 0.014615 * cos(2 * B) - 0.04089 * sin(2 * B))
#Solar time (in hours)
h_sol = h + (4 * (Long_st - Long) + E) / 60 - epsilon_summer
#Solar time (in hours per day)
h_sol_per_1 = h_sol - 24 * fix(h_sol / 24)
if h_sol_per_1 <= 1E-6:
h_sol_per = 24
else:
h_sol_per = h_sol_per_1
#Declination (angular position of the sun at solar noon, north positive)
#-23.45<delta<23.45
delta = 23.45 * sin(360 * (284 + n) / 365 * pi / 180) * pi / 180#(daily basis, Cooper in Duffie & Beckmann)
#Hour angle (morning negative, afternoon positive)
omega = (h_sol_per - 12) * 15 * pi / 180
#Zenith angle (between the vertical and the line to the sun)
theta_z = max(1E-5, acos(cos(delta) * cos(phi) * cos(omega) + sin(delta) * sin(phi)))
return phi, delta, omega, theta_z, h_sol
def CSITH(Lat=None, Long=None, Long_st=None, n=None, h=None):
from math import cos,exp
#Clear sky solar radiation
#OUTPUTS
# -I_th_cs: Clear sky theoretical solar radiation (in W/m2)
#INPUTS
# -Lat: Latitude of the location (north positive) -90<Lat<90
# -Long: Longitude of the location (west positive) 0<Long<180
# -Long_st: Longitude of the standard meridian of the time zone
# -n: day 1<n<365
# -h: hour 1<h<8760
#Main angles and solar time for location
phi, delta, omega, theta_z, h_sol = ZENITHANG(Lat, Long, Long_st, n, h)
#Extraterrestrial radiation
G_sc = 1353#W/m2 - Solar constant
I_on = G_sc * (1 + 0.033 * cos(360 * (h_sol / 24) / 365))#Normal extraterrestrial radiation
#Atmospheric transmittance for beam radiation (altitude = 0m)
tau_b = 0.12814 + 0.7568875 * exp(-0.387225 / (cos(theta_z)))
#Clear sky beam normal radiation
I_cnb = I_on * tau_b
#Clear sky horizontal beam radiation
I_cb = I_cnb * cos(theta_z)
#Atmospheric transmittance for diffuse radiation (altitude = 0m)
tau_d = 0.271 - 0.294 * tau_b
#Clear sky horizontal diffuse radiation
I_cd = I_on * tau_d * cos(theta_z)
#Total horizontal clear sky radiation
I_th_cs = max(0, (I_cb + I_cd))
#Simplified calculation (G.Masy)
I_th_cs2 = max(0, (0.7 * I_on * cos(theta_z)))
return I_th_cs,I_th_cs2
def Btest_cases(Btest=None, h=None):
if (Btest == 195 or Btest == 200):
#set points
T_i_set_h = 20
T_i_set_c = 20
#internal gain
Q_dot_appl = 0
#infiltrations
ACH_inf = 0
#SOLAR PROPERTIES
#SHGC
SHGC_gl_0 = pd.Series([0.789, 0.789, 0.789, 0.789, 0.789, 0, 0])
#IR emittance
epsilon_ir_hopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
epsilon_ir_lopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
epsilon_ir_gl = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
#Solar absorbance
alpha_hopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
alpha_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
#Solar Shadings
e_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #0=no solar shading; 1=interior solar shadings; 2=exterior solar shadings
mode_solshad = pd.Series([1, 1, 1, 1, 1, 0, 0]) #1=manual solar shadings; 2=automatic solar shadings
NL_ext_max = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Exterior natural lighting intensity for control of shadings
IAC_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Indoor solar Attenuation Coefficient (fraction of SHGC with solar shadings)
f_c_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Convective fraction of solar gains with solar shadings
#Ventilation
V_dot_vent = 0
elif Btest == 210 or Btest == 220:
#set points
T_i_set_h = 20
T_i_set_c = 20
#internal gain
Q_dot_appl = 0
#infiltrations
ACH_inf = 0
#SOLAR PROPERTIES
#SHGC
SHGC_gl_0 = pd.Series([0.789, 0.789, 0.789, 0.789, 0.789, 0, 0])
#IR emittance
epsilon_ir_hopw = pd.Series([0.9, 0.9, 0.9, 0.9, 0.9, 0, 0])
epsilon_ir_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
epsilon_ir_gl = pd.Series([0.9, 0.9, 0.9, 0.9, 0.9, 0, 0])
#Solar absorbance
alpha_hopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
alpha_lopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
#Solar Shadings
e_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #0=no solar shading; 1=interior solar shadings; 2=exterior solar shadings
mode_solshad = pd.Series([1, 1, 1, 1, 1, 0, 0]) #1=manual solar shadings; 2=automatic solar shadings
NL_ext_max = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Exterior natural lighting intensity for control of shadings
IAC_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Indoor solar Attenuation Coefficient (fraction of SHGC with solar shadings)
f_c_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Convective fraction of solar gains with solar shadings
#Ventilation
V_dot_vent = 0
elif Btest == 230:
#set points
T_i_set_h = 20
T_i_set_c = 20
#internal gain
Q_dot_appl = 0
#infiltrations
ACH_inf = 1
#SOLAR PROPERTIES
#SHGC
SHGC_gl_0 = pd.Series([0.789, 0.789, 0.789, 0.789, 0.789, 0, 0])
#IR emittance
epsilon_ir_hopw = pd.Series([0.9, 0.9, 0.9, 0.9, 0.9, 0, 0])
epsilon_ir_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
epsilon_ir_gl = pd.Series([0.9, 0.9, 0.9, 0.9, 0.9, 0, 0])
#Solar absorbance
alpha_hopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
alpha_lopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
#Solar Shadings
e_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #0=no solar shading; 1=interior solar shadings; 2=exterior solar shadings
mode_solshad = pd.Series([1, 1, 1, 1, 1, 0, 0]) #1=manual solar shadings; 2=automatic solar shadings
NL_ext_max = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Exterior natural lighting intensity for control of shadings
IAC_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Indoor solar Attenuation Coefficient (fraction of SHGC with solar shadings)
f_c_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Convective fraction of solar gains with solar shadings
#Ventilation
V_dot_vent = 0
elif Btest == 240:
#set points
T_i_set_h = 20
T_i_set_c = 20
#internal gain
Q_dot_appl = 200
#infiltrations
ACH_inf = 0
#SOLAR PROPERTIES
#SHGC
SHGC_gl_0 = pd.Series([0.789, 0.789, 0.789, 0.789, 0.789, 0, 0])
#IR emittance
epsilon_ir_hopw = pd.Series([0.9, 0.9, 0.9, 0.9, 0.9, 0, 0])
epsilon_ir_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
epsilon_ir_gl = pd.Series([0.9, 0.9, 0.9, 0.9, 0.9, 0, 0])
#Solar absorbance
alpha_hopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
alpha_lopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
#Solar Shadings
e_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #0=no solar shading; 1=interior solar shadings; 2=exterior solar shadings
mode_solshad = pd.Series([1, 1, 1, 1, 1, 0, 0]) #1=manual solar shadings; 2=automatic solar shadings
NL_ext_max = | pd.Series([0, 0, 0, 0, 0, 0, 0]) | pandas.Series |
import numpy as np
import hydra
from hydra.utils import get_original_cwd
from dataset.dataloader.labeledDS import LabeledDataModule
from dataset.dataloader.unlabeledDS import UnlabeledDataModule
import os
from utils.metrics import AllMetrics
import json
from sklearn.preprocessing import StandardScaler
import warnings
from tqdm import trange
import pandas as pd
from model.SoilModel import SoilModel
from dataset.dataloader.patchDS import PatchDataModule
@hydra.main(config_path='conf', config_name='config')
def my_app(cfg):
predictions = []
targets = []
metric = AllMetrics()
if cfg.model.name == 'soilcnn':
for i in range(cfg.dataset.n_splits):
data_labeled = LabeledDataModule(path=os.path.join(get_original_cwd(), cfg.dataset.path_labeled),
features_metrical=cfg.vars.features_metrical,
features_categorical=cfg.vars.features_categorical,
levels_categorical=cfg.vars.levels_categorical,
encoding_categorical=cfg.vars.encoding_categorical,
mode='test', fold=i)
data_labeled_patch = PatchDataModule(path_lab=os.path.join(get_original_cwd(), cfg.dataset.path_labeled),
path_unlab=os.path.join(get_original_cwd(), cfg.dataset.path_unlabeled),
n=cfg.model.parameters.patch_size,
deviation_to_shrink_df=cfg.patch.parameters.deviation_to_shrink_df,
deviation_for_perfect_hit1=cfg.patch.parameters.deviation_for_perfect_hit1,
deviation_for_perfect_hit2=cfg.patch.parameters.deviation_for_perfect_hit2,
deviation_between_two_points=cfg.patch.parameters.deviation_between_two_points,
features_metrical = cfg.vars.features_metrical,
features_categorical = cfg.vars.features_categorical,
mode="test")
data_unlabeled = UnlabeledDataModule(path=os.path.join(get_original_cwd(), cfg.dataset.path_weak_labeled),
path_labeled=os.path.join(get_original_cwd(), cfg.dataset.path_labeled),
path_unlabeled=os.path.join(get_original_cwd(), cfg.dataset.path_unlabeled),
data_labeled=data_labeled,
weak_model=cfg.weak_model,
vars=cfg.vars.name,
fold=i)
model = SoilModel(cfg.model.name, cfg.model.parameters, data_labeled_patch.num_features, data_labeled_patch.num_data)
model.fit(data_labeled_patch, data_unlabeled)
pred, y = model.predict(data_labeled_patch)
metric.update(pred, y)
predictions.append(pred)
targets.append(y)
else:
for i in range(cfg.dataset.n_splits):
data_labeled = LabeledDataModule(path=os.path.join(get_original_cwd(), cfg.dataset.path_labeled),
features_metrical=cfg.vars.features_metrical,
features_categorical=cfg.vars.features_categorical,
levels_categorical=cfg.vars.levels_categorical,
encoding_categorical=cfg.vars.encoding_categorical,
mode='test', fold=i)
data_unlabeled = UnlabeledDataModule(path=os.path.join(get_original_cwd(), cfg.dataset.path_weak_labeled),
path_labeled=os.path.join(get_original_cwd(), cfg.dataset.path_labeled),
path_unlabeled=os.path.join(get_original_cwd(), cfg.dataset.path_unlabeled),
data_labeled=data_labeled,
weak_model=cfg.weak_model,
vars=cfg.vars.name,
fold=i)
data_unlabeled=None
model = SoilModel(cfg.model.name, cfg.model.parameters, data_labeled.num_features, data_labeled.num_data)
model.fit(data_labeled, data_unlabeled)
pred, y = model.predict(data_labeled)
metric.update(pred, y)
predictions.append(pred)
targets.append(y)
results = metric.calculate()
model_results = {'model': cfg.model.name}
model_results.update(results)
with open('results.json', mode='w') as file:
json.dump(model_results, file)
if cfg.general.verbose:
print(metric.calculate_string())
if cfg.general.save_predictions:
predictions = np.vstack(predictions)
targets = np.vstack(targets)
pred_target = np.hstack([targets, predictions])
columns = cfg.vars.targets + [t + "_pred" for t in cfg.vars.targets]
df = | pd.DataFrame(data=pred_target, columns=columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
df = pd.read_csv('./survey_results_public.csv')
schema = pd.read_csv('./survey_results_schema.csv')
##Categorical Variables
# Question 1
cat_df = df.select_dtypes(include=['object'])
#Question 2
cat_df_dict = {'the number of columns with no missing values': 6,
'the number of columns with more than half of the column missing': 49,
'the number of columns with more than 75% of the column missing': 13
}
#Question 3
sol_3_dict = {'Which column should you create a dummy variable for?': 'col1',
'When you use the default settings for creating dummy variables, how many are created?': 2,
'What happens with the nan values?': 'the NaNs are always encoded as 0'
}
#Question 4
#create needed dataframe
dummy_var_df = pd.DataFrame({'col1': ['a', 'a', 'b', 'b', 'a', np.nan, 'b', np.nan],
'col2': [1, np.nan, 3, np.nan, 5, 6, 7, 8]
})
#dummy cols
dummy_cols_df = pd.get_dummies(dummy_var_df['col1'], dummy_na=True)
#Question 5
cat_cols_lst = cat_df.columns
def create_dummy_df(df, cat_cols, dummy_na):
for col in cat_cols:
try:
# for each cat add dummy var, drop original column
df = pd.concat([df.drop(col, axis=1), | pd.get_dummies(df[col], prefix=col, prefix_sep='_', drop_first=True, dummy_na=dummy_na) | pandas.get_dummies |
# -*- coding: utf-8 -*-
# Dirichlet Mixing Module v1.2
# Implemented by <NAME>, based on original MatLab code by <NAME>.
# Mathematics described in Rudge et al.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Mean composition of melts from all lithologies
def mean_comp_total(f,w,c):
return np.sum(f*c*w)
# Import a Melts output file
def Import_Melts_output(filename,dX=0.0001,RudgeMethod=False):
"""Import a Melts csv file and recasts the melting column in terms of equal
dX increments. Converts pressure from bars to GPa.
Returns a pandas dataframe.
Parameters
----------
filename: string
Name of the file (and path relative to script) to import
dX: float
Discretization interval. Default 0.01%.
RudgeMethod: bool
Use the Rudge method for calculating melt fraction from the MELTS input.
I think this method is erroneous, but have kept it so his results may be
reproduced.
"""
meltsFile = pd.read_csv(filename,skiprows=1)
if RudgeMethod == True:
# Calculate Melt Fraction
meltsFile['F'] = (100-meltsFile.Mass)/100
# Calculate residual porosity
ResidualPorosity = meltsFile.F.iloc[0]
X = (meltsFile.F - ResidualPorosity)/(1-ResidualPorosity)
else:
X = (meltsFile.Mass[0]-meltsFile.Mass)/meltsFile.Mass[0]
# Find last X=0 term during upwelling (and first DeltaX>0 term)
# Find last melting step
MeltingBounds = [0,0]
MeltingBounds[0] = np.argmin(X[X>0]) - 1
MeltingBounds[1] = np.argmax(X)
# Make list of columns for new array
columns = ['X','P','T']
columns = columns + (meltsFile.columns[3:].tolist())
# Set up list of X values to map all other variables to
X_to_map = np.arange(X[MeltingBounds[0]],X[MeltingBounds[1]],dX)
# Create an array of zeroes with the length of the number of rows needed in the dataframe
EmptyColumns = np.array([np.zeros(np.shape(X_to_map))]*np.shape(columns)[0]).T
# Create Empty Dataframe
d = | pd.DataFrame(EmptyColumns, columns=columns) | pandas.DataFrame |
from tifffile import TiffFile
import numpy as np
import pandas as pd
import sys, hashlib, json
from scipy.ndimage.morphology import binary_dilation
from sklearn.neighbors import NearestNeighbors
from scipy.ndimage import gaussian_filter
from collections import OrderedDict
#from random import random
"""
A set of functions to help read / modify images
"""
def compare_tiff_contents(path1,path2):
"""
For two input tif image paths, see if they have the same layer structure and image descriptions
Args:
path1 (str): a path to a tif
path2 (str): a path to a tif
Returns:
result (bool): True if they are the same image False if they are not
"""
stack1 = hash_tiff_contents(path1)
stack2 = hash_tiff_contents(path2)
return stack1==stack2
def hash_tiff_contents(path):
"""
For two input tif image paths, see if they have the same layer structure and image descriptions
Args:
path (str): a path to a tif
Returns:
result (bool): True if they are the same image False if they are not
"""
stack = read_tiff_stack(path)
stack = tuple([(hashlib.sha256(x['raw_meta']['ImageDescription'].encode('utf-8')).hexdigest(),hashlib.sha256(x['raw_image'].tostring()).hexdigest()) for x in stack])
return hashlib.sha256(json.dumps(stack).encode('utf-8')).hexdigest()
def binary_image_dilation(np_array,steps=1):
"""
For an input image that gets set to 0 or 1, expand the 1's by the number of steps
Args:
np_array (numpy.array): a 2d image
steps (int): number of pixels to expand
Returns:
numpy.array: Image with that has been expanded
"""
img = make_binary_image_array(np_array)
img = binary_dilation(img,iterations=steps).astype(np.uint8)
return img
def median_id_coordinates(np_array,exclude_points=None):
"""
Locate a coordinate near the center of each object in an image
Args:
np_array (numpy.array): Take an image where pixels code for the IDs
exclude_points (list): optional. a list of tuples of 'x','y' coordinates. to exclude from being possible median outputs
Returns:
pandas.DataFrame: DataFrame indexed by ID with a near median 'x', and median 'y' for that ID
"""
nids = map_image_ids(np_array)
if exclude_points is not None:
exclude_points = pd.DataFrame(exclude_points,columns=['x','y'])
exclude_points['exclude'] = 'Yes'
nids = nids.merge(exclude_points,on=['x','y'],how='left')
nids = nids.loc[nids['exclude'].isna()].drop(columns='exclude')
# Get the median of the x dimension
ngroup = nids.groupby('id').apply(lambda x: pd.Series({'x':list(x['x'])}))
ngroup['median_x'] = ngroup['x'].apply(lambda x: np.quantile(x,0.5,interpolation='nearest'))
nids = nids.merge(ngroup[['median_x']],left_on='id',right_index=True)
# Subset to y values that fall on that x median
nids = nids.loc[nids['x']==nids['median_x']]
ngroup = nids.groupby('id').apply(lambda x: pd.Series({'x':list(x['x']),'y':list(x['y'])}))
nmedian = ngroup.applymap(lambda x: np.quantile(x,0.5,interpolation='nearest'))
return nmedian
def watershed_image(np_array,starting_points,valid_target_points,steps=1,border=1,fill_value=1,border_fill_value=0):
"""
A function for expanding a set of pixels in an image from starting_points and into valid_target_points.
Args:
np_array (numpy.array): A 2d array of the image where comprised of integer values
starting_points (list): a list of (x,y) tuples to begin filling out from. the values of these points
valid_target_points (list): a list of (x,y) tuples of valid locations to expand into
steps (int): the number of times to execute the watershed
border (int): the distance to remain away from the edge of the image
fill_value (int): The integer value to fill the area in with
border_fill_value (int): The value to fill the border area in with
Returns:
numpy.array: the image with the watershed executed
"""
output = np_array.copy()
if len(valid_target_points) > 0 and len(starting_points) > 0:
nn = NearestNeighbors(n_neighbors=1,radius=steps).\
fit(starting_points).\
radius_neighbors(valid_target_points,radius=steps)
for i,v in enumerate(nn[0]):
if len(v) == 0: continue
output[valid_target_points[i][1],valid_target_points[i][0]] = fill_value
output = _fill_borders(output,border,fill_value=border_fill_value)
return output
def _fill_borders(img,border_size_px,fill_value):
if border_size_px == 0: return img.copy()
_temp = pd.DataFrame(img.copy())
_temp.iloc[0:,0:border_size_px] = fill_value
_temp.iloc[0:border_size_px,0:] = fill_value
_temp.iloc[-1*border_size_px:,0:] = fill_value
_temp.iloc[0:,-1*border_size_px:] = fill_value
return np.array(_temp)
def split_color_image_array(np_array):
if len(np_array.shape) == 2: return [np_array]
images = []
for i in range(0,np_array.shape[2]):
image = np.array([[y[0] for y in x] for x in np_array])
images.append(image)
return np.array(images)
def make_binary_image_array(np_array):
"""
Make a binary (one channel) image from a drawn color image
Args:
np_array (numpy.array) a numpy array that came from a color image
Returns:
numpy.array: an array that is 1 where something (anything) existed vs 0 where there was nothing
"""
np_array = np.nan_to_num(np_array)
if len(np_array.shape) == 2: return np.array([[1 if y > 0 else 0 for y in x] for x in np_array])
return np.array([[1 if np.nanmax([z for z in y]) > 0 else 0 for y in x] for x in np_array]).astype(np.int8)
def read_tiff_stack(filename):
"""
Read in a tiff filestack into individual images and their metadata
Args:
filename (str): a path to a tiff file
Returns:
list: a list of dictionary entries keyed by 'raw_meta' and 'raw_image' for each image in the tiff stack
"""
data = []
with TiffFile(filename) as tif:
image_stack = tif.asarray()
for page in tif.pages:
meta = dict((tag.name,tag.value) for tag in page.tags.values())
data.append({'raw_meta':meta,'raw_image':np.array(page.asarray())})
return data
def flood_fill(image,x,y,exit_criteria,max_depth=1000,recursion=0,visited=None,border_trim=0):
"""
There is a flood_fill in scikit-image 0.15.dev0, but it is not faster than this
for this application. It may be good to revisit skikit's implemention if it is optimized.
Args:
image (numpy.array): a 2d numpy array image
x (int): x starting coordinate
y (int): y starting coordinate
exit_criteria (function): a function for which to exit i.e. ``lambda x: x!=0``
max_depth (int): a maximum recurssion depth
recursion (int): not set by user, used to keep track of recursion depth
visited (list): list of (x,y) tuple representing coordinates that have been visited
border_trim (int): the size of the border to avoid on the edge
Returns:
numpy.array: the filled image
"""
# return a list of coordinates we fill without visiting twice or hitting an exit condition
if visited is None: visited = set()
if len(visited)>=max_depth: return visited
if recursion > 1000: return visited
if y < 0+border_trim or y >= image.shape[0]-border_trim: return visited
if x < 0+border_trim or x >= image.shape[1]-border_trim: return visited
if (x,y) in visited: return visited
if exit_criteria(image[y][x]):
return visited
visited.add((x,y))
# traverse deeper
if (x,y+1) not in visited:
visited = flood_fill(image,x,y+1,exit_criteria,max_depth=max_depth,recursion=recursion+1,visited=visited,border_trim=border_trim)
if (x+1,y) not in visited:
visited = flood_fill(image,x+1,y,exit_criteria,max_depth=max_depth,recursion=recursion+1,visited=visited,border_trim=border_trim)
if (x,y-1) not in visited:
visited = flood_fill(image,x,y-1,exit_criteria,max_depth=max_depth,recursion=recursion+1,visited=visited,border_trim=border_trim)
if (x-1,y) not in visited:
visited = flood_fill(image,x-1,y,exit_criteria,max_depth=max_depth,recursion=recursion+1,visited=visited,border_trim=border_trim)
return visited
def map_image_ids(image,remove_zero=True):
"""
Convert an image into a list of coordinates and the id (coded by pixel integer value)
Args:
image (numpy.array): A numpy 2d array with the integer values representing cell IDs
remove_zero (bool): If True (default), remove all zero pixels
Returns:
pandas.DataFrame: A pandas dataframe with columns shaped as <x><y><id>
"""
nmap = pd.DataFrame(image.astype(float)).stack().reset_index().\
rename(columns={'level_0':'y','level_1':'x',0:'id'})
nmap.loc[~np.isfinite(nmap['id']),'id'] = 0
if remove_zero: nmap = nmap[nmap['id']!=0].copy()
nmap['id'] = nmap['id'].astype(int)
return nmap[['x','y','id']]
def _test_edge(image,x,y,myid):
for x_iter in [-1,0,1]:
xcoord = x+x_iter
if xcoord >= image.shape[1]-1: continue
for y_iter in [-1,0,1]:
ycoord = y+y_iter
if x_iter == 0 and y_iter==0: continue
if xcoord <= 0 or ycoord <=0: continue
if ycoord >= image.shape[0]-1: continue
if image[ycoord][xcoord] != myid: return True
return False
def image_edges(image,verbose=False):
"""
Take an image of cells where pixel intensitiy integer values represent cell ids
(fully filled-in) and return just the edges
Args:
image (numpy.array): A 2d numpy array of integers coding for cell IDs
verbose (bool): If true output more details to stderr
Returns:
numpy.array: an output image of just edges
"""
if verbose: sys.stderr.write("Making dataframe of possible neighbors.\n")
cmap = map_image_ids(image)
edge_image = np.zeros(image.shape)
if verbose: sys.stderr.write("Testing for edge.\n")
# cmap
#print(cmap.head())
mod = | pd.DataFrame({'mod':[-1,0,1]}) | pandas.DataFrame |
import pandas as pd
import datetime as dt
from scipy.interpolate import interp1d
from trios.utils.sunposition import sunpos
from trios.config import *
class awr_data:
'''
Above-water radiometry
'''
def __init__(self, idpr=None, files=None, Edf=None, Lskyf=None, Ltf=None):
# ''' get file names for Ed, Lsky and Lt data'''
if not files is None:
self.file = list(filter(lambda x: 'idpr' + idpr in x, files))
file = self.file
self.Edf = list(filter(lambda x: 'Ed' in x, file))
self.Lskyf = list(filter(lambda x: 'Lsky' in x, file))
self.Ltf = list(filter(lambda x: 'Lt' in x, file))
elif all(v is not None for v in [Edf, Lskyf, Ltf]):
self.Edf = Edf
self.Lskyf = Lskyf
self.Ltf = Ltf
else:
raise SyntaxError('ERROR: must specify `files` or `(Edf, Lskyf, Ltf)` variables')
self.idpr = idpr
def reader(self, lat, lon, alt=0, name='', index_idx=[0], utc_conv=0, file_format='csv'):
'''
Read above-water data files for a given acquisition series (idpr),
merge the different data types:
- by interpolating over wavelengths on a common band set (from those of Lt sensor)
- by searching the nearest neighbor in time
compute solar zenith angle
return full data frame
:param Edf: file path of irradiance data
:param Lskyf: file pat of sky radiance data
:param Ltf: file path of water radiance data
:param lat: latitude (decimal)
:param lon: longitude (decimal)
:param alt: altitude (m)
:param idpr: ID of the acquisition series
:param utc_conv: decimal hours added to convert local time into UTC
:return:
'''
# ''' read files with pandas format '''
d = data(index_idx, file_type=file_format)
Ed, wl_Ed = d.load_file(self.Edf, utc_conv=utc_conv)
Lsky, wl_Lsky = d.load_file(self.Lskyf, utc_conv=utc_conv)
Lt, wl_Lt = d.load_file(self.Ltf, utc_conv=utc_conv)
# ''' interpolate Ed, Lt and Lsky data upon common wavelength'''
wl = wl_common
intEd = interp1d(wl_Ed, Ed.values, fill_value='extrapolate')(wl)
newEd = pd.DataFrame(index=Ed.index, columns=pd.MultiIndex.from_tuples(zip(['Ed'] * len(wl), wl),
names=['param', 'wl']), data=intEd)
intLt = interp1d(wl_Lt, Lt.values, fill_value='extrapolate')(wl)
newLt = pd.DataFrame(index=Lt.index, columns=pd.MultiIndex.from_tuples(zip(['Lt'] * len(wl), wl),
names=['param', 'wl']), data=intLt)
intLsky = interp1d(wl_Lsky, Lsky.values, fill_value='extrapolate')(wl)
newLsky = pd.DataFrame(index=Lsky.index, columns=pd.MultiIndex.from_tuples(zip(['Lsky'] * len(wl), wl),
names=['param', 'wl']), data=intLsky)
# merge sensor data on time
df = pd.merge_asof(newLt, newEd, left_index=True, right_index=True, tolerance=pd.Timedelta("2 seconds"),
direction="nearest")
df = pd.merge_asof(df, newLsky, left_index=True, right_index=True, tolerance=pd.Timedelta("2 seconds"),
direction="nearest")
# Convert to UTC time
df.index = df.index # + pd.Timedelta(hours=3)
# add solar angle data and idpr
# compute solar angle (mean between fisrt and last aqcuisition time
df['sza', ''] = np.nan
for index, row in df.iterrows():
# print index
sza = sunpos(index, lat, lon, alt)[1]
df.at[index, 'sza'] = sza
df['idpr', ''] = self.idpr
df['name', ''] = name
return df, wl
class iwr_data:
'''
In-water radiometry
'''
def __init__(self, idpr, files):
# ''' get file names for Ed, Lsky and Lt data'''
self.file = list(filter(lambda x: 'idpr' + idpr in x, files))
file = self.file
self.Edf = list(filter(lambda x: 'Ed_' in x, file))
self.Edzf = list(filter(lambda x: 'Edz' in x, file))
self.Luzf = list(filter(lambda x: 'Luz' in x, file))
self.idpr = idpr
def reader(self, lat, lon, alt=0, name='', delta_Lu_depth=0, delta_Edz_depth=0):
'''
Read above-water data files for a given acquisition series (idpr),
merge the different data types:
- by interpolating over wavelengths on a common band set (from those of Lt sensor)
- by searching the nearest neighbor in time
compute solar zenith angle
return full data frame
:param Edf: file path of irradiance data
:param Edzf: file path of downward in-water irradiance data
:param Luzf: file path of upward in-water radiance data
:param lat: latitude (decimal)
:param lon: longitude (decimal)
:param alt: altitude (m)
:param delta_Lu_depth: adjustment of actual depth for Lu sensor (distance from depth sensor);
in meters for depth counted positively
:param delta_Edz_depth: similar to delta_Lu_depth for Edz sensor
:param idpr: ID of the acquisition series
:return:
'''
# ''' read files with pandas format '''
d = data([1, 0])
Ed, wl_Ed = d.load_csv(self.Edf)
Edz, wl_Edz = d.load_csv(self.Edzf)
Luz, wl_Luz = d.load_csv(self.Luzf)
# mask negative values TODO save number of discarded data
Ed[Ed < 0] = 0 # .mask(Ed<0,inplace=True)
Edz[Edz < 0] = 0 # .mask(Edz<0,inplace=True)
Luz[Luz < 0] = 0 # .mask(Luz<0,inplace=True)
# copy depth data to Ed frame on date index
# Ed.index = Ed.index.droplevel(level=1)
# ''' interpolate Ed, Edz and Luz data upon common wavelength'''
wl = wl_common
intEd = interp1d(wl_Ed, Ed.values, fill_value='extrapolate')(wl)
newEd = pd.DataFrame(index=Ed.index.get_level_values(0),
columns=pd.MultiIndex.from_tuples(list(zip(['Ed'] * len(wl), wl)), names=['param', 'wl']),
data=intEd)
intEdz = interp1d(wl_Edz, Edz.values, fill_value='extrapolate')(wl)
newEdz = pd.DataFrame(index=Edz.index, columns=pd.MultiIndex.from_tuples(list(zip(['Edz'] * len(wl), wl)),
names=['param', 'wl']), data=intEdz)
intLuz = interp1d(wl_Luz, Luz.values, fill_value='extrapolate')(wl)
newLuz = pd.DataFrame(index=Luz.index, columns=pd.MultiIndex.from_tuples(list(zip(['Luz'] * len(wl), wl)),
names=['param', 'wl']), data=intLuz)
print('read merge ok')
# correct depth data for sensor to sensor distance
newLuz.reset_index(level=1, inplace=True)
newLuz.iloc[:, 0] = newLuz.iloc[:, 0] + delta_Lu_depth
# newEd.reset_index(level=1,inplace=True)
newEdz.reset_index(level=1, inplace=True)
newEdz.iloc[:, 0] = newEdz.iloc[:, 0] + delta_Edz_depth
# merge sensor data on time
df = pd.merge_asof(newLuz, newEd, left_index=True, right_index=True, tolerance=pd.Timedelta("2 seconds"),
direction="nearest")
df = pd.merge_asof(df, newEdz, left_index=True, right_index=True, suffixes=('_Luz', '_Edz'),
tolerance=pd.Timedelta("2 seconds"),
direction="nearest") # by="depth",
# add solar angle data and idpr
# compute solar angle (mean between fisrt and last acquisition time
df['sza', ''] = np.nan
for index, row in df.iterrows():
# print index
sza = sunpos(index, lat, lon, alt)[1]
df.at[index, 'sza'] = sza
df['idpr', ''] = self.idpr
df['name', ''] = name
return df, wl
# def load_csv(self, file):
#
# dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
# if len(file) > 1:
# print('Warning! Multiple files found but only one expected, trios first file of the list:')
# print(file)
# file = file[0]
# df = pd.read_csv(file, sep=';', index_col=[1, 0], na_values=['-NAN'])
# df = df.dropna(axis=1, how='all').dropna(axis=0, how='all')
# df.index = df.index.set_levels([pd.to_datetime(df.index.levels[0]), df.index.levels[1]])
# df.columns = df.columns.astype('float') # str.extract('(\d+)',expand=False).astype('float')
# # resort to get data in increasing time order
# df.sort_index(inplace=True, level=0)
# wl = df.columns
#
# return df, wl
class swr_data:
'''
Surface-water radiometry
'''
def __init__(self, idpr, files):
# ''' get file names for Ed, Lsky and Lt data'''
self.file = list(filter(lambda x: 'idpr' + idpr in x, files))
file = self.file
self.Edf = list(filter(lambda x: '_Ed' in x, file))
self.Lu0f = list(filter(lambda x: '_Lu0+' in x, file))
self.idpr = idpr
def reader(self, lat=None, lon=None, alt=0):
'''
Read above-water data files for a given acquisition series (idpr),
merge the different data types:
- by interpolating over wavelengths on a common band set (from those of Lt sensor)
- by searching the nearest neighbor in time
compute solar zenith angle
return full data frame
:param Edf: file path of irradiance data
:param Lu0f: file path of upward in-water radiance data
:param lat: latitude (decimal)
:param lon: longitude (decimal)
:param alt: altitude (m)
:param idpr: ID of the acquisition series
:return:
'''
df = pd.DataFrame()
# ''' read files with pandas format '''
Ed, wl_Ed = data().load_csv(self.Edf)
Lu0, wl_Lu0 = data().load_csv(self.Lu0f)
# ''' interpolate Ed and Lsky data upon common wavelengths'''
wl = wl_common
intEd = interp1d(wl_Ed, Ed.values, fill_value='extrapolate')(wl)
newEd = pd.DataFrame(index=Ed.index,
columns=pd.MultiIndex.from_tuples(zip(['Ed'] * len(wl), wl), names=['param', 'wl']),
data=intEd)
intLu0 = interp1d(wl_Lu0, Lu0.values, fill_value='extrapolate')(wl)
newLu0 = pd.DataFrame(index=Lu0.index, columns=pd.MultiIndex.from_tuples(zip(['Lu0+'] * len(wl), wl),
names=['param', 'wl']), data=intLu0)
# merge sensor data on time
df = pd.merge_asof(newLu0, newEd, left_index=True, right_index=True, tolerance= | pd.Timedelta("2 seconds") | pandas.Timedelta |
from collections import namedtuple
from datetime import datetime as dt
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from speculator.features.OBV import OBV
from speculator.features.RSI import RSI
from speculator.features.SMA import SMA
from speculator.features.SO import SO
import speculator.models.random_forest as rf
import speculator.models.deep_neural_network as dnn
from speculator.utils import date
from speculator.utils import poloniex
# Enum-like object with 1:1 mapping. Converts a readable
# market trend like 'bearish' to an int that is easier to parse.
TARGET_CODES = {'bearish': 0, 'neutral': 1, 'bullish': 2}
class Market:
""" Evaluates TA indicators of a market
Gets data from a market and then calculates technical analysis features.
It also prepares this data to be fed into machine learning interfaces
by creating a pandas DataFrame, and generating training/test sets
for features (x-axis) and the target market trend (y-axis).
Attributes:
symbol: String of currency pair, like a ticker symbol.
unit: String of time period unit for count argument.
How far back to check historical market data.
valid values: 'hour', 'day', 'week', 'month', 'year'
count: Int of units.
How far back to check historical market data.
period: Int defining width of each chart candlestick in seconds.
Valid values: 300, 900, 1800, 7200, 14400, 86400.
json: JSON data as a list of dict dates, where the keys are
the raw market statistics.
"""
def __init__(self, json=None, symbol='USDT_BTC', unit='month', count=6, period=86400):
""" Inits market class of symbol with data going back count units """
self.symbol = symbol
self.unit = unit
self.count = count
self.period = period
if json is None:
self.json = self.get_json()
else:
self.json = json
def get_json(self):
""" Gets market chart data from today to a previous date """
today = dt.now()
DIRECTION = 'last'
epochs = date.get_end_start_epochs(today.year, today.month, today.day,
DIRECTION, self.unit, self.count)
return poloniex.chart_json(epochs['shifted'], epochs['initial'],
self.period, self.symbol)[0]
def set_features(self, partition=1):
""" Parses market data JSON for technical analysis indicators
Args:
partition: Int of how many dates to take into consideration
when evaluating technical analysis indicators.
Returns:
Pandas DataFrame instance with columns as numpy.float32 features.
"""
if len(self.json) < partition + 1:
raise ValueError('Not enough dates for the specified partition size: {0}. Try a smaller partition.'.format(partition))
data = []
for offset in range(len(self.json) - partition):
json = self.json[offset : offset + partition]
data.append(eval_features(json))
return | pd.DataFrame(data=data, dtype=np.float32) | pandas.DataFrame |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import re
import argparse
import json
import logging
import requests
from copy import deepcopy
import pandas as pd
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
def parse_args():
"""
parse input args
"""
parser = argparse.ArgumentParser()
# for local excel analysis
parser.add_argument(
"--log_file", type=str, default="result.log", help="ci result log path")
return parser.parse_args()
def _find_char(input_char):
"""
find english char in input string
"""
result = re.findall(r'[a-zA-Z=_/0-9.]+', str(input_char))
return result
def process_log(file_name: str):
"""
process log
"""
train_list_dict = []
export_list_dict = []
predict_det_list_dict = []
with open(file_name, 'r') as f:
for i, data in enumerate(f.readlines()):
# print(i, data)
train_dict = {}
if "train.py" in data:
split_data = data.split(' ')
for line_value in split_data:
if "=" in line_value:
key = _find_char(line_value.split('=')[0])
value = _find_char(line_value.split('=')[-1])
# print(key, value)
train_dict[key[0]] = ''.join(value)
if "successfully" in split_data:
train_dict["status"] = "passed"
else:
train_dict["status"] = "failed"
# print(train_dict)
train_list_dict.append(train_dict)
export_dict = {}
if "export_model.py" in data:
split_data = data.split(' ')
for line_value in split_data:
if "=" in line_value:
key = _find_char(line_value.split('=')[0])
value = _find_char(line_value.split('=')[-1])
# print(key, value)
export_dict[key[0]] = ''.join(value)
if "successfully" in split_data:
export_dict["status"] = "passed"
else:
export_dict["status"] = "failed"
# print(export_dict)
export_list_dict.append(export_dict)
predict_det_dict = {}
if "predict_det.py" in data:
split_data = data.split(' ')
for line_value in split_data:
if "=" in line_value:
key = _find_char(line_value.split('=')[0])
value = _find_char(line_value.split('=')[-1])
# print(key, value)
predict_det_dict[key[0]] = ''.join(value)
if "successfully" in split_data:
predict_det_dict["status"] = "passed"
else:
predict_det_dict["status"] = "failed"
# print(predict_det_dict)
predict_det_list_dict.append(predict_det_dict)
return train_list_dict, export_list_dict, predict_det_list_dict
def main():
"""
main
"""
args = parse_args()
a, b, c = process_log(args.log_file)
a_1 = pd.DataFrame(a)
b_1 = pd.DataFrame(b)
c_1 = | pd.DataFrame(c) | pandas.DataFrame |
"""
Train a neural network for regression task:
cv: 10
batch size: 8
initializer: He normal initializer
optimizer: AdamMax
learning rate: 0.0004
loss: RMSE
Calculate RMSE at once, Oct. 3, 2020 revised
"""
import argparse
import numpy as np
import pandas as pd
import scipy.stats as scistat
from datetime import datetime
import sklearn.preprocessing as skpre
import sklearn.model_selection as skms
import sklearn.metrics as skmts
import sklearn.utils as skut
import torch as tch
import torch.utils.data as tchud
import myModel as mynet
import myFit as myfit
import myDataloader as mydl
import myDatasplit as mysplit
import myUtility as myutil
import myPlotter as myplot
import myMetrics as mymts
import shap as sp
class RMSELoss(tch.nn.Module):
def __init__(self):
super(RMSELoss,self).__init__()
def forward(self,x,y):
eps = 1e-6
criterion = tch.nn.MSELoss()
loss = tch.sqrt(criterion(x, y) + eps)
return loss
def fit(net, train_dl, valid_dl, epochs, learning_rate, device, opt_fn):
"""
Return train and valid performance including loss
:param net: model
:param train_dl: train dataloader
:param valid_dl: valid dataloader
:param epochs: integer representing EPOCH
:param learning_rate: float representing LEARNING_RATE
:param device: string representing cpu or cuda:0
:param opt_fn: optimization function in torch (e.g., tch.optim.Adam)
:param loss_fn: loss function in torch (e.g., tch.nn.MSELoss)
"""
# setup
criterion = RMSELoss() # setup LOSS function
optimizer = opt_fn(net.parameters(), lr=learning_rate, weight_decay=1e-5) # setup optimizer
net = net.to(device) # load the network onto the device
trainloss_list = [] # metrics: MSE, size equals to EPOCH
validloss_list = [] # metrics: MSE, size equals to EPOCH
early_stopping = myutil.EarlyStopping(patience=30, verbose=True) # initialize the early_stopping
# repeat the training for EPOCH times
for epoch in range(epochs):
## training phase
net.train()
# initial loss
train_epoch_loss = 0.0 # save loss for each epoch, batch by batch
for i, (X_train, y_train) in enumerate(train_dl):
X_train, y_train = X_train.to(device), y_train.to(device) # load data onto the device
y_train_pred = net(X_train) # train result
train_loss = criterion(y_train_pred, y_train.float()) # calculate loss
optimizer.zero_grad() # clear gradients
train_loss.backward() # backpropagation
#### add this if you have gradient explosion problem ###
clip_value = 5
tch.nn.utils.clip_grad_value_(net.parameters(), clip_value)
########climp gradient within -5 ~ 5 ###################
optimizer.step() # update weights
train_epoch_loss += train_loss.item() # adding loss from each batch
# calculate total loss of all batches
avg_train_loss = train_epoch_loss / len(train_dl)
trainloss_list.append( avg_train_loss )
## validation phase
with tch.no_grad():
net.eval()
valid_epoch_loss = 0.0 # save loss for each epoch, batch by batch
for i, (X_valid, y_valid) in enumerate(valid_dl):
X_valid, y_valid = X_valid.to(device), y_valid.to(device) # load data onto the device
y_valid_pred = net(X_valid) # valid result
valid_loss = criterion(y_valid_pred, y_valid.float())#y_valid.unsqueeze(1)) # calculate loss
valid_epoch_loss += valid_loss.item() # adding loss from each batch
# calculate total loss of all batches, and append to result list
avg_valid_loss = valid_epoch_loss / len(valid_dl)
validloss_list.append( avg_valid_loss)
# display print message
#print('epoch={:}/{:}, train loss={:.5f}, valid loss={:.5f}'.format(
# epoch+1, epochs, train_epoch_loss / len(train_dl),
# valid_epoch_loss / len(valid_dl)))
# early_stopping needs the validation loss to check if it has decresed,
# and if it has, it will make a checkpoint of the current model
early_stopping(avg_valid_loss, net)
if early_stopping.early_stop:
print("Early stopping")
break
# load the last checkpoint with the best model
net.load_state_dict(tch.load('checkpoint.pt'))
return net, trainloss_list, validloss_list
def predict(net, test_dl, device):
"""
Return prediction list
:param net: model
:param train_dl: train dataloader
:param device: string representing cpu or cuda:0
"""
# create result lists
prediction_list = list()
with tch.no_grad():
net = net.to(device) # load the network onto the device
net.eval()
for i, (X_test, y_test) in enumerate(test_dl):
X_test, y_test = X_test.to(device), y_test.to(device) # load data onto the device
y_test_pred = net(X_test) # test result
# bring data back to cpu in np.array format, and append to result lists
prediction_list.append( y_test_pred.cpu().numpy() )
#print(prediction_list)
# merge all batches
prediction_list = np.vstack(prediction_list)
prediction_list = np.hstack(prediction_list).tolist()
# return
return prediction_list
# define arguments
def parse_parameter():
parser = argparse.ArgumentParser(description = "Train a feedforward")
parser.add_argument("-i", "--input_path",
required = True,
help = "input path")
parser.add_argument("-s", "--seed_int",
required = False,
default = 42,
type = int,
help = "seed for reproducibility. default=42")
parser.add_argument("-c", "--cv_int",
required = False,
default = 10,
type = int,
help = "K fold cross validation. default=10")
parser.add_argument("-g", "--gpu_int",
default = 0,
type = int,
help = "assign the n-th GPU")
parser.add_argument("-shap", "--shap_bool",
default = False,
type = bool,
help = "enable SHAP if True")
parser.add_argument("-o", "--output_path",
required = True,
help = "output path")
return parser.parse_args()
if __name__ == "__main__":
start_time = datetime.now()
# get args
args = parse_parameter()
# load data
df = pd.read_csv(args.input_path, header=0, index_col=[0,1], sep="\t")
# shuffle
sdf = skut.shuffle(df, random_state=args.seed_int)
# set parameters
myutil.set_seed(args.seed_int)
device = myutil.get_device(uth=args.gpu_int)
kFold = args.cv_int
learning_rate = 0.0004
epoch = 800
batch_size = 12
opt_fn = tch.optim.Adam
# create result list
loss_df_list = []
score_df_list = []
ytest_df_list = []
shap_df_list = []
# train with cross-validation
kf = skms.KFold(n_splits=kFold, random_state=args.seed_int, shuffle=True)
X_df = sdf.iloc[:, 0:-1]
y_df = sdf.iloc[:, -1]
# save best model with lowest RMSE
best_rmse = 10000
best_model = None
best_fold = 0
for i, (train_index, test_index) in enumerate(kf.split(X_df, y_df)):
n_fold = i+1
print('Fold={:}/{:}'.format(n_fold, args.cv_int))
# get train/test splits
Xtrain_arr = X_df.values[train_index]
Xtest_arr = X_df.values[test_index]
ytrain_arr = y_df.values[train_index]
ytest_arr = y_df.values[test_index]
# get train/valid splits from train
Xtrain_arr, Xvalid_arr, ytrain_arr, yvalid_arr = skms.train_test_split(Xtrain_arr, ytrain_arr,
test_size=0.1, random_state=args.seed_int)
print(' train={:}, valid={:}, test={:}'.format(Xtrain_arr.shape, Xvalid_arr.shape, Xtest_arr.shape))
# prepare dataframe for output
ytest_df = y_df.iloc[test_index].to_frame()
# convert to numpy array
Xtrain_arr = np.array(Xtrain_arr).astype('float32')
Xvalid_arr = np.array(Xvalid_arr).astype('float32')
Xtest_arr = np.array(Xtest_arr).astype('float32')
ytrain_arr = np.array(ytrain_arr).astype('float32')
yvalid_arr = np.array(yvalid_arr).astype('float32')
ytest_arr = np.array(ytest_arr).astype('float32')
# create mini-batch
train_dataset = mydl.NumpyDataset(tch.from_numpy(Xtrain_arr), tch.from_numpy(ytrain_arr))
valid_dataset = mydl.NumpyDataset(tch.from_numpy(Xvalid_arr), tch.from_numpy(yvalid_arr))
test_dataset = mydl.NumpyDataset(tch.from_numpy(Xtest_arr), tch.from_numpy(ytest_arr))
train_dl = tchud.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
valid_dl = tchud.DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)
test_dl = tchud.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
# initial weight
def init_weights(m):
if type(m) == tch.nn.Linear:
tch.nn.init.kaiming_uniform_(m.weight)
m.bias.data.fill_(0.01)
# load model
n_features = Xtrain_arr.shape[1]
net = mynet.FNN(n_features)
net.apply(init_weights)
# fit data with model
trained_net, train_loss_list, valid_loss_list = fit(net, train_dl, valid_dl, epoch, learning_rate, device, opt_fn)
prediction_list = predict(trained_net, test_dl, device)
# evaluation metrics
mse = skmts.mean_squared_error(ytest_arr, prediction_list)
rmse = np.sqrt(mse)
if rmse <= best_rmse:
best_rmse = rmse
best_fold = n_fold
best_model = trained_net
print('best model so far at fold={:}, rmse={:}'.format(best_fold, best_rmse))
if args.shap_bool == True:
print('calculate shapely values')
# random select 100 samples as baseline
train_dataset = mydl.NumpyDataset(tch.from_numpy(Xtrain_arr), tch.from_numpy(ytrain_arr))
train_dl = tchud.DataLoader(train_dataset, batch_size=200, shuffle=True)
background, lbl = next(iter(train_dl))
explainer = sp.DeepExplainer(trained_net, background[:100].to(device))
shap_arr = explainer.shap_values(tch.from_numpy(Xtest_arr))
shap_df = pd.DataFrame(shap_arr, index=ytest_df.index, columns=X_df.columns)
# append to result
shap_df_list.append(shap_df)
# collect result
loss_df = pd.DataFrame({'fold':[n_fold]*len(train_loss_list),
'epoch':[i+1 for i in range(len(train_loss_list))],
'train loss':train_loss_list,
'valid loss': valid_loss_list})
ytest_df['prediction'] = prediction_list
ytest_df['fold'] = n_fold
loss_df_list.append(loss_df)
ytest_df_list.append(ytest_df)
# end of fold
trained_net = None
# save to output
all_ytest_df = pd.concat(ytest_df_list, axis=0)
all_loss_df = | pd.concat(loss_df_list, axis=0) | pandas.concat |
import sys
import subprocess
import os
import pandas as pd
def get_repo_root():
"""Get the root directory of the repo."""
dir_in_repo = os.path.dirname(os.path.abspath('__file__'))
return subprocess.check_output('git rev-parse --show-toplevel'.split(),
cwd=dir_in_repo,
universal_newlines=True).rstrip()
ROOT_dir = get_repo_root()
sys.path.append(ROOT_dir)
sys.path.insert(0, ROOT_dir + '/lib')
import lib.gs_model as gs_model
import time
import json
import lib.validation as validation
class RegionParaGenerate:
def __init__(self, res=None, region=None, rg=None, visits=None):
self.res = res
self.region = region
self.rg = rg
self.visits = visits
def region_data_load(self, type='calibration'):
if '-' not in self.region:
self.res = ROOT_dir + '/dbs/' + self.region + '/visits/' + type + '.csv'
else:
self.res = ROOT_dir + '/dbs/sweden/visits/' + self.region.split('-')[1] + '_' + type + '.csv'
rg_ = gs_model.RegionDataPrep(region=self.region)
rg_.load_zones_odm()
rg_.load_geotweets(type=type)
rg_.kl_baseline_compute()
self.rg = rg_
self.visits = gs_model.VisitsGeneration(region=self.region, bbox=self.rg.bbox,
zones=self.rg.zones, odm=self.rg.gt_odm,
distances=self.rg.distances,
distance_quantiles=self.rg.distance_quantiles, gt_dms=self.rg.dms)
def visits_gen_by_days(self, type='calibration', p=None, gamma=None, beta=None, days=None):
if type == 'calibration':
tweets = self.rg.tweets_calibration
else:
tweets = self.rg.tweets_validation
# userid as index for visits_total
visits_total = self.visits.visits_gen(tweets, p, gamma, beta,
days=days, homelocations=self.rg.home_locations)
dms, _, _ = self.visits.visits2measure(visits=visits_total, home_locations=self.rg.home_locations)
kl = validation.DistanceMetrics().kullback_leibler(dms, titles=['groundtruth', 'model'])
print("D=", days, " kl=", kl)
return kl
if __name__ == '__main__':
file = ROOT_dir + '/results/para-search-r1/parasearch.txt'
list_lines = []
with open(file) as f:
for jsonObj in f:
line = json.loads(jsonObj)
list_lines.append(line)
df = pd.DataFrame(list_lines)
list_df_res = []
for region2compute in ['sweden', 'netherlands', 'saopaulo']:
# Start timing the code
start_time = time.time()
dc = df.loc[df['region'] == region2compute, ['p', 'beta', 'gamma']].to_dict('records')[0]
# Prepare region data by initiating the class
gs = RegionParaGenerate(region=region2compute)
tp = 'calibration'
gs.region_data_load(type=tp)
list_kl = [gs.visits_gen_by_days(type=tp, p=dc['p'], gamma=dc['gamma'], beta=dc['beta'], days=day) for day in
[1, 5] + [x*10 for x in range(1, 31)]]
df_res = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Extract a COCO captions dataframe from the annotation files."""
from __future__ import print_function
import os
import sys
import argparse
import pandas as pd
def main(args):
"""Extract a COCO captions dataframe from the annotation files."""
# Load coco library
sys.path.append(args.coco_path + '/PythonAPI')
from pycocotools.coco import COCO
set_2014 = ['val2014', 'train2014']
set_2017 = ['val2017', 'train2017']
# Make dataframe to store captions in
cocoDF = pd.DataFrame(columns=['id', 'set', 'filename', 'caption'])
for st in set_2014 + set_2017:
print('\nProcessing {}'.format(st))
# Instantiate coco classes
coco = COCO(args.coco_path +
'annotations/instances_{}.json'.format(st))
coco_anns = COCO(args.coco_path +
'annotations/captions_{}.json'.format(st))
# Get Categories
cats = coco.loadCats(coco.getCatIds())
# Get unique image ids
imgIds = []
for cat in cats:
imgId = coco.getImgIds(catIds=cat['id'])
imgIds += imgId
imgIds = list(set(imgIds))
# Get annotations
annIds = coco_anns.getAnnIds(imgIds=imgIds)
anns = coco_anns.loadAnns(annIds)
# Extract ids and captions as tuples
captions = [(int(ann['image_id']), ann['caption']) for ann in anns]
print(len(captions))
# Extract filenames as tuples
img_ids = list(set([ann['image_id'] for ann in anns]))
imgs = coco.loadImgs(img_ids)
filenames = [(int(img['id']), st + '/' + img['file_name'])
for img in imgs]
# Make dataframe of captions and filenames
captionDF = pd.DataFrame(captions, columns=['id', 'caption'])
filesDF = pd.DataFrame(filenames, columns=['id', 'filename'])
# Merge dataframes on image id
df = captionDF.merge(filesDF, how='outer', on='id')
# Assign to set
df['set'] = st
# Concatenate to resultsDF
cocoDF = | pd.concat([cocoDF, df], axis=0) | pandas.concat |
import pandas as pd
import numpy as np
import datetime
import sys
import time
import xgboost as xgb
from add_feture import *
FEATURE_EXTRACTION_SLOT = 10
LabelDay = datetime.datetime(2014,12,18,0,0,0)
Data = pd.read_csv("../../../../data/fresh_comp_offline/drop1112_sub_item.csv")
Data['daystime'] = Data['days'].map(lambda x: time.strptime(x, "%Y-%m-%d")).map(lambda x: datetime.datetime(*x[:6]))
def get_train(train_user,end_time):
# 取出label day 前一天的记录作为打标记录
data_train = train_user[(train_user['daystime'] == (end_time-datetime.timedelta(days=1)))]#&((train_user.behavior_type==3)|(train_user.behavior_type==2))
# 训练样本中,删除重复的样本
data_train = data_train.drop_duplicates(['user_id', 'item_id'])
data_train_ui = data_train['user_id'] / data_train['item_id']
# print(len(data_train))
# 使用label day 的实际购买情况进行打标
data_label = train_user[train_user['daystime'] == end_time]
data_label_buy = data_label[data_label['behavior_type'] == 4]
data_label_buy_ui = data_label_buy['user_id'] / data_label_buy['item_id']
# 对前一天的交互记录进行打标
data_train_labeled = data_train_ui.isin(data_label_buy_ui)
dict = {True: 1, False: 0}
data_train_labeled = data_train_labeled.map(dict)
data_train['label'] = data_train_labeled
return data_train[['user_id', 'item_id','item_category', 'label']]
def get_label_testset(train_user,LabelDay):
# 测试集选为上一天所有的交互数据
data_test = train_user[(train_user['daystime'] == LabelDay)]#&((train_user.behavior_type==3)|(train_user.behavior_type==2))
data_test = data_test.drop_duplicates(['user_id', 'item_id'])
return data_test[['user_id', 'item_id','item_category']]
def item_category_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
item_count = pd.crosstab(data.item_category,data.behavior_type)
item_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
item_count_before5 = | pd.crosstab(beforefiveday.item_category,beforefiveday.behavior_type) | pandas.crosstab |
import pandas as pd
from iexfinance.base import _IEXBase
from iexfinance.utils import _handle_lists, no_pandas
from iexfinance.utils.exceptions import IEXSymbolError, IEXEndpointError
class StockReader(_IEXBase):
"""
Base class for obtaining data from the Stock endpoints of IEX.
"""
# Possible option values (first is default)
_ENDPOINTS = ["chart", "quote", "book", "open-close", "previous",
"company", "stats", "peers", "relevant", "news",
"financials", "earnings", "dividends", "splits", "logo",
"price", "delayed-quote", "effective-spread",
"volume-by-venue", "ohlc"]
def __init__(self, symbols=None, **kwargs):
""" Initialize the class
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame
Desired symbols for retrieval
"""
self.symbols = list(map(lambda x: x.upper(), _handle_lists(symbols)))
self.n_symbols = len(self.symbols)
self.endpoints = []
super(StockReader, self).__init__(**kwargs)
def get_all(self):
"""
Returns all endpoints, indexed by endpoint title for each symbol
Notes
-----
Only allows JSON format (pandas not supported).
"""
self.optional_params = {}
self.endpoints = self._ENDPOINTS[:10]
json_data = self.fetch(fmt_p=no_pandas)
self.endpoints = self._ENDPOINTS[10:20]
json_data_2 = self.fetch(fmt_p=no_pandas)
for symbol in self.symbols:
if symbol not in json_data:
raise IEXSymbolError(symbol)
json_data[symbol].update(json_data_2[symbol])
return json_data[self.symbols[0]] if self.n_symbols == 1 else json_data
@property
def url(self):
return 'stock/market/batch'
@property
def params(self):
temp = {
"symbols": ','.join(self.symbols),
"types": ','.join(self.endpoints)
}
temp.update(self.optional_params)
if "filter_" in temp:
if isinstance(temp["filter_"], list):
temp["filter"] = ",".join(temp.pop("filter_"))
else:
temp["filter"] = temp.pop("filter_")
if "range_" in temp:
temp["range"] = temp.pop("range_")
params = {k: str(v).lower() if v is True or v is False else str(v)
for k, v in temp.items()}
return params
def _get_endpoint(self, endpoint, params={}, fmt_p=None,
fmt_j=None, filter_=None):
result = {}
if filter_:
params.update({"filter": filter_})
self.optional_params = params
self.endpoints = [endpoint]
data = self.fetch(fmt_j=fmt_j, fmt_p=no_pandas)
for symbol in self.symbols:
if symbol not in data:
raise IEXSymbolError(symbol)
if endpoint not in data[symbol]:
result[symbol] = []
else:
result[symbol] = data[symbol][endpoint]
return self._output_format_one(result, fmt_p=fmt_p, fmt_j=fmt_j)
def _get_field(self, endpoint, field):
data = getattr(self, "get_%s" % endpoint)(filter_=field)
if self.output_format == 'json':
if self.n_symbols == 1:
data = data[field]
else:
data = {symbol: data[symbol][field] for symbol in self.symbols}
return data
def _output_format_one(self, out, fmt_p=None, fmt_j=None):
data = super(StockReader, self)._output_format(out, fmt_p=fmt_p)
if len(self.symbols) == 1 and self.output_format == 'json':
return data[self.symbols[0]]
return data
def get_endpoints(self, endpoints=[]):
"""
Universal selector method to obtain specific endpoints from the
data set.
Parameters
----------
endpoints: str or list
Desired valid endpoints for retrieval
Notes
-----
Only allows JSON format (pandas not supported).
Raises
------
IEXEndpointError
If an invalid endpoint is specified
IEXSymbolError
If a symbol is invalid
IEXQueryError
If issues arise during query
"""
if isinstance(endpoints, str) and endpoints in self._ENDPOINTS:
endpoints = list(endpoints)
if not endpoints or not set(endpoints).issubset(self._ENDPOINTS):
raise IEXEndpointError("Please provide a valid list of endpoints")
elif len(endpoints) > 10:
raise ValueError("Please input up to 10 valid endpoints")
self.optional_params = {}
self.endpoints = endpoints
json_data = self.fetch(fmt_p=no_pandas)
for symbol in self.symbols:
if symbol not in json_data:
raise IEXSymbolError(symbol)
return json_data[self.symbols[0]] if self.n_symbols == 1 else json_data
def get_book(self, **kwargs):
"""
Reference: https://iextrading.com/developer/docs/#book
Returns
-------
dict or pandas.DataFrame
Stocks Book endpoint data
"""
return self._get_endpoint("book", params=kwargs)
def get_chart(self, **kwargs):
"""
Reference: https://iextrading.com/developer/docs/#chart
Parameters
----------
range: str, default '1m', optional
Chart range to return. See docs.
chartReset: boolean, default True, optional
If true, 1d chart will reset at midnight instead of the default
behavior of 9:30am EST.
chartSimplify: boolean, default True, optional
If true, runs polyline simplification using Douglas-Peucker
algorithm. Useful for plotting spotline charts
chartInterval: int, default None, optional
Chart data will return every nth element (where n is chartInterval)
changeFromClose: bool, default False, optional
If true, changeOverTime and marketChangeOverTime will be relative
to previous day close instead of the first value.
chartLast: int, optional
return the last N elements
Returns
-------
list
Stocks Chart endpoint data
"""
def fmt_p(out):
result = {}
for symbol in self.symbols:
d = out.pop(symbol)
df = | pd.DataFrame(d) | pandas.DataFrame |
## 1. Introduction ##
import pandas as pd
hn = | pd.read_csv('hacker_news.csv') | pandas.read_csv |
import os
import csv
import numpy as np
import pandas as pd
import logging
from collections import deque
from datetime import date, datetime, timedelta, time
from typing import Dict, List, Iterator
from libs.utils.loggers import get_source_log_directory, get_area_log_directory, get_source_logging_interval
logger = logging.getLogger(__name__)
def parse_date_range(dates):
"""Generator. From a continuous sorted list of datetime64 yields tuples (start_date, end_date) for each week encompassed"""
while not dates.empty:
start = 0
end = (7 - dates[start].weekday()) - 1
if end > len(dates):
end = len(dates) - 1
yield (dates[start], dates[end])
dates = dates[end+1:]
class BaseMetric:
processing_count_threshold = 3
reports_folder = None
csv_headers = []
# entity value can be "source" or "area"
entity = "source"
# Use the `live_csv_headers` when the csv strucutre differs from the hourly/daily
live_csv_headers = []
@classmethod
def get_entity_base_directory(cls, config=None):
if config:
return get_source_log_directory(config) if cls.entity == "source" else get_area_log_directory(config)
return os.getenv("SourceLogDirectory") if cls.entity == "source" else os.getenv("AreaLogDirectory")
@classmethod
def get_entities(cls, config):
return config.get_video_sources() if cls.entity == "source" else config.get_areas()
@classmethod
def procces_csv_row(cls, csv_row, object_logs):
"""
Extracts from the `csv_row` the required information to calculate the metric.
The extracted information is populated into `object_logs`.
"""
raise NotImplementedError
@classmethod
def generate_hourly_metric_data(cls, object_logs, entity):
"""
Generates the hourly reports for the hours received in `object_logs`.
"""
raise NotImplementedError
@classmethod
def generate_hourly_csv_data(cls, entity: Dict, entity_file: str, time_from: datetime,
time_until: datetime):
if not os.path.isfile(entity_file):
entity_type = "Camera" if cls.entity else "Area"
logger.warn(f"The [{entity_type}: {entity['id']}] contains no recorded data for that day")
return
objects_logs = {}
for hour in range(time_from.hour, time_until.hour):
objects_logs[hour] = {}
with open(entity_file, newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
row_time = datetime.strptime(row["Timestamp"], "%Y-%m-%d %H:%M:%S")
if time_from <= row_time < time_until:
cls.procces_csv_row(row, objects_logs)
return cls.generate_hourly_metric_data(objects_logs, entity)
@classmethod
def compute_hourly_metrics(cls, config):
if not cls.reports_folder:
raise Exception(f"The metric {cls} doesn't have configured the folder parameter")
base_directory = cls.get_entity_base_directory(config)
entities = cls.get_entities(config)
current_hour = datetime.now().hour
for entity in entities:
entity_directory = os.path.join(base_directory, entity["id"])
log_directory = None
if cls.entity == "source":
log_directory = os.path.join(entity_directory, "objects_log")
else:
# cls.entity == "area"
log_directory = os.path.join(entity_directory, "occupancy_log")
reports_directory = os.path.join(entity_directory, "reports", cls.reports_folder)
# Create missing directories
os.makedirs(log_directory, exist_ok=True)
os.makedirs(reports_directory, exist_ok=True)
time_until = datetime.combine(date.today(), time(current_hour, 0))
if current_hour == 0:
# Pending to process the latest hour from yesterday
report_date = date.today() - timedelta(days=1)
else:
report_date = date.today()
entity_csv = os.path.join(log_directory, str(report_date) + ".csv")
daily_csv = os.path.join(reports_directory, "report_" + str(report_date) + ".csv")
time_from = datetime.combine(report_date, time(0, 0))
if os.path.isfile(daily_csv):
with open(daily_csv, "r", newline='') as csvfile:
processed_hours = sum(1 for line in csv.reader(csvfile)) - 1
time_from = datetime.combine(report_date, time(processed_hours + 1, 0))
else:
with open(daily_csv, "a", newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=cls.csv_headers)
writer.writeheader()
csv_data = cls.generate_hourly_csv_data(entity, entity_csv, time_from, time_until)
if csv_data is None:
entity_type = "Camera" if cls.entity else "Area"
logger.warn(f"Hourly report not generated! [{entity_type}: {entity['id']}]")
continue
with open(daily_csv, "a", newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=cls.csv_headers)
logger.info(f">>>>>>>>>>daily csv file write in libs/metrics/base.py\n")
for item in csv_data:
row = {}
for index, header in enumerate(cls.csv_headers):
row[header] = item[index]
writer.writerow(row)
@classmethod
def generate_daily_csv_data(cls, yesterday_hourly_file):
"""
Generates the daily report for the `yesterday_hourly_file` received.
"""
raise NotImplementedError
@classmethod
def compute_daily_metrics(cls, config):
base_directory = cls.get_entity_base_directory(config)
entities = cls.get_entities(config)
for entity in entities:
entity_directory = os.path.join(base_directory, entity["id"])
reports_directory = os.path.join(entity_directory, "reports", cls.reports_folder)
# Create missing directories
os.makedirs(reports_directory, exist_ok=True)
yesterday = str(date.today() - timedelta(days=1))
hourly_csv = os.path.join(reports_directory, "report_" + yesterday + ".csv")
report_csv = os.path.join(reports_directory, "report.csv")
if not os.path.isfile(hourly_csv):
entity_type = "Camera" if cls.entity else "Area"
logger.warn(f"Daily report for date {str(yesterday)} not generated! [{entity_type}: {entity['id']}]")
continue
daily_data = cls.generate_daily_csv_data(hourly_csv)
headers = ["Date"] + cls.csv_headers
report_file_exists = os.path.isfile(report_csv)
with open(report_csv, "a") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=headers)
logger.info(f">>>>>>>>>report csv write in libs/metrics/base.py\n")
if not report_file_exists:
writer.writeheader()
row = {"Date": yesterday}
for index, header in enumerate(cls.csv_headers):
row[header] = daily_data[index]
writer.writerow(row)
@classmethod
def generate_live_csv_data(cls, today_entity_csv, entity, entries_in_interval):
"""
Generates the live report using the `today_entity_csv` file received.
"""
raise NotImplementedError
@classmethod
def compute_live_metrics(cls, config, live_interval):
base_directory = cls.get_entity_base_directory(config)
entities = cls.get_entities(config)
for entity in entities:
entity_directory = os.path.join(base_directory, entity["id"])
reports_directory = os.path.join(entity_directory, "reports", cls.reports_folder)
# Create missing directories
os.makedirs(reports_directory, exist_ok=True)
log_directory = None
if cls.entity == "source":
log_directory = os.path.join(entity_directory, "objects_log")
else:
# cls.entity == "area"
log_directory = os.path.join(entity_directory, "occupancy_log")
today_entity_csv = os.path.join(log_directory, str(date.today()) + ".csv")
live_report_csv = os.path.join(reports_directory, "live.csv")
csv_headers = cls.live_csv_headers if cls.live_csv_headers else cls.csv_headers
headers = ["Time"] + csv_headers
report_file_exists = os.path.isfile(live_report_csv)
if not os.path.isfile(today_entity_csv):
return
entity["base_directory"] = entity_directory
entries_in_interval = int(live_interval * 60 / get_source_logging_interval(config))
live_data = cls.generate_live_csv_data(today_entity_csv, entity, entries_in_interval)
with open(live_report_csv, "a") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=headers)
logger.info(f">>>>>>>.libe report csv file write in libs/metrics/base.py\n")
if not report_file_exists:
writer.writeheader()
row = {"Time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
for index, header in enumerate(csv_headers):
row[header] = live_data[index]
writer.writerow(row)
@classmethod
def get_hourly_report(cls, entities: List[str], report_date: date) -> Dict:
base_directory = cls.get_entity_base_directory()
hours = list(range(0, 24))
results = {}
for header in cls.csv_headers:
results[header] = np.zeros(24)
for entity in entities:
entity_directory = os.path.join(base_directory, entity)
reports_directory = os.path.join(entity_directory, "reports", cls.reports_folder)
file_path = os.path.join(reports_directory, f"report_{report_date}.csv")
if os.path.exists(file_path):
df = pd.read_csv(file_path)
for header in cls.csv_headers:
results[header] += np.pad(
df[header].to_numpy(), (0, 24 - df[header].to_numpy().size), mode="constant"
)
for metric in results:
results[metric] = results[metric].tolist()
results["Hours"] = hours
return results
@classmethod
def get_daily_report(cls, entities: List[str], from_date: date, to_date: date) -> Dict:
base_directory = cls.get_entity_base_directory()
date_range = pd.date_range(start=from_date, end=to_date)
base_results = {}
for key in date_range:
base_results[key.strftime('%Y-%m-%d')] = {}
for header in cls.csv_headers:
base_results[key.strftime('%Y-%m-%d')][header] = 0
for entity in entities:
entity_directory = os.path.join(base_directory, entity)
reports_directory = os.path.join(entity_directory, "reports", cls.reports_folder)
file_path = os.path.join(reports_directory, "report.csv")
if not os.path.isfile(file_path):
continue
df = pd.read_csv(file_path)
df['Date'] = | pd.to_datetime(df['Date'], format='%Y-%m-%d') | pandas.to_datetime |
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
parameters:
messages_filepath --> messages file location
categories_filepath --> categories file location
output:
merged messages, categories
"""
messages = | pd.read_csv(messages_filepath) | pandas.read_csv |
from autodesk.model import Model
from autodesk.sqlitedatastore import SqliteDataStore
from autodesk.states import UP, DOWN, ACTIVE, INACTIVE
from pandas import Timestamp, Timedelta
from pandas.testing import assert_frame_equal
from tests.stubdatastore import StubDataStore
import pandas as pd
import pytest
def make_spans(records):
return pd.DataFrame(records, columns=['start', 'end', 'state'])
@pytest.fixture()
def inmemory_model():
model = Model(SqliteDataStore(':memory:'))
yield model
model.close()
def test_get_desk_spans_empty():
t1 = Timestamp.min
t2 = Timestamp.max
model = Model(StubDataStore.empty())
result = model.get_desk_spans(t1, t2)
expected = make_spans([(t1, t2, DOWN)])
assert_frame_equal(result, expected)
def test_get_session_spans_empty():
t1 = Timestamp.min
t2 = Timestamp.max
model = Model(StubDataStore.empty())
result = model.get_session_spans(t1, t2)
expected = make_spans([(t1, t2, INACTIVE)])
assert_frame_equal(result, expected)
def test_get_desk_spans_one_up_span():
t1 = Timestamp(2018, 1, 1)
t2 = Timestamp(2018, 1, 2)
t3 = Timestamp(2018, 1, 3)
model = Model(StubDataStore(
session_events=[],
desk_events=[(t2, UP)]
))
result = model.get_desk_spans(t1, t3)
expected = make_spans([(t1, t2, DOWN), (t2, t3, UP)])
assert_frame_equal(result, expected)
def test_get_session_spans_one_active_span():
t1 = Timestamp(2018, 1, 1)
t2 = Timestamp(2018, 1, 2)
t3 = Timestamp(2018, 1, 3)
model = Model(StubDataStore(
session_events=[(t2, ACTIVE)],
desk_events=[]
))
result = model.get_session_spans(t1, t3)
expected = make_spans([(t1, t2, INACTIVE), (t2, t3, ACTIVE)])
assert_frame_equal(result, expected)
def test_get_session_state_empty():
model = Model(StubDataStore.empty())
assert model.get_session_state() == INACTIVE
def test_get_desk_state_empty():
model = Model(StubDataStore.empty())
assert model.get_desk_state() == DOWN
def test_get_active_time_empty():
model = Model(StubDataStore.empty())
assert model.get_active_time(Timestamp.min, Timestamp.max) == Timedelta(0)
def test_get_active_time_active_zero():
t = Timestamp(2018, 1, 1)
model = Model(StubDataStore(
session_events=[(t, ACTIVE)],
desk_events=[]
))
assert model.get_active_time(Timestamp.min, t) == Timedelta(0)
def test_get_active_time_active_for_10_minutes():
t1 = Timestamp(2018, 1, 1, 0, 0, 0)
t2 = Timestamp(2018, 1, 1, 0, 10, 0)
model = Model(StubDataStore(
session_events=[(t1, ACTIVE)],
desk_events=[]
))
assert model.get_active_time(Timestamp.min, t2) == Timedelta(minutes=10)
def test_get_active_time_just_after_desk_change():
t1 = | Timestamp(2018, 1, 1, 0, 0, 0) | pandas.Timestamp |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = | tm.box_expected(expected, box_with_array) | pandas._testing.box_expected |
from flask import render_template, request, redirect, url_for, session
from app import app
from model import *
from model.main import *
import json
import pandas as pd
import numpy as np
class DataStore():
model=None
model_month=None
sale_model=None
data = DataStore()
@app.route('/', methods=["GET"])
def home():
percent=percentageMethod()
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
with open('percent.json') as f:
file2 = json.load(f)
labels=file2['index']
data=file2['data']
if "username" in session:
return render_template('index.html', last_year=lastYear(), last_month=lastMonth(),dataset=data, label=labels, percent=percent,
month_index=month_index, month_data=month_data)
else:
return render_template('login.html')
# Register new user
@app.route('/register', methods=["GET", "POST"])
def register():
if request.method == "GET":
return render_template("register.html")
elif request.method == "POST":
registerUser()
return redirect(url_for("login"))
#Check if email already exists in the registratiion page
@app.route('/checkusername', methods=["POST"])
def check():
return checkusername()
# Everything Login (Routes to renderpage, check if username exist and also verifypassword through Jquery AJAX request)
@app.route('/login', methods=["GET"])
def login():
if request.method == "GET":
if "username" not in session:
return render_template("login.html")
else:
return redirect(url_for("home"))
@app.route('/checkloginusername', methods=["POST"])
def checkUserlogin():
return checkloginusername()
@app.route('/checkloginpassword', methods=["POST"])
def checkUserpassword():
return checkloginpassword()
#The admin logout
@app.route('/logout', methods=["GET"]) # URL for logout
def logout(): # logout function
session.pop('username', None) # remove user session
return redirect(url_for("home")) # redirect to home page with message
#Forgot Password
@app.route('/forgot-password', methods=["GET"])
def forgotpassword():
return render_template('forgot-password.html')
#404 Page
@app.route('/404', methods=["GET"])
def errorpage():
return render_template("404.html")
#Blank Page
@app.route('/blank', methods=["GET"])
def blank():
return render_template('blank.html')
@app.route('/totalyear', methods=["GET"])
def total_year():
total_year=totalYear()
file1=pd.read_json('total_year.json',orient='index')
year_index=np.array(file1['year'])
year_data=np.array(file1['total'])
return render_template("total_year.html",year_index=year_index, year_data=year_data)
@app.route('/totalmonth', methods=["GET"])
def total_month():
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
num=6
# Fit model
model=fit_model()
data.model_month=model
predict_rs, fitted_data=predict(model,6)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['total'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("total_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=model, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num=num)
def check_stationary():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def fit_model():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
data=data1['total']
stationary=check_stationary()
p=stationary[1]
if (p<0.05):
result1 = fit_model_stationary(data)
else:
result1 = fit_model_non_stationary(data)
return result1
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','total']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','total']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','total']]
return pred, fitted_month
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
# print('Sai số bình phương trung bình MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/totalmonth', methods=["POST"])
def total_month_num():
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
#Get data
if request.method == "POST":
num = int(request.form.get("num_month"))
predict_rs, fitted_data=predict(data.model_month,num)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['total'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("total_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=data.model_month, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num=num)
def check_stationary():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data2=total_day[['date','total']]
data2.set_index('date', inplace=True)
date = pd.date_range(data2.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','total']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','total']]
return pred, fitted_month
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
# print('Sai số bình phương trung bình MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/totaldate', methods=["GET"])
def total_date():
total_date=totalDate()
date_index=np.array(total_date['date'])
date_data=np.array(total_date['total'])
num=30
# Fit model
model_date=fit_model_date()
data.model=model_date
predict_rs_date, fitted_data_date=predict_date(model_date,30)
pred_index_date=np.array(predict_rs_date['date'])
pred_data_date=np.array(predict_rs_date['total'])
#Test model
test_rs= test_date(pred_data_date[0], fitted_data_date)
return render_template("total_date.html",date_index=date_index, date_data=date_data, stationary=check_stationary_date(), model_date=model_date, pred_index=pred_index_date, pred_data=pred_data_date, test_rs=test_rs, num=num)
def check_stationary_date():
total_date=totalDate()
data1=total_date[['date','total']]
data1.set_index('date', inplace=True)
result=stationary_trend(data1)
return result
def fit_model_date():
total_date=totalDate()
data1=total_date[['date','total']]
data1.set_index('date', inplace=True)
data=data1['total']
result1 = fit_model_fast(data)
return result1
def predict_date(model_date, num_predict):
if num_predict==0:
num_predict=30
fitted_date, confint_date = model_date.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','total']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict)
fitted_seri_date = pd.Series(fitted_date, index=date)
dff=pd.DataFrame(fitted_seri_date)
dff=dff.reset_index()
dff.columns=['date','total']
dff['date'] = pd.to_datetime(dff['date']).dt.to_period('D')
pred=dff[['date','total']]
return pred, fitted_date
def test_date(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
# print('Sai số bình phương trung bình MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/totaldate', methods=["POST"])
def total_date_num():
total_date=totalDate()
date_index=np.array(total_date['date'])
date_data=np.array(total_date['total'])
#Get data
if request.method == "POST":
num = int(request.form.get("num_date"))
predict_rs_date, fitted_data_date=predict_date(data.model,num)
pred_index_date=np.array(predict_rs_date['date'])
pred_data_date=np.array(predict_rs_date['total'])
test_rs= test_date(pred_data_date[0], fitted_data_date)
return render_template("total_date.html",date_index=date_index, date_data=date_data, stationary=check_stationary_date(), model_date=data.model, pred_index=pred_index_date, pred_data=pred_data_date, test_rs=test_rs, num=num)
def check_stationary_date():
total_date=totalDate()
data1=total_date[['date','total']]
data1.set_index('date', inplace=True)
result=stationary_trend(data1)
return result
def predict_date(model_date, num_predict):
if num_predict==0:
num_predict=6
fitted_date, confint_date = model_date.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data2=total_day[['date','total']]
data2.set_index('date', inplace=True)
date = pd.date_range(data2.index[-1], periods=num_predict)
fitted_seri_date = pd.Series(fitted_date, index=date)
dff=pd.DataFrame(fitted_seri_date)
dff=dff.reset_index()
dff.columns=['date','total']
dff['date'] = pd.to_datetime(dff['date']).dt.to_period('D')
pred=dff[['date','total']]
return pred, fitted_date
def test_date(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
return mse, rmse, mae, mape
@app.route('/revenueyear', methods=["GET"])
def revenue_year():
sale_year=saleYear()
year_index=np.array(sale_year['year'])
year_data=np.array(sale_year['quantity'])
return render_template("revenue_year.html",year_index=year_index, year_data=year_data)
@app.route('/revenuemonth', methods=["GET"])
def revenue_month():
total_month=saleMonth()
file1=pd.read_json('sale_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['quantity'])
num_sale=6
# Fit model
model=fit_model()
data.model_month=model
predict_rs, fitted_data=predict(model,6)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['quantity'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("revenue_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=model, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num_sale=num_sale)
def check_stationary():
total_month=saleMonth()
data1=total_month[['month_year','quantity']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def fit_model():
total_month=saleMonth()
data1=total_month[['month_year','quantity']]
data1.set_index('month_year', inplace=True)
data=data1['quantity']
stationary=check_stationary()
p=stationary[1]
if (p<0.05):
result1 = fit_model_stationary(data)
else:
result1 = fit_model_non_stationary(data)
return result1
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['quantity', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','quantity']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','quantity']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','quantity']]
return pred, fitted_month
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
# print('Sai số bình phương trung bình MSE: {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/revenuemonth', methods=["POST"])
def revenue_month_num():
total_month=saleMonth()
file1=pd.read_json('sale_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['quantity'])
#Get data
if request.method == "POST":
num_sale= int(request.form.get("sale_month"))
predict_rs, fitted_data=predict(data.model_month,num_sale)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['quantity'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("revenue_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=data.model_month, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num_sale=num_sale)
def check_stationary():
total_month=saleMonth()
data1=total_month[['month_year','quantity']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['quantity', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data2=total_day[['date','quantity']]
data2.set_index('date', inplace=True)
date = pd.date_range(data2.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','quantity']
dff['month_year'] = | pd.to_datetime(dff['date']) | pandas.to_datetime |
import numpy as np
import pandas as pd
import pytest
import scipy.stats as st
from ..analysis import GroupCorrelation
from ..analysis.exc import MinimumSizeError, NoDataError
from ..data import UnequalVectorLengthError, Vector
@pytest.fixture
def random_seed():
"""Generate a numpy random seed for repeatable test results."""
return np.random.seed(987654321)
def test_pearson_correlation_four_groups(random_seed):
"""Test the output of a pearson correlation with three groups."""
input_1 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_2 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_3 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_4_x = st.norm.rvs(size=100)
input_4_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_4_x]
input_4 = input_4_x, input_4_y
cs_x = np.concatenate((input_1[0], input_2[0], input_3[0], input_4[0]))
cs_y = np.concatenate((input_1[1], input_2[1], input_3[1], input_4[1]))
grp = [1] * 100 + [2] * 100 + [3] * 100 + [4] * 100
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
output = """
Pearson Correlation Coefficient
-------------------------------
n r value p value Group
--------------------------------------------------------
100 -0.0055 0.9567 1
100 0.0605 0.5497 2
100 -0.2250 0.0244 3
100 0.9045 0.0000 4 """
exp = GroupCorrelation(input_array['a'], input_array['b'], groups=input_array['c'], display=False)
assert ('100', '100', '100', '100') == exp.counts
assert (-0.005504761441239719, 0.06052034843856759, -0.225005891506915, 0.9044623083255101) == exp.r_value
assert (-0.005504761441239719, 0.06052034843856759, -0.225005891506915, 0.9044623083255101) == exp.statistic
assert (0.9566515868901755, 0.5497443545114141, 0.02440365919474257, 4.844813765580646e-38) == exp.p_value
assert str(exp) == output
def test_pearson_correlation_four_string_groups(random_seed):
"""Test the output of a pearson correlation with three groups."""
input_1 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_2 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_3 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_4_x = st.norm.rvs(size=100)
input_4_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_4_x]
input_4 = input_4_x, input_4_y
cs_x = np.concatenate((input_1[0], input_2[0], input_3[0], input_4[0]))
cs_y = np.concatenate((input_1[1], input_2[1], input_3[1], input_4[1]))
grp = ['b'] * 100 + ['a'] * 100 + ['d'] * 100 + ['c'] * 100
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
output = """
Pearson Correlation Coefficient
-------------------------------
n r value p value Group
--------------------------------------------------------
100 0.0605 0.5497 a
100 -0.0055 0.9567 b
100 0.9045 0.0000 c
100 -0.2250 0.0244 d """
exp = GroupCorrelation(input_array['a'], input_array['b'], groups=input_array['c'], display=False)
assert ('100', '100', '100', '100') == exp.counts
assert (0.06052034843856759, -0.005504761441239719, 0.9044623083255101, -0.225005891506915) == exp.r_value
assert (0.06052034843856759, -0.005504761441239719, 0.9044623083255101, -0.225005891506915) == exp.statistic
assert (0.5497443545114141, 0.9566515868901755, 4.844813765580646e-38, 0.02440365919474257) == exp.p_value
assert str(exp) == output
def test_pearson_correlation_one_groups(random_seed):
"""Test the output of a pearson correlation with one groups."""
input_1 = st.norm.rvs(size=100), st.norm.rvs(size=100)
output = """
Pearson Correlation Coefficient
-------------------------------
n r value p value Group
--------------------------------------------------------
100 -0.0055 0.9567 1 """
exp = GroupCorrelation(input_1[0], input_1[1], display=False)
assert str(exp) == output
def test_spearman_correlation_four_groups(random_seed):
"""Test the output of a pearson correlation with three groups."""
input_1 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_2 = st.weibull_min.rvs(1.7, size=100), st.norm.rvs(size=100)
input_3 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_4_x = st.norm.rvs(size=100)
input_4_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_4_x]
input_4 = input_4_x, input_4_y
cs_x = np.concatenate((input_1[0], input_2[0], input_3[0], input_4[0]))
cs_y = np.concatenate((input_1[1], input_2[1], input_3[1], input_4[1]))
grp = [1] * 100 + [2] * 100 + [3] * 100 + [4] * 100
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
output = """
Spearman Correlation Coefficient
--------------------------------
n r value p value Group
--------------------------------------------------------
100 0.0079 0.9376 1
100 0.0140 0.8898 2
100 -0.1227 0.2241 3
100 0.9006 0.0000 4 """
exp = GroupCorrelation(input_array['a'], input_array['b'], groups=input_array['c'], display=False)
assert ('100', '100', '100', '100') == exp.counts
assert (0.007932793279327931, 0.014029402940294028, -0.12266426642664265, 0.9005940594059406) == exp.r_value
assert (0.007932793279327931, 0.014029402940294028, -0.12266426642664265, 0.9005940594059406) == exp.statistic
assert (0.9375641178035645, 0.8898160391011217, 0.22405419866382636, 3.0794115586718083e-37) == exp.p_value
assert str(exp) == output
def test_pearson_correlation_with_missing_data(random_seed):
"""Test the output of a pearson correlation with random missing data."""
input_1 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_2 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_3 = st.norm.rvs(size=100), st.norm.rvs(size=100)
input_4_x = st.norm.rvs(size=100)
input_4_y = [x + st.norm.rvs(0, 0.5, size=1)[0] for x in input_4_x]
input_4 = input_4_x, input_4_y
cs_x = np.concatenate((input_1[0], input_2[0], input_3[0], input_4[0]))
cs_y = np.concatenate((input_1[1], input_2[1], input_3[1], input_4[1]))
grp = [1] * 100 + [2] * 100 + [3] * 100 + [4] * 100
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
input_array['a'][24] = np.nan
input_array['a'][256] = np.nan
input_array['b'][373] = np.nan
input_array['b'][24] = np.nan
input_array['b'][128] = np.nan
output = """
Pearson Correlation Coefficient
-------------------------------
n r value p value Group
--------------------------------------------------------
99 -0.0113 0.9114 1
99 0.0300 0.7681 2
99 -0.2224 0.0269 3
99 0.9043 0.0000 4 """
exp = GroupCorrelation(input_array['a'], input_array['b'], groups=input_array['c'], display=False)
assert ('99', '99', '99', '99') == exp.counts
assert str(exp) == output
def test_no_data():
"""Test the case where there's no data."""
with pytest.raises(NoDataError):
GroupCorrelation([], [])
def test_at_minimum_size(random_seed):
"""Test the case where one group is at the minimum size."""
input_1 = st.norm.rvs(size=3), st.norm.rvs(size=3)
input_2 = st.norm.rvs(size=3), st.norm.rvs(size=3)
input_3 = st.norm.rvs(size=3), st.norm.rvs(size=3)
input_4 = st.norm.rvs(size=3), st.norm.rvs(size=3)
cs_x = np.concatenate((input_1[0], input_2[0], input_3[0], input_4[0]))
cs_y = np.concatenate((input_1[1], input_2[1], input_3[1], input_4[1]))
grp = [1] * 3 + [2] * 3 + [3] * 3 + [4] * 3
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
output = """
Pearson Correlation Coefficient
-------------------------------
n r value p value Group
--------------------------------------------------------
3 0.9757 0.1407 1
3 -0.8602 0.3406 2
3 0.9530 0.1959 3
3 -0.9981 0.0398 4 """
exp = GroupCorrelation(input_array['a'], input_array['b'], groups=input_array['c'], display=False)
assert str(exp) == output
def test_below_minimum_size(random_seed):
"""Test the case where the supplied data is less than the minimum size."""
input_1 = st.norm.rvs(size=20), st.norm.rvs(size=20)
input_2 = st.norm.rvs(size=20), st.norm.rvs(size=20)
input_3 = st.norm.rvs(size=2), st.norm.rvs(size=2)
input_4 = st.norm.rvs(size=20), st.norm.rvs(size=20)
cs_x = np.concatenate((input_1[0], input_2[0], input_3[0], input_4[0]))
cs_y = np.concatenate((input_1[1], input_2[1], input_3[1], input_4[1]))
grp = [1] * 20 + [2] * 20 + [3] * 2 + [4] * 20
input_array = pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp})
output = """
Pearson Correlation Coefficient
-------------------------------
n r value p value Group
--------------------------------------------------------
20 0.1239 0.6029 1
20 0.0233 0.9224 2
20 0.0577 0.8090 4 """
exp = GroupCorrelation(input_array['a'], input_array['b'], groups=input_array['c'], display=False)
assert str(exp) == output
def test_all_below_minimum_size(random_seed):
"""Test the case where all the supplied data is less than the minimum size."""
input_1 = st.norm.rvs(size=1), st.norm.rvs(size=1)
input_2 = st.norm.rvs(size=1), st.norm.rvs(size=1)
input_3 = st.norm.rvs(size=1), st.norm.rvs(size=1)
input_4 = st.norm.rvs(size=1), st.norm.rvs(size=1)
cs_x = np.concatenate((input_1[0], input_2[0], input_3[0], input_4[0]))
cs_y = np.concatenate((input_1[1], input_2[1], input_3[1], input_4[1]))
grp = [1, 2, 3, 4]
input_array = | pd.DataFrame({'a': cs_x, 'b': cs_y, 'c': grp}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import random
import numpy as np
import pandas as pd
from pandas.compat import lrange
from pandas.api.types import CategoricalDtype
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range, NaT, IntervalIndex, Categorical)
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(TestData):
def test_sort_values(self):
frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
index=[1, 2, 3], columns=list('ABC'))
# by column (axis=0)
sorted_df = frame.sort_values(by='A')
indexer = frame['A'].argsort().values
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=['B', 'C'])
expected = frame.loc[[2, 1, 3]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=['B', 'C'], ascending=False)
assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
assert_frame_equal(sorted_df, expected)
pytest.raises(ValueError, lambda: frame.sort_values(
by=['A', 'B'], axis=2, inplace=True))
# by row (axis=1): GH 10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis='columns')
expected = frame.reindex(columns=['B', 'A', 'C'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1,
ascending=[True, False])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with tm.assert_raises_regex(ValueError, msg):
frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_values_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort_values(by='A', inplace=True)
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=1, axis=1, inplace=True)
expected = frame.sort_values(by=1, axis=1)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by='A', ascending=False, inplace=True)
expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=['B', 'A'])
sorted_df = df.sort_values(by=1, axis=1, na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
sorted_df = df.sort_values(['A', 'B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], ascending=[
1, 0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
sorted_df = df.sort_values(['A', 'B'], ascending=[
0, 1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(
kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index=[nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index=[nan, 6, 5, 4, 3, 2, 1])
assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_values(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# test stable mergesort
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 1],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 0],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_stable_categorial(self):
# GH 16793
df = DataFrame({
'x': pd.Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)
})
expected = df.copy()
sorted_df = df.sort_values('x', kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'],
columns=['A'],
index=date_range('20130101', periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11', '2004-01-21', '2004-01-26',
'2005-09-20', '2010-10-04', '2009-05-12',
'2008-11-12', '2010-09-28', '2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort_values(by='A')
df2 = df.sort_values(by=['A'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['B'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['C', 'B'])
assert_frame_equal(df1, df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with tm.assert_raises_regex(ValueError, "This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_nat_values_in_int_column(self):
# GH 14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT))
float_values = (2.0, -1.797693e308)
df = DataFrame(dict(int=int_values, float=float_values),
columns=["int", "float"])
df_reversed = DataFrame(dict(int=int_values[::-1],
float=float_values[::-1]),
columns=["int", "float"],
index=[1, 0])
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT],
float=float_values), columns=["datetime", "float"])
df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")],
float=float_values[::-1]),
columns=["datetime", "float"],
index=[1, 0])
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ['2016-01-01', '2015-01-01',
np.nan, '2016-01-01']]
d2 = [Timestamp(x) for x in ['2017-01-01', '2014-01-01',
'2016-01-01', '2015-01-01']]
df = pd.DataFrame({'a': d1, 'b': d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ['2015-01-01', '2016-01-01',
'2016-01-01', np.nan]]
d4 = [Timestamp(x) for x in ['2014-01-01', '2015-01-01',
'2017-01-01', '2016-01-01']]
expected = pd.DataFrame({'a': d3, 'b': d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=['a', 'b'], )
tm.assert_frame_equal(sorted_df, expected)
class TestDataFrameSortIndexKinds(TestData):
def test_sort_index_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'])
result = frame.sort_values(by=['A', 'B'])
indexer = np.lexsort((frame['B'], frame['A']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'], ascending=False)
result = frame.sort_values(by=['A', 'B'], ascending=False)
indexer = np.lexsort((frame['B'].rank(ascending=False),
frame['A'].rank(ascending=False)))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['B', 'A'])
result = frame.sort_values(by=['B', 'A'])
indexer = np.lexsort((frame['A'], frame['B']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered['A'])
df = unordered.copy()
df.sort_index(inplace=True)
expected = frame
assert_frame_equal(df, expected)
assert a_id != id(df['A'])
df = unordered.copy()
df.sort_index(ascending=False, inplace=True)
expected = frame[::-1]
assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ['D', 'B', 'C', 'A']]
df = unordered.copy()
df.sort_index(axis=1, inplace=True)
expected = frame
assert_frame_equal(df, expected)
df = unordered.copy()
df.sort_index(axis=1, ascending=False, inplace=True)
expected = frame.iloc[:, ::-1]
assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['A', 'B'], ascending=[1, 0])
result = df.sort_values(by=['A', 'B'], ascending=[1, 0])
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
assert_frame_equal(result, expected)
# test with multiindex, too
idf = df.set_index(['A', 'B'])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
assert_frame_equal(result, expected)
# also, Series!
result = idf['C'].sort_index(ascending=[1, 0])
assert_series_equal(result, expected['C'])
def test_sort_index_duplicates(self):
# with 9816, these are all translated to .sort_values
df = DataFrame([lrange(5, 9), lrange(4)],
columns=['a', 'a', 'b', 'b'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with tm.assert_raises_regex(ValueError, 'not unique'):
df.sort_values(by='a')
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['a'])
with tm.assert_raises_regex(ValueError, 'not unique'):
df.sort_values(by=['a'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
# multi-column 'by' is separate codepath
df.sort_index(by=['a', 'b'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# multi-column 'by' is separate codepath
df.sort_values(by=['a', 'b'])
# with multi-index
# GH4370
df = DataFrame(np.random.randn(4, 2),
columns=MultiIndex.from_tuples([('a', 0), ('a', 1)]))
with tm.assert_raises_regex(ValueError, 'level'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with tm.assert_raises_regex(ValueError, 'level'):
df.sort_values(by='a')
# convert tuples to a list of tuples
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=[('a', 1)])
expected = df.sort_values(by=[('a', 1)])
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=('a', 1))
result = df.sort_values(by=('a', 1))
assert_frame_equal(result, expected)
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
res = df.sort_index(level='A', sort_remaining=False)
assert_frame_equal(df, res)
res = df.sort_index(level=['A', 'B'], sort_remaining=False)
assert_frame_equal(df, res)
def test_sort_index_categorical_index(self):
df = (DataFrame({'A': np.arange(6, dtype='int64'),
'B': Series(list('aabbca'))
.astype(CategoricalDtype(list('cab')))})
.set_index('B'))
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[3, 2, 5, 1, 0, 4]]
assert_frame_equal(result, expected)
def test_sort_index(self):
# GH13496
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
assert_frame_equal(result, expected)
@pytest.mark.parametrize("level", ['A', 0]) # GH 21052
def test_sort_index_multiindex(self, level):
# GH13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples([
[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
expected_mi = MultiIndex.from_tuples([
[1, 1, 1],
[2, 1, 2],
[2, 1, 3]], names=list('ABC'))
expected = pd.DataFrame([
[5, 6],
[3, 4],
[1, 2]], index=expected_mi)
result = df.sort_index(level=level)
assert_frame_equal(result, expected)
# sort_remaining=False
expected_mi = MultiIndex.from_tuples([
[1, 1, 1],
[2, 1, 3],
[2, 1, 2]], names=list('ABC'))
expected = pd.DataFrame([
[5, 6],
[1, 2],
[3, 4]], index=expected_mi)
result = df.sort_index(level=level, sort_remaining=False)
assert_frame_equal(result, expected)
def test_sort_index_intervalindex(self):
# this is a de-facto sort via unstack
# confirming that we sort in the order of the bins
y = Series(np.random.randn(100))
x1 = Series(np.sign(np.random.randn(100)))
x2 = pd.cut(Series(np.random.randn(100)),
bins=[-3, -0.5, 0, 0.5, 3])
model = pd.concat([y, x1, x2], axis=1, keys=['Y', 'X1', 'X2'])
result = model.groupby(['X1', 'X2'], observed=True).mean().unstack()
expected = IntervalIndex.from_tuples(
[(-3.0, -0.5), (-0.5, 0.0),
(0.0, 0.5), (0.5, 3.0)],
closed='right')
result = result.columns.levels[1].categories
| tm.assert_index_equal(result, expected) | pandas.util.testing.assert_index_equal |
#system libraries
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
import random
import urllib
import os
import sys
import time
import pandas as pd
opc = Options()
opc.add_argument("user-agent=Mozilla/5.0 (X11; Linux x85_64) AppleWebKit/537.36(KHTML, like Gecko)")
driver = webdriver.Chrome('./chromedriver/chromedriver.exe', options=opc)
driver.get('https://es.stripchat.com/login')
user = "fnietzshe"
password = open('<PASSWORD>').readline().strip()
#Aquí está el código para resolver el user y password
input_user = WebDriverWait(driver, 5).until(
EC.presence_of_element_located((By.XPATH, '/html/body/div[2]/div[2]/div/main/div[2]/div[3]/div/div/div/div[2]/form/div[1]/input'))
)
input_user = driver.find_element(By.XPATH, '/html/body/div[2]/div[2]/div/main/div[2]/div[3]/div/div/div/div[2]/form/div[1]/input')
input_pass = driver.find_element(By.XPATH,'/html/body/div[2]/div[2]/div/main/div[2]/div[3]/div/div/div/div[2]/form/div[2]/input')
input_user.send_keys(user)
input_pass.send_keys(password)
#Aquí está el código para resolver el Recaptcha
audioToTextDelay = 10
delayTime = 2
audioFile = "\\payload.mp3"
URL = "https://www.google.com/recaptcha/api2/demo"
SpeechToTextURL = "https://speech-to-text-demo.ng.bluemix.net/"
def delay():
time.sleep(random.randint(2, 3))
def audioToText(audioFile):
driver.execute_script('''window.open("","_blank")''')
driver.switch_to.window(driver.window_handles[1])
driver.get(SpeechToTextURL)
delay()
audioInput = driver.find_element(By.XPATH, '/html/body/div/div/div[3]/div[2]/div[1]/div[1]/div[2]/button')
audioInput.send_keys(audioFile)
time.sleep(audioToTextDelay)
text = driver.find_element(By.XPATH, '//*[@id="root"]/div/div[7]/div/div/div/span')
while text is None:
text = driver.find_element(By.XPATH, '//*[@id="root"]/div/div[7]/div/div/div/span')
result = text.text
driver.close()
driver.switch_to.window(driver.window_handles[0])
return result
try:
# create chrome driver
option = webdriver.ChromeOptions()
option.add_argument('--disable-notifications')
# option.add_argument('--mute-audio')
option.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36")
driver = webdriver.Chrome(os.getcwd() + "\\chromedriver.exe", options=option)
delay()
# go to website which have recaptcha protection
driver.get(URL)
except Exception as e:
sys.exit(
"[-] Please update the chromedriver.exe in the webdriver folder according to your chrome version:https://chromedriver.chromium.org/downloads")
g_recaptcha = driver.find_element(By.XPATH, 'id#recaptcha-anchor-label')[0]
outerIframe = g_recaptcha.find_element_by_tag_name('/html/body/div/div')
outerIframe.click()
iframes = driver.find_elements_by_id('#rc-imageselect > div.rc-imageselect-payload')
audioBtnFound = False
audioBtnIndex = -1
for index in range(len(iframes)):
driver.switch_to.default_content()
iframe = driver.find_elements_by_id('#rc-imageselect > div.rc-imageselect-payload')[index]
driver.switch_to.frame(iframe)
driver.implicitly_wait(delayTime)
try:
audioBtn = driver.find_element_by_id("recaptcha-anchor > div.recaptcha-checkbox-border")
audioBtn.click()
audioBtnFound = True
audioBtnIndex = index
break
except Exception as e:
pass
if audioBtnFound:
try:
while True:
# get the mp3 audio file
src = driver.find_element_by_id("audio-source").get_attribute("src")
print("[INFO] Audio src: %s" % src)
# download the mp3 audio file from the source
urllib.request.urlretrieve(src, os.getcwd() + audioFile)
# Speech To Text Conversion
key = audioToText(os.getcwd() + audioFile)
print("[INFO] Recaptcha Key: %s" % key)
driver.switch_to.default_content()
iframe = driver.find_elements_by_tag_name('iframe')[audioBtnIndex]
driver.switch_to.frame(iframe)
# key in results and submit
inputField = driver.find_element_by_id("audio-response")
inputField.send_keys(key)
delay()
inputField.send_keys(Keys.ENTER)
delay()
err = driver.find_elements_by_class_name('rc-audiochallenge-error-message')[0]
if err.text == "" or err.value_of_css_property('display') == 'none':
print("[INFO] Success!")
break
except Exception as e:
print(e)
sys.exit("[INFO] Possibly blocked by google. Change IP,Use Proxy method for requests")
else:
sys.exit("[INFO] Audio Play Button not found! In Very rare cases!")
#Aquí está el código para dar clic en el botón de inicio de sesión
button = driver.find_element(By.XPATH, '/html/body/div[2]/div[2]/div/main/div[2]/div[3]/div/div/div/div[2]/form/div[4]/button')
button.click()
#Aquí está el código para acceder a la modelo
opcs = Options()
opcs.add_argument("user-agent=Mozilla/5.0 (X11; Linux x85_64) AppleWebKit/537.36(KHTML, like Gecko)")
driver = webdriver.Chrome('./chromedriver/chromedriver.exe', optiones=opcs)
driver.get('https://es.stripchat.com/')
modelo = WebDriverWait(driver, 5).until(
EC.presence_of_element_located((By.XPATH, '/html/body/div[2]/div[2]/div/main/div[2]/div[3]/div/div/div/div[2]/form/div[1]/input'))
)
#Entra al transmision de la modelo
modelo = driver.find_element(By.XPATH, '/html/body/div[2]/div[2]/div[1]/main/div[2]/div[3]/div/div[2]/div/div/div[3]/div[2]/div[1]/div/section/div[1]/a')
modelo.click()
#Entra a la sección de perfil de la modelo
perfil = driver.find_element(By.XPATH, '/html/body/div[2]/div[2]/div[1]/main/div[2]/div[3]/div/div/div/div[1]/div/nav[1]/div[1]/div[2]/a')
perfil.click()
#Datos del perfil de la modelo
datos = driver.find_element(By.XPATH, '/html/body/div[2]/div[2]/div[1]/main/div[2]/div[3]/div/div/div/div[3]/div/div/div[4]/div/div[1]/div[1]/div/div/div')
datos = datos.text
datos_perfil = datos.split('Perfil')[0].split('\n')[1:-1]
nombre = list()
de = list()
idiomas = list()
edad = list()
cuerpo = list()
detalles = list()
etnia = list()
pelo = list()
ojos = list()
subcultura= list()
redes = list()
for i in range(0, len(datos_perfil), 12):
nombre.append(datos_perfil[i])
de.append(datos_perfil[i+1])
idiomas.append(datos_perfil[i+2])
edad.append(datos_perfil[i+3])
cuerpo.append(datos_perfil[i+5])
detalles.append(datos_perfil[i+6])
etnia.append(datos_perfil[i+7])
pelo.append(datos_perfil[i+8])
ojos.append(datos_perfil[i+9])
subcultura.append(datos_perfil[i+10])
redes.append(datos_perfil[i+11])
df = | pd.DataFrame({'Nombre': nombre, 'Nacionalidad': de, 'Idiomas': idiomas, 'Edad': edad, 'Cuerpo': cuerpo,'Detalles': detalles,'Etnia': etnia,'Pelo': pelo,'C_Ojos': ojos, 'Subcultura': subcultura,'R_Sociales': redes })
print(df) | pandas.DataFrame |
import numpy as np
import pandas as pd
class Stream:
def __init__(self, stream_id, side_a, side_b, plist, direct=None):
self.stream_id = stream_id
self.side_a = side_a
self.side_b = side_b
self.packets = plist
self.direct = direct
self._pkt_size_list = None
self._pkt_time_list = None
# 报文长度列表
def pkt_size(self):
if self._pkt_size_list is not None:
return np.array(self._pkt_size_list)
self._pkt_size_list = []
for p in self.packets:
self._pkt_size_list.append(p.length)
return pd.Series(self._pkt_size_list).astype('int')
# 报文到达间隔列表
def pkt_iat(self):
if self._pkt_time_list is not None:
return np.diff(np.array(self._pkt_time_list))
self._pkt_time_list = []
for p in self.packets:
self._pkt_time_list.append(p.time)
return np.diff(np.array(self._pkt_time_list))
# 报文到达速率(报文数)
def pkt_num_rate(self, interval=1000):
self._make_pkt_list()
num = len(self.packets)
pkt_se = | pd.Series([1]*num, index=self._pkt_time_list) | pandas.Series |
import datareader
import dataextractor
import bandreader
import numpy as np
from _bisect import bisect
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import pandas as pd
from scipy import stats
from sklearn import metrics
def full_signal_extract(path, ident):
"""Extract breathing and heartbeat features from one user and save features to file.
:param path: (str) main path to data, where user data is located in specific folders
:param ident: (str) user identifier
:return: Nothing. It saves features (dataframe) to a .csv file
"""
dataread = datareader.DataReader(path, ident) # initialize path to data
data = dataread.read_grc_data() # read from files
data = dataread.unwrap_grc_data() # unwrap phase. returns time and y values
samp_rate = round(len(data[1]) / max(data[0]))
dataextract = dataextractor.DataExtractor(data[0], data[1], samp_rate)
cog_res = dataread.read_cognitive_load_study(ident + '-primary-extract.txt')
end_epoch_time = dataread.get_end_time_cognitive_load_study() # end t
extracted_br_features = dataextract.raw_windowing_breathing(30, 1)
extracted_br_features['br_rate'] = np.array(extracted_br_features['br_rate'].rolling(6).mean())
extracted_br_features_roll_avg = extracted_br_features.loc[:, extracted_br_features.columns != 'times'].rolling(
6).mean()
extracted_br_features_roll_avg['times'] = extracted_br_features['times']
extracted_br_features_roll_avg['br_ok'] = extracted_br_features['br_ok']
extracted_hr_features = dataextract.raw_windowing_heartrate(10, 1)
extracted_hr_features = extracted_hr_features.drop(['hr_HRV_lf', 'hr_HRV_hf', 'hr_HRV_lf_hf'], axis=1)
extracted_hr_features_roll_avg = extracted_hr_features.loc[:, extracted_hr_features.columns != 'times'].rolling(
10).mean()
extracted_hr_features_roll_avg['times'] = extracted_hr_features['times']
extracted_hr_features_roll_avg['hr_ok'] = extracted_hr_features['hr_ok']
extracted_hr_features2 = dataextract.raw_windowing_heartrate(100, 1) # longer time to extract HRV frequency feat.
extracted_hr_features2 = extracted_hr_features2[['hr_HRV_lf', 'hr_HRV_hf', 'hr_HRV_lf_hf', 'times']]
extracted_hr_features2_roll_avg = extracted_hr_features2.loc[:, extracted_hr_features2.columns != 'times'].rolling(
10).mean()
extracted_hr_features2_roll_avg['times'] = extracted_hr_features2['times']
all_features = extracted_br_features_roll_avg
all_features = pd.merge(all_features, extracted_hr_features_roll_avg, on='times')
all_features = pd.merge(all_features, extracted_hr_features2_roll_avg, on='times')
task_timestamps = dataread.get_data_task_timestamps()
relax_timestamps = dataread.get_relax_timestamps()
bandread = bandreader.HeartRateBand(path + '_Hrates/', ident)
band_data = bandread.load()
band_data_time_start = bisect(band_data[0][:], end_epoch_time - data[0][-1] * 1000)
band_data_time_stop = bisect(band_data[0][:], end_epoch_time)
band_data = [band_data[0][band_data_time_start:band_data_time_stop],
band_data[1][band_data_time_start:band_data_time_stop]]
band_data_new__data = [(band_data[0] - band_data[0][0]) / 1000, band_data[1]]
hr_data = extracted_hr_features_roll_avg[['times', 'hr_rate']]
hr_data['times'] = hr_data['times'].astype(int)
band_data = pd.DataFrame()
band_data['times'] = band_data_new__data[0]
band_data['times'] = band_data['times'].astype(int)
band_data['band_rate'] = band_data_new__data[1]
band_data = band_data.drop_duplicates(subset=['times'])
together_data = pd.merge(hr_data, band_data, on='times')
together_data = together_data.dropna()
for i in range(len(all_features['times'])):
find_in_hr_data = bisect(together_data['times'], all_features['times'][i])
all_features.ix[i, 'band_rate'] = together_data['band_rate'][find_in_hr_data]
for i in range(len(cog_res)):
all_feat_ind_task_start = bisect(all_features['times'], task_timestamps[i][0])
all_feat_ind_task_end = bisect(all_features['times'], task_timestamps[i][1])
for j in cog_res.columns:
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, j] = cog_res.iloc[i][j]
if cog_res.iloc[i][j] == 'GC' or cog_res.iloc[i][j] == 'PT':
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'keyboard_task'] = True
elif cog_res.iloc[i][j] == 'HP' or cog_res.iloc[i][j] == 'FA' or cog_res.iloc[i][j] == 'NC' or \
cog_res.iloc[i][j] == 'SX':
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'keyboard_task'] = False
for k in range(all_feat_ind_task_end - all_feat_ind_task_start + 1):
all_features.ix[k + all_feat_ind_task_start, 'on_task_or_break_index'] = k
for k in range(all_feat_ind_task_end - all_feat_ind_task_start, -1, -1):
all_features.ix[all_feat_ind_task_end - k, 'on_task_or_break_index_down'] = k
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'on_task'] = True
for i in range(len(relax_timestamps)):
all_feat_ind_task_start = bisect(all_features['times'], relax_timestamps[i][0])
all_feat_ind_task_end = bisect(all_features['times'], relax_timestamps[i][1])
new_end = all_feat_ind_task_end + 30
# if i==0:
# continue
for k in range(all_feat_ind_task_end - all_feat_ind_task_start + 1):
all_features.ix[k + all_feat_ind_task_start, 'on_task_or_break_index'] = k
all_features.ix[k + all_feat_ind_task_start, 'consecutive_break'] = i
for k in range(new_end - all_feat_ind_task_start + 1):
all_features.ix[k + all_feat_ind_task_start, 'on_break_and_after_index'] = k
if k <= 15:
all_features.ix[k + all_feat_ind_task_start, 'engagement_increase'] = False
elif k <= 30:
all_features.ix[k + all_feat_ind_task_start, 'engagement_increase'] = np.nan
else:
all_features.ix[k + all_feat_ind_task_start, 'engagement_increase'] = True
for k in range(all_feat_ind_task_end - all_feat_ind_task_start, -1, -1):
all_features.ix[all_feat_ind_task_end - k, 'on_task_or_break_index_down'] = k
all_features.ix[all_feat_ind_task_start:all_feat_ind_task_end, 'on_task'] = False
all_features['person_id'] = cog_res['person_id'][0]
all_features.to_csv(path_or_buf=path + ident + '/' + ident + '-data.csv', index=False)
def extract_for_all_users_and_combine(path, idents, outfile):
for i in idents:
print(i)
full_signal_extract(path, i)
append_csv_files(path, idents, outfile)
def plot_all_full_signals(path, idents):
for i in idents:
print(i)
plot_whole_signal_and_tasks_times(path, i)
def compare_extracted_hr_and_band(path, ident):
"""Compater heart rates acquired wirelessly and with Microfost Band.
:param path: (str) main path to data, where user data is located in specific folders
:param ident: (str) user identifier
:return: MAE, MSE, CORRelation values of the aligned HR time series
"""
dataread = datareader.DataReader(path, ident) # initialize path to data
data = dataread.read_grc_data() # read from files
data = dataread.unwrap_grc_data() # unwrap phase. returns time and y values
samp_rate = round(len(data[1]) / max(data[0]))
dataextract = dataextractor.DataExtractor(data[0], data[1], samp_rate)
cog_res = dataread.read_cognitive_load_study(ident + '-primary-extract.txt')
end_epoch_time = dataread.get_end_time_cognitive_load_study() # end t
extracted_br_features = dataextract.raw_windowing_breathing(30, 1)
extracted_br_features['br_rate'] = np.array(extracted_br_features['br_rate'].rolling(6).mean())
extracted_br_features_roll_avg = extracted_br_features.loc[:, extracted_br_features.columns != 'times'].rolling(
6).mean()
extracted_br_features_roll_avg['times'] = extracted_br_features['times']
extracted_br_features_roll_avg['br_ok'] = extracted_br_features['br_ok']
extracted_hr_features = dataextract.raw_windowing_heartrate(10, 1)
extracted_hr_features = extracted_hr_features.drop(['hr_HRV_lf', 'hr_HRV_hf', 'hr_HRV_lf_hf'], axis=1)
extracted_hr_features_roll_avg = extracted_hr_features.loc[:, extracted_hr_features.columns != 'times'].rolling(
10).mean()
extracted_hr_features_roll_avg['times'] = extracted_hr_features['times']
extracted_hr_features_roll_avg['hr_ok1'] = extracted_hr_features['hr_ok']
bandread = bandreader.HeartRateBand(path + '_Hrates/', ident)
band_data = bandread.load()
band_data_time_start = bisect(band_data[0][:], end_epoch_time - data[0][-1] * 1000)
band_data_time_stop = bisect(band_data[0][:], end_epoch_time)
band_data = [band_data[0][band_data_time_start:band_data_time_stop],
band_data[1][band_data_time_start:band_data_time_stop]]
band_data_new_data = [(band_data[0] - band_data[0][0]) / 1000, band_data[1]]
plt.figure(1)
plt.clf()
plt.plot(extracted_hr_features_roll_avg['times'], extracted_hr_features_roll_avg['hr_rate'], color='orange',
label='Wi-Mind heart rate')
plt.plot(band_data_new_data[0], band_data_new_data[1], color='green', label='Microsoft Band heart rate')
plt.xlabel('time (s)')
plt.ylabel('heart rate')
plt.legend()
plt.show()
hr_data = extracted_hr_features_roll_avg[['times', 'hr_rate']]
hr_data['times'] = hr_data['times'].astype(int)
band_data = pd.DataFrame()
band_data['times'] = band_data_new_data[0]
band_data['times'] = band_data['times'].astype(int)
band_data['rate'] = band_data_new_data[1]
band_data = band_data.drop_duplicates(subset=['times'])
together_data = pd.merge(hr_data, band_data, on='times')
together_data = together_data.dropna()
# new_hr = res_ind[intersect]
# new_band = band_data_new__data[1][intersect]
mae = metrics.mean_absolute_error(together_data['rate'], together_data['hr_rate'])
mse = metrics.mean_squared_error(together_data['rate'], together_data['hr_rate'])
corr = stats.pearsonr(together_data['rate'], together_data['hr_rate'])
# print('mae amd mse: ', mae, mse)
return mae, mse, corr
def compare_hr_for_all_idents(path, idents):
compare_metrics = pd.DataFrame()
for i in idents:
print(i)
mae, mse, cor = compare_extracted_hr_and_band(path, i) # uncomment if comparing errors
df = | pd.DataFrame([[i, mae, mse, cor[0]]], columns=['ID', 'MAE', 'MSE', 'COR']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import simpy
from sim_utils.audit import Audit
from sim_utils.data import Data
from sim_utils.patient import Patient
import warnings
warnings.filterwarnings("ignore")
class Model(object):
def __init__(self, scenario):
"""
"""
self.env = simpy.Environment()
self.params = scenario
self.data = Data(self.params)
self.audit = Audit()
self.patients = []
self.patient_id_count = 0
# Set up 1D NumPy array for patient counts per unit
number_hospitals = self.data.units.shape[0]
self.unit_occupancy = np.zeros(number_hospitals)
self.unit_admissions = np.zeros(number_hospitals)
# Count displaced patients
self.unit_occupancy_displaced_preferred = np.zeros(number_hospitals)
self.unit_occupancy_displaced_destination = np.zeros(number_hospitals)
self.unit_occupancy_waiting_preferred = np.zeros(number_hospitals)
# Set up tracker dictionary (total patients updated after warmup)
self.tracker = {
'total_patients': 0,
'total_patients_asu': 0,
'total_patients_waited': 0,
'total_patients_displaced': 0,
'current_patients': 0,
'current_asu_patients_all': 0,
'current_asu_patients_allocated': 0,
'current_asu_patients_unallocated': 0,
'current_asu_patients_displaced': 0,
'patient_waiting_time': []
}
def assign_asu_los(self, patient):
"""Assign length of stay based on assigned ASU unit"""
los_mean = self.data.units.iloc[patient.assigned_asu_index]['los_mean']
los_sd = los_mean * self.params.los_cv
los = max(np.random.normal(los_mean, los_sd), 0.01)
patient.los_asu = los
return
def end_run_routine(self):
"""
Data handling at end of run
"""
self.global_audit = | pd.DataFrame(self.audit.global_audit) | pandas.DataFrame |
"""Locator functions to interact with geographic data"""
import numpy as np
import pandas as pd
import flood_tool.geo as geo
__all__ = ['Tool']
def clean_postcodes(postcodes):
"""
Takes list or array of postcodes, and returns it in a cleaned numpy array
"""
postcode_df = pd.DataFrame({'Postcode':postcodes})
postcode_df['Postcode'] = postcode_df['Postcode'].str.upper()
# If length is not 7 get rid of spaces. This fixes e.g. "SW19 2AZ" -> "SW192AZ"
postcode_df['Postcode'] = postcode_df['Postcode'].where(
postcode_df['Postcode'].str.len() == 7, postcode_df['Postcode'].str.replace(" ", ""))
# If length is 5 (e.g. "W67HZ") add two spaces in the middle (-> "W6 7HZ")
postcode_df['Postcode'] = postcode_df['Postcode'].where(
postcode_df['Postcode'].str.len() != 5,
postcode_df['Postcode'].str[:2]+ " " + postcode_df['Postcode'].str[2:])
# If length is 6 (e.g. "SW72AZ") add a space in the middle and end(-> "SW7 2AZ")
postcode_df['Postcode'] = postcode_df['Postcode'].where(
postcode_df['Postcode'].str.len() != 6,
postcode_df['Postcode'].str[:3]+ " " + postcode_df['Postcode'].str[3:])
return postcode_df['Postcode'].to_numpy()
class Tool(object):
"""Class to interact with a postcode database file."""
def __init__(self, postcode_file=None, risk_file=None, values_file=None):
"""
Reads postcode and flood risk files and provides a postcode locator service.
Parameters
---------
postcode_file : str, optional
Filename of a .csv file containing geographic location data for postcodes.
risk_file : str, optional
Filename of a .csv file containing flood risk data.
values_file : str, optional
Filename of a .csv file containing property value data for postcodes.
"""
self.postcode_file = postcode_file
self.risk_file = risk_file
self.values_file = values_file
self.postcode_df = pd.read_csv(self.postcode_file)
# Make data frame of values & clean the postcodes in them.
self.values_df = | pd.read_csv(self.values_file) | pandas.read_csv |
#!/usr/bin/env python
# Copyright (C) 2019 <NAME>
import crispy
import logging
import numpy as np
import pandas as pd
import pkg_resources
import seaborn as sns
from natsort import natsorted
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from scipy import stats
from crispy.BGExp import GExp
from crispy.QCPlot import QCplot
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from crispy.CrispyPlot import CrispyPlot
from scipy.stats import pearsonr, skewtest
from sklearn.model_selection import ShuffleSplit
from sklearn.linear_model.base import LinearRegression
from sklearn.metrics import jaccard_score, matthews_corrcoef
LOG = logging.getLogger("Crispy")
DPATH = pkg_resources.resource_filename("crispy", "data/")
RPATH = pkg_resources.resource_filename("notebooks", "depmap/reports/")
KWS_SCATTER = dict(edgecolor="w", lw=0.3, s=10, alpha=0.6)
KWS_LINE = dict(lw=1.0, color=CrispyPlot.PAL_DBGD[1], alpha=1.0)
KWS_JOINT = dict(lowess=False, scatter_kws=KWS_SCATTER, line_kws=KWS_LINE)
KWS_MARGINAL = dict(kde=False, hist_kws={"linewidth": 0})
KWS_ANNOT = dict(stat="R")
class GDSCGexp:
GEXP_FILE = f"{DPATH}/rnaseq_voom.csv.gz"
SAMPLESHEET_FILE = f"{DPATH}/ModelList_20191106.csv"
GROWTH_FILE = f"{DPATH}/GrowthRates_v1.3.0_20190222.csv"
TISSUE_PAL_FILE = f"{DPATH}/tissue_palette.csv"
def __init__(self):
self.growth = pd.read_csv(self.GROWTH_FILE)
self.ss = pd.read_csv(self.SAMPLESHEET_FILE, index_col=0)
self.ss["growth"] = self.growth.groupby("model_id")["GROWTH_RATE"].mean()
self.gexp = pd.read_csv(self.GEXP_FILE, index_col=0)
self.pal_tissue = pd.read_csv(self.TISSUE_PAL_FILE, index_col=0)["color"]
@staticmethod
def gene_lh(tcga_genes, gtex_genes, tcga_thres=(-2.5, 7.5), gtex_thres=(0.5, 2)):
genes_low_tcga = set(tcga_genes[tcga_genes < tcga_thres[0]].index)
genes_low_gtex = set(gtex_genes[gtex_genes < gtex_thres[0]].index)
genes_high_tcga = set(tcga_genes[tcga_genes > tcga_thres[1]].index)
genes_high_gtex = set(gtex_genes[gtex_genes > gtex_thres[1]].index)
return dict(
low=set.intersection(genes_low_tcga, genes_low_gtex),
high=set.intersection(genes_high_tcga, genes_high_gtex),
)
class TCGAGexp:
GEXP_FILE = f"{DPATH}/GSE62944_merged_expression_voom.tsv"
CANCER_TYPE_FILE = f"{DPATH}/GSE62944_06_01_15_TCGA_24_CancerType_Samples.txt"
def __init__(self, gene_subset=None):
self.gexp = pd.read_csv(self.GEXP_FILE, index_col=0, sep="\t")
if gene_subset is not None:
self.gexp = self.gexp[self.gexp.index.isin(gene_subset)]
self.gexp_genes = self.gexp.median(1).sort_values(ascending=False)
self.gexp_genes_std = self.gexp.std(1).sort_values(ascending=False)
self.gexp_genes_skew = pd.Series(
skewtest(self.gexp.T)[0], index=self.gexp.index
)
self.cancer_type = pd.read_csv(
self.CANCER_TYPE_FILE, sep="\t", header=None, index_col=0
)[1]
self.cancer_type = self.cancer_type.append(
pd.Series(
{x: "Normal" for x in self.gexp.columns if x not in self.cancer_type}
)
)
colors = (
sns.color_palette("tab20c").as_hex() + sns.color_palette("tab20b").as_hex()
)
self.cancer_type_palette = dict(
zip(natsorted(self.cancer_type.value_counts().index), colors)
)
class GTEXGexp:
GEXP_FILE = f"{DPATH}/GTEx_Analysis_2017-06-05_v8_RNASeQCv1.1.9_gene_median_tpm.gct"
def __init__(self, gene_subset=None):
self.gexp = pd.read_csv(self.GEXP_FILE, sep="\t")
self.gexp = self.gexp.drop(columns=["Name"]).groupby("Description").median()
if gene_subset is not None:
self.gexp = self.gexp[self.gexp.index.isin(gene_subset)]
self.gexp_genes = np.log10(self.gexp + 1).median(1).sort_values(ascending=False)
self.gexp_genes_std = (
np.log10(self.gexp + 1).std(1).sort_values(ascending=False)
)
self.gexp_genes_skew = pd.Series(
skewtest(np.log10(self.gexp + 1).T)[0], index=self.gexp.index
)
def pc_labels(n):
return [f"PC{i}" for i in np.arange(1, n + 1)]
def dim_reduction(
df,
input_pca_to_tsne=True,
pca_ncomps=50,
tsne_ncomps=2,
perplexity=30.0,
early_exaggeration=12.0,
learning_rate=200.0,
n_iter=1000,
):
# PCA
df_pca = PCA(n_components=pca_ncomps).fit_transform(df.T)
df_pca = pd.DataFrame(df_pca, index=df.T.index, columns=pc_labels(pca_ncomps))
# tSNE
df_tsne = TSNE(
n_components=tsne_ncomps,
perplexity=perplexity,
early_exaggeration=early_exaggeration,
learning_rate=learning_rate,
n_iter=n_iter,
).fit_transform(df_pca if input_pca_to_tsne else df.T)
df_tsne = pd.DataFrame(df_tsne, index=df_pca.index if input_pca_to_tsne else df.T.index, columns=pc_labels(tsne_ncomps))
return df_tsne, df_pca
def plot_dim_reduction(data, palette=None, ctype="tSNE"):
if "tissue" not in data.columns:
data = data.assign(tissue="All")
if palette is None:
palette = dict(All=CrispyPlot.PAL_DBGD[0])
fig, ax = plt.subplots(1, 1, figsize=(4.0, 4.0), dpi=600)
for t, df in data.groupby("tissue"):
ax.scatter(
df["PC1"], df["PC2"], c=palette[t], marker="o", edgecolor="", s=5, label=t, alpha=.8
)
ax.set_xlabel("Dimension 1")
ax.set_ylabel("Dimension 2")
ax.axis("off" if ctype == "tSNE" else "on")
ax.legend(
loc="center left",
bbox_to_anchor=(1, 0.5),
prop={"size": 4},
frameon=False,
title="Tissue",
).get_title().set_fontsize("5")
return ax
if __name__ == "__main__":
# GDSC GExp
#
gdsc = GDSCGexp()
# TCGA imports
#
tcga = TCGAGexp(gene_subset=set(gdsc.gexp.index))
# GTEx gene median expression
#
gtex = GTEXGexp(gene_subset=set(gdsc.gexp.index))
#
#
tcga_thres, gtex_thres = (-2.5, 7.5), (0.5, 2.0)
#
#
pal_genes_lh = {"low": "#fc8d62", "high": "#2b8cbe"}
genes_lh = gdsc.gene_lh(
tcga.gexp_genes, gtex.gexp_genes, tcga_thres=tcga_thres, gtex_thres=gtex_thres
)
genes_lh_df = pd.DataFrame(
[dict(gene=g, gtype=gtype) for gtype in genes_lh for g in genes_lh[gtype]]
)
genes_lh_df.to_csv(f"{DPATH}/GExp_genesets_20191126.csv", index=False)
# TCGA and GTEX gene histograms
#
for n, df in [("TCGA", tcga.gexp_genes), ("GTEX", gtex.gexp_genes)]:
plt.figure(figsize=(2.5, 1.5), dpi=600)
sns.distplot(
df,
hist=False,
kde_kws={"cut": 0, "shade": True},
color=CrispyPlot.PAL_DBGD[0],
)
plt.title(n)
plt.grid(True, ls=":", lw=0.1, alpha=1.0, zorder=0, axis="x")
plt.xlabel(
"Gene median (RPKM voom)" if n == "TCGA" else "Gene median (TPM log10)"
)
plt.savefig(
f"{RPATH}/genes_histogram_{n}.pdf", bbox_inches="tight", transparent=True
)
plt.close("all")
# TCGA dimension reduction
#
tcga_gexp_tsne, tcga_gexp_pca = dim_reduction(tcga.gexp)
for ctype, df in [("tSNE", tcga_gexp_tsne), ("PCA", tcga_gexp_pca)]:
plot_df = pd.concat(
[df, tcga.cancer_type._set_name("tissue")], axis=1, sort=False
).dropna()
ax = plot_dim_reduction(
plot_df, ctype=ctype, palette=tcga.cancer_type_palette
)
ax.set_title(f"{ctype} - TCGA GExp")
plt.savefig(
f"{RPATH}/tcga_gexp_{ctype}.pdf", bbox_inches="tight", transparent=True
)
plt.close("all")
# TCGA and GTEX gene correlation
#
plot_df = pd.concat(
[
tcga.gexp_genes.rename("TCGA_median"),
tcga.gexp_genes_std.rename("TCGA_std"),
tcga.gexp_genes_skew.rename("TCGA_skew"),
gtex.gexp_genes.rename("GTEX_median"),
gtex.gexp_genes_std.rename("GTEX_std"),
gtex.gexp_genes_skew.rename("GTEX_skew"),
],
axis=1,
sort=False,
).dropna()
for xx, yy in [("TCGA_median", "GTEX_median")]:
g = sns.JointGrid(x=xx, y=yy, data=plot_df, space=0)
g = g.plot_joint(
plt.hexbin, cmap="Spectral_r", gridsize=100, mincnt=1, bins="log", lw=0
)
g.ax_joint.grid(True, ls=":", lw=0.1, alpha=1.0, zorder=0)
x_lim, y_lim = g.ax_joint.get_xlim(), g.ax_joint.get_ylim()
for i, n in enumerate(["low", "high"]):
x, y = tcga_thres[i], gtex_thres[i]
w, h = x_lim[i] - x, y_lim[i] - y
rect = patches.Rectangle(
(x, y),
w,
h,
linewidth=0,
facecolor=CrispyPlot.PAL_DBGD[0],
alpha=0.25,
zorder=0,
)
g.ax_joint.annotate(
f"N={len(genes_lh[n])}",
(x + w / 2, y + h / 2),
color="k",
weight="bold",
fontsize=6,
ha="center",
va="center",
)
g.ax_joint.add_patch(rect)
g.ax_joint.set_ylim(y_lim)
g.ax_joint.set_xlim(x_lim)
g = g.plot_marginals(
sns.distplot,
kde=False,
color=CrispyPlot.PAL_DBGD[0],
hist_kws={"linewidth": 0},
)
g.set_axis_labels(
f"{xx.replace('_', ' ')} (voom)", f"{yy.replace('_', ' ')} (TPM log10)"
)
plt.gcf().set_size_inches(2.5, 2.5)
plt.savefig(
f"{RPATH}/genes_TCGA_GTEX_corrplot_{xx.split('_')[1]}.pdf",
bbox_inches="tight",
transparent=True,
)
plt.close("all")
# Mean vs Std
#
for n, (x, y) in [
("TCGA", ("TCGA_median", "TCGA_std")),
("GTEX", ("GTEX_median", "GTEX_std")),
]:
g = sns.JointGrid(x=x, y=y, data=plot_df, space=0)
g = g.plot_joint(
plt.hexbin, cmap="Spectral_r", gridsize=100, mincnt=1, bins="log", lw=0
)
for s in ["low", "high"]:
g.ax_joint.scatter(
plot_df.loc[genes_lh[s], x],
plot_df.loc[genes_lh[s], y],
c=pal_genes_lh[s],
marker="o",
edgecolor="white",
linewidth=0.1,
s=3,
alpha=1.0,
label=s,
)
g.ax_joint.legend(frameon=False)
g.ax_joint.grid(True, ls=":", lw=0.1, alpha=1.0, zorder=0)
g = g.plot_marginals(
sns.distplot,
kde=False,
color=CrispyPlot.PAL_DBGD[0],
hist_kws={"linewidth": 0},
)
x_label = (
"TCGA gene median (voom)" if n == "TCGA" else "GTEX gene median (TPM log10)"
)
y_label = "TCGA gene std (voom)" if n == "TCGA" else "GTEX gene std (TPM log10)"
g.set_axis_labels(x_label, y_label)
plt.gcf().set_size_inches(2.5, 2.5)
plt.savefig(
f"{RPATH}/bgexp/genes_std_{n}.pdf", bbox_inches="tight", transparent=True
)
plt.close("all")
# Genes distribution in GDSC
#
plt.figure(figsize=(2.5, 1.5), dpi=600)
for s in genes_lh:
sns.distplot(
gdsc.gexp.loc[genes_lh[s]].median(1),
hist=False,
label=s,
kde_kws={"cut": 0, "shade": True},
color=pal_genes_lh[s],
)
plt.grid(True, ls=":", lw=0.1, alpha=1.0, zorder=0, axis="x")
plt.xlabel("GDSC gene median (voom)")
plt.legend(frameon=False, prop={"size": 5})
plt.savefig(
f"{RPATH}/genes_lh_gdsc_histograms.pdf", bbox_inches="tight", transparent=True
)
plt.close("all")
# Discretise gene-expression (calculate)
#
gexp = GExp(genesets=genes_lh)
for s in gdsc.gexp:
sample = gdsc.gexp[s]
LOG.info(f"Sample={s}")
sample_disc = gexp.discretise(sample, max_fpr=0.1)
LOG.info(sample_disc[gexp.genesets].sum())
# Export report
sample_disc.to_csv(f"{RPATH}/bgexp/bgexp_{s}.csv")
# GDSC discretised gene-expression (import)
#
gdsc_disc = pd.concat(
[
pd.read_csv(f"{RPATH}/bgexp/bgexp_{s}.csv", index_col=0)[
["high", "low"]
].add_suffix(f"_{s}")
for s in gdsc.gexp
],
axis=1,
)
# GDSC discretised log-ratio
#
gdsc_lr = pd.DataFrame(
{
s: pd.read_csv(f"{RPATH}/bgexp/bgexp_{s}.csv", index_col=0)["lr_mean"]
for s in gdsc.gexp
}
)
# Assemble discretised table
#
def sample_melt_df(sample):
df = | pd.read_csv(f"{RPATH}/bgexp/bgexp_{sample}.csv", index_col=0) | pandas.read_csv |
# Copyright (c) 2013, GreyCube Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
import pandas
from operator import itemgetter
def execute(filters=None):
return get_columns(filters), get_data(filters)
def get_columns(filters):
if filters.get("show_summary"):
return [
dict(
label="Salesman",
fieldname="sales_partner",
width=160,
),
dict(
label="Commission",
fieldname="commission_amount",
fieldtype="Currency",
width=110,
),
]
else:
return [
dict(
label="Salesman",
fieldname="sales_partner",
width=160,
),
dict(
label="Item Code",
fieldname="item_code",
width=160,
),
dict(
label="Item Name",
fieldname="item_name",
width=160,
),
dict(
label="Qty",
fieldname="qty",
fieldtype="Float",
width=90,
),
dict(
label="Sales UOM",
fieldname="sales_uom",
fieldtype="Data",
width=90,
),
dict(
label="Sales Amount",
fieldname="sales_amount",
fieldtype="Currency",
width=110,
),
dict(
label="Customer Group",
fieldname="customer_group",
width=110,
),
dict(
label="Percentage",
fieldname="commission_percent",
fieldtype="Float",
width=90,
),
dict(
label="Commission",
fieldname="commission_amount",
fieldtype="Currency",
width=110,
),
]
def get_conditions(filters):
where_clause = ["si.docstatus = 1"]
if filters.get("from_date"):
where_clause.append("si.posting_date >= %(from_date)s")
if filters.get("to_date"):
where_clause.append("si.posting_date <= %(to_date)s")
if filters.get("salesman"):
where_clause.append("si.sales_partner = %(salesman)s")
return " where " + " and ".join(where_clause) if where_clause else ""
def get_data(filters):
data = frappe.db.sql(
"""
with fn_comm as
(
select icgc.customer_group, icgc.item item_code, sd.from,
sd.to, sd.commission_percent, icgc.default_sales_uom
from `tabItem Customer Group Commission` icgc
inner join `tabItem Customer Group Commission Slab Detail` sd on sd.parent = icgc.name
),
fn_sales as
(
select si.sales_partner, cus.customer_group, sit.item_code, sit.item_name,
sum(sit.base_net_amount) sales_amount,
sum(round(sit.qty * sale_ucd.conversion_factor/default_sales_ucd.conversion_factor,2)) qty,
it.sales_uom
from `tabSales Invoice` si
inner join `tabSales Invoice Item` sit on sit.parent = si.name
inner join tabItem it on it.item_code = sit.item_code
inner join `tabUOM Conversion Detail` sale_ucd on sale_ucd.parent = sit.item_code and sale_ucd.uom = sit.uom
inner join `tabUOM Conversion Detail` default_sales_ucd on default_sales_ucd.parent = sit.item_code and default_sales_ucd.uom = it.sales_uom
inner join tabCustomer cus on cus.name = si.customer
{where_conditions}
group by si.sales_partner, cus.customer_group, sit.item_code, sit.item_name
)
select fn_sales.sales_partner, fn_sales.customer_group, fn_sales.item_code, fn_sales.item_name, fn_sales.sales_uom,
sum(fn_sales.qty) qty, sum(fn_sales.sales_amount) sales_amount,
coalesce(fn_comm.commission_percent,0) commission_percent,
round(sum(coalesce(fn_comm.commission_percent,0) * fn_sales.sales_amount * .01),2) commission_amount
from fn_sales
left outer join fn_comm on fn_comm.customer_group = fn_sales.customer_group
and fn_comm.item_code = fn_sales.item_code
and fn_sales.qty BETWEEN fn_comm.from and fn_comm.to
group by fn_sales.sales_partner, fn_sales.item_code, fn_sales.item_name,
fn_sales.sales_uom, fn_comm.commission_percent, fn_sales.customer_group
""".format(
where_conditions=get_conditions(filters)
),
filters,
as_dict=True,
# debug=True,
)
if filters.get("show_summary"):
df = | pandas.DataFrame.from_records(data) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
from scipy import skew, kurtosis # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludeds_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
result = getattr(df, method)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
| tm.assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
"""
"Stacking: LGB, XGB, Cat with and without imputation (old & new LGBs),tsne,logistic"
"""
import os
from timeit import default_timer as timer
from datetime import datetime
from functools import reduce
import pandas as pd
import src.common as common
import src.config.constants as constants
import src.munging as process_data
import src.modeling as model
from sklearn.model_selection import KFold
from sklearn.calibration import CalibratedClassifierCV
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import LogisticRegression
common.set_timezone()
start = timer()
# Create RUN_ID
RUN_ID = datetime.now().strftime("%m%d_%H%M")
MODEL_NAME = os.path.basename(__file__).split(".")[0]
SEED = 42
EXP_DETAILS = "Stacking: LGB, XGB, Cat with and without imputation (old & new LGBs),tsne,logistic"
IS_TEST = False
PLOT_FEATURE_IMPORTANCE = False
TARGET = "claim"
MODEL_TYPE = "Ranking"
LOGGER_NAME = "ranking"
logger = common.get_logger(LOGGER_NAME, MODEL_NAME, RUN_ID, constants.LOG_DIR)
common.set_seed(SEED)
logger.info(f"Running for Model Number [{MODEL_NAME}] & [{RUN_ID}]")
common.update_tracking(RUN_ID, "model_number", MODEL_NAME, drop_incomplete_rows=True)
common.update_tracking(RUN_ID, "model_type", MODEL_TYPE)
common.update_tracking(RUN_ID, "metric", "roc_auc")
train_df, test_df, sample_submission_df = process_data.read_processed_data(
logger, constants.PROCESSED_DATA_DIR, train=True, test=True, sample_submission=True
)
# Read different submission files and merge them to create dataset
# for level 2
sub_1_predition_name = (
"sub_lgb_K5_nonull_mean_sum_max_no_imp_no_scaler_params_K_0924_1159_0.81605.gz"
)
sub_1_oof_name = (
"oof_lgb_K5_nonull_mean_sum_max_no_imp_no_scaler_params_K_0924_1159_0.81605.csv"
)
sub_1_test_pred = pd.read_csv(f"{constants.SUBMISSION_DIR}/{sub_1_predition_name}")
sub_1_oof_pred = | pd.read_csv(f"{constants.OOF_DIR}/{sub_1_oof_name}") | pandas.read_csv |
import datetime
import json
import numpy as np
import requests
import pandas as pd
import streamlit as st
from copy import deepcopy
from fake_useragent import UserAgent
import webbrowser
from footer_utils import image, link, layout, footer
service_input = st.selectbox('Select Service',["","CoWin Vaccine Slot","Oxygen","Beds","Ambulance","Medicines","Miscellaneous","Important Links"])
if service_input =="CoWin Vaccine Slot":
temp_user_agent = UserAgent()
browser_header = {'User-Agent': temp_user_agent.random}
st.title("Vacciation Slot Availability")
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def import_dataset():
df = pd.read_csv("Combined_List.csv")
return df
def district_mapping(state_inp,df):
return list(df[df['State_Name']==state_inp]['District_Name'])
def column_mapping(df,col,value):
df_temp = deepcopy(df.loc[df[col] == value, :])
return df_temp
def availability_check(df,col,value):
df_temp2 = deepcopy(df.loc[df[col]>value, :])
return df_temp2
@st.cache(allow_output_mutation=True)
def Pageviews():
return []
mapping_df= import_dataset()
state_name = list((mapping_df['State_Name'].sort_values().unique()))
district_name = list((mapping_df['District_Name'].sort_values().unique()))
age = [18,45]
date_input = st.sidebar.slider('Select Date Range', min_value=0, max_value=50)
state_input = st.sidebar.selectbox('Select State',state_name)
district_input = st.sidebar.selectbox('Select District',district_mapping(state_input,mapping_df))
age_input = st.sidebar.selectbox('Select Minimum Age',[""]+age)
fee_input = st.sidebar.selectbox('Select Free or Paid',[""]+['Free','Paid'])
vaccine_input = st.sidebar.selectbox("Select Vaccine",[""]+['COVISHIELD','COVAXIN'])
available_input = st.sidebar.selectbox("Select Availability",[""]+['Available'])
col_rename = {
'date': 'Date',
'min_age_limit': 'Minimum Age Limit',
'available_capacity': 'Available Capacity',
'vaccine': 'Vaccine',
'pincode': 'Pincode',
'name': 'Hospital Name',
'state_name' : 'State',
'district_name' : 'District',
'block_name': 'Block Name',
'fee_type' : 'Fees'
}
DIST_ID = mapping_df[mapping_df['District_Name']==district_input]['District_ID'].values[0]
base_date = datetime.datetime.today()
date_list = [base_date+ datetime.timedelta(days = x) for x in range(date_input+1)]
date_string = [i.strftime('%d-%m-%y') for i in date_list]
final_df =None
for INP_DATE in date_string:
URL = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id={}&date={}".format(DIST_ID, INP_DATE)
data = requests.get(URL,headers = browser_header)
if (data.ok) and ('centers' in data.json()):
data_json = data.json()['centers']
if data_json is not None:
data_df = pd.DataFrame(data_json)
if len(data_df):
data_df = data_df.explode('sessions')
data_df['date']= data_df.sessions.apply(lambda x: x['date'])
data_df['available_capacity']= data_df.sessions.apply(lambda x: x['available_capacity'])
data_df['min_age_limit']= data_df.sessions.apply(lambda x: x['min_age_limit'])
data_df['vaccine']= data_df.sessions.apply(lambda x: x['vaccine'])
data_df = data_df[["date","state_name", "district_name", "name", "block_name", "pincode", "available_capacity", "min_age_limit", "vaccine", "fee_type"]]
if final_df is not None:
final_df = pd.concat([final_df,data_df])
else:
final_df = deepcopy(data_df)
else:
st.error('Nothing extracted from the API')
else:
st.write("As of now Center has restricted real-time data sharing through API's, you can only see the real-time data once the restrictions are removed.")
st.markdown("[Read more about this here->](https://government.economictimes.indiatimes.com/news/governance/centre-restricts-real-time-data-sharing-for-blocking-vaccination-slots-on-cowin-portal/82458404)")
st.write('\n\n')
st.write('Nevertheless, You can search and find other Services available on this site.:wink:')
st.write('\n\n')
if (final_df is not None) and (len(final_df)):
final_df.drop_duplicates(inplace=True)
final_df.rename(columns = col_rename,inplace=True)
if age_input != "":
final_df = column_mapping(final_df,'Minimum Age Limit',age_input)
if fee_input != "":
final_df = column_mapping(final_df,'Fees',fee_input)
if vaccine_input != "":
final_df = column_mapping(final_df,'Vaccine',vaccine_input)
if available_input != "":
final_df = availability_check(final_df,'Available Capacity',0)
pincodes = list(np.unique(final_df["Pincode"].values))
pincode_inp = st.sidebar.selectbox('Select Pincode', [""] + pincodes)
if pincode_inp != "":
final_df = column_mapping(final_df, "Pincode", pincode_inp)
final_df['Date'] = pd.to_datetime(final_df['Date'],dayfirst=True)
final_df = final_df.sort_values(by='Date')
final_df['Date'] = final_df['Date'].apply(lambda x:x.strftime('%d-%m-%y'))
table = deepcopy(final_df)
table.reset_index(inplace=True, drop=True)
st.table(table)
else:
if st.button('Book Your Slot'):
js = "window.open('https://www.cowin.gov.in/home')" # New tab or window
#js = "window.location.href = 'https://www.streamlit.io/'" # Current tab
html = '<img src onerror="{}">'.format(js)
div = Div(text=html)
st.bokeh_chart(div)
st.write('\n\n')
st.write('\n\n')
st.write('\n\n\n\n')
pageviews=Pageviews()
pageviews.append('dummy')
pg_views = len(pageviews)
footer(pg_views)
elif service_input=="Oxygen":
st.write(':point_left:')
st.text('Filter services by State and City')
st.write('\n\n')
st.title("Oxygen Availability")
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def import_dataset():
df = pd.read_csv("oxy_final.csv")
return df
def city_mapping(state,df):
return list(np.unique(list(df[df['State']==state]['City'])))
def column_mapping(df,col,value):
df_temp = deepcopy(df.loc[df[col] == value, :])
return df_temp
@st.cache(allow_output_mutation=True)
def Pageviews():
return []
oxy_df = import_dataset()
valid_states = list(np.unique(oxy_df.State))
state_input = st.sidebar.selectbox('Select State',valid_states)
city_input = st.sidebar.selectbox('Select City',city_mapping(state_input,oxy_df))
st.sidebar.text("More States and Cities will be\navailable in the future")
final_df = column_mapping(oxy_df,'State',state_input)
final_df = column_mapping(oxy_df,'City',city_input)
table = deepcopy(final_df)
table.reset_index(inplace=True, drop=True)
st.table(table)
st.subheader('Chaos is a part of evolution!:muscle:')
st.write('\n\n\n\n\n\n\n\n')
pageviews=Pageviews()
pageviews.append('dummy')
pg_views = len(pageviews)
footer(pg_views)
elif service_input=="Beds":
st.write(':point_left:')
st.text('Filter services by State and City')
st.write('\n\n')
st.title("Beds Availability")
@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def import_dataset():
df = | pd.read_csv("beds_final.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categoires (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor useage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next one are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparison take the categories order in
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deperecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertTrue(c.min(), 4)
self.assertTrue(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical return np.array like pd.Series, but different from
# np.array.searchsorted()
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5]
) # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whatever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name
) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(s)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test all length
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
expected = DataFrame({'values': Series(
[1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
], index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
], names=['A', 'B', 'C']))}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0],
index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']],
names=['A', 'B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c)
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
exp = np.array([4, 3, 2, 1])
self.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
cats = pd.Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = pd.Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2, 0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.loc["j", "cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
# res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.ix["j", 0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2, 0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j", "cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.get_value("j", "cats")
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
# GH 7918
cats = Categorical(
["a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(
['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.ix["h":"j", 0:1]
expected = DataFrame({'cats': Series(
Categorical(
['a', 'b', 'b'], categories=['a', 'b', 'c']), index=['h', 'i',
'j'])})
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = pd.Categorical(
["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = pd.DataFrame(
{"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = pd.DataFrame(
{"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = pd.DataFrame(
{"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = pd.DataFrame(
{"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2, 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2, :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.loc["j":"k", "cats"] = ["c", "c"]
# ix
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.ix["j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.ix[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.ix["j", 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.ix["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.ix["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.ix["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.ix["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.ix["j":"k", 0] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iat[2, 0] = "c"
self.assertRaises(ValueError, f)
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.at["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# fancy indexing
catsf = pd.Categorical(
["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"])
idxf = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = pd.DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
tm.assert_frame_equal(df, exp_multi_row)
# set_value
df = orig.copy()
df.set_value("j", "cats", "b")
tm.assert_frame_equal(df, exp_single_cats_value)
def f():
df = orig.copy()
df.set_value("j", "cats", "c")
self.assertRaises(ValueError, f)
# Assigning a Category to parts of a int/... column uses the values of
# the Catgorical
df = pd.DataFrame({"a": [1, 1, 1, 1, 1],
"b": ["a", "a", "a", "a", "a"]})
exp = pd.DataFrame({"a": [1, "b", "b", 1, 1],
"b": ["a", "a", "b", "b", "a"]})
df.loc[1:2, "a"] = pd.Categorical(["b", "b"], categories=["a", "b"])
df.loc[2:3, "b"] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp)
# Series
orig = Series(pd.Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(pd.Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(
pd.Categorical(["b", "a"],
categories=["a", "b"]), index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_comparisons(self):
tests_data = [(list("abc"), list("cba"), list("bbb")),
([1, 2, 3], [3, 2, 1], [2, 2, 2])]
for data, reverse, base in tests_data:
cat_rev = pd.Series(pd.Categorical(data, categories=reverse,
ordered=True))
cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse,
ordered=True))
cat = pd.Series(pd.Categorical(data, ordered=True))
cat_base = pd.Series(pd.Categorical(
base, categories=cat.cat.categories, ordered=True))
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
def f():
cat > "b"
self.assertRaises(TypeError, f)
cat = Series(Categorical(list("abc"), ordered=False))
def f():
cat > "b"
self.assertRaises(TypeError, f)
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
self.assertRaises(TypeError, lambda: cat < "d")
self.assertRaises(TypeError, lambda: cat > "d")
self.assertRaises(TypeError, lambda: "d" < cat)
self.assertRaises(TypeError, lambda: "d" > cat)
self.assert_series_equal(cat == "d", Series([False, False, False]))
self.assert_series_equal(cat != "d", Series([True, True, True]))
# And test NaN handling...
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_cat_equality(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'), dtype="category")
b = Series(list('abc'), dtype="object")
c = Series(['a', 'b', 'cc'], dtype="object")
d = Series(list('acb'), dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
self.assertFalse((a == 'a').all())
self.assertTrue(((a != 'a') == ~(a == 'a')).all())
self.assertFalse(('a' == a).all())
self.assertTrue((a == 'a')[0])
self.assertTrue(('a' == a)[0])
self.assertFalse(('a' != a)[0])
# vs list-like
self.assertTrue((a == a).all())
self.assertFalse((a != a).all())
self.assertTrue((a == list(a)).all())
self.assertTrue((a == b).all())
self.assertTrue((b == a).all())
self.assertTrue(((~(a == b)) == (a != b)).all())
self.assertTrue(((~(b == a)) == (b != a)).all())
self.assertFalse((a == c).all())
self.assertFalse((c == a).all())
self.assertFalse((a == d).all())
self.assertFalse((d == a).all())
# vs a cat-like
self.assertTrue((a == e).all())
self.assertTrue((e == a).all())
self.assertFalse((a == f).all())
self.assertFalse((f == a).all())
self.assertTrue(((~(a == e) == (a != e)).all()))
self.assertTrue(((~(e == a) == (e != a)).all()))
self.assertTrue(((~(a == f) == (a != f)).all()))
self.assertTrue(((~(f == a) == (f != a)).all()))
# non-equality is not comparable
self.assertRaises(TypeError, lambda: a < b)
self.assertRaises(TypeError, lambda: b < a)
self.assertRaises(TypeError, lambda: a > b)
self.assertRaises(TypeError, lambda: b > a)
def test_concat(self):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = pd.DataFrame({"cats": cat2,
"vals": vals2}, index=pd.Index([0, 1, 0, 1]))
res = pd.concat([df, df])
tm.assert_frame_equal(exp, res)
# Concat should raise if the two categoricals do not have the same
# categories
cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_wrong_categories = pd.DataFrame({"cats": cat3, "vals": vals3})
def f():
pd.concat([df, df_wrong_categories])
self.assertRaises(ValueError, f)
# GH 7864
# make sure ordering is preserverd
df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"])
df['grade'].cat.set_categories(['e', 'a', 'b'])
df1 = df[0:3]
df2 = df[3:]
self.assert_numpy_array_equal(df['grade'].cat.categories,
df1['grade'].cat.categories)
self.assert_numpy_array_equal(df['grade'].cat.categories,
df2['grade'].cat.categories)
dfx = pd.concat([df1, df2])
dfx['grade'].cat.categories
self.assert_numpy_array_equal(df['grade'].cat.categories,
dfx['grade'].cat.categories)
def test_concat_preserve(self):
# GH 8641
# series concat not preserving category dtype
s = Series(list('abc'), dtype='category')
s2 = Series(list('abd'), dtype='category')
def f():
pd.concat([s, s2])
self.assertRaises(ValueError, f)
result = pd.concat([s, s], ignore_index=True)
expected = Series(list('abcabc')).astype('category')
tm.assert_series_equal(result, expected)
result = pd.concat([s, s])
expected = Series(
list('abcabc'), index=[0, 1, 2, 0, 1, 2]).astype('category')
tm.assert_series_equal(result, expected)
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list('cab'))})
result = pd.concat([df2, df2])
expected = DataFrame({'A': pd.concat([a, a]),
'B': pd.concat([b, b]).astype(
'category', categories=list('cab'))})
tm.assert_frame_equal(result, expected)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list(
'cab'))}).set_index('B')
result = pd.concat([df2, df2])
expected = DataFrame({'A': pd.concat([a, a]),
'B': pd.concat([b, b]).astype(
'category', categories=list(
'cab'))}).set_index('B')
tm.assert_frame_equal(result, expected)
# wrong catgories
df3 = DataFrame({'A': a,
'B': b.astype('category', categories=list(
'abc'))}).set_index('B')
self.assertRaises(TypeError, lambda: pd.concat([df2, df3]))
def test_append(self):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = pd.DataFrame({"cats": cat2,
"vals": vals2}, index=pd.Index([0, 1, 0, 1]))
res = df.append(df)
tm.assert_frame_equal(exp, res)
# Concat should raise if the two categoricals do not have the same
# categories
cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_wrong_categories = pd.DataFrame({"cats": cat3, "vals": vals3})
def f():
df.append(df_wrong_categories)
self.assertRaises(ValueError, f)
def test_merge(self):
# GH 9426
right = DataFrame({'c': {0: 'a',
1: 'b',
2: 'c',
3: 'd',
4: 'e'},
'd': {0: 'null',
1: 'null',
2: 'null',
3: 'null',
4: 'null'}})
left = DataFrame({'a': {0: 'f',
1: 'f',
2: 'f',
3: 'f',
4: 'f'},
'b': {0: 'g',
1: 'g',
2: 'g',
3: 'g',
4: 'g'}})
df = pd.merge(left, right, how='left', left_on='b', right_on='c')
# object-object
expected = df.copy()
# object-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
result = pd.merge(left, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-object
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
# cat-cat
cright = right.copy()
cright['d'] = cright['d'].astype('category')
cleft = left.copy()
cleft['b'] = cleft['b'].astype('category')
result = pd.merge(cleft, cright, how='left', left_on='b', right_on='c')
tm.assert_frame_equal(result, expected)
def test_repeat(self):
# GH10183
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
exp = pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"])
res = cat.repeat(2)
self.assert_categorical_equal(res, exp)
def test_na_actions(self):
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
vals = ["a", "b", np.nan, "d"]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical([1, 2, 3, 3], categories=[1, 2, 3])
vals2 = ["a", "b", "b", "d"]
df_exp_fill = pd.DataFrame({"cats": cat2, "vals": vals2})
cat3 = pd.Categorical([1, 2, 3], categories=[1, 2, 3])
vals3 = ["a", "b", np.nan]
df_exp_drop_cats = pd.DataFrame({"cats": cat3, "vals": vals3})
cat4 = pd.Categorical([1, 2], categories=[1, 2, 3])
vals4 = ["a", "b"]
df_exp_drop_all = pd.DataFrame({"cats": cat4, "vals": vals4})
# fillna
res = df.fillna(value={"cats": 3, "vals": "b"})
tm.assert_frame_equal(res, df_exp_fill)
def f():
df.fillna(value={"cats": 4, "vals": "c"})
self.assertRaises(ValueError, f)
res = df.fillna(method='pad')
tm.assert_frame_equal(res, df_exp_fill)
res = df.dropna(subset=["cats"])
tm.assert_frame_equal(res, df_exp_drop_cats)
res = df.dropna()
tm.assert_frame_equal(res, df_exp_drop_all)
# make sure that fillna takes both missing values and NA categories
# into account
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
df = pd.DataFrame({"cats": c, "vals": [1, 2, 3]})
df_exp = pd.DataFrame({"cats": Categorical(["a", "b", "a"]),
"vals": [1, 2, 3]})
res = df.fillna("a")
tm.assert_frame_equal(res, df_exp)
def test_astype_to_other(self):
s = self.cat['value_group']
expected = s
tm.assert_series_equal(s.astype('category'), expected)
tm.assert_series_equal(s.astype(com.CategoricalDtype()), expected)
self.assertRaises(ValueError, lambda: s.astype('float64'))
cat = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
exp = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
tm.assert_series_equal(cat.astype('str'), exp)
s2 = Series(Categorical.from_array(['1', '2', '3', '4']))
exp2 = Series([1, 2, 3, 4]).astype(int)
tm.assert_series_equal(s2.astype('int'), exp2)
# object don't sort correctly, so just compare that we have the same
# values
def cmp(a, b):
tm.assert_almost_equal(
np.sort(np.unique(a)), np.sort(np.unique(b)))
expected = Series(np.array(s.values), name='value_group')
cmp(s.astype('object'), expected)
cmp(s.astype(np.object_), expected)
# array conversion
tm.assert_almost_equal(np.array(s), np.array(s.values))
# valid conversion
for valid in [lambda x: x.astype('category'),
lambda x: x.astype(com.CategoricalDtype()),
lambda x: x.astype('object').astype('category'),
lambda x: x.astype('object').astype(
com.CategoricalDtype())
]:
result = valid(s)
tm.assert_series_equal(result, s)
# invalid conversion (these are NOT a dtype)
for invalid in [lambda x: x.astype(pd.Categorical),
lambda x: x.astype('object').astype(pd.Categorical)]:
self.assertRaises(TypeError, lambda: invalid(s))
def test_astype_categorical(self):
cat = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
tm.assert_categorical_equal(cat, cat.astype('category'))
tm.assert_almost_equal(np.array(cat), cat.astype('object'))
self.assertRaises(ValueError, lambda: cat.astype(float))
def test_to_records(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# to record array
# this coerces
result = df.to_records()
expected = np.rec.array([(0, 'a'), (1, 'b'), (2, 'c')],
dtype=[('index', '=i8'), ('0', 'O')])
tm.assert_almost_equal(result, expected)
def test_numeric_like_ops(self):
# numeric ops should not succeed
for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
self.assertRaises(TypeError,
lambda: getattr(self.cat, op)(self.cat))
# reduction ops should not succeed (unless specifically defined, e.g.
# min/max)
s = self.cat['value_group']
for op in ['kurt', 'skew', 'var', 'std', 'mean', 'sum', 'median']:
self.assertRaises(TypeError,
lambda: getattr(s, op)(numeric_only=False))
# mad technically works because it takes always the numeric data
# numpy ops
s = pd.Series(pd.Categorical([1, 2, 3, 4]))
self.assertRaises(TypeError, lambda: np.sum(s))
# numeric ops on a Series
for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
self.assertRaises(TypeError, lambda: getattr(s, op)(2))
# invalid ufunc
self.assertRaises(TypeError, lambda: np.log(s))
def test_cat_tab_completition(self):
# test the tab completion display
ok_for_cat = ['categories', 'codes', 'ordered', 'set_categories',
'add_categories', 'remove_categories',
'rename_categories', 'reorder_categories',
'remove_unused_categories', 'as_ordered', 'as_unordered']
def get_dir(s):
results = [r for r in s.cat.__dir__() if not r.startswith('_')]
return list(sorted(set(results)))
s = Series(list('aabbcde')).astype('category')
results = get_dir(s)
tm.assert_almost_equal(results, list(sorted(set(ok_for_cat))))
def test_cat_accessor_api(self):
# GH 9322
from pandas.core.categorical import CategoricalAccessor
self.assertIs(Series.cat, CategoricalAccessor)
s = Series(list('aabbcde')).astype('category')
self.assertIsInstance(s.cat, CategoricalAccessor)
invalid = Series([1])
with tm.assertRaisesRegexp(AttributeError, "only use .cat accessor"):
invalid.cat
self.assertFalse(hasattr(invalid, 'cat'))
def test_cat_accessor_no_new_attributes(self):
# https://github.com/pydata/pandas/issues/10673
c = Series(list('aabbcde')).astype('category')
with tm.assertRaisesRegexp(AttributeError,
"You cannot add any new attribute"):
c.cat.xlabel = "a"
def test_str_accessor_api_for_categorical(self):
# https://github.com/pydata/pandas/issues/10661
from pandas.core.strings import StringMethods
s = Series(list('aabb'))
s = s + " " + s
c = s.astype('category')
self.assertIsInstance(c.str, StringMethods)
# str functions, which need special arguments
special_func_defs = [
('cat', (list("zyxw"),), {"sep": ","}),
('center', (10,), {}),
('contains', ("a",), {}),
('count', ("a",), {}),
('decode', ("UTF-8",), {}),
('encode', ("UTF-8",), {}),
('endswith', ("a",), {}),
('extract', ("([a-z]*) ",), {}),
('find', ("a",), {}),
('findall', ("a",), {}),
('index', (" ",), {}),
('ljust', (10,), {}),
('match', ("a"), {}), # deprecated...
('normalize', ("NFC",), {}),
('pad', (10,), {}),
('partition', (" ",), {"expand": False}), # not default
('partition', (" ",), {"expand": True}), # default
('repeat', (3,), {}),
('replace', ("a", "z"), {}),
('rfind', ("a",), {}),
('rindex', (" ",), {}),
('rjust', (10,), {}),
('rpartition', (" ",), {"expand": False}), # not default
('rpartition', (" ",), {"expand": True}), # default
('slice', (0, 1), {}),
('slice_replace', (0, 1, "z"), {}),
('split', (" ",), {"expand": False}), # default
('split', (" ",), {"expand": True}), # not default
('startswith', ("a",), {}),
('wrap', (2,), {}),
('zfill', (10,), {})
]
_special_func_names = [f[0] for f in special_func_defs]
# * get, join: they need a individual elements of type lists, but
# we can't make a categorical with lists as individual categories.
# -> `s.str.split(" ").astype("category")` will error!
# * `translate` has different interfaces for py2 vs. py3
_ignore_names = ["get", "join", "translate"]
str_func_names = [f
for f in dir(s.str)
if not (f.startswith("_") or f in _special_func_names
or f in _ignore_names)]
func_defs = [(f, (), {}) for f in str_func_names]
func_defs.extend(special_func_defs)
for func, args, kwargs in func_defs:
res = getattr(c.str, func)(*args, **kwargs)
exp = getattr(s.str, func)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
else:
tm.assert_series_equal(res, exp)
invalid = Series([1, 2, 3]).astype('category')
with tm.assertRaisesRegexp(AttributeError,
"Can only use .str accessor with string"):
invalid.str
self.assertFalse(hasattr(invalid, 'str'))
def test_dt_accessor_api_for_categorical(self):
# https://github.com/pydata/pandas/issues/10661
from pandas.tseries.common import Properties
from pandas.tseries.index import date_range, DatetimeIndex
from pandas.tseries.period import period_range, PeriodIndex
from pandas.tseries.tdi import timedelta_range, TimedeltaIndex
s_dr = Series(date_range('1/1/2015', periods=5, tz="MET"))
c_dr = s_dr.astype("category")
s_pr = Series(period_range('1/1/2015', freq='D', periods=5))
c_pr = s_pr.astype("category")
s_tdr = Series(timedelta_range('1 days', '10 days'))
c_tdr = s_tdr.astype("category")
test_data = [
("Datetime", DatetimeIndex._datetimelike_ops, s_dr, c_dr),
("Period", PeriodIndex._datetimelike_ops, s_pr, c_pr),
("Timedelta", TimedeltaIndex._datetimelike_ops, s_tdr, c_tdr)]
self.assertIsInstance(c_dr.dt, Properties)
special_func_defs = [
('strftime', ("%Y-%m-%d",), {}),
('tz_convert', ("EST",), {}),
('round', ("D",), {}),
('floor', ("D",), {}),
('ceil', ("D",), {}),
# ('tz_localize', ("UTC",), {}),
]
_special_func_names = [f[0] for f in special_func_defs]
# the series is already localized
_ignore_names = ['tz_localize']
for name, attr_names, s, c in test_data:
func_names = [f
for f in dir(s.dt)
if not (f.startswith("_") or f in attr_names or f in
_special_func_names or f in _ignore_names)]
func_defs = [(f, (), {}) for f in func_names]
for f_def in special_func_defs:
if f_def[0] in dir(s.dt):
func_defs.append(f_def)
for func, args, kwargs in func_defs:
res = getattr(c.dt, func)(*args, **kwargs)
exp = getattr(s.dt, func)(*args, **kwargs)
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, pd.Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_numpy_array_equal(res, exp)
for attr in attr_names:
try:
res = getattr(c.dt, attr)
exp = getattr(s.dt, attr)
except Exception as e:
print(name, attr)
raise e
if isinstance(res, pd.DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, pd.Series):
| tm.assert_series_equal(res, exp) | pandas.util.testing.assert_series_equal |
# %% [markdown]
# # FOI-based hospital/ICU beds data analysis
import pandas
import altair
altair.data_transformers.disable_max_rows()
# %% [markdown]
# ## BHSCT FOI data
#
# * weekly totals, beds data is summed (i.e. bed days)
bhsct_beds = pandas.read_excel('../data/BHSCT/10-11330 Available_Occupied Beds & ED Atts 2010 - 2020.xlsx', engine='openpyxl', header=[9,10,11], index_col=0, sheet_name='BEDS')
bhsct_beds = bhsct_beds.stack([0,2]).reset_index()
bhsct_beds.rename(columns={'level_0':'Dates','level_1':'Hospital','Dates':'Care'},inplace=True)
bhsct_beds['start'] = pandas.to_datetime(bhsct_beds['Dates'].str.split(' - ', expand=True)[0], format='%d/%m/%Y')
bhsct_beds = bhsct_beds.groupby(['start','Care','Hospital'])['Available', 'Occupied'].sum().reset_index()
bhsct_beds = bhsct_beds.melt(id_vars=['start','Care','Hospital'])
bhsct_beds['col'] = bhsct_beds['Care'] + '-' + bhsct_beds['variable']
bhsct_beds = bhsct_beds.pivot(index=['start','Hospital'], columns='col', values='value')
bhsct_beds.rename(columns={'ICU/Critical Care-Available': 'Critical Care Available', 'NON ICU/Critical Care-Available': 'General Available', 'ICU/Critical Care-Occupied': 'Critical Care Occupied', 'NON ICU/Critical Care-Occupied': 'General Occupied'}, inplace=True)
bhsct_ae = pandas.read_excel('../data/BHSCT/10-11330 Available_Occupied Beds & ED Atts 2010 - 2020.xlsx', engine='openpyxl', header=6, sheet_name='AE')
bhsct_ae['start'] = pandas.to_datetime(bhsct_ae['Dates'].str.split(' - ', expand=True)[0], format='%d/%m/%Y')
bhsct_ae.drop(columns=['Dates'],inplace=True)
bhsct_ae = bhsct_ae.melt(id_vars=['start']).groupby(['start','variable'])['value'].sum().reset_index()
bhsct_ae.rename(columns={'variable': 'Hospital', 'value': 'ED Attendances'}, inplace=True)
bhsct_ae.set_index(['start', 'Hospital'], inplace=True)
bhsct_weekly = bhsct_beds.merge(bhsct_ae, how='left', left_index=True, right_index=True)
bhsct_weekly.fillna(0, inplace=True)
bhsct_weekly = bhsct_weekly.astype(int)
bhsct_weekly = bhsct_weekly.reset_index().replace({
'MIH': 'Mater Infirmorum Hospital',
'RBHSC': 'Royal Belfast Hospital for Sick Children',
'RVH': 'Royal Victoria Hospital',
'BCH': 'Belfast City Hospital',
'MPH': 'Musgrave Park Hospital'
}).set_index(['start','Hospital'])
# %% [markdown]
# ## NHSCT FOI data
#
# * daily data
nhsct_ae = pandas.read_excel('../data/NHSCT/20210208_PB080121_Response_Attachment_IJ.xlsx', engine='openpyxl', header=6, sheet_name='ED Attendances')
nhsct_ae.dropna(axis='columns', how='all', inplace=True)
nhsct_ae.dropna(axis='index', subset=['Arrival Date'], inplace=True)
nhsct_ae['date'] = pandas.to_datetime(nhsct_ae['Arrival Date'], format='%Y-%m-%d')
nhsct_ae.rename(columns={'Site': 'Hospital'}, inplace=True)
nhsct_ae_daily = nhsct_ae.groupby(['date','Hospital'])['Attendances'].sum()
nhsct_ae_daily.name = 'ED Attendances'
nhsct_icu = pandas.read_excel('../data/NHSCT/20210208_PB080121_Response_Attachment_IJ.xlsx', engine='openpyxl', header=5, sheet_name='ICU Wards')
nhsct_icu['date'] = pandas.to_datetime(nhsct_icu['DATE'], format='%Y-%m-%d')
nhsct_icu.rename(columns={'HOSPITAL': 'Hospital'}, inplace=True)
nhsct_icu_daily = nhsct_icu.groupby(['date','Hospital'])['AVAILABLE BEDS','OCCUPIED BEDS'].sum()
nhsct_icu_daily.rename(columns={'AVAILABLE BEDS': 'Critical Care Available', 'OCCUPIED BEDS': 'Critical Care Occupied'}, inplace=True)
nhsct_daily = nhsct_icu_daily.merge(nhsct_ae_daily, how='left', left_index=True, right_index=True)
nhsct_nonicu = pandas.read_excel('../data/NHSCT/20210208_PB080121_Response_Attachment_IJ.xlsx', engine='openpyxl', header=6, sheet_name='Non ICU Wards')
nhsct_nonicu['date'] = pandas.to_datetime(nhsct_nonicu['DATE'], format='%Y-%m-%d')
nhsct_nonicu.rename(columns={'HOSPITAL': 'Hospital'}, inplace=True)
nhsct_nonicu_daily = nhsct_nonicu.groupby(['date','Hospital'])['AVAILABLE BEDS','OCCUPIED BEDS'].sum()
nhsct_nonicu_daily.rename(columns={'AVAILABLE BEDS': 'General Available', 'OCCUPIED BEDS': 'General Occupied'}, inplace=True)
nhsct_daily = nhsct_daily.merge(nhsct_nonicu_daily, how='left', left_index=True, right_index=True)
nhsct_daily = nhsct_daily.astype(int)
nhsct_daily.reset_index(inplace=True)
nhsct_daily['start'] = nhsct_daily['date'] - pandas.to_timedelta((nhsct_daily['date'].dt.dayofweek+3)%7, unit='d')
nhsct_weekly = nhsct_daily.groupby(['start','Hospital']).sum()
nhsct_weekly = nhsct_weekly.reset_index().replace({'ANTRIM':'Antrim Area Hospital','CAUSEWAY':'Causeway Hospital'}).set_index(['start', 'Hospital'])
# %% [markdown]
# ## SEHSCT FOI data
#
# * weekly data, beds data is summed (i.e. bed days)
sehsct_beds = pandas.read_excel('../data/SEHSCT/Attachment 1 - Occupied & Available Beds and A&E Attendances.xlsx', engine='openpyxl', header=[9,10,11], sheet_name='Beds')
sehsct_beds.dropna(axis='columns', how='all', inplace=True)
sehsct_beds[sehsct_beds.columns[0]] = sehsct_beds[sehsct_beds.columns[0]].replace(to_replace ='27/03/2020 - 31/03/20', value = '2020-03-27 00:00:00')
sehsct_beds['start'] = pandas.to_datetime(sehsct_beds[sehsct_beds.columns[0]], format='%Y-%m-%d 00:00:00')
sehsct_beds.drop(columns=sehsct_beds.columns[0], inplace=True)
sehsct_beds = sehsct_beds.melt(id_vars=[('start','','')])
sehsct_beds.rename(columns={('start','',''): 'start', 'variable_0': 'hospital', 'variable_1': 'state', 'variable_2': 'ward'}, inplace=True)
sehsct_beds['col'] = sehsct_beds['ward'] + '-' + sehsct_beds['state']
sehsct_beds = sehsct_beds.pivot(index=['start', 'hospital'], columns='col', values='value').reset_index(1)
sehsct_beds.rename(columns={'ICU/Critical Care-Available': 'Critical Care Available', 'Non Critical Care-Available': 'General Available', 'ICU/Critical Care-Occupied': 'Critical Care Occupied', 'Non Critical Care-Occupied': 'General Occupied'}, inplace=True)
sehsct_beds.fillna(0, inplace=True)
sehsct_beds.rename(columns={'hospital': 'Hospital'}, inplace=True)
sehsct_weekly = sehsct_beds.groupby(['start','Hospital']).sum()
sehsct_ae = pandas.read_excel('../data/SEHSCT/Attachment 1 - Occupied & Available Beds and A&E Attendances.xlsx', engine='openpyxl', header=7, sheet_name='ED')
sehsct_ae['Week'] = sehsct_ae['Week'].replace(to_replace ='27/03/2020 - 31/03/20', value = '2020-03-27 00:00:00')
sehsct_ae['start'] = pandas.to_datetime(sehsct_ae['Week'], format='%Y-%m-%d 00:00:00')
sehsct_ae.drop(columns='Week', inplace=True)
sehsct_ae = sehsct_ae.melt(id_vars='start', var_name='Hospital').set_index(['start','Hospital'])
sehsct_ae['value'] = sehsct_ae['value'].fillna('0').replace(' ', '0').astype('int')
sehsct_ae = sehsct_ae.groupby(['start','Hospital'])['value'].sum()
sehsct_ae.name = 'ED Attendances'
sehsct_weekly = sehsct_weekly.merge(sehsct_ae, how='left', left_index=True, right_index=True)
sehsct_weekly.fillna(0, inplace=True)
sehsct_weekly = sehsct_weekly.astype(int)
sehsct_weekly = sehsct_weekly.reset_index().replace({
'Ards': 'Ards Hospital',
'Bangor': 'Bangor Hospital',
'Downe': 'Downe Hospital',
'Lagan Valley': 'Lagan Valley Hospital',
'Ulster': 'Ulster Hospital'
}).set_index(['start', 'Hospital'])
# %% [markdown]
# ## SHSCT FOI data
#
# * daily data
shsct_ae = pandas.read_excel('../data/SHSCT/FOI 350 EC MIU ATTENDANCES.xlsx', engine='openpyxl', header=10, sheet_name='DATA')
shsct_ae['date'] = | pandas.to_datetime(shsct_ae['Arrival Date'], format='%Y-%m-%d') | pandas.to_datetime |
import numpy as np
import pandas as pd
import datetime
import random as r
def randate():
start_date = datetime.date(2020, 1, 1)
end_date = datetime.date(2021, 2, 1)
time_between_dates = end_date - start_date
days_between_dates = time_between_dates.days
random_number_of_days = r.randrange(days_between_dates)
random_date = start_date + datetime.timedelta(days=random_number_of_days)
return random_date
donor = | pd.read_csv("donors.csv") | pandas.read_csv |
import logging
from urllib.request import urlopen
import zipfile
import os.path
import io
import pandas as pd
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def data_url():
return "file:///home/orest/PycharmProjects/hdx/hdx-ecb-reference-fx/eurofxref-hist.zip"
return "https://www.ecb.europa.eu/stats/eurofxref/eurofxref-hist.zip"
def raw_data(url=None):
url = data_url() if url is None else url
return urlopen(url).read()
def csv_content(url=None):
"Fetch the raw csv data"
zipdata = raw_data(url)
zf = zipfile.ZipFile(io.BytesIO(zipdata),"r")
name = [n for n in zf.namelist() if os.path.splitext(n)[1].lower()==".csv"][0]
return zf.read(name)
def df_content(url=None,add_base_currency=False,base_currency="EUR"):
"Fetch the data as a dataframe"
return pd.read_csv(io.BytesIO(csv_content(url)))
def add_base_currency(df,base_currency="EUR"):
df.loc[:,base_currency]=1.0
return df
def convert_currency(df, to_currency="USD", base_currency="EUR"):
currency_columns = [c for c in df.columns if c.lower()!="date"]
scale = (df.loc[:,base_currency]/df.loc[:,to_currency]).values.copy()
for c in currency_columns:
df.loc[:,c]*=scale
return df
def add_hxl_tags(df):
hxl = [("#date" if c.lower()=="date" else "#value +"+str(c))for c in df.columns]
hxl_df = | pd.DataFrame([hxl],columns=df.columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Copyright © 2021 by <NAME>. All rights reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Set of operations to transform pandas DataFrames given user specified config.
These operations are ordered according to `ndj_pipeline.model.run_model_training`.
"""
import logging
from pathlib import Path
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split as tts
from ndj_pipeline import utils
def load_data_and_key(model_config: Dict[str, Any]) -> pd.DataFrame:
"""Uses config to load data and assign key.
Args:
model_config: Loaded model experiment config, specifically for
data path and index column(s)
Returns:
Pandas dataframe with optionally assigned index
"""
input_path = Path(*model_config["data_file"])
logging.info(f"Loading parquet from {input_path}")
data = pd.read_parquet(input_path)
unique_key = model_config.get("unique_key")
if unique_key:
data = data.set_index(unique_key)
if not data.index.is_unique:
raise ValueError(f"Config specified key not unique {unique_key}")
return data
def apply_filtering(df: pd.DataFrame, model_config: Dict[str, Any]) -> pd.DataFrame:
"""Filters dataframe according to config specified labels.
Any row containing a specified label is filtered from the data.
Args:
df: Pandas dataframe, must contain `_filter` column with string type.
model_config: Loaded model experiment config, specifically for
list of filter labels.
Returns:
Pandas dataframe with optionally assigned index
Raises:
ValueError: Expects '_filter' column in processed data.
"""
if "_filter" not in df.columns:
raise ValueError("Expects `_filter` column in processed data.")
if not model_config.get("filters", []):
logging.debug("No filter conditions from config, passing")
return df
master_filter = pd.Series(0, index=df.index)
for _filter in model_config.get("filters", []):
master_filter = master_filter | df["_filter"].str.contains(_filter)
master_filter = ~master_filter
logging.info(f"Applying filters {model_config.get('filters', [])} to dataset, pre shape {df.shape}")
df = df.loc[master_filter]
logging.info(f"Post filter shape {df.shape}")
return df
def create_compressed_dummies(df: pd.DataFrame, dummy: str, min_dummy: float) -> Tuple[pd.DataFrame, List[str]]:
"""Creates enhanced feature dummies for a single dataframe column.
Improves on standard pandas.get_dummies by combining low-incidence dummy columns
into a single `_other_combined` column. Dummy columns are named according to
`{col_name}_##_{value}`
Args:
df: Pandas dataframe, must contain specified dummy column.
dummy: string label of DataFrame to create dummy features
min_dummy: minimum percentage incidence to create standalone
dummy feature, otherwise group into `_other_combined`.
Returns:
Dummified Pandas DataFrame for a single feature.
Also returns dummy column names as a list of strings.
"""
# Cleaning steps to avoid weird characters in string
df[dummy] = df[dummy].astype(str)
values = list(df[dummy].unique())
df[dummy] = df[dummy].replace(utils.clean_column_names(values))
raw_dummies = pd.get_dummies(df[dummy], prefix=f"{dummy}_##")
dummies_min_test = raw_dummies.mean() < min_dummy
insufficient = dummies_min_test[dummies_min_test].index
sufficient = dummies_min_test[~dummies_min_test].index
selected_dummies = raw_dummies[sufficient]
selected_dummies[f"{dummy}_##_other_combined"] = (raw_dummies[insufficient].sum(axis=1) > 0).astype(int)
return selected_dummies, selected_dummies.columns.tolist()
def create_dummy_features(df: pd.DataFrame, model_config: Dict[str, Any]) -> Tuple[pd.DataFrame, List[str]]:
"""Create dummy features for each config specified dummy_variable.
Iterates through all specified dummy features and adds to DataFrame.
Args:
df: Pandas dataframe, must contain specified dummy columns.
model_config: Loaded model experiment config, specifically for
list of dummy features.
Returns:
Pandas DataFrame with original data plus all new dummy fields.
Also returns full list of created dummy column names
"""
logging.info("Creating dummy features")
dummy_features = []
min_dummy = model_config.get("min_dummy_percent", 0.001)
for col in model_config.get("dummy_features", []):
logging.debug(f"Creating dummy features for {col}")
_features, _cols = create_compressed_dummies(df, col, min_dummy)
df = df.join(_features)
dummy_features += _cols
return df, dummy_features
def filter_target(df: pd.DataFrame, model_config: Dict[str, Any]) -> pd.DataFrame:
"""Filters Dataframe to ensure no missing data in target variable.
Args:
df: Pandas dataframe, must contain config specified target colunn.
model_config: Loaded model experiment config, specifically for
target column name.
Returns:
Filtered Pandas DataFrame.
"""
logging.info(f"Original data size {df.shape}")
df = df.dropna(subset=[model_config["target"]])
logging.info(f"Dropped target size {df.shape}")
return df
def split(df: pd.DataFrame, model_config: Dict[str, Any]) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Create train test split using model config.
Config may specify a pre-calculated column present in the DataFrame,
or use sklearn style split params. No config results in no split,
with the creation of an empty test dataframe.
Args:
df: Pandas dataframe, must contain a pre-calculated split column
if this is specified in the `model_config`.
model_config: Loaded model experiment config, specifically for
split approach, either a `field` or sklearn style params.
Returns:
Two Pandas DataFrames intended for training, test sets.
"""
split_params = model_config.get("split", {})
split_field = split_params.get("field", None)
if split_field:
logging.info(f"Splitting sample at using existing {split_field} column")
train = df.loc[df[split_field] == 1]
test = df.loc[df[split_field] == 0]
elif split_params:
logging.info("Splitting sample at random")
train, test = tts(df, **split_params)
else:
logging.warning("No test set specified")
train = df
test = pd.DataFrame(columns=df.columns)
logging.info(f"Training size: {train.shape}, Test size: {test.shape}")
return train, test
def get_simple_feature_aggregates(df: pd.DataFrame, model_config: Dict[str, Any]) -> pd.Series:
"""Generates config specified feature aggregates.
These are used to inform missing data replacement strategy. Ideally this is
run on training data, and used to replace train and test data.
Performs validations and raises errors as part of process.
Args:
df: Pandas dataframe. Must include columns specified in the `simple_features`
section of config, and these must be numeric type columns with no infinite values.
model_config: Loaded model experiment config, specifically for
`simple_features` dictionary of column names and aggregation strategy.
Returns:
Pandas Series with specified feature columns: value of aggregation.
Raises:
ValueError: If any features contain infinate values that need fixing.
"""
simple_features_agg = model_config.get("simple_features", {})
# Validate to ensure no features contain infinity
problems = []
for feature in simple_features_agg:
logging.debug(f"{feature} has {str(df[feature].dtype)}")
try:
isinf = df[feature].dropna().apply(np.isinf)
except TypeError:
problems.append(feature)
if isinf.any():
problems.append(feature)
if problems:
raise ValueError(f"One or more features contains -inf/inf, fix these; {', '.join(problems)}")
agg = df.agg(simple_features_agg)
try:
agg = agg.loc[0] # type: ignore
except KeyError:
logging.debug("No 'mode' values detected in aggregations")
aggregates = | pd.Series(agg, name="aggregates") | pandas.Series |
import numpy as np
from scipy.stats import ranksums
import pandas as pd
import csv
file = pd.read_csv('merged-file.txt', header=None, skiprows=0, delim_whitespace=True)
file.columns = ['Freq_allel','dpsnp','sift','polyphen','mutas','muaccessor','fathmm','vest3','CADD','geneName']
df = file.drop_duplicates(keep=False)
################## START ###################
# calculate ranksums for SIFT
sift_df = df[['geneName','sift']]
# extract all non-driver genes | sift_score
genelist = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-all-driver.tsv', header=None, skiprows=0, sep='\t')
genelist.columns = ['geneName']
#
merged_df = pd.merge(
sift_df, genelist,
how='outer', on=['geneName'], indicator=True, suffixes=('_foo','')).query(
'_merge == "left_only"')
merged_df.drop(['geneName','_merge'], axis=1, inplace=True)
# extract all predicted driver genes | sift_score
genelist1 = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-predicteddriver.tsv', header=None, skiprows=0, sep='\t')
genelist1.columns = ['geneName']
merged_df1 = sift_df.merge(genelist1, how = 'inner', on = ['geneName'])
merged_df1.drop(['geneName'], axis=1, inplace=True)
# calculate p-value for ranksums with SIFT
stat, pvalue = ranksums(merged_df, merged_df1)
print(pvalue)
#################### POLYPHEN ###################
# calculate ranksums for POLYPHEN
polyphen_df = df[['geneName','polyphen']]
# extract all non-driver genes | sift_score
genelist = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-all-driver.tsv', header=None, skiprows=0, sep='\t')
genelist.columns = ['geneName']
#
merged_df = pd.merge(
polyphen_df, genelist,
how='outer', on=['geneName'], indicator=True, suffixes=('_foo','')).query(
'_merge == "left_only"')
merged_df.drop(['geneName','_merge'], axis=1, inplace=True)
# extract all predicted driver genes | polyphen_score
genelist1 = | pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-predicteddriver.tsv', header=None, skiprows=0, sep='\t') | pandas.read_csv |
import datetime
import time
import pandas as pd
import numpy as np
import tensorflow as tf
import random as rn
import os
import keras
from keras import Input
from keras.models import Sequential, Model
from keras.layers import concatenate
from keras.layers import Dense
from keras.layers import LSTM, Dropout
from keras.callbacks import EarlyStopping
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
from keras import regularizers
import keras as k
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
# Restricting operation to 1 thread for reproducible results.
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
# Setting the graph-level random seed.
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
def read_data(filename):
df = pd.read_csv(filename, header=None)
df.columns = ['Time', 'PV']
df['Time'] = pd.to_datetime(df["Time"], errors='coerce')
df.index = df["Time"]
df = df.drop(columns=['Time'])
print(df.head())
return df
def pre_processing_data(real_file, hist_file):
df = pd.read_csv(real_file, header=None)
df.columns = ['Time', 'Values']
df['Time'] = pd.to_datetime(df["Time"], errors='coerce')
df.index = df["Time"]
df = df.drop(columns=['Time'])
print("read csv")
print(df.head())
#Changing Frequency of Data to Minutes
df = df.resample('T').mean()
#checking for null values and if any, replacing them with last valid observation
df.isnull().sum()
df.Values.fillna(method='pad', inplace=True)
data = df.values.reshape(-1, 1)
flat_list = [item for sublist in data for item in sublist]
#Quantile Normalization
s = pd.Series(flat_list)
quant = s.quantile(0.75)
Xmin = np.amin(data)
Xmax = quant
X_std = (data - Xmin) / (Xmax - Xmin)
max = 1
min = 0
X_scaled = X_std * (max - min) + min
hist_data = []
start_date_hist = datetime.datetime.strptime("2016-01-01 00:00:00", "%Y-%m-%d %H:%M:%S")
with open(hist_file, "r") as f:
data = f.readlines()
data.insert(0, data[-1])
for v in data:
hist_data.append([start_date_hist.strftime("%Y-%m-%d %H:%M:%S"), float(v)])
start_date_hist += datetime.timedelta(hours=1)
hd = pd.DataFrame(hist_data, columns=['Time', 'Values'])
hd['Time'] = pd.to_datetime(hd["Time"], errors='coerce')
hd.index = hd["Time"]
hd = hd.drop(columns=['Time'])
print(hd.head(20))
data = hd.values.reshape(-1, 1)
Xmin = np.amin(data)
Xmax = np.amax(data)
X_std = (data - Xmin) / (Xmax - Xmin)
max = 1
min = 0
X_scaled_hist = X_std * (max - min) + min
return X_scaled, df, X_scaled_hist, hd
def train_model(realXtrain, histXtrain, Ytrain, model, input_size_real, input_size_hist, hidden_size, batch_size,
output_size, Num_Epochs):
#Creating LSTM's structure
if model is None:
print("Training the model..........")
real_input = Input(batch_shape=(batch_size, input_size_real, 1), name="real")
real_features = LSTM(hidden_size, stateful=True, return_sequences=True)(real_input)
hist_input = Input(batch_shape=(batch_size, input_size_hist, 1), name="hist")
hist_features = LSTM(hidden_size, stateful=True, return_sequences=True)(hist_input)
x = concatenate([real_features, hist_features], axis=1)
x = Dropout(0.3)(x)
x = LSTM(hidden_size, stateful=True)(x)
output_layer = Dense(output_size)(x)
model = Model(inputs=[real_input, hist_input], outputs=output_layer)
model.summary()
adam = k.optimizers.Adam(lr=0.01)
model.compile(loss="mean_squared_error", optimizer=adam,
metrics=["mean_squared_error"])
model.compile(loss="mean_squared_error", optimizer=adam,
metrics=["mean_squared_error"])
# define reduceLROnPlateau and early stopping callback
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.2,
patience=3, min_lr=0.001)
earlystop = EarlyStopping(monitor='loss', min_delta=0.0001, patience=3, verbose=1, mode='auto')
# define the checkpoint
filepath = "model.h5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=0, save_best_only=True, mode='min')
callbacks_list = [reduce_lr,earlystop,checkpoint]
#Training a stateful LSTM
for i in range(Num_Epochs):
print("Epoch {:d}/{:d}".format(i+1, Num_Epochs))
model.fit({"real": realXtrain, "hist": histXtrain}, Ytrain, batch_size=Batch_Size, epochs=1, verbose=2, callbacks=callbacks_list, shuffle=False)
model.reset_states()
return model
def predict_model(model, realXtest, histXtest, Batch_Size):
#Predicting for the test data
start_time = time.clock()
pred = model.predict({"real": realXtest, "hist": histXtest},batch_size=Batch_Size)
end_time = time.clock()
time_taken = end_time - start_time
return pred[0], time_taken
def find_nearest_hour_index(t):
start_date_hist = datetime.datetime.strptime("2016-01-01 00:00:00", "%Y-%m-%d %H:%M:%S")
if t.minute > 30:
t = t.replace(year=2016, minute=0, second=0, microsecond=0) + datetime.timedelta(hours=1)
else:
t = t.replace(year=2016, minute=0, second=0, microsecond=0)
index = int((t - start_date_hist).total_seconds()/3600)
return index
def incremental_algorithm(X_scaled, df, X_scaled_hist, Hist_input_size, look_back, Hidden_Size, Batch_Size, Num_Epochs):
num_features = 1
prediction_horizon = 1440
nb_samples = X_scaled.shape[0] - look_back - prediction_horizon
x_train_reshaped = np.zeros((nb_samples, look_back, num_features))
y_train_reshaped = np.zeros((nb_samples, prediction_horizon))
print("----", X_scaled.shape[0])
print("initial X",x_train_reshaped.shape)
print("initial Y",y_train_reshaped.shape)
train_time = []
prediction_time = []
prediction_error = []
prediction_median = []
prediction_std = []
for i in range(nb_samples):
start_date_index = find_nearest_hour_index(datetime.datetime.strptime(str(df.index[i]), "%Y-%m-%d %H:%M:%S"))
end_date_index = start_date_index + Hist_input_size
histXtrain = X_scaled_hist[start_date_index:end_date_index]
if end_date_index >= len(X_scaled_hist):
histXtrain = histXtrain + X_scaled_hist[0:len(X_scaled_hist)-end_date_index]
histXtrain = np.reshape(histXtrain, (1,) + histXtrain.shape)
print("hist shape "+str(histXtrain.shape))
y_position = i + look_back
y_position_end = y_position + prediction_horizon
x_train_reshaped[i] = X_scaled[i:y_position]
y__re = X_scaled[y_position:y_position_end]
y_train_reshaped[i] = [item for sublist in y__re for item in sublist]
realXtrain = np.reshape(x_train_reshaped[i], (1,) + x_train_reshaped[i].shape)
ytrain = np.reshape(y_train_reshaped[i], (1,) + y_train_reshaped[i].shape)
print("realX train shape : "+str(realXtrain.shape))
start_time = time.clock()
if i == 0:
trained_model = train_model(realXtrain, histXtrain, ytrain, None, look_back, Hist_input_size, Hidden_Size, Batch_Size,
prediction_horizon, Num_Epochs)
else:
trained_model = train_model(realXtrain, histXtrain, ytrain, trained_model, look_back, Hist_input_size, Hidden_Size, Batch_Size,
prediction_horizon, Num_Epochs)
end_time = time.clock()
time_taken = end_time - start_time
predicted_value, predTime = predict_model(trained_model, realXtrain, histXtrain, Batch_Size)
error = abs(ytrain[0] - predicted_value)
error_median = np.median(error)
error_std = np.std(error)
error_mean = np.mean(error)
prediction_median.append(error_median)
prediction_std.append(error_std)
prediction_error.append(error_mean)
train_time.append(time_taken)
prediction_time.append(predTime)
print("The iteration is **** ", i)
return prediction_error, prediction_median, train_time, prediction_time
def post_processing_data(df, prediction_error, prediction_median, train_time, prediction_time):
pred_new_df = df[1440:] # instead of 24 now 1440
new_df_date = pred_new_df[-len(pred_new_df):]
test_act = new_df_date.reset_index()
test_act = test_act.drop('Values', axis =1)
#Adding datetime to prediction error and changing to dataframe
test_predictions_date = pd.DataFrame(prediction_error)
test_predictions_date.columns = ['Values']
test_predictions_date['Time'] = test_act['Time']
#Adding datetime to prediction error median and changing to dataframe
test_predictions_medianError = pd.DataFrame(prediction_median)
test_predictions_medianError.columns = ['Values']
test_predictions_medianError['Time'] = test_act['Time']
print("Average Error is", test_predictions_date['Values'].mean())
#Writing predicitons to a csv file
test_predictions_date.to_csv('MAE_House20.csv')
test_predictions_medianError.to_csv('MedianError_House20.csv')
train_time_date = | pd.DataFrame(train_time) | pandas.DataFrame |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.