prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
---|---|---|
'''
02-Dec-2020
Author: <NAME>
Script to scrape football match result and odds data
Maintains register for daily efficient scraping of data
Source Website: https://www.football-data.co.uk/
'''
from bs4 import BeautifulSoup
import datetime as dt
import io
import json
import numpy as np
import os
import pandas as pd
import requests
import sqlite3
from urllib.request import Request, urlopen
from epl.query import create_conn, query_creator, query_db, table_exists
# define the site root
SITE_ROOT = 'https://www.football-data.co.uk/'
DATA_ROOT = 'https://www.football-data.co.uk/data.php'
FIXTURES_ROOT = 'https://www.football-data.co.uk/matches.php'
DB_NAME = 'footie.sqlite'
DB_NAME_UAT = 'footie_uat.sqlite'
def get_root_dir():
script_location = os.path.realpath(__file__)
root_dir = script_location.split('/')[:-2]
return '/'.join(root_dir)
def get_reg_filename(reg_name):
# get reg location
reg_dir = '/'.join([get_root_dir(), 'data'])
reg_file = '/'.join([reg_dir, reg_name])+'.csv'
return reg_file
def standardise_dates(d):
if len(d) == len('01/02/2000'):
return pd.to_datetime(d, format='%d/%m/%Y')
elif len(d) == len('01/02/20'):
return pd.to_datetime(d, format='%d/%m/%y')
else:
return pd.to_datetime(d)
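# e.g. standardise_dates('03/12/2020') and standardise_dates('03/12/20') both resolve to
# Timestamp('2020-12-03 00:00:00'), matching the day-first formats handled above.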
def get_country_urls():
'''
Returns dict of {country: data_url}
'''
# get the page and parse into soup object
req = Request(DATA_ROOT)
html_page = urlopen(req)
soup = BeautifulSoup(html_page, "lxml")
# get all the links on the data page
links = []
for link in soup.findAll('a'):
l = link.get('href')
if l != None:
links.append(l)
# now we need to get the list of links that link to pages for data
# this involves parsing the page for the country name
# from inspection these pages end '*m.php'
countries = [x[:-5]
for x in links if x[-5:] == 'm.php' and 'https' not in x]
# form the data links and then zip into a dictionary
country_links = [SITE_ROOT+x+'m.php' for x in countries]
country_dict = dict(zip(countries, country_links))
return country_dict
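# Illustrative shape of the returned dict (entries depend on what the site links to), e.g.:
# {'england': 'https://www.football-data.co.uk/englandm.php', ...}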
def get_most_recents(country_dict):
'''
country_dict: dict of {country, data_url} to iterate through
Returns dataframe of Date / Country / MostRecentSeason
'''
# dict of country key to most recent season id value
most_recents = {}
# get today's date
d = dt.date.today()
for country, link in country_dict.items():
# get the page and parse into soup object
req = Request(link)
html_page = urlopen(req)
soup = BeautifulSoup(html_page, "lxml")
# given page is rev chronological i.e. most recent season first
# can grab first valid link and strip season id from that
for url_link in soup.findAll('a'):
# if we haven't already found the most recent season
if country not in most_recents.keys():
# get the link ref e.g. 'mmz4281/2021/E0.csv'
l = url_link.get('href')
# if link not null and is a csv then is valid data link
if l != None and '.csv' in l:
season_id = l.split('/')[1]
# create record for table creation
rec = {'Date': d, 'Country': country,
'MostRecentSeason': season_id}
most_recents[country] = rec
# convert this into a dataframe and return
df_recents = pd.DataFrame(most_recents.values())
return df_recents
def update_most_recents_register(uat=False):
'''
Fetches latest most recents, creates/appends to most_recents_register
'''
# used for updating reg if already exists (re-running on same day)
d = pd.to_datetime(dt.date.today())
# get country urls
country_dict = get_country_urls()
# iterate through to get mostrecent season id per country
df_recents = get_most_recents(country_dict)
# dir structure:
# /scripts
# /this_script.py
# /data
# /most_recents_register.csv
dir_name = 'data'
# get the location of this script and strip the filename and dir
root_dir = get_root_dir()
reg_dir = "/".join([root_dir, dir_name])
# first check if dir 'data' exists - if not create it
if not os.path.exists(reg_dir):
print("database directory does not exist, creating now at: {}".format(reg_dir))
os.makedirs(reg_dir)
# now check if register file exists
if uat:
reg_file = "/".join([reg_dir, 'register_most_recents_uat.csv'])
else:
reg_file = "/".join([reg_dir, 'register_most_recents.csv'])
if not os.path.exists(reg_file):
# doesn't exist - just set down what we have without index
try:
print("register_most_recents.csv doesn't exist yet - writing it")
df_recents.to_csv(reg_file, index=False)
except:
print("Unable to write register_most_recents.csv for first time")
else:
try:
# register exists so we want to load it, append and set
df_curr_recents = pd.read_csv(reg_file, parse_dates=['Date'])
# remove today's date if already present - we must be re-running and want one entry per date
df_curr_recents = df_curr_recents[df_curr_recents.Date != d]
# concat and reindex
df_reg = pd.concat([df_curr_recents, df_recents])
df_reg = df_reg.reset_index(drop=True)
print("Writing down regsiter_most_recents{}.csv post append".format(
('_uat' if uat else '')))
df_reg.to_csv(reg_file, index=False)
except:
print("Unable to append to register_most_recents{}.csv".format(
('_uat' if uat else '')))
return reg_file
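# Sketch of the resulting register (columns follow the rec built in get_most_recents), e.g.:
# Date,Country,MostRecentSeason
# 2020-12-02,england,2021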
def get_register(reg_name):
'''
Returns pd.DataFrame of the register requested
'''
# get reg location
reg_file = get_reg_filename(reg_name)
# if doesn't exist then report
if not os.path.exists(reg_file):
print('Register: {} does not exist yet at loc: {}'.format(reg_name, reg_file))
# return None so callers can branch on the output
return None
df_reg = pd.read_csv(reg_file, parse_dates=True)
for c in ['Date']:
if c in df_reg.columns:
df_reg[c] = pd.to_datetime(df_reg[c])
# convert season col to string as pd auto imports as int
for c in ['MostRecentSeason', 'Season', 'ParseMessage']:
if c in df_reg.columns:
df_reg[c] = df_reg[c].apply(str)
if 'ParseMessage' in df_reg.columns:
df_reg['ParseMessage'] = np.where(
df_reg['ParseMessage'] == 'nan', '', df_reg['ParseMessage'])
return df_reg
def get_all_curr_urls(country_dict):
'''
Returns pd.DataFrame of all the current data links for country_dict fed
'''
# get today's date
d = pd.to_datetime(dt.date.today())
# define array to append link dicts to
csv_links = []
# iterate through countries
for country, link in country_dict.items():
# handle html data into python data structure
req = Request(link)
html_page = urlopen(req)
soup = BeautifulSoup(html_page, "lxml")
# get all the links on the data page
for url_link in soup.findAll('a'):
# get the label e.g. 'Premier League'
label = url_link.contents[0]
# get the link ref e.g. 'mmz4281/2021/E0.csv'
l = url_link.get('href')
# if link not null and is a csv then add it
if l != None and '.csv' in l:
# then construct the rec and append
rec = {'Date': d,
'Country': country,
'DivName': label,
'Div': l.split('/')[-1][:-4],
'Season': l.split('/')[-2],
'url': SITE_ROOT+l}
csv_links.append(rec)
df_links = pd.DataFrame(csv_links)
return df_links
def get_fixtures_link():
# get today's date
d = pd.to_datetime(dt.date.today())
req = Request(FIXTURES_ROOT)
html_page = urlopen(req)
soup = BeautifulSoup(html_page, "lxml")
# find the fixtures csv link
for url_link in soup.findAll('a'):
# get the link ref e.g. 'mmz4281/2021/E0.csv'
l = url_link.get('href')
# if link not null and is a csv then add it
if l == 'fixtures.csv':
rec = {'Date': d,
'url': SITE_ROOT+l}
return pd.DataFrame([rec])
def create_match_register(uat=False):
'''
Returns pd.DataFrame for latest register for matches from football-data.co.uk
'''
# get country dicts and recent urls
country_dict = get_country_urls()
df_links = get_all_curr_urls(country_dict)
# get recents register
if uat:
df_recents = get_register('register_most_recents_uat')
else:
df_recents = get_register('register_most_recents')
# create col to denote most recent season
df_links = pd.merge(left=df_links, right=df_recents,
how='left', on=['Date', 'Country'])
df_links['IsMostRecent'] = (
df_links['Season'] == df_links['MostRecentSeason'])
df_links = df_links.drop(columns=['MostRecentSeason'])
# update status as 'New' and empty parse message (for now)
df_links['Status'] = 'New'
df_links['ParseMessage'] = ''
# now grab the existing register if it exists
if uat:
df_reg = get_register('register_matches_uat')
reg_file = get_reg_filename('register_matches_uat')
else:
df_reg = get_register('register_matches')
reg_file = get_reg_filename('register_matches')
print(reg_file)
if df_reg is None:
# if none then does not exist - so we create and set what we have
print('No reg detected - creating new reg in: {}'.format(reg_file))
df_links.to_csv(reg_file, index=False)
df_new_reg = df_links.copy()
else:
# then reg exists and we need to update it
# keep links if they are most recent on this date only
d = df_links.Date.max()
df_old_recs = df_reg[(df_reg.IsMostRecent) & (
df_reg.Date == d) & (df_reg.Status == 'Processed')]
# now we only keep most recents if they aren't in df_old_recs
df_recs = df_links[df_links['IsMostRecent']]
df_recs = df_recs[~df_recs.url.isin(df_old_recs.url)]
# we also keep those which have never appeared before based on url
df_new = df_links[~df_links.url.isin(df_reg.url)]
# now we concat them and remove any potential dupes
df_new_reg = pd.concat([df_recs, df_new]).drop_duplicates()
df_new_reg = pd.concat([df_reg, df_new_reg])
print('Setting new reg down into: {}'.format(reg_file))
df_new_reg.to_csv(reg_file, index=False)
return df_new_reg
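# Sketch of a register_matches row (columns as constructed above), e.g.:
# Date, Country, DivName, Div, Season, url, IsMostRecent, Status, ParseMessage
# 2020-12-02, england, Premier League, E0, 2021, https://www.football-data.co.uk/mmz4281/2021/E0.csv, True, New, ''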
def update_fixture_register(uat=False):
# get fixture csv link in df format
df_links = get_fixtures_link()
# add parse status
df_links['Status'] = 'New'
df_links['ParseMessage'] = ''
# get current reg (if it exists)
if uat:
df_reg = get_register('register_fixtures_uat')
reg_file = get_reg_filename('register_fixtures_uat')
else:
df_reg = get_register('register_fixtures')
reg_file = get_reg_filename('register_fixtures')
print(reg_file)
if df_reg is None:
# if none then does not exist - so we create and set what we have
print('No reg detected - creating new reg in: {}'.format(reg_file))
df_links.to_csv(reg_file, index=False)
df_new_reg = df_links.copy()
else:
# then reg exists and we need to update it
# simple here - just concat on the end
d = pd.to_datetime(dt.date.today())
if len(df_reg[(df_reg.Date == d) & (df_reg.Status == 'Processed')]) > 0:
# then we have already processed today, do nothing
df_new_reg = df_reg.copy()
else:
df_new_reg = pd.concat([df_reg, df_links])
df_new_reg = df_new_reg.drop_duplicates()
print('Setting new reg down into: {}'.format(reg_file))
df_new_reg.to_csv(reg_file, index=False)
return df_new_reg
def get_new_files(table_name, uat=False):
'''
Returns pd.DataFrame of files to be parsed for a given register
'''
reg_name = 'register_' + table_name + ('_uat' if uat else '')
df_reg = get_register(reg_name)
if df_reg is None:
# then no files so end here
print('No register exists yet for table: {}'.format(table_name))
return None
else:
# register exists - get non-processed files - both 'New' and 'Error'
df_new = df_reg[df_reg.Status != 'Processed']
return df_new
def fetch_file(url):
# query it
print('Fetching {}'.format(url))
res = requests.get(url)
# if good response, extract
if res.status_code == 200:
output = res.content
df = pd.read_csv(io.StringIO(output.decode('utf-8', errors='ignore')),
parse_dates=True, error_bad_lines=False, warn_bad_lines=False)
return df
else:
print('Bad response code from {} for {}'.format(SITE_ROOT, url))
return None
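# Note: error_bad_lines/warn_bad_lines above were deprecated in pandas 1.3 and removed in
# pandas 2.0; on newer versions the equivalent keyword is on_bad_lines='skip'.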
def fetch_new_files(df_new):
'''
Accept pd.DataFrame of new files, fetches them and cleans them
'''
# add a new col which will be the resulting dataframe
df_res = df_new.copy()
df_res['Results'] = None
for index, row in df_new.iterrows():
try:
df = fetch_file(row.url)
# add new cols and keep col order if they are in reg (not in fixture reg)
if any([x in row.index for x in ['Country', 'DivName', 'Season']]):
cols = list(df.columns)
df['Country'] = row.Country
df['League'] = row.DivName
df['Season'] = row.Season
df = df[['Country', 'League', 'Season'] + cols]
# remove cols with more than 99% nulls
df = df[df.columns[df.isnull().mean() < 0.99]]
# only include if all teams are not null
df = df[(~df.HomeTeam.isna()) & (~df.AwayTeam.isna())]
df_res.at[index, 'Results'] = df
df_res.at[index, 'Status'] = 'Processed'
except:
# if can't then log and update the reg
print('Unable to fetch file: {}'.format(row.url))
df_res.at[index, 'Results'] = None
df_res.at[index, 'Status'] = 'Error'
df_res.at[index, 'ParseMessage'] = 'ErrorOnRequest'
return df_res
def update_register(reg_name, new_reg):
'''
Queries current register and updates with new data
'''
# get current reg
reg_file = get_reg_filename(reg_name)
reg = get_register(reg_name)
reg.update(new_reg)
reg.to_csv(reg_file, index=False)
return reg
def handle_initial_match_db(df_new, uat=False):
'''
Function to handle initial creation of matches table in sqlite
Handled separately in order to determine which columns to keep
All later updates must use columns that are a strict subset of the existing db's columns
i.e. new columns can't be created - an acceptable restriction as the data feed is relatively mature
'''
# concat all the res dfs together
dfs = df_new[df_new.Status == 'Processed']
df = pd.concat(list(dfs['Results']))
# remove col if >99% of col are NaNs in combined df
df = df[df.columns[df.isnull().mean() < 0.99]]
df = clean_data(df)
# set down data into db
try:
conn = create_conn(uat=uat)
print('Connection established - setting down initial db')
df.to_sql('matches', conn, index=False)
conn.close()
df_new['Status'] = 'Processed'
df_new['ParseMessage'] = ''
except:
print('Unable to set down initial db')
df_new['Status'] = 'Error'
df_new['ParseMessage'] = 'Failed at initial setdown into sqlite'
# update the register
if uat:
reg_name = 'register_matches_uat'
else:
reg_name = 'register_matches'
new_reg = update_register(reg_name, df_new.drop(columns=['Results']))
return df_new
def delete_table_rows(table_name, wc=None, uat=False):
# create delete query
sel_query = query_creator(table_name, wc=wc)
del_query = sel_query.replace('SELECT *', 'DELETE')
# establish conn and execute query
try:
conn = create_conn(uat=uat)
cur = conn.cursor()
cur.execute(del_query)
conn.commit()
conn.close()
return True
except:
print('Failed to execute delete query: {}'.format(del_query))
return False
def clean_data(df):
# remove any cols with 'Unnamed' in them
df = df[[x for x in df.columns if 'Unnamed' not in x]]
# remove any rows if Div is na as garbage (as will all other cols)
df = df[~df.Div.isna()]
# standardise dates
df['Date'] = df.Date.apply(lambda x: standardise_dates(x))
return df
def handle_update_match_db(df_new, uat=False):
# for each new entry
for index, row in df_new.iterrows():
# create new data
new_data = row['Results']
# create where clause that uniquely defines a data file
wc = {'Country': ['=', row['Country']],
'Div': ['=', row['Div']],
'Season': ['=', row['Season']]}
sel_query = query_creator('matches', wc=wc)
# query existing data - to get cols
old_data = query_db(sel_query, uat=uat)
# only keep existing cols
new_data = new_data[[
x for x in new_data.columns if x in old_data.columns]]
new_data = clean_data(new_data)
# now we delete the old data and insert the new data
old_del = delete_table_rows('matches', wc=wc, uat=uat)
if not old_del:
df_new.at[index, 'Status'] = 'Error'
df_new.at[index, 'ParseMessage'] = 'Failed to delete old data'
else:
try:
conn = create_conn(uat=uat)
new_data.to_sql('matches', conn,
if_exists='append', index=False)
conn.close()
df_new.at[index, 'Status'] = 'Processed'
df_new.at[index, 'ParseMessage'] = ''
except:
df_new.at[index, 'Status'] = 'Error'
df_new.at[index, 'ParseMessage'] = 'Deleted old data, failed to insert new data'
# update the register
if uat:
reg_name = 'register_matches_uat'
else:
reg_name = 'register_matches'
new_reg = update_register(reg_name, df_new.drop(columns=['Results']))
return df_new
def process_match_data(uat=False, div_list=None, season_list=None):
'''
Function that:
- Gets most recent season and creates register
- Gets urls and creates/updates file reg
- Fetches new files and processes
- If no db currently exists then joins all together and sets
- If db exists then incrementally updates and removes new cols
- Once done then update register to be processed
'''
# update most recents register
update_most_recents_register(uat=uat)
# update matches register and return it
create_match_register(uat=uat)
# restriction possibility for testing
if (div_list is not None) or (season_list is not None):
reg_name = 'register_matches'+('_uat' if uat else '')
reg_file = get_reg_filename(reg_name)
reg = get_register(reg_name)
if (div_list is not None) & (season_list is not None):
reg = reg[(reg.Div.isin(div_list)) & (
reg.Season.isin(season_list))]
elif div_list is not None:
reg = reg[reg.Div.isin(div_list)]
elif season_list is not None:
reg = reg[reg.Season.isin(season_list)]
reg.to_csv(reg_file, index=False)
# get new files from reg
df_new = get_new_files('matches', uat=uat)
if df_new is None or len(df_new) == 0:
# no files to process
print('No new files to process for matches table')
return None
# fetch new files if possible
df_new = fetch_new_files(df_new)
# handle new files depending on whether or not db exists
if uat:
db_file = '/'.join([get_root_dir(), 'data', DB_NAME_UAT])
else:
db_file = '/'.join([get_root_dir(), 'data', DB_NAME])
if (not table_exists('matches')) or (not os.path.exists(db_file)):
# then db and table do not exist and this is the initial set
print("Database doesn't exist - going to create it now")
res = handle_initial_match_db(df_new, uat=uat)
else:
# db exists and we need to update the data in the table
res = handle_update_match_db(df_new, uat=uat)
return res
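# A daily run might look like the following (sketch only - the __main__ guard and call
# order are assumptions, not part of the original script):
# if __name__ == '__main__':
#     process_match_data()       # results + odds into the 'matches' table
#     update_fixture_register()  # refresh the upcoming-fixtures register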
def clean_and_join_fixture_data(df_new, uat=False):
# add country, league and season data
divs = query_db('SELECT Div, Country, League from matches GROUP BY Div',
uat=uat).sort_values('Country')
# assume fixtures are most recent season
seasons = get_register('register_most_recents' + ('_uat' if uat else ''))
seasons = seasons[seasons.Date == seasons.Date.max()].drop(columns='Date')
seasons = seasons.rename(columns={'MostRecentSeason': 'Season'})
# clean the data
res = df_new['Results'].values[0]
res = clean_data(res)
cols = list(res.columns)
res =
| pd.merge(left=res, right=divs, how='left', on=['Div']) | pandas.merge |
import unittest
from pydre import project
from pydre import core
from pydre import filters
from pydre import metrics
import os
import glob
import contextlib
import io
from tests.sample_pydre import project as samplePD
from tests.sample_pydre import core as c
import pandas
import numpy as np
from datetime import timedelta
import logging
import sys
class WritableObject:
def __init__(self):
self.content = []
def write(self, string):
self.content.append(string)
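# Sketch of how WritableObject can capture console output in a test (assumed usage -
# it is not exercised by the cases below):
# capture = WritableObject()
# with contextlib.redirect_stdout(capture):
#     print("hello")
# self.assertIn("hello", ''.join(capture.content))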
# Test cases of following functions are not included:
# Reason: unmaintained
# in common.py:
# tbiReaction()
# tailgatingTime() & tailgatingPercentage()
# ecoCar()
# gazeNHTSA()
#
# Reason: incomplete
# in common.py:
# findFirstTimeOutside()
# brakeJerk()
class TestPydre(unittest.TestCase):
ac_diff = 0.000001
# the acceptable difference between expected & actual results when testing scipy functions
def setUp(self):
# attributes set here (self.whatever) are accessible throughout the test class; setUp runs before each test method
self.projectlist = ["honda.json"]
self.datalist = ["Speedbump_Sub_8_Drive_1.dat", "ColTest_Sub_10_Drive_1.dat"]
self.zero = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
funcName = ' [ ' + self._testMethodName + ' ] ' # the name of test function that will be executed right after this setUp()
print(' ')
print (funcName.center(80,'#'))
print(' ')
def tearDown(self):
print(' ')
print('[ END ]'.center(80, '#'))
print(' ')
# ----- Helper Methods -----
def projectfileselect(self, index: int):
projectfile = self.projectlist[index]
fullpath = os.path.join("tests/test_projectfiles/", projectfile)
return fullpath
def datafileselect(self, index: int):
datafile = self.datalist[index]
fullpath = glob.glob(os.path.join(os.getcwd(), "tests/test_datfiles/", datafile))
return fullpath
def secs_to_timedelta(self, secs):
return timedelta(weeks=0, days=0, hours=0, minutes=0, seconds=secs)
def compare_cols(self, result_df, expected_df, cols):
result = True
for names in cols:
result = result and result_df[names].equals(expected_df[names])
if not result:
print(names)
print(result_df[names])
print("===")
print(expected_df[names])
return False
return result
# convert a drivedata object to a str
def dd_to_str(self, drivedata: core.DriveData):
output = ""
output += str(drivedata.PartID)
output += str(drivedata.DriveID)
output += str(drivedata.roi)
output += str(drivedata.data)
output += str(drivedata.sourcefilename)
return output
# ----- Test Cases -----
def test_datafile_exist(self):
datafiles = self.datafileselect(0)
self.assertFalse(0 == len(datafiles))
for f in datafiles:
self.assertTrue(os.path.isfile(f))
def test_reftest(self):
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
results = p.run(self.datafileselect(0))
results.Subject.astype('int64')
sample_p = samplePD.Project(desiredproj)
expected_results = (sample_p.run(self.datafileselect(0)))
self.assertTrue(self.compare_cols(results, expected_results, ['ROI', 'getTaskNum']))
def test_columnMatchException_excode(self):
f = io.StringIO()
with self.assertRaises(SystemExit) as cm:
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
result = p.run(self.datafileselect(1))
self.assertEqual(cm.exception.code, 1)
def test_columnMatchException_message(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184]}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
handler = logging.FileHandler(filename='tests\\temp.log')
filters.logger.addHandler(handler)
with self.assertRaises(core.ColumnsMatchError):
result = filters.smoothGazeData(data_object)
expected_console_output = "Can't find needed columns {'FILTERED_GAZE_OBJ_NAME'} in data file ['test_file3.csv'] | function: smoothGazeData"
temp_log = open('tests\\temp.log')
msg_list = temp_log.readlines()
msg = ' '.join(msg_list)
filters.logger.removeHandler(handler)
#self.assertIn(expected_console_output, msg)
# Isolated test case: there is no longer a sliceByTime function in pydre.core
def test_core_sliceByTime_1(self):
d = {'col1': [1, 2, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 3, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1 7\n1 2 8\n2 3 9"
self.assertEqual(result, expected_result)
# Isolated test case: there is no longer a sliceByTime function in pydre.core
def test_core_sliceByTime_2(self):
d = {'col1': [1, 1.1, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 2, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1.0 7\n1 1.1 8"
self.assertEqual(result, expected_result)
def test_core_mergeBySpace(self):
d1 = {'SimTime': [1, 2], 'XPos': [1, 3], 'YPos': [4, 3]}
df1 =
| pandas.DataFrame(data=d1) | pandas.DataFrame |
import datetime
import gzip
import shutil
import urllib.request
import xml.etree.ElementTree as et
from pathlib import Path
import warnings
import pandas as pd
import numpy as np
from .utils import log_message as print
from . import urlcleaning as uclean
from . import waybackinterface as wbi
from . import utils as utils
from .. import data_io
GFM_SITEMAP_URL = "https://www.gofundme.com/sitemap.xml"
SITEMAP_PARENTDIR = data_io.input_raw
class URLCollector(object):
def __init__(
self,
gfm_sitemap_url=GFM_SITEMAP_URL,
sitemap_parentdir=None,
verbose=True,
use_tqdm=True,
):
self.gfm_sitemap_url = gfm_sitemap_url
self.sitemap_store = sitemap_parentdir
self.verbose = verbose
self.use_tqdm = use_tqdm
@property
def use_tqdm(self):
return self._use_tqdm
@use_tqdm.setter
def use_tqdm(self, mode):
if mode:
try:
from tqdm import tqdm
self.tqdm = tqdm
self._use_tqdm = mode
except ModuleNotFoundError:
print("tqdm not found")
self.use_tqdm = False
except Exception as err:
print(err)
self.use_tqdm = False
else:
self._use_tqdm = False
@property
def sitemap_store(self):
return self._sitemap_store
@sitemap_store.setter
def sitemap_store(self, dir):
if dir is None:
dir = SITEMAP_PARENTDIR
dir = utils.verify_pathtype(dir)
if not dir.exists():
warnings.warn("{} not exists, will create directory".format(dir))
dir.mkdir()
self._sitemap_store = dir
def log(self, *args):
if self.verbose:
print(*args)
def _load_sitemap(self, sitemap_directory):
""" returns today's sitemap (downloads it if it doesn't exist) """
self.log("[URLCollector] loading site data from " + self.gfm_sitemap_url)
sitemap_path = sitemap_directory / "sitemap.xml"
if not sitemap_path.exists():
self.log(
"[URLCollector] time map is out of date;"
+ " retreiving today's data and saving to "
+ str(sitemap_directory)
)
urllib.request.urlretrieve(self.gfm_sitemap_url, str(sitemap_path))
else:
self.log("[URLCollector] site map is up-to-date; no need to download")
return sitemap_directory, sitemap_path
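# For reference, the sitemap at GFM_SITEMAP_URL is a standard sitemaps.org index, roughly:
# <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
#   <sitemap><loc>...sitemap_*.xml.gz</loc></sitemap>
# </sitemapindex>
# which is why _unpack_sitemap below looks up namespace-qualified <loc> elements.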
def _unpack_sitemap(self, sitemap_directory):
dpath, fpath = self._load_sitemap(sitemap_directory)
gzs = []
tree = et.parse(fpath)
root = tree.getroot()
savepath = dpath / "packets"
if not savepath.exists():
self.log("[URLCollector] loading packets...")
savepath.mkdir()
if self.use_tqdm:
root = self.tqdm(root, total=len(root))
for sitemap in root:
sitemap_iter = sitemap.findall(
"{http://www.sitemaps.org/schemas/sitemap/0.9}loc"
)
for gz in sitemap_iter:
filename = gz.text.split("/")[-1]
if filename == "sitemap_marketing.xml.gz":
continue
if self.use_tqdm:
root.set_description("retrieved " + filename)
else:
self.log("---retrieved " + filename)
filepath = savepath / filename
urllib.request.urlretrieve(gz.text, str(filepath))
gzs.append(filepath)
else:
self.log("[URLCollector] site map packets already exist; no need to unpack.")
gzs = [p for p in savepath.glob("*.gz") if p.is_file()]
return gzs
def _unzip_all_gzs(self, gzs):
unzipped = [(gz.parent / gz.stem) if gz.suffix == ".gz" else gz for gz in gzs]
iterr = zip(gzs, unzipped)
if self.use_tqdm:
iterr = self.tqdm(iterr, total=len(unzipped))
for gz, uz in iterr:
if self.use_tqdm:
iterr.set_description("[URLCollector] unzipping " + (gz.stem + gz.suffix))
if gz.suffix != ".gz":
continue
with gzip.open(str(gz), "r") as f_in, uz.open(mode="wb") as f_out:
shutil.copyfileobj(f_in, f_out)
# os.remove(gz)
return unzipped
def _read_urls_from_xml_file(self, filepath):
"""This parses the xml files to get the available urls.
Checks for the subset that have the word 'cancer' in their url.
This is not a great filter, but works for the time being. """
if not self.use_tqdm:
self.log("parsing " + str(filepath.stem))
root = et.parse(filepath).getroot()
urls = [
loc.text
for _url in root
for loc in _url.findall("{http://www.sitemaps.org/schemas/sitemap/0.9}loc")
]
df =
| pd.Series(urls) | pandas.Series |
#%%
from pymaid_creds import url, name, password, token
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import numpy.random as random
import gzip
import csv
import connectome_tools.celltype as ct
import connectome_tools.process_matrix as pm
import connectome_tools.process_graph as pg
from tqdm import tqdm
from joblib import Parallel, delayed
import networkx as nx
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# font settings
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
# load pairs
pairs = pm.Promat.get_pairs()
ipsi_pair_ids = pm.Promat.load_pairs_from_annotation('mw ipsilateral axon', pairs, return_type='all_pair_ids')
bilateral_pair_ids = pm.Promat.load_pairs_from_annotation('mw bilateral axon', pairs, return_type='all_pair_ids')
contra_pair_ids = pm.Promat.load_pairs_from_annotation('mw contralateral axon', pairs, return_type='all_pair_ids')
dVNC_pair_ids = pm.Promat.load_pairs_from_annotation('mw dVNC', pairs, return_type='all_pair_ids')
dSEZ_pair_ids = pm.Promat.load_pairs_from_annotation('mw dSEZ', pairs, return_type='all_pair_ids')
RGN_pair_ids = pm.Promat.load_pairs_from_annotation('mw RGN', pairs, return_type='all_pair_ids')
sensories_pair_ids = [pm.Promat.load_pairs_from_annotation(x, pairs, return_type='all_pair_ids') for x in pymaid.get_annotated('mw brain inputs').name]
all_sensories = [x for sublist in sensories_pair_ids for x in sublist]
# %%
# EXPERIMENT 1: removing edges from contralateral and bilateral neurons -> effect on path length?
# load previously generated paths
all_edges_combined = pd.read_csv('data/edges_threshold/ad_all-paired-edges.csv', index_col=0)
# iterations for random edge removal as control
n_init = 40
# excise edges and generate graphs
e_contra_contra, e_contra_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(contra_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_bi_contra, e_bi_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_bi_ipsi, e_bi_ipsi_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids, all_sensories), 'ipsilateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
e_all_contra, e_all_contra_control = pg.Prograph.excise_edge_experiment(all_edges_combined, np.setdiff1d(bilateral_pair_ids + contra_pair_ids, all_sensories), 'contralateral', n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
# %%
# this chunk is incomplete
# write all graphs to graphml
# read all graph from graphml
graph = pg.Analyze_Nx_G(all_edges_combined, graph_type='directed')
shuffled_graphs = Parallel(n_jobs=-1)(delayed(nx.readwrite.graphml.read_graphml)(f'interhemisphere/csv/shuffled_graphs/iteration-{i}.graphml', node_type=int, edge_key_type=str) for i in tqdm(range(n_init)))
shuffled_graphs = [pg.Analyze_Nx_G(edges=x.edges, graph=x) for x in shuffled_graphs]
# %%
# generate and save paths
cutoff=5
# generate and save paths for experimental
save_path = [f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi',
f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra']
experimental = [e_contra_contra, e_bi_contra, e_bi_ipsi, e_all_contra]
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(experimental[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=save_path[i]) for i in tqdm((range(len(experimental)))))
# generate and save paths for controls
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_contra_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_bi_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_bi_ipsi_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra_CONTROL-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(e_all_contra_control[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
# %%
# analyze paths: total and count per # hops
def process_paths(excise_paths, control_paths, edges_removed):
excise_count = len(excise_paths)
control_counts = [len(x) for x in control_paths]
path_counts_data = []
for row in zip(control_counts, [f'control-{edges_removed}']*len(control_counts)):
path_counts_data.append(row)
path_counts_data.append([excise_count, f'excised-{edges_removed}'])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/processed/excised_graph_{edges_removed}.csv')
# count per # hops
excise_path_counts = [len(x) for x in excise_paths]
control_path_counts = [[len(x) for x in path] for path in control_paths]
path_counts_length_data = []
for i, path_length in enumerate(control_path_counts):
for row in zip(path_length, [f'control-{edges_removed}']*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for row in zip(excise_path_counts, [f'excised-{edges_removed}']*len(excise_path_counts), [0]*len(excise_path_counts)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/processed/excised_graph_{edges_removed}_path_lengths.csv')
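# The 'value' column / groupby-count trick above, shown on a toy frame (plain pandas,
# unrelated to the connectome data):
# demo = pd.DataFrame({'condition': ['a', 'a', 'b'], 'N': [0, 0, 0], 'path_length': [2, 2, 3]})
# demo['value'] = 1
# demo.groupby(['condition', 'N', 'path_length']).count()
# # -> 2 paths of length 2 under 'a', 1 path of length 3 under 'b'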
cutoff=5
n_init = 40
excise_Cc_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra.csv.gz')
control_Cc_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-contra_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Cc_paths, control_Cc_paths, edges_removed='Contra-contra')
excise_Bc_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra.csv.gz')
control_Bc_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-contra_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Bc_paths, control_Bc_paths, edges_removed='Bilateral-contra')
excise_Bi_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi.csv.gz')
control_Bi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-bilateral-ipsi_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Bi_paths, control_Bi_paths, edges_removed='Bilateral-ipsi')
excise_Ac_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra.csv.gz')
control_Ac_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/excised-graph_all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-all-contra_CONTROL-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths(excise_Ac_paths, control_Ac_paths, edges_removed='All-contra')
# wildtype paths
graph_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/all_paths_sens-to-dVNC_cutoff{cutoff}.csv.gz')
excise_count = len(graph_paths)
path_counts_data = []
path_counts_data.append([excise_count, f'wildtype'])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/processed/wildtype.csv')
path_counts_length_data = []
excise_path_counts = [len(x) for x in graph_paths]
for row in zip(excise_path_counts, [f'wildtype']*len(excise_path_counts), [0]*len(excise_path_counts)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/processed/wildtype_path_lengths.csv')
# %%
##########
# EXPERIMENT 2: removing random number of ipsi vs contra edges, effect on paths
#
# load previously generated paths
all_edges_combined = pd.read_csv('data/edges_threshold/ad_all-paired-edges.csv', index_col=0)
# iterations for random edge removal as control
n_init = 8
# excise edges and generate graphs
random_ipsi500, random_contra500 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 500, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
random_ipsi1000, random_contra1000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 1000, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
random_ipsi2000, random_contra2000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 2000, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
random_ipsi4000, random_contra4000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined, 4000, n_init, seed=0, exclude_nodes=(all_sensories+dVNC_pair_ids))
# %%
# generate and save paths
cutoff=5
# generate and save paths for controls
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-500-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-500-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra500[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-1000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-1000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra1000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-2000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-2000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra2000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-4000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-4000-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra4000[i].G, all_sensories, dVNC_pair_ids, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
# %%
# analyze paths: total and count per # hops
def process_paths_ipsi_contra(ipsi_paths, contra_paths, count_removed):
ipsi_counts = [len(x) for x in ipsi_paths]
contra_counts = [len(x) for x in contra_paths]
path_counts_data = []
for row in zip(ipsi_counts, [f'ipsi-{count_removed}']*len(ipsi_counts)):
path_counts_data.append(row)
for row in zip(contra_counts, [f'contra-{count_removed}']*len(contra_counts)):
path_counts_data.append(row)
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges/processed/excised_graph_random-ipsi-contra_{count_removed}-removed.csv')
# count per # hops
ipsi_path_counts = [[len(x) for x in path] for path in ipsi_paths]
contra_path_counts = [[len(x) for x in path] for path in contra_paths]
path_counts_length_data = []
for i, path_length in enumerate(ipsi_path_counts):
for row in zip(path_length, [f'ipsi-{count_removed}']*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(contra_path_counts):
for row in zip(path_length, [f'contra-{count_removed}']*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges/processed/excised_graph_random-ipsi-contra_{count_removed}-removed_path-lengths.csv')
cutoff=5
n_init = 8
count_removed = 500
random_ipsi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_paths, random_contra_paths, count_removed)
count_removed = 1000
random_ipsi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_paths, random_contra_paths, count_removed)
count_removed = 2000
random_ipsi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_paths, random_contra_paths, count_removed)
count_removed = 4000
random_ipsi_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_paths, random_contra_paths, count_removed)
# %%
##########
# EXPERIMENT 3: removing random number of ipsi vs contra edges, effect on paths on just one side of brain
#
# load previously generated paths
all_edges_combined_split = pd.read_csv('data/edges_threshold/pairwise-threshold_ad_all-edges.csv', index_col=0)
left = pm.Promat.get_hemis('left')
right = pm.Promat.get_hemis('right')
# iterations for random edge removal as control
n_init = 8
dVNC = pymaid.get_skids_by_annotation('mw dVNC')
dVNC_left = list(np.intersect1d(dVNC, left))
dVNC_right = list(np.intersect1d(dVNC, right))
all_sensories = ct.Celltype_Analyzer.get_skids_from_meta_meta_annotation('mw brain sensory modalities')
all_sensories_left = list(np.intersect1d(all_sensories, left))
all_sensories_right = list(np.intersect1d(all_sensories, right))
# generate wildtype graph
split_graph = pg.Analyze_Nx_G(all_edges_combined_split, graph_type='directed', split_pairs=True)
# excise edges and generate graphs
random_ipsi500_left, random_ipsi500_right, random_contra500 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 500, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi1000_left, random_ipsi1000_right, random_contra1000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 1000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi2000_left, random_ipsi2000_right, random_contra2000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 2000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi4000_left, random_ipsi4000_right, random_contra4000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 4000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi8000_left, random_ipsi8000_right, random_contra8000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 8000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
#random_ipsi8764_left, random_ipsi8764_right, random_contra8764 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 8764, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
# %%
# generate and save paths
cutoff=5
# generate wildtype paths
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_wildtype'
pg.Prograph.generate_save_simple_paths(split_graph.G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=save_path)
# generate and save paths
count = 500
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra500[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 1000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra1000[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 2000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra2000[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 4000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra4000[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 8000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8000_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8000_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra8000[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
'''
count = 8764
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8000_left[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8000_right[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra8000[i].G, all_sensories_left, dVNC_left, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
'''
# %%
# analyze paths: total and count per # hops
def process_paths_ipsi_contra(ipsi_left_paths, ipsi_right_paths, contra_paths, count_removed):
ipsi_left_counts = [len(x) for x in ipsi_left_paths]
ipsi_right_counts = [len(x) for x in ipsi_right_paths]
contra_counts = [len(x) for x in contra_paths]
path_counts_data = []
for row in zip(ipsi_left_counts, [f'ipsi-left']*len(ipsi_left_counts), [count_removed]*len(ipsi_left_counts)):
path_counts_data.append(row)
for row in zip(ipsi_right_counts, [f'ipsi-right']*len(ipsi_right_counts), [count_removed]*len(ipsi_right_counts)):
path_counts_data.append(row)
for row in zip(contra_counts, [f'contra']*len(contra_counts), [count_removed]*len(contra_counts)):
path_counts_data.append(row)
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition', 'edges_removed'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_{count_removed}-removed.csv')
# count per # hops
ipsi_left_path_counts = [[len(x) for x in path] for path in ipsi_left_paths]
ipsi_right_path_counts = [[len(x) for x in path] for path in ipsi_right_paths]
contra_path_counts = [[len(x) for x in path] for path in contra_paths]
path_counts_length_data = []
for i, path_length in enumerate(ipsi_left_path_counts):
for row in zip(path_length, [f'ipsi-left']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(ipsi_right_path_counts):
for row in zip(path_length, [f'ipsi-right']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(contra_path_counts):
for row in zip(path_length, [f'contra']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'edges_removed', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'edges_removed', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_{count_removed}-removed_path-lengths.csv')
cutoff=5
n_init = 8
count_removed = 500
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 1000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 2000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 4000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 8000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
# wildtype paths
graph_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC_cutoff{cutoff}_wildtype.csv.gz')
wt_count = len(graph_paths)
path_counts_data = []
path_counts_data.append([wt_count, f'wildtype', 0])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition', 'edges_removed'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype.csv')
path_counts_length_data = []
path_counts = [len(x) for x in graph_paths]
for row in zip(path_counts, [f'wildtype']*len(path_counts), [0]*len(path_counts), [0]*len(path_counts)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'edges_removed', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype_path_lengths.csv')
# %%
# plot total paths per condition from left -> left paths
total_paths = pd.concat([pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_500-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_1000-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_2000-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_4000-removed.csv', index_col=0),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_8000-removed.csv', index_col=0)], axis=0)
wildtype = pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype.csv', index_col=0)
total_paths = pd.concat([total_paths, pd.DataFrame([[wildtype['count'].values[0], 'contra', 0]], columns = total_paths.columns),
pd.DataFrame([[wildtype['count'].values[0], 'ipsi-left', 0]], columns = total_paths.columns),
pd.DataFrame([[wildtype['count'].values[0], 'ipsi-right', 0]], columns = total_paths.columns)], axis=0)
# plot raw number of paths (all lengths), after removing edges of different types
fig, ax = plt.subplots(1,1, figsize=(2,2))
sns.lineplot(data = total_paths, x='edges_removed', y='count', hue='condition', err_style='bars', linewidth=0.75, err_kws={'elinewidth':0.75}, ax=ax)
ax.set(ylim=(0, 1100000))
plt.savefig('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/path-counts_left-to-left_removing-edge-types.pdf', format='pdf', bbox_inches='tight')
# normalized plot of all paths (all lengths), after removing edges of different types
max_control_paths = total_paths[total_paths.edges_removed==0].iloc[0, 0]
total_paths.loc[:, 'count'] = total_paths.loc[:, 'count']/max_control_paths
fig, ax = plt.subplots(1,1, figsize=(2,2))
sns.lineplot(data = total_paths, x='edges_removed', y='count', hue='condition', err_style='bars', linewidth=0.75, err_kws={'elinewidth':0.75}, ax=ax)
ax.set(ylim=(0, 1.05))
plt.savefig('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/path-counts_left-to-left_removing-edge-types_normalized.pdf', format='pdf', bbox_inches='tight')
# plot total paths per path length from left -> left paths
total_paths = pd.concat([pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_500-removed_path-lengths.csv'),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_1000-removed_path-lengths.csv'),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_2000-removed_path-lengths.csv'),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_4000-removed_path-lengths.csv'),
pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph_random-ipsi-contra_8000-removed_path-lengths.csv')], axis=0)
wildtype = pd.read_csv('interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/wildtype_path_lengths.csv')
total_paths_normalized = []
for i in range(len(total_paths.index)):
length = total_paths.iloc[i].path_length
row = [total_paths.iloc[i].condition, total_paths.iloc[i].N,
total_paths.iloc[i].edges_removed, total_paths.iloc[i].path_length,
total_paths.iloc[i].value/wildtype[wildtype.path_length==length].value.values[0]] # normalized path counts by wildtype
total_paths_normalized.append(row)
total_paths_normalized = pd.DataFrame(total_paths_normalized, columns = total_paths.columns)
for removed in [500, 1000, 2000, 4000, 8000]:
fig, ax = plt.subplots(1,1, figsize=(2,2))
sns.lineplot(data=total_paths_normalized[total_paths_normalized.edges_removed==removed], x='path_length', y='value', hue='condition', err_style='bars', linewidth=0.75, err_kws={'elinewidth':0.75}, ax=ax)
ax.set(ylim=(0, 1.1))
plt.savefig(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/path-length-counts_left-to-left_removing-{removed}-edge-types.pdf', format='pdf', bbox_inches='tight')
# %%
# how many nodes are in each type of path?
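# A minimal sketch (not in the original) of one way to answer the question above, assuming
# each loaded path set is a list of paths and each path is a list of node/skeleton ids
# (the same structure used for the path-length counts earlier in this script):
def count_unique_nodes(paths):
    # flatten all paths and count the distinct nodes they visit
    return len(set(node for path in paths for node in path))

# e.g. count_unique_nodes(graph_paths) for the wildtype paths loaded above, or
# [count_unique_nodes(paths) for paths in random_contra_paths] for each excised-graph iteration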
# %%
##########
# EXPERIMENT 4: removing random numbers of ipsi vs. contra edges; effect on paths from one side of the brain to the opposite side
#
# load previously generated edge list and cell-type annotations
all_edges_combined_split = pd.read_csv('data/edges_threshold/pairwise-threshold_ad_all-edges.csv', index_col=0)
left = pm.Promat.get_hemis('left')
right = pm.Promat.get_hemis('right')
# iterations for random edge removal as control
n_init = 8
dVNC = pymaid.get_skids_by_annotation('mw dVNC')
dVNC_left = list(np.intersect1d(dVNC, left))
dVNC_right = list(np.intersect1d(dVNC, right))
all_sensories = ct.Celltype_Analyzer.get_skids_from_meta_meta_annotation('mw brain sensory modalities')
all_sensories_left = list(np.intersect1d(all_sensories, left))
all_sensories_right = list(np.intersect1d(all_sensories, right))
# generate wildtype graph
split_graph = pg.Analyze_Nx_G(all_edges_combined_split, graph_type='directed', split_pairs=True)
# excise edges and generate graphs
random_ipsi500_left, random_ipsi500_right, random_contra500 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 500, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi1000_left, random_ipsi1000_right, random_contra1000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 1000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi2000_left, random_ipsi2000_right, random_contra2000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 2000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi4000_left, random_ipsi4000_right, random_contra4000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 4000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
random_ipsi8000_left, random_ipsi8000_right, random_contra8000 = pg.Prograph.excise_ipsi_contra_edge_experiment(all_edges_combined_split, 8000, n_init, 0, left, right, exclude_nodes=(all_sensories+dVNC), split_pairs=True)
# %%
# generate and save paths
cutoff=5
# generate wildtype paths
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_wildtype'
pg.Prograph.generate_save_simple_paths(split_graph.G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=save_path)
# generate and save paths
count = 500
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi500_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra500[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 1000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi1000_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra1000[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 2000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi2000_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra2000[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 4000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi4000_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra4000[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
count = 8000
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8000_left[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_ipsi8000_right[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
save_path = f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count}-N'
Parallel(n_jobs=-1)(delayed(pg.Prograph.generate_save_simple_paths)(random_contra8000[i].G, all_sensories_left, dVNC_right, cutoff=cutoff, save_path=f'{save_path}{i}') for i in tqdm((range(n_init))))
# %%
# analyze paths: total and count per # hops
def process_paths_ipsi_contra(ipsi_left_paths, ipsi_right_paths, contra_paths, count_removed):
ipsi_left_counts = [len(x) for x in ipsi_left_paths]
ipsi_right_counts = [len(x) for x in ipsi_right_paths]
contra_counts = [len(x) for x in contra_paths]
path_counts_data = []
for row in zip(ipsi_left_counts, [f'ipsi-left']*len(ipsi_left_counts), [count_removed]*len(ipsi_left_counts)):
path_counts_data.append(row)
for row in zip(ipsi_right_counts, [f'ipsi-right']*len(ipsi_right_counts), [count_removed]*len(ipsi_right_counts)):
path_counts_data.append(row)
for row in zip(contra_counts, [f'contra']*len(contra_counts), [count_removed]*len(contra_counts)):
path_counts_data.append(row)
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition', 'edges_removed'])
path_counts_data.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph-to-dVNC-right_random-ipsi-contra_{count_removed}-removed.csv')
# count per # hops
ipsi_left_path_counts = [[len(x) for x in path] for path in ipsi_left_paths]
ipsi_right_path_counts = [[len(x) for x in path] for path in ipsi_right_paths]
contra_path_counts = [[len(x) for x in path] for path in contra_paths]
path_counts_length_data = []
for i, path_length in enumerate(ipsi_left_path_counts):
for row in zip(path_length, [f'ipsi-left']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(ipsi_right_path_counts):
for row in zip(path_length, [f'ipsi-right']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
for i, path_length in enumerate(contra_path_counts):
for row in zip(path_length, [f'contra']*len(path_length), [count_removed]*len(path_length), [i]*len(path_length)):
path_counts_length_data.append(row)
path_counts_length_data = pd.DataFrame(path_counts_length_data, columns=['path_length', 'condition', 'edges_removed', 'N'])
path_counts_length_data['value'] = [1]*len(path_counts_length_data) # just adding [1] so that groupby has something to count
path_counts_length_data_counts = path_counts_length_data.groupby(['condition', 'N', 'edges_removed', 'path_length']).count()
path_counts_length_data_counts.to_csv(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/processed/excised_graph-to-dVNC-right_random-ipsi-contra_{count_removed}-removed_path-lengths.csv')
cutoff=5
n_init = 8
count_removed = 500
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 1000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 2000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 4000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
count_removed = 8000
random_ipsi_left_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-left-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_ipsi_right_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-ipsi-right-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
random_contra_paths = Parallel(n_jobs=-1)(delayed(pg.Prograph.open_simple_paths)(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_minus-edges-contra-{count_removed}-N{i}.csv.gz') for i in tqdm(range(n_init)))
process_paths_ipsi_contra(random_ipsi_left_paths, random_ipsi_right_paths, random_contra_paths, count_removed)
# wildtype paths
graph_paths = pg.Prograph.open_simple_paths(f'interhemisphere/csv/paths/random-ipsi-contra-edges_left-paths/all-paths-sens-to-dVNC-right_cutoff{cutoff}_wildtype.csv.gz')
wt_count = len(graph_paths)
path_counts_data = []
path_counts_data.append([wt_count, f'wildtype', 0])
path_counts_data = pd.DataFrame(path_counts_data, columns=['count', 'condition', 'edges_removed'])
from collections import defaultdict
import argparse
from tqdm import tqdm
import editdistance as ed
import pandas as pd
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--unlabeled-file', '-i', type=str, required=True)
parser.add_argument('--labeled-file', '-g', type=str, required=True)
parser.add_argument('--output-file', '-o', type=str, required=True)
parser.add_argument('--input-columns', '-c', type=str, nargs='+', default=['sentence'])
parser.add_argument('--output-column', '-v', type=str, default='label')
args = parser.parse_args()
l_df = pd.read_csv(args.labeled_file, sep='\t', quoting=3)
ul_df = pd.read_csv(args.unlabeled_file, sep='\t', quoting=3)
is_multicol = len(args.input_columns) > 1
candidates = [('\t'.join(x[1:]), y) for x, y in zip(l_df[args.input_columns].itertuples(), l_df[args.output_column])]
output_dict = defaultdict(list)
columns = list(ul_df.columns) + [args.output_column]
for tup in tqdm(list(ul_df[list(ul_df.columns)].itertuples())):
tup = tup[1:]
for col, x in zip(ul_df.columns, tup):
output_dict[col].append(x)
for inp_tup in tqdm(list(ul_df[args.input_columns].itertuples())):
inp_tup = inp_tup[1:]
best_candidate = min(candidates, key=lambda x: ed.eval(x[0], '\t'.join(inp_tup)))
candidates.pop(candidates.index(best_candidate))
output_dict[args.output_column].append(best_candidate[1])
    # Assumed completion (the original ended mid-statement): build the output frame and write it out
    out_df = pd.DataFrame(output_dict)
    out_df.to_csv(args.output_file, sep='\t', index=False)


if __name__ == '__main__':
    main()
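# Hypothetical invocation (script and file names are placeholders, not from the original):
#   python label_by_edit_distance.py -i unlabeled.tsv -g labeled.tsv -o output.tsv -c sentence -v label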
# coding: utf-8
# In[1]:
get_ipython().system(u'curl -L https://raw.githubusercontent.com/mgiovann/machineLearningData/master/USvideos.csv -o USvideos.csv')
get_ipython().system(u'curl -L https://raw.githubusercontent.com/mgiovann/machineLearningData/master/USvideosOld.csv -o USvideosOld.csv')
# In[30]:
get_ipython().system(u'curl -L https://raw.githubusercontent.com/mgiovann/machineLearningData/master/youtube-new/USvideos.csv -o USvideos.csv')
# In[1]:
get_ipython().system(u'curl -L https://raw.githubusercontent.com/mgiovann/machineLearningData/master/youtube-new/CAvideos.csv -o CAvideos.csv')
# In[2]:
get_ipython().system(u'curl -L https://raw.githubusercontent.com/mgiovann/machineLearningData/master/youtube-new/DEvideos.csv -o DEvideos.csv')
# In[3]:
get_ipython().system(u'curl -L https://raw.githubusercontent.com/mgiovann/machineLearningData/master/youtube-new/FRvideos.csv -o FRvideos.csv')
# In[4]:
get_ipython().system(u'curl -L https://raw.githubusercontent.com/mgiovann/machineLearningData/master/youtube-new/GBvideos.csv -o GBvideos.csv')
# In[53]:
get_ipython().system(u'curl -L https://raw.githubusercontent.com/mgiovann/machineLearningData/master/youtube-new/US_category_id.json -o US_category_id.json')
get_ipython().system(u'curl -L https://raw.githubusercontent.com/mgiovann/machineLearningData/master/youtube-new/CA_category_id.json -o CA_category_id.json')
get_ipython().system(u'curl -L https://raw.githubusercontent.com/mgiovann/machineLearningData/master/youtube-new/DE_category_id.json -o DE_category_id.json')
get_ipython().system(u'curl -L https://raw.githubusercontent.com/mgiovann/machineLearningData/master/youtube-new/FR_category_id.json -o FR_category_id.json')
get_ipython().system(u'curl -L https://raw.githubusercontent.com/mgiovann/machineLearningData/master/youtube-new/GB_category_id.json -o GB_category_id.json')
# In[15]:
import pandas as pd
import numpy as np
from collections import Counter
from random import randint
import datetime
import time
import json
from pprint import pprint
import string
# In[36]:
df = pd.read_csv("GBvideos.csv", error_bad_lines=False)
df['publish_time'] = pd.to_datetime(df['publish_time'], format='%Y-%m-%dT%H:%M:%S.%fZ')
df['publishHour'] = df['publish_time'].dt.time
timeList = df['publishHour'].tolist()
timeList = np.array(timeList)
viewList = df['views']
viewList = np.array(viewList)
timeCountList = []
viewCountList = []
dataArray = []
print("\n\n")
lastHour = datetime.time(00, 00, 00)
for i in range(0,8):
hour = (i+1)*3
nextHour = datetime.time(hour-1, 59, 59)
newList = np.asarray(np.where(np.logical_and((timeList > lastHour), (timeList < nextHour))))
asdf, count = newList.shape
viewCount = int(round(np.mean(viewList[newList])))
viewCountList.append(viewCount)
timeLabel = lastHour.strftime("%H:%M") + "-" + nextHour.strftime("%H:%M")
dataArray.append(viewCount)
delta = datetime.timedelta(seconds=1)
lastHour = nextHour
lastHour = (datetime.datetime.combine(datetime.date(1,1,1),lastHour) + delta).time()
# print(timeCountList)
print("\n\n\n")
print("Average views by publish time: ")
print(dataArray)
print("\n")
print("% views by publish time: ")
percentageArray = np.array(dataArray)
percentageArray = np.round((percentageArray / sum(percentageArray)) * 100, 1)
print(percentageArray)
print(sum(percentageArray))
# In[50]:
df = pd.read_csv("FRvideos.csv", error_bad_lines=False)
# print(list(df))
df['publish_time'] = pd.to_datetime(df['publish_time'], format='%Y-%m-%dT%H:%M:%S.%fZ')
df['publishHour'] = df['publish_time'].dt.time
timeList = df['publishHour'].tolist()
timeList = np.array(timeList)
# zeroToThree =
# threeToSix =
timeCountList = []
dataArray = []
# print(type(timeList[0]))
t= timeList[0]
print("\n\n")
lastHour = datetime.time(00, 00, 00)
for i in range(0,8):
hour = (i+1)*3
nextHour = datetime.time(hour-1, 59, 59)
newList = np.asarray(np.where(np.logical_and((timeList > lastHour), (timeList < nextHour))))
asdf, count = newList.shape
timeCountList.append(count)
timeLabel = lastHour.strftime("%H:%M") + "-" + nextHour.strftime("%H:%M")
# dataArray.append(count)
dataArray.append(count)
delta = datetime.timedelta(seconds=1)
lastHour = nextHour
lastHour = (datetime.datetime.combine(datetime.date(1,1,1),lastHour) + delta).time()
print("Count by publish time: ")
print(dataArray)
# In[74]:
#parse category files
with open('US_category_id.json') as f:
data = json.load(f)
catList = []
catDict = {}
for i in range(0, 32):
catList.append(data["items"][i]['snippet']['title'])
catDict[data["items"][i]['id']] = data["items"][i]['snippet']['title']
print(catDict)
# In[78]:
print(catDict['20'])
# In[103]:
df = pd.read_csv("USvideos.csv", error_bad_lines=False)
categoryList = df['category_id'].tolist()
likesList = df['likes'].tolist()
dislikesList = df['dislikes'].tolist()
viewsList = df['views'].tolist()
categoryList = np.array(categoryList)
likesList = np.array(likesList)
dislikesList = np.array(dislikesList)
viewsList = np.array(viewsList)
print(Counter(categoryList).keys())
print(Counter(categoryList).values())
catCountKeys = np.array([v for v in Counter(categoryList).keys()])
catCountCounts = np.array([v for v in Counter(categoryList).values()])
print("\n\n")
# list of descriptions. Matches up with indices in catCountKeys
categoryDescriptions = ["Entertainment", "Science & Technology", "People & Blogs", "Comedy", "Film & Animation",
"Howto & Style", "Music", "News & Politics", "Sports", "Education",
"Autos & Vehicles", "Gaming", "Pets & Animals", "Nonprofits & Activism", "Travel & Events", "Shows"]
print("catCountKeys: ", catCountKeys)
categoryListForPrint = []
graphList = []
count = 0
for category in catCountKeys:
mean = round(np.mean(viewsList[np.where(categoryList == category)]))
print("category: ", category, " average views = ", mean)
graphList.append([catDict[str(category)], mean])
# categoryListForPrint.append(catDict)
count += 1
print("\n")
#sort by mean views
print("Category count")
print(sorted(graphList, key=lambda mean: mean[1]))
# In[104]:
sortedCats = sorted(graphList, key=lambda mean: mean[1])
sortedCats = np.array(sortedCats)
i, j = sortedCats.shape
printCatList = []
printMeanList = []
for i in range(i):
m = int(float(sortedCats[i][1]))
printMeanList.append(m)
printCatList.append(sortedCats[i][0])
print(printMeanList)
print(printCatList)
print("\n\n")
print(sortedCats[:,0])
print(sortedCats[:,1])
# In[116]:
df = pd.read_csv("GBvideos.csv", error_bad_lines=False)
categoryList = df['category_id'].tolist()
likesList = df['likes'].tolist()
dislikesList = df['dislikes'].tolist()
viewsList = df['views'].tolist()
categoryList = np.array(categoryList)
likesList = np.array(likesList)
dislikesList = np.array(dislikesList)
viewsList = np.array(viewsList)
# print(Counter(categoryList).keys())
# print(Counter(categoryList).values())
catCountKeys = np.array([v for v in Counter(categoryList).keys()])
catCountCounts = np.array([v for v in Counter(categoryList).values()])
# print("catCountKeys: ", catCountKeys)
categoryListForPrint = []
graphList = []
countryDict = {}
count = 0
for category in catCountKeys:
mean = round(np.mean(viewsList[np.where(categoryList == category)]))
countryDict[catDict[str(category)]] = mean
# print(countryDict)
otherCountryMeans = []
for k in printCatList:
otherCountryMeans.append(countryDict[k])
print("Country category mean list: ")
print(otherCountryMeans)
# In[61]:
columnNames = ['video_id', 'title']
df = pd.read_csv("USvideos.csv", error_bad_lines=False)
print(df.describe())
print(list(df))
# In[30]:
categoryList = df['category_id'].tolist()
likesList = df['likes'].tolist()
dislikesList = df['dislikes'].tolist()
viewsList = df['views'].tolist()
categoryList = np.array(categoryList)
likesList = np.array(likesList)
dislikesList = np.array(dislikesList)
viewsList = np.array(viewsList)
# In[60]:
#random colors
colors = []
for i in range(20):
colors.append('%06X' % randint(0, 0xFFFFFF))
print(Counter(categoryList).keys())
print(Counter(categoryList).values())
catCountKeys = np.array([v for v in Counter(categoryList).keys()])
catCountCounts = np.array([v for v in Counter(categoryList).values()])
print("\n\n")
# list of descriptions. Matches up with indices in catCountKeys
categoryDescriptions = ["Entertainment", "Science & Technology", "People & Blogs", "Comedy", "Film & Animation",
"Howto & Style", "Music", "News & Politics", "Sports", "Education",
"Autos & Vehicles", "Gaming", "Pets & Animals", "Nonprofits & Activism", "Travel & Events", "Shows"]
graphList = []
count = 0
for category in catCountKeys:
mean = round(np.mean(viewsList[np.where(categoryList == category)]))
print("category: ", category, " average views = ", mean)
graphList.append([categoryDescriptions[count], mean, colors[count]])
count += 1
print("\n")
#sort by mean views
print("paste this into the js: \n")
print(sorted(graphList, key=lambda mean: mean[1]))
# In[9]:
oldYoutubeDF = pd.read_csv("USvideosOld.csv", error_bad_lines=False)
print(oldYoutubeDF.describe())
print(list(oldYoutubeDF))
# In[94]:
#random colors
colors = []
for i in range(20):
colors.append('%06X' % randint(0, 0xFFFFFF))
publishTimeDF = pd.DataFrame()  # added: publishTimeDF was referenced but never created
publishTimeDF['publish_time'] = pd.to_datetime(oldYoutubeDF['publish_time'], format='%Y-%m-%dT%H:%M:%S.%fZ')
publishTimeDF['publishHour'] = publishTimeDF['publish_time'].dt.time
# print(publishTimeDF[['publishHour']].head)
timeList = publishTimeDF['publishHour'].tolist()
timeList = np.array(timeList)
# zeroToThree =
# threeToSix =
timeCountList = []
dataArray = []
# print(type(timeList[0]))
t= timeList[0]
newTime = datetime.time(17, 14, 0)  # uncommented so the comparison below has a reference time
# print(newTime)
if t > newTime:
    print("asdasd")  # debug check that the time comparison works
print("\n\n")
lastHour = datetime.time(00, 00, 00)
for i in range(0,8):
hour = (i+1)*3
nextHour = datetime.time(hour-1, 59, 59)
# print(i)
# print(lastHour)
# print(nextHour, "\n")
# newList = np.where((timeList > lastHour) and (timeList < nextHour))
newList = np.asarray(np.where(np.logical_and((timeList > lastHour), (timeList < nextHour))))
asdf, count = newList.shape
timeCountList.append(count)
timeLabel = lastHour.strftime("%H:%M") + "-" + nextHour.strftime("%H:%M")
dataArray.append([timeLabel, count, colors[i]])
delta = datetime.timedelta(seconds=1)
lastHour = nextHour
lastHour = (datetime.datetime.combine(datetime.date(1,1,1),lastHour) + delta).time()
# print(timeCountList)
print("\n\n\n")
print(dataArray)
# In[98]:
#random colors
colors = []
for i in range(20):
colors.append('%06X' % randint(0, 0xFFFFFF))
publishTimeDF['publish_time'] = pd.to_datetime(oldYoutubeDF['publish_time'], format='%Y-%m-%dT%H:%M:%S.%fZ')
import numpy as np
import pandas as pd
import re # regex
# Weather Station key & geocode
dfStation = pd.read_table('~/Data/stationary/allstations.txt', header=None)
# Removes trailing whitespaces and extracts the latitude/longitude
dfStation = dfStation[0].apply(lambda x: pd.Series(re.sub(' +', ' ', x).split()[:3]))
dfStation.columns = ['Station', 'Latitude', 'Longitude']
# Set path to directories to shorten code for reading data
hourlyPath = '~/Data/product/hourly/'
monthlyPrecipPath = '~/Data/products/precipitation/'
monthlyTempPath = '~/Data/products/temperature/'
def read_hourly(col_name, file_name, divisor):
"""Creates a formatted data frame from the hourly weather normals"""
# Formats the column names including a prefix for the weather type
station_columns = ['Station', 'Month', 'Day']
column_names = ([col_name+str(x) for x in range(1, 25)])
column_names = station_columns+column_names
df_hourly = pd.read_csv(hourlyPath+file_name,
header=None, delim_whitespace=True,
names = column_names)
# Sets missing values to NaN
df_hourly.replace('-9999', np.NaN, inplace=True)
# Split - Extracting to avoid calculations
column_order = df_hourly[['Station', 'Month', 'Day']]
# Apply - Removing flags and converting to the proper format
formatted_values = df_hourly.replace('[\D]', '', regex=True).astype(float) / divisor
formatted_values = formatted_values.drop(['Station', 'Month', 'Day'], axis=1)
# Combine
df_hourly = pd.merge(column_order, formatted_values, left_index=True, right_index=True)
return(df_hourly)
# Hourly Temperature
dfTmp = read_hourly('Tmp', 'hly-temp-normal.txt', 10)
# Hourly Dewpoint
dfDewp = read_hourly('Dew', 'hly-dewp-normal.txt', 10)
# Hourly Cloud Coverage - Specifically Overcast %
dfCloud = read_hourly('Cloud', 'hly-clod-pctovc.txt', 10)
# Hourly Cloud Coverage
dfHtIdx = read_hourly('HtIdx', 'hly-hidx-normal.txt', 10)
# Hourly Cloud Coverage
dfCoolHrs = read_hourly('CoolHr', 'hly-cldh-normal.txt', 10)
# Hourly Cloud Coverage
dfHtHrs = read_hourly('HtHr', 'hly-htdh-normal.txt', 10)
# Combining hourly data frames
dfHourlyWeather = pd.merge(dfStation, dfTmp).merge(dfDewp).merge(dfCloud).merge(dfHtIdx) \
.merge(dfCoolHrs).merge(dfHtHrs)
def read_monthly(col_name, file_name, path, divisor):
"""Creates a formatted data frame from the monthly weather normals"""
# Formats the column names including a prefix for the weather type
station_columns = ['Station']
column_names = ([col_name+str(x) for x in range(1, 13)])
column_names = station_columns+column_names
df_monthly = pd.read_csv(path+file_name,
header=None, delim_whitespace=True,
names = column_names)
# Sets missing values to NaN
df_monthly.replace('-9999', np.NaN, inplace=True)
# Split - Extracting to avoid calculations
column_order = df_monthly[['Station']]
# Apply - Removing flags and converting to the proper format
formatted_values = df_monthly.replace('[\D]', '', regex=True).astype(float) / divisor
formatted_values = formatted_values.drop(['Station'], axis=1)
# Combine
df_monthly = pd.merge(column_order, formatted_values, left_index=True, right_index=True)
return(df_monthly)
# Monthly Precipitation
dfPrecip = read_monthly('Precip', 'mly-prcp-normal.txt', monthlyPrecipPath, 100)
# Monthly Minimum Temperature
dfMthlyTmpMin = read_monthly('MthlyTmpMin', 'mly-tmin-normal.txt', monthlyTempPath, 10)
# Monthly Maximum Temperature
dfMthlyTmpMax = read_monthly('MthlyTmpMax', 'mly-tmax-normal.txt', monthlyTempPath, 10)
# Monthly Average Temperature
dfMthlyTmpAvg = read_monthly('MthlyTmpAvg', 'mly-tavg-normal.txt', monthlyTempPath, 10)
# Combining monthly data frames
# merging all monthly frames on Station (mirrors the hourly combine above)
dfMonthlyWeather = pd.merge(dfStation, dfPrecip).merge(dfMthlyTmpMin).merge(dfMthlyTmpMax) \
                   .merge(dfMthlyTmpAvg)
from __future__ import print_function
_README_ = '''
-------------------------------------------------------------------------
Generate JSON files for GBE decomposition page.
-p option outputs python numpy npz file (compressed format) for python
Author: <NAME> (<EMAIL>)
Date: 2017/12/01
-------------------------------------------------------------------------
'''
import pandas as pd
import numpy as np
import os, sys, json, re, gzip, argparse, logging, collections
from datetime import datetime
from functools import reduce
from scipy.sparse import dok_matrix
from logging.config import dictConfig
import rpy2.robjects as robjects
logging_config = dict(
version = 1,
formatters = {
'f': {'format':
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s'}
},
handlers = {
'h': {'class': 'logging.StreamHandler',
'formatter': 'f',
'level': logging.DEBUG}
},
root = {
'handlers': ['h'],
'level': logging.INFO,
#'level': logging.DEBUG,
},
)
dictConfig(logging_config)
def parse_label_phe(label_phe_f):
label_phe_df = pd.read_csv(label_phe_f, sep='\t', compression='gzip')
label_phe_code = label_phe_df['icd'].as_matrix()
label_phe = label_phe_df['Name'].map(lambda x: re.sub('_', ' ', re.sub('_/_', '/', x))).as_matrix()
return label_phe, label_phe_code
def parse_label_var(label_var_f):
    label_var_df = pd.read_csv(label_var_f, sep='\t', compression='gzip')
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
import unittest
import numpy as np
import pandas as pd
from nimbusml import Pipeline
from nimbusml.internal.entrypoints.transforms_variablecolumntransform import transforms_variablecolumntransform
from nimbusml.internal.utils.entrypoints import Graph, DataOutputFormat
class TestVariableColumn(unittest.TestCase):
def to_variable_column(self, input, features=None, length_column_name=None):
node = transforms_variablecolumntransform(data='$data',
output_data='$output_data',
features=features,
length_column_name=length_column_name)
graph_nodes = [node]
graph = Graph(dict(data=''),
dict(output_data=''),
DataOutputFormat.DF,
*(graph_nodes))
(out_model, out_data, out_metrics, _) = graph.run(verbose=True, X=input)
return out_data
def test_nonvariable_columns_are_returned_unchanged(self):
train_data = {'c1': [2, 3, 4, 5],
'c2': [3, 4, 5, 6],
'c3': [4, 5, 6, 7],
'c4': [0, 1, 2, 1]}
train_df = pd.DataFrame(train_data).astype({'c1': np.float64,
'c2': np.float64})
result = self.to_variable_column(train_df, ['c1', 'c2'])
self.assertTrue(result.loc[:, 'c3'].equals(train_df.loc[:, 'c3']))
self.assertTrue(result.loc[:, 'c4'].equals(train_df.loc[:, 'c4']))
def test_variable_columns_of_same_length_do_not_add_nans(self):
train_data = {'c1': [2, 3, 4, 5],
'c2': [3, 4, 5, 6],
'c3': [4, 5, 6, 7]}
train_df = pd.DataFrame(train_data).astype({'c1': np.float64,
'c2': np.float64})
result = self.to_variable_column(train_df, ['c1', 'c2'])
self.assertTrue(result.loc[:, 'c1.0'].equals(train_df.loc[:, 'c1']))
self.assertTrue(result.loc[:, 'c1.1'].equals(train_df.loc[:, 'c2']))
def test_variable_columns_with_different_lengths_return_nans(self):
train_data = {'c1': [2, 3, 4, 5],
'c2': [3, 4, 5, 6],
'c3': [4, 5, 6, 7],
'c4': [0, 1, 2, 1]}
        train_df = pd.DataFrame(train_data)
from tqdm import tqdm
import numpy as np
import pandas as pd
threshold = 5 #from last row in excel template
validratio = 0.72 #assume 72% of signatures are valid, must change together with threshold
totalSigCnt=int(19.69*10000) #assume we submit ~197k signatures
dupratioAmongValid = 0.07 #assume among the valid signatures, 7% are duplicate
sampleRate = 0.25
trialCnt = 5000
print('validRatio={0:4.3f}, duplicateAmongValid={1:4.3f}, sampleRatio={2:4.3f}\n'.format(validratio, dupratioAmongValid, sampleRate))
totalvalidCnt = int(totalSigCnt * validratio)
duplicateCnt = int(totalvalidCnt * dupratioAmongValid) #duplicates among the valid signatures
print('expect duplicate signatures {0}'.format(duplicateCnt))
distinctCnt = (totalvalidCnt - 2 * duplicateCnt) + duplicateCnt
invalidCnt = totalSigCnt -totalvalidCnt
sigSpace = list(range(distinctCnt))
sigSpace.extend(list(range(duplicateCnt)))
sigSpace.extend(list([-1]*invalidCnt))
sampleCnt = int(totalSigCnt * sampleRate)
print('there are total {0} signatures, {1} unique signatures, sample {2} signatures\n'.format(len(sigSpace), len(np.unique(sigSpace)), sampleCnt))
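# Added note: a duplicated pair is detected only if both of its copies land in the sample,
# which happens with probability ~sampleRate**2 when sampling without replacement, so the
# Monte Carlo mean of dupcnt should sit near duplicateCnt * sampleRate**2.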
duplicateCnts = []
validCnts = []
for i in tqdm(range(trialCnt)):
sampledSig = np.random.choice(sigSpace, sampleCnt, replace=False)
validSampleSig = [x for x in list(sampledSig) if x >=0]
uniqueSampleSig = np.unique(validSampleSig)
duplicateCnt = len(validSampleSig)-len(uniqueSampleSig)
duplicateCnts.append(duplicateCnt)
validCnts.append(len(validSampleSig))
df = pd.DataFrame({"dupcnt": duplicateCnts, "totalvalidcnt": validCnts})
import pandas as pd
data = pd.read_csv("Filieres.csv")
#get code and specialite column and drop first row
data = data.filter(['code_filière','Spécialité'])
data = data.drop(data.index[0])
data = data.rename(columns={"code_filière": "field_id", "Spécialité": "name"})
#replace éè with e and get field id from code_filiere text
data['name'] = data['name'].replace(regex=True, to_replace=r'é|è', value=r'e').str.lower()
data['field_id'] = data['field_id'].str[-2:]
data = data.filter(['field_id','name']).drop_duplicates()
data = pd.DataFrame(data)
import pandas as pd
import nltk
import multiprocessing as mp
from nltk.tokenize import word_tokenize
from nltk.stem.porter import *
from nltk.corpus import stopwords
from tqdm import tqdm
import re # needed for re.sub in removeUrls
import sys
# User defined Imports ugly python import syntax >:(
sys.path.append('../Preprocess')
from dataJoin import joinData
from parallelLoad import parallelLoad
def tokenize(df):
# To measure the progress of our lambda apply functions
# Need to specify since they will be running on separate processes
tqdm.pandas()
# Do a tokenization by row
#print('Tokenizing text...')
#print(df.loc[69377:69380,['text']]) # This will have NA as text
# Drop the NA tweets texts so we dont have problems with our tokenizers
df = df.dropna(subset=['text'])
# Do the apply method
#df['tokenized_text'] = df.progress_apply(lambda row: word_tokenize(row['text']), axis=1)
df['tokenized_text'] = df.apply(lambda row: word_tokenize(row['text']), axis=1)
# Return df
return df
def wordCount(df):
    # Also the length of the tokenized text (could be useful?)
#print('Getting number of words...')
#df['tweets_length'] = df.progress_apply(lambda row: len(row['tokenized_text']), axis=1)
df['tweets_length'] = df.apply(lambda row: len(row['tokenized_text']), axis=1)
# Return the new df
return df
def steem(df):
#print('Stemming Words...')
# Create an instance of the porter stemmer
stemmer = PorterStemmer()
    # Stem the words
#df['stemmed_tweets'] = df['tokenized_text'].progress_apply(lambda words:[stemmer.stem(word) for word in words])
df['stemmed_tweets'] = df['tokenized_text'].apply(lambda words:[stemmer.stem(word) for word in words])
# Return the new stemmed df
return df
def removeUrls(df):
#print('Removing Urls...')
# Remove the urls/# etc
#df['stemmed_tweets'] = df['stemmed_tweets'].progress_apply(lambda words:[ re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", "", word) for word in words])
df['stemmed_tweets'] = df['stemmed_tweets'].apply(lambda words:[ re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", "", word) for word in words])
# Return the df without URLs
return df
def removeStopWords(df):
# Set-up remove of stop words
stop_words = set(stopwords.words('english'))
# Get the multidimensional array
stemmedWords = df['stemmed_tweets'].values.reshape(-1,).tolist()
# Flatten to 1d array
#print('Flattening the array...')
flattenedStemmedWords = [x for sublist in stemmedWords for x in sublist]
#print('The flattened stemmed words are: \n', flattenedStemmedWords[:10])
# Cleanup of the stemmed words because they are dirty :O
cleanedStemmedWords = []
#print('Removing stop words and punctuation...')
for word in flattenedStemmedWords:
# Not commas periods and applause.
if word not in [
",",
".",
"``",
"''",
";",
"?",
"--",
")",
"(",
":",
"!",
"...",
"http",
"u2013"
] and len(word) > 2 and word not in stop_words:
cleanedStemmedWords.append(word.lower())
#print('The cleaned Stemmed Words are: \n',cleanedStemmedWords[:30])
return cleanedStemmedWords
if __name__ =='__main__':
# To measure the progress of our lambda apply functions
tqdm.pandas()
print('Loading data...')
# Start Data loading using paralelization parallelLoad(route_to_files) function!
filesRoute = '../data/traditionalSpamBotsChunks1/'
botData = parallelLoad(filesRoute)
filesRoute = '../data/genuineTweetsChunks/'
genuineData = parallelLoad(filesRoute)
print('Joining data...')
df = joinData(botData.head(50000), genuineData.head(5000))
# Drop all columns but the one containing the tweets text
df = df[['text','bot']]
# Divide data into chunks
n = 1000 #chunk row size
list_df = [df[i:i+n] for i in range(0,df.shape[0],n)]
    # Use 8 worker processes
    pool = mp.Pool(8)
print('Tokenizing text...')
# Create a list of async functions
funclist = []
for df in list_df:
# Process each df using and async function
f = pool.apply_async(tokenize, [df])
# Append it to a list of async functions
funclist.append(f)
result = []
for f in tqdm(funclist):
# Timeout in 2 mins
# Use the get method on the f object generated by apply_async
        # to retrieve the result once the process is finished
result.append(f.get(timeout=120))
# Concat results
df = pd.concat(result)
# Divide data into chunks for parallel processing
n = 1000 #chunk row size
list_df = [df[i:i+n] for i in range(0,df.shape[0],n)]
print('Counting number of words...')
# Create a list of async functions
funclist = []
for df in list_df:
# Process each df using and async function
f = pool.apply_async(wordCount, [df])
# Append it to a list of async functions
funclist.append(f)
result = []
for f in tqdm(funclist):
# Timeout in 2 mins
# Use the get method on the f object generated by apply_async
        # to retrieve the result once the process is finished
result.append(f.get(timeout=120))
# Concat results
df = pd.concat(result)
print('Stemming...')
# Divide data into chunks for parallel processing
n = 1000 #chunk row size
list_df = [df[i:i+n] for i in range(0,df.shape[0],n)]
# Create a list of async functions
funclist = []
for df in list_df:
# Process each df using and async function
f = pool.apply_async(steem, [df])
# Append it to a list of async functions
funclist.append(f)
result = []
for f in tqdm(funclist):
# Timeout in 2 mins
# Use the get method on the f object generated by apply_async
        # to retrieve the result once the process is finished
result.append(f.get(timeout=120))
# Concat results
    df = pd.concat(result)
################## IMPORTS #####################
import logging
import os
from bs4 import BeautifulSoup
import requests
from datetime import datetime
import pandas as pd
import re
import numpy as np
import sqlite3
from sqlalchemy import create_engine
############### DATA COLLECTION ##################
#creating a useragent
#parameters
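# Hedged sketch (assumed, not from the original): a logger and request header that the
# functions below reference; the exact user-agent string is only an example value.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
                        'Chrome/85.0.4183.102 Safari/537.36'}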
def data_collection(url, header):
# Request to URL
page = requests.get(url, headers=header)
logger.debug('Page Response: %s', page)
# Beautiful soup object
soup = BeautifulSoup(page.text, 'html.parser')
# ===================== Product Data ============================
products = soup.find('ul', class_='products-listing small')
product_list = products.find_all('article', class_='hm-product-item')
# product id
product_id = [p.get('data-articlecode') for p in product_list]
# product category
product_category = [p.get('data-category') for p in product_list]
# product name
product_list = products.find_all('a', class_='link')
product_name = [p.get_text() for p in product_list]
# price
product_list = products.find_all('span', class_='price regular')
product_price = [p.get_text() for p in product_list]
data = pd.DataFrame([product_id, product_category, product_name, product_price]).T
data.columns = ['product_id', 'product_category', 'product_name', 'product_price']
return data
######################## DATA COLLECT BY PRODUCT #################################
def data_collection_by_product(data, header):
# empty dataframe
df_compositions = pd.DataFrame()
# unique columns for all products
aux = []
cols = ['Art. No.',
'Composition',
'Fit',
'Product safety',
'Size',
'More sustainable materials']
df_pattern = pd.DataFrame(columns=cols)
for i in range(len(data)):
# API Requests
url = 'https://www2.hm.com/en_us/productpage.' + data.loc[i, 'product_id'] + '.html'
logger.debug('Product: %s, URL: %s', i, url)
page = requests.get(url, headers=header)
# BeautifulSoup object
soup = BeautifulSoup(page.text, 'html.parser')
product_list = soup.find_all('a', class_='filter-option miniature active') + soup.find_all('a',
class_='filter-option miniature')
color_item = [p.get('data-color') for p in product_list]
# product id
product_id = [p.get('data-articlecode') for p in product_list]
# creating data frame with product id+color name
df_color = pd.DataFrame((product_id, color_item)).T
df_color.columns = ['product_id', 'color_name'] # renaming columns
for j in range(len(df_color)):
# API Requests
url = 'https://www2.hm.com/en_us/productpage.' + df_color.loc[j, 'product_id'] + '.html'
logger.debug('Color: %s', url)
page = requests.get(url, headers=header)
# BeautifulSoup object
soup = BeautifulSoup(page.text, 'html.parser')
######################## PRODUCT NAME ###################################
product_name = soup.find_all('h1', class_='primary product-item-headline')
product_name = product_name[0].get_text()
######################## PRODUCT PRICE ###################################
product_price = soup.find_all('div', class_='primary-row product-item-price')
product_price = re.findall(r'\d+\.?\d+', product_price[0].get_text())[0]
####################### COMPOSITION ######################################
product_composition_list = soup.find_all('div', class_='pdp-description-list-item')
product_composition = [list(filter(None, p.get_text().split("\n"))) for p in product_composition_list]
# renaming labels
df_composition = pd.DataFrame(product_composition).T
df_composition.columns = df_composition.iloc[0]
# deleting first row
df_composition = df_composition.iloc[1:].fillna(method='ffill')
# remove pocket lining, shell and lining
df_composition['Composition'] = df_composition['Composition'].str.replace('Pocket: ', '', regex=True)
df_composition['Composition'] = df_composition['Composition'].str.replace('Pocket lining: ', '', regex=True)
df_composition['Composition'] = df_composition['Composition'].str.replace('Shell: ', '', regex=True)
df_composition['Composition'] = df_composition['Composition'].str.replace('Lining: ', '', regex=True)
# guarantee the same number of columns
df_composition = pd.concat([df_pattern, df_composition], axis=0)
# rename columns
df_composition.columns = ['product_id', 'composition', 'fit', 'product_safety', 'size', 'sustainable_materials']
df_composition['product_name'] = product_name
df_composition['product_price'] = product_price
# keep new columns if they show up
aux = aux + df_composition.columns.tolist()
# merge df_color and df_composition
            df_composition = pd.merge(df_composition, df_color, how='left', on='product_id')
#------------------------------------------------------------------------------------------------------------------------------
# By <NAME>
# (updated October 2018)
#
# Define offset vectors
# An offset vector represents the difference in gene expression profiles between two states (ex. two different conditions like
# disease vs normal)
#-------------------------------------------------------------------------------------------------------------------------------
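# Illustrative sketch (added, not part of the original pipeline): with toy numbers, the offset
# vector is just the element-wise difference of the two group means, e.g.
#   state_A = pd.DataFrame({'geneA': [1.0, 2.0], 'geneB': [5.0, 7.0]})
#   state_B = pd.DataFrame({'geneA': [3.0, 4.0], 'geneB': [9.0, 11.0]})
#   offset  = state_B.mean(axis=0) - state_A.mean(axis=0)   # geneA: 2.0, geneB: 4.0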
import os
import pandas as pd
import numpy as np
from keras.models import model_from_json, load_model
from keras import metrics, optimizers
from sklearn.decomposition import PCA
from functions import utils
import pickle
from numpy.random import seed
randomState = 123
seed(randomState)
def gene_space_offset(data_dir, gene_id, percent_low, percent_high):
"""
    gene_space_offset(data_dir: string, gene_id: string, percent_low: float, percent_high: float):
input:
data_dir: directory containing the raw gene expression data for all genes including the target gene (see
gene_id definition).
gene_id: gene you are using as the "phenotype" to sort samples by
This gene is referred to as "target_gene" in comments below.
In "interpolate_in_gene_space.py", after we sort samples based on the expression level of the
target gene, we want to predict the expression profile of the OTHER genes at different levels
of target gene expression.
percent_low: float between 0 and 1, fraction of samples to include in the low-expression group
percent_high: float between 0 and 1, fraction of samples to include in the high-expression group
computation:
offset_vector = average(gene expression of samples that have the highest percent_high% of target gene expression) -
average(gene expression of samples that have the lowest percent_low% of target gene expression)
output:
offset vector (1 x 5548 genes)
Note: offset vector does not include the target gene
"""
# Load arguments
target_gene_file = os.path.join(data_dir, gene_id + ".txt")
non_target_gene_file = os.path.join(data_dir, "train_model_input.txt.xz")
# Output files
offset_file = os.path.join(data_dir, "offset_gene_space.txt")
lowest_file = os.path.join(data_dir, "lowest.txt")
highest_file = os.path.join(data_dir, "highest.txt")
# Read in data
target_gene_data = pd.read_table(target_gene_file, header=0, index_col=0)
non_target_gene_data = pd.read_table(non_target_gene_file, header=0, index_col=0)
# Sort target gene data by expression (lowest --> highest)
target_gene_sorted = target_gene_data.sort_values(by=[gene_id])
# Collect the extreme gene expressions
[low_ids, high_ids] = utils.get_gene_expression_above_percent(target_gene_sorted, gene_id, percent_low, percent_high)
low_exp = non_target_gene_data.loc[low_ids]
high_exp = non_target_gene_data.loc[high_ids]
print('Shape of low expression group (samples x genes) is {}'.format(low_exp.shape))
print('Shape of high expression group (samples x genes) is {}'.format(high_exp.shape))
# Average gene expression across samples in each extreme group
lowest_mean = low_exp.mean(axis=0)
highest_mean = high_exp.mean(axis=0)
# Generate offset using average gene expression in original dataset
offset_gene_space = highest_mean - lowest_mean
offset_gene_space_df = pd.Series.to_frame(offset_gene_space).T
# output lowest and highest expressing samples
low_exp.to_csv(lowest_file, sep='\t', float_format="%.5g")
high_exp.to_csv(highest_file, sep='\t', float_format="%.5g")
# output gene space offset vector
offset_gene_space_df.to_csv(offset_file, sep='\t', float_format="%.5g")
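# Hedged illustration of the offset arithmetic on toy data (not part of the pipeline):
# high = pd.DataFrame([[2.0, 4.0], [4.0, 6.0]])   # two "high" samples x two genes
# low = pd.DataFrame([[1.0, 1.0], [1.0, 3.0]])    # two "low" samples x two genes
# offset = high.mean(axis=0) - low.mean(axis=0)   # -> [2.0, 3.0], one value per gene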
def vae_latent_space_offset(data_dir, model_dir, encoded_dir, latent_dim, gene_id, percent_low, percent_high):
"""
vae_latent_space_offset(data_dir: string, model_dir: string, encoded_dir: string, gene_id: string):
input:
data_dir: directory containing the raw gene expression data for all genes including the target gene (see
gene_id definition).
model_dir: directory containing the learned vae models
encoded_dir: directory to use to output offset vector to
gene_id: gene you are using as the "phenotype" to sort samples by
This gene is referred to as "target_gene" in comments below.
In "interpolate_in_vae_latent_space.py", after we sort samples based on the expression level of the
target gene, we want to predict the expression profile of the OTHER genes at different levels
of target gene expression.
percent_low: float between 0 and 1, fraction of samples to include in the low-expression group
percent_high: float between 0 and 1, fraction of samples to include in the high-expression group
computation:
offset_vector = average(encoded gene expression of samples that have the highest percent_high% of target gene expression) -
average(encoded gene expression of samples that have the lowest percent_low% of target gene expression)
output:
encoded offset vector (1 x number of latent space features)
Note: offset vector does not include the target gene
"""
# Load arguments
target_gene_file = os.path.join(data_dir, gene_id + ".txt")
non_target_gene_file = os.path.join(data_dir, "train_model_input.txt.xz")
model_file = os.path.join(model_dir, "tybalt_2layer_{}latent_encoder_model.h5".format(latent_dim))
weights_file = os.path.join(model_dir, "tybalt_2layer_{}latent_encoder_weights.h5".format(latent_dim))
# Output files
offset_file = os.path.join(encoded_dir, "offset_latent_space_vae.txt")
lowest_file = os.path.join(encoded_dir, "lowest_encoded_vae.txt")
highest_file = os.path.join(encoded_dir, "highest_encoded_vae.txt")
# Read in data
target_gene_data = pd.read_table(target_gene_file, header=0, index_col=0)
non_target_gene_data = pd.read_table(non_target_gene_file, header=0, index_col=0)
# read in saved models
loaded_model = load_model(model_file)
# load weights into new model
loaded_model.load_weights(weights_file)
# Sort target gene data by expression (lowest --> highest)
target_gene_sorted = target_gene_data.sort_values(by=[gene_id])
# Collect the extreme gene expressions
[low_ids, high_ids] = utils.get_gene_expression_above_percent(target_gene_sorted, gene_id, percent_low, percent_high)
low_exp = non_target_gene_data.loc[low_ids]
high_exp = non_target_gene_data.loc[high_ids]
print('Shape of low expression group (samples x genes) is {}'.format(low_exp.shape))
print('Shape of high expression group (samples x genes) is {}'.format(high_exp.shape))
# Use trained model to encode expression data into SAME latent space
low_exp_encoded = loaded_model.predict_on_batch(low_exp)
low_exp_encoded_df = pd.DataFrame(low_exp_encoded, index=low_exp.index)
high_exp_encoded = loaded_model.predict_on_batch(high_exp)
high_exp_encoded_df = pd.DataFrame(high_exp_encoded, index=high_exp.index)
# Average gene expression across samples in each extreme group
lowest_mean = low_exp_encoded_df.mean(axis=0)
highest_mean = high_exp_encoded_df.mean(axis=0)
# Generate offset using average gene expression in original dataset
offset_latent_space = highest_mean - lowest_mean
offset_latent_space_df = pd.Series.to_frame(offset_latent_space).T
# output lowest and highest expressing samples
low_exp_encoded_df.to_csv(lowest_file, sep='\t', float_format="%.5g")
high_exp_encoded_df.to_csv(highest_file, sep='\t', float_format="%.5g")
# output latent space offset vector
offset_latent_space_df.to_csv(offset_file, sep='\t', float_format="%.5g")
def pca_latent_space_offset(data_dir, model_dir, encoded_dir, gene_id, percent_low, percent_high):
"""
pca_latent_space_offset(data_dir: string, model_dir: string, encoded_dir: string, gene_id: string):
input:
data_dir: directory containing the raw gene expression data for all genes including the target gene (see
gene_id definition).
model_dir: directory containing the learned pca models
encoded_dir: directory to use to output offset vector to
gene_id: gene you are using as the "phenotype" to sort samples by
This gene is referred to as "target_gene" in comments below.
In "interpolate_in_pca_latent_space.py", after we sort samples based on the expression level of the
target gene, we want to predict the expression profile of the OTHER genes at different levels
of target gene expression.
percent_low: float between 0 and 1, fraction of samples to include in the low-expression group
percent_high: float between 0 and 1, fraction of samples to include in the high-expression group
computation:
offset_vector = average(encoded gene expression of samples that have the highest percent_high% of target gene expression) -
average(encoded gene expression of samples that have the lowest percent_low% of target gene expression)
output:
encoded offset vector (1 x number of latent space features)
Note: offset vector does not include the target gene
"""
# Load arguments
target_gene_file = os.path.join(data_dir, gene_id + ".txt")
non_target_gene_file = os.path.join(data_dir, "train_model_input.txt.xz")
# Output files
offset_file = os.path.join(encoded_dir, "offset_latent_space_pca.txt")
lowest_file = os.path.join(encoded_dir, "lowest_encoded_pca.txt")
highest_file = os.path.join(encoded_dir, "highest_encoded_pca.txt")
# Read in data
target_gene_data = pd.read_table(target_gene_file, header=0, index_col=0)
non_target_gene_data = pd.read_table(non_target_gene_file, header=0, index_col=0)
model_file = os.path.join(model_dir, "pca_model.pkl")
# Sort target gene data by expression (lowest --> highest)
target_gene_sorted = target_gene_data.sort_values(by=[gene_id])
# Collect the extreme gene expressions
[low_ids, high_ids] = utils.get_gene_expression_above_percent(target_gene_sorted, gene_id, percent_low, percent_high)
low_exp = non_target_gene_data.loc[low_ids]
high_exp = non_target_gene_data.loc[high_ids]
print('Shape of low expression group (samples x genes) is {}'.format(low_exp.shape))
print('Shape of high expression group (samples x genes) is {}'.format(high_exp.shape))
# Load pca model
infile = open(model_file,'rb')
pca = pickle.load(infile)
infile.close()
# Transform data using loaded model
low_exp_encoded = pca.transform(low_exp)
high_exp_encoded = pca.transform(high_exp)
low_exp_encoded_df = pd.DataFrame(low_exp_encoded, index=low_exp.index)
'''
Calculating the uniqueness and average age of my children
This is just a fun script to analyze the Social Security Administration's
baby names and actuarial life tables datasets.
The accompanying blog post is at:
http://echrislynch.com/2018/05/10/fun-with-baby-names/
'''
# Load Modules
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Create a list of years from 1880 to 2016
years = [i for i in range(1880, 2017)]
# Initialize an empty list to become the babynames dataframe
babynames = []
# Generate the babyname csv file names to be imported
for each in years:
file_name = "datasets/babynames/yob%s.txt" % each
columns = ['Name', 'Sex', 'Count']
# read each csv file into a dataframe (the SSA yob files contain no header row)
babyname_df = pd.read_csv(file_name, sep=',', header=None, names=columns)
# insert a column for the year
babyname_df.insert(0, 'Year', each)
# append each year's dataframe to the babynames list
babynames.append(babyname_df)
# convert the babynames list into a dataframe
babynames = pd.concat(babynames, axis=0)
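# Optional sanity check on the combined frame (a hedged example, not part of the original script):
# babynames.groupby('Year')['Count'].sum().head()   # total recorded births per year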
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2021/10/30 20:18
describe: several A-share market sensors, intended mainly as examples of how to write a sensor
"""
import os
import os.path
import traceback
import inspect
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import timedelta, datetime
from collections import OrderedDict, Counter
from tqdm import tqdm
from typing import Callable
from czsc.objects import Event
from czsc.data.ts_cache import TsDataCache, Freq
from czsc.sensors.utils import get_index_beta, generate_signals, max_draw_down
from czsc.utils import WordWriter
plt.style.use('ggplot')
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
class StocksDaySensor:
"""以日线为基础周期的强势股票感应器
输入:市场个股全部行情、概念板块成分信息
输出:强势个股列表以及概念板块分布
"""
def __init__(self,
dc: TsDataCache,
get_signals: Callable,
get_event: Callable,
params: dict = None):
self.name = self.__class__.__name__
self.version = "V20211119"
self.data = OrderedDict()
self.get_signals = get_signals
self.get_event = get_event
self.event: Event = get_event()
self.base_freq = Freq.D.value
self.freqs = [Freq.W.value, Freq.M.value]
if params:
self.params = params
else:
self.params = {
"validate_sdt": "20210101",
"validate_edt": "20211112",
}
self.dc = dc
self.betas = ['000001.SH', '000016.SH', '000905.SH', '000300.SH', '399001.SZ', '399006.SZ']
self.all_cache = dict()
self.res_cache = dict()
self.sdt = self.params['validate_sdt']
self.edt = self.params['validate_edt']
self.ssd = self.get_stocks_strong_days()  # ssd is short for stocks_strong_days: the strong dates of every stock in the market
def get_share_strong_days(self, ts_code: str, name: str):
"""获取单个标的全部强势信号日期"""
dc = self.dc
event = self.event
sdt = self.sdt
edt = self.edt
start_date = pd.to_datetime(self.sdt) - timedelta(days=3000)
bars = dc.pro_bar(ts_code=ts_code, start_date=start_date, end_date=edt, freq='D', asset="E", raw_bar=True)
n_bars = dc.pro_bar(ts_code=ts_code, start_date=sdt, end_date=edt, freq='D', asset="E", raw_bar=False)
nb_dicts = {row['trade_date'].strftime("%Y%m%d"): row for row in n_bars.to_dict("records")}
signals = generate_signals(bars, sdt, self.base_freq, self.freqs, self.get_signals)
results = []
for s in signals:
m, f = event.is_match(s)
if m:
res = {
'ts_code': ts_code,
'name': name,
'reason': f,
}
nb_info = nb_dicts.get(s['dt'].strftime("%Y%m%d"), None)
if not nb_info:
print(f"no matching nb info for {s['dt'].strftime('%Y%m%d')}, skipping")
continue
res.update(nb_info)
results.append(res)
df_res = pd.DataFrame(results)
if df_res.empty:
print(f"{ts_code} - {name} - empty")
else:
df_res = df_res[pd.to_datetime(sdt) <= df_res['trade_date']]
df_res = df_res[df_res['trade_date'] <= pd.to_datetime(edt)]
print(f"{ts_code} - {name} 强势: {len(df_res)}, mean={df_res.n1b.mean()}, sum={df_res.n1b.sum()}")
print(f"{ts_code} - {name} 基准: {len(n_bars)}, mean={n_bars.n1b.mean()}, sum={n_bars.n1b.sum()}")
# add total market value
df_ = dc.daily_basic(ts_code, sdt, dc.edt)
df_['trade_date'] = pd.to_datetime(df_['trade_date'])
df_res = df_res.merge(df_[['trade_date', 'total_mv']], on='trade_date', how='left')
self.all_cache[ts_code] = df_res
def get_stocks_strong_days(self):
"""获取全部股票的强势日期"""
stocks = self.dc.stock_basic()
for row in tqdm(stocks.to_dict('records'), desc="validate"):
ts_code = row['ts_code']
name = row['name']
try:
self.get_share_strong_days(ts_code, name)
except:
print(f"get_share_strong_days error: {ts_code}, {name}")
traceback.print_exc()
res = []
for ts_code, x in self.all_cache.items():
if x.empty:
continue
res.append(x)
df = pd.concat(res, ignore_index=True)
return df
def filter_by_concepts(self, dfg, top_n=20, min_n=3):
"""使用板块效应过滤
:param dfg: 单个交易日的强势股选股结果
:param top_n: 选取前 n 个密集概念
:param min_n: 单股票至少要有 n 个概念在 top_n 中
:return:
"""
if dfg.empty:
return dfg, []
dc = self.dc
ths_members = dc.get_all_ths_members()
ths_members = ths_members[ths_members['概念类别'] == 'N']
ths_members = ths_members[~ths_members['概念名称'].isin([
'MSCI概念', '沪股通', '深股通', '融资融券', '上证180成份股', '央企国资改革',
'标普道琼斯A股', '中证500成份股', '上证380成份股', '沪深300样本股',
])]
ths_concepts = ths_members[ths_members.code.isin(dfg.ts_code)]
all_concepts = ths_concepts['概念名称'].to_list()
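# keep the top_n most frequent concepts among the selected stocks, then keep only the
# stocks that belong to at least min_n of those concepts (the sector-clustering filter)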
key_concepts = [k for k, v in Counter(all_concepts).most_common(top_n)]
sel = ths_concepts[ths_concepts['概念名称'].isin(key_concepts)]
ts_codes = [k for k, v in Counter(sel.code).most_common() if v >= min_n]
dfg = dfg[dfg.ts_code.isin(ts_codes)]
dfg.loc[:, '概念板块'] = dfg.ts_code.apply(lambda x: ths_concepts[ths_concepts.code == x]['概念名称'].to_list())
dfg.loc[:, '概念数量'] = dfg['概念板块'].apply(len)
return dfg, key_concepts
@staticmethod
def filter_by_market_value(dfg, min_total_mv):
"""使用总市值过滤
:param dfg: 单个交易日的强势股选股结果
:param min_total_mv: 最小总市值,单位为万元,1e6万元 = 100亿
:return:
"""
if dfg.empty:
return dfg
return dfg[dfg['total_mv'] >= min_total_mv]
def create_next_positions(self, dfg):
"""构建某天选股结果对应的下一交易日持仓明细
:param dfg: 单个交易日的强势股选股结果
:return: 下一交易日持仓明细
"""
if dfg.empty:
return dfg
trade_cal = self.dc.trade_cal()
trade_cal = trade_cal[trade_cal.is_open == 1]
trade_dates = trade_cal.cal_date.to_list()
trade_date = dfg['trade_date'].iloc[0]
hold = dfg.copy()
hold['成分日期'] = trade_dates[trade_dates.index(trade_date.strftime("%Y%m%d")) + 1]
hold['持仓权重'] = 0.98 / len(dfg)
hold.rename({'ts_code': "证券代码", "close": "交易价格"}, inplace=True, axis=1)
hold = hold[['证券代码', '持仓权重', '交易价格', '成分日期']]
hold['成分日期'] = pd.to_datetime(hold['成分日期']).apply(lambda x: x.strftime("%Y/%m/%d"))
return hold
def get_latest_strong(self, fc_top_n=20, fc_min_n=2, min_total_mv=1e6):
"""获取最近一个交易日的选股结果"""
df = self.ssd.copy()
trade_date = df['trade_date'].max()
dfg = df[df['trade_date'] == trade_date]
dfg, key_concepts = self.filter_by_concepts(dfg, fc_top_n, fc_min_n)
dfg = self.filter_by_market_value(dfg, min_total_mv)
holds = self.create_next_positions(dfg)
return dfg, holds
def validate_performance(self, fc_top_n=20, fc_min_n=2, min_total_mv=1e6, file_output=None):
"""验证传感器在一组过滤参数下的表现"""
dc = self.dc
sdt = self.sdt
edt = self.edt
df = self.ssd.copy()
results = []
detail = []
holds = []
for trade_date, dfg in df.groupby('trade_date'):
try:
if dfg.empty:
print(f"{trade_date} 选股结果为空")
continue
dfg, key_concepts = self.filter_by_concepts(dfg, top_n=fc_top_n, min_n=fc_min_n)
dfg = self.filter_by_market_value(dfg, min_total_mv)
res = {'trade_date': trade_date, "key_concepts": key_concepts, 'number': len(dfg)}
res.update(dfg[['n1b', 'n2b', 'n3b', 'n5b', 'n10b', 'n20b']].mean().to_dict())
results.append(res)
detail.append(dfg)
holds.append(self.create_next_positions(dfg))
except:
traceback.print_exc()
df_detail = pd.concat(detail)
df_holds = pd.concat(holds, ignore_index=True)
df_merged = pd.DataFrame(results)
"""Utility functions."""
import math
import pickle
import random
import time
import numpy as np
import pandas as pd
import cv2
import torch
import torch.nn.functional as F
def warp_wrapper(aug_fn):
def do_aug(image, target):
"""Input image should be of (n, h, w) or (n, h, w, c)."""
if image.ndim == 3:
image = np.expand_dims(image, axis=3)
n, h, w, c = image.shape
image = np.transpose(image, (1, 2, 0, 3))
image = image.reshape(*image.shape[:2], -1)
image, target = aug_fn(image, target)
image = image.reshape(*image.shape[:2], n, c)
image = np.transpose(image, (2, 3, 0, 1)) #nchw
return image, target
return do_aug
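# Hedged usage sketch (the flip function below is hypothetical, not part of this module):
# def flip_lr(image, target):
#     return image[:, ::-1], target              # image arrives here as (h, w, n*c)
# aug = warp_wrapper(flip_lr)
# batch, tgt = aug(np.zeros((4, 32, 32), dtype=np.float32), None)
# batch comes back as (n, c, h, w) == (4, 1, 32, 32)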
def GCN_wrapper(gcn):
def do_gcn(image, target):
"""Input image should be of (n, c, h, w)"""
im_dim = image.ndim
im_shape = image.shape
if im_dim == 4:
image = image.reshape(-1, *im_shape[2:])
image, target = gcn(image, target)
if im_dim == 4:
image = image.reshape(*im_shape)
return image, target
return do_gcn
class RandChannel(object):
"""Randomly select a channel for multi-channel images."""
def __init__(self, channel_idx=3):
self.channel_idx = channel_idx
def __call__ (self, data, target):
if self.channel_idx < data.ndim:
num_channel = data.shape[self.channel_idx]
if num_channel > 1:
idx = np.random.randint(num_channel, size=1)[0]
data = np.take(data, idx, axis=self.channel_idx)  # use the randomly chosen channel index
return data, target
def transform_label(label, l2i, events=None):
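# the returned label is the cumulative count of 'b' events up to each time step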
if events is not None:
label.zero_()
if len(events['b']) > 0:
label[torch.LongTensor(events['b'])] = 1
return label.cumsum(0).float()
else:
return (label == l2i["b"]).cumsum(0).float()
def transform_label_kl(label, l2i, kernel_size, kernel, events=None):
assert events is not None, "only works for annotation with events"
padding = kernel_size // 2
label_new = torch.zeros(label.size(0) + padding, 3)
# for events y, randomly assign it to x or w
for e in events['y']:
if np.random.rand(1)[0] > 0.5:
events['x'].append(e)
else:
events['w'].append(e)
tmp = torch.zeros(label.size(0) + padding)
if len(events['x']) > 0:
tmp[torch.LongTensor(events['x'])] = 1
label_new[:, 0] = F.conv1d(tmp.view(1, 1, -1), kernel, padding=padding).data.view(-1)
_max = label_new[:,0].max()
if _max > 0:
label_new[:, 0] /= _max
tmp.zero_()
if len(events['w']) > 0:
tmp[torch.LongTensor(events['w'])] = 1
label_new[:, 1] = F.conv1d(tmp.view(1, 1, -1), kernel, padding=padding).data.view(-1)
_max = label_new[:, 1].max()
if _max > 0:
label_new[:, 1] /= _max
_max = label_new[:, :2].sum(dim=1).max()
if _max > 0:
label_new[:, :2] /= _max
label_new[:, 2] = 1 - label_new[:, :2].sum(dim=1)
return label_new
class BudJitter(object):
def __init__(self, random_init, frame_dropout):
self.random_init = random_init
self.frame_dropout = frame_dropout
def __call__ (self, data, target):
if target is None:
return data, target
data_len = data.shape[0]
if self.random_init and data_len > 15:
ss = np.random.randint(data_len // 3 * 2)
if ss > 0:
data = data[ss:]
target = target[ss:]
if self.frame_dropout > 0:
r = torch.rand(2)
if r[0] < self.frame_dropout:
s = 1 if r[1] < 0.5 else 0
idx1 = np.array(range(s, data.shape[0], 2))
idx2 = np.array(range(s, target.shape[0], 2))
data = data[idx1]
target = target[idx2]
return data, target
def read_meta(meta_file, offset):
"""Reading the meta data."""
meta = pd.read_csv(meta_file)
meta['end'] = meta['end'] - offset
meta = meta[meta['end'] > 0]
return {'id': meta['sample_id'].values, 'label': meta['end'].values}
def filter_list(alist, mask):
return [alist[i] for i in range(len(mask)) if mask[i]]
def read_and_merge_meta(meta_files):
meta_list = []
for meta_file in meta_files.split(','):
with open(meta_file, 'rb') as f:
meta_list.append(pickle.load(f))
if len(meta_list) == 1:
return meta_list[0]
meta = {}
for k in ['label', 'id', 'events', 'symbols']:
meta[k] = [x for m in meta_list for x in m[k]]
meta['symbols'] = sorted(set(meta['symbols']))
meta['symbols'].remove('$')
meta['symbols'].append('$')
meta['symbol2id'] = {v:i for i,v in enumerate(meta['symbols'])}
for k, v in meta.items():
print(k, len(v))
return meta
def read_pkl_meta(meta_files, test_list_file, tfm_label, loss_type='L1',
jpg_input_only=False):
"""Read the meta data in pickled format."""
test_list = pd.read_csv(test_list_file, header=None)
import pandas as pd
from definitions import DATA_DIR
import os
event_data_path = 'hesinApril2019.tsv'
diag_event_data_path = 'hesin_diag10April2019.tsv'
event_data_path = os.path.join('raw', event_data_path)
diag_event_data_path = os.path.join('raw', diag_event_data_path)
patient_base_raw_path = 'patient_nov16.csv'
patient_base_raw_path = os.path.join('raw', patient_base_raw_path)
# patient_events = pd.read_csv(os.path.join(DATA_DIR, 'interim/202006012159_patient_events.csv'))
# cohort = pd.read_csv(os.path.join(DATA_DIR, 'raw/cohort_hf.csv'))
event_data = pd.read_csv(DATA_DIR + event_data_path, delimiter='\t')
diag_data = pd.read_csv(DATA_DIR + diag_event_data_path, delimiter='\t')
patient_base = pd.read_csv(os.path.join(DATA_DIR, patient_base_raw_path), low_memory=False)
# Spiros columns
patient_base.loc[:, "sex"] = patient_base.loc[:, "31-0.0"]
patient_base.loc[:, "yob"] = patient_base.loc[:, "34-0.0"]
patient_base.loc[:, "mob"] = patient_base.loc[:, "52-0.0"]
patient_base.loc[:, "dob"] = pd.to_datetime(patient_base.apply(
lambda x: '{}/{}/01'.format(x.yob, x.mob) if x is not None else '{}/07/01'.format(x.yob), axis=1))
patient_base.loc[:, "center_ass"] = patient_base.loc[:, '54-0.0']
patient_base.loc[:, "year_ass"] = pd.to_datetime(patient_base.loc[:, '53-0.0']).dt.year
patient_base.loc[:, "age_ass"] = patient_base.loc[:, '21003-0.0']
# Get all hf events
hf_events_primary = event_data[
(event_data.diag_icd10.str[:4].str.match('I50.|I110|I130|I132|I260')) & (event_data.admidate.notnull())]
diag_data_w_prim_record = pd.merge(event_data, diag_data, on=['eid', 'record_id'])
import pytest
import pandas as pd
from pandas.testing import assert_series_equal
from yeast import Recipe
from yeast.steps import JoinStep, SortStep, RenameColumnsStep
from yeast.errors import YeastValidationError
from tests.data_samples import startrek_starships
from tests.data_samples import startrek_starships_specs
def test_join_on_left_step(startrek_starships, startrek_starships_specs):
"""
Left Join with NA mismatches
"""
recipe = Recipe([
JoinStep(startrek_starships_specs, by="uid", how="left"),
SortStep('uid')
])
baked_data = recipe.prepare(startrek_starships).bake(startrek_starships)
assert baked_data.shape == (5, 3)
row = pd.Series({'uid': 'NCC-1031', 'name': 'USS Discovery', 'warp': 9.9}, name=0)
assert_series_equal(baked_data.loc[0], row)
row = pd.Series({'uid': 'NX-01', 'name': 'Enterprise', 'warp': None}, name=4)
assert_series_equal(baked_data.loc[4], row)
def test_join_on_inner_step(startrek_starships, startrek_starships_specs):
"""
Inner Join with NA mismatches
"""
recipe = Recipe([
JoinStep(startrek_starships_specs, by="uid", how="inner"),
SortStep('uid')
])
baked_data = recipe.prepare(startrek_starships).bake(startrek_starships)
assert baked_data.shape == (4, 3)
row = pd.Series({'uid': 'NCC-1031', 'name': 'USS Discovery', 'warp': 9.9}, name=0)
assert_series_equal(baked_data.loc[0], row)
row = pd.Series({'uid': 'NCC-74656', 'name': 'USS Voyager', 'warp': 9.975}, name=3)
assert_series_equal(baked_data.loc[3], row)
def test_join_on_right_step(startrek_starships, startrek_starships_specs):
"""
Right Join with NA mismatches
"""
recipe = Recipe([
JoinStep(startrek_starships_specs, by="uid", how="right"),
SortStep('uid')
])
baked_data = recipe.prepare(startrek_starships).bake(startrek_starships)
assert baked_data.shape == (4, 3)
row = pd.Series({'uid': 'NCC-1031', 'name': 'USS Discovery', 'warp': 9.9}, name=0)
assert_series_equal(baked_data.loc[0], row)
row = pd.Series({'uid': 'NCC-74656', 'name': 'USS Voyager', 'warp': 9.975}, name=3)
assert_series_equal(baked_data.loc[3], row)
def test_join_on_fullouter_step(startrek_starships, startrek_starships_specs):
"""
Full outer Join with NA mismatches
"""
recipe = Recipe([
JoinStep(startrek_starships_specs, by="uid", how="full"),
SortStep('uid')
])
baked_data = recipe.prepare(startrek_starships).bake(startrek_starships)
assert baked_data.shape == (5, 3)
row = pd.Series({'uid': 'NCC-1031', 'name': 'USS Discovery', 'warp': 9.9}, name=0)
assert_series_equal(baked_data.loc[0], row)
'''
Application to integrate all functionalities
@dlegor
'''
from typing import List
from pathlib import Path
import hashlib
from PIL import Image
from bokeh.models.annotations import Label
from bokeh.models.layouts import Column
from bokeh.models.widgets import tables
from networkx.classes import graph
from networkx.classes.graph import Graph
import pandas as pd
import numpy as np
import matplotlib as mpl
import streamlit as st
from utils import kind_file, filter_otus, get_colour_name
from umap_hdbscan import Embedding_Output
from sparcc import SparCC_MicNet
from SessionState import get
from bokeh.models import ColumnDataSource, Plot
from bokeh.plotting import figure
import streamlit as st
import pandas as pd
from network_alg.utils import _build_network
from network_alg.utils import create_normalize_graph
from network_alg import NetWork_MicNet
from network_alg import HDBSCAN_subnetwork
from network_alg import plot_matplotlib
from network_alg import plot_bokeh
#CONTS
key='1e629b5c8f2e7fff85ed133a8713d545678bd44badac98200cbd156d'
METRIC=['euclidean','manhattan','canberra','braycurtis',
'cosine','correlation','hellinger']
METRIC_HDB=['euclidean','manhattan','canberra','braycurtis']
PATH_IMAG=Path('images')
OPTIONS=['Menu','UMAP/HDBSCAN','SparCC','Network']
image_path=PATH_IMAG.resolve()/'logo_ie.png'
imagen_ixulabs=Image.open(image_path)
#st.cache
def convert_df(df):
# Cache the conversion to prevent computation on every rerun
return df.to_csv().encode('utf-8')
def clean_previous_file(name_file:str)->None:
file_name=Path(name_file)
if file_name.is_file():
file_name.unlink()
def menu_app():
st.sidebar.markdown("""
### References
* [:link: UMAP](https://umap-learn.readthedocs.io/en/latest/)
* [:link: Outlier description with HDBSCAN](https://hdbscan.readthedocs.io/en/latest/outlier_detection.html)
* [:link: SparCC](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1002687)
* [:link: NetworkX](https://networkx.org/)
""")
st.markdown("---")
st.header("General Menu")
st.markdown("""
### The application has three main components:
* UMAP/HDBSCAN :
Exploration of the data with UMAP and the HDBSCAN clustering algorithm.
* SparCC:
Algorithm that can be run to estimate correlations from abundance data. For a more detailed explanation the following paper is recommended:
[Inferring Correlation Networks from Genomic Survey Data](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1002687)
* Networks:
Large-scale metrics, structural balance and community information can be calculated for the network; this section uses the SparCC output as its input.
**Note**:
This dashboard runs on a virtual machine with limited capacity (2 CPUs and 6 GB of RAM); for large datasets please download our [repository](https://github.com/Labevo/MicNetToolbox) and run it locally.
""")
def sparcc_app():
st.sidebar.header("File")
file_input=st.sidebar.file_uploader("Upload abundance table",type=["csv","txt"])
low_abundance=st.sidebar.selectbox('Filter low abundance',options=[True, False],index=1,
help='Do you want to filter low abundance (<5) OTUs?')
st.sidebar.title("Correlation parameters")
st.sidebar.header("Number of inferences")
n_iteractions=st.sidebar.slider(label='n_iter',min_value=2,max_value=50,step=1,value=20)
st.sidebar.header("Number of exclusions")
x_iteractions=st.sidebar.slider(label='x_iter',min_value=2,max_value=30,step=1,value=10)
st.sidebar.header("Exclusion threshold")
threshold=st.sidebar.slider(label='th',min_value=0.1,max_value=0.9,step=0.05,value=0.1)
normalization=st.sidebar.selectbox(label="Normalization type",options=['dirichlet','normalization'])
log_transform=st.sidebar.selectbox(label="Log Transformation",options=[True,False])
With_Covarianza=st.sidebar.selectbox(label="Covariance file",options=[False,True])
st.sidebar.title("P - Values")
num_simulate_data=st.sidebar.slider(label="Number of simulations",min_value=5,max_value=100,step=5,value=5)
type_pvalues=st.sidebar.text_input(label="P-value type",value="one_sided")
remove_taxa=st.sidebar.text_input(label='Column to remove',value='Taxa')
B=st.sidebar.button(label='Run estimation')
if file_input is not None and B==True:
SparCC_MN=SparCC_MicNet(n_iteractions=n_iteractions,
x_iteractions=x_iteractions,
low_abundance=low_abundance,
threshold=threshold,
normalization=normalization,
log_transform=log_transform,
save_cov_file=None if With_Covarianza==False else "sparcc/example/cov_sparcc.csv",
num_simulate_data=num_simulate_data,
perm_template="permutation_#.csv",
outpath="sparcc/example/pvals/",
type_pvalues=type_pvalues,
outfile_pvals='sparcc/example/pvals/pvals_one_sided.csv',
name_output_file="sparcc_output"
)
st.write(SparCC_MN)
st.write('-----')
with st.spinner("In progress"):
if kind_file(file_input.name):
dataframe = pd.read_table(file_input,index_col=0)
else:
dataframe = pd.read_csv(file_input,index_col=0)
st.text("Data sample")
st.dataframe(dataframe.head())
if remove_taxa in dataframe.columns:
dataframe=dataframe.drop(columns=[remove_taxa])
SparCC_MN.run_all(data_input=dataframe)
st.info("Correlation estimation has finished")
DF_SparCC=pd.read_csv(Path(SparCC_MN.save_corr_file).resolve(),index_col=0)
DF_PValues=pd.read_csv(Path(SparCC_MN.outfile_pvals).resolve(),index_col=0)
assert DF_SparCC.shape==DF_PValues.shape , "Error with SparCC Output and Pvalues"
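# keep only correlations with a significant p-value (< 0.05); non-significant entries become NaN and are zero-filled below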
DF_Output=DF_SparCC[DF_PValues<0.05]
del DF_SparCC,DF_PValues
DF_Output.index=SparCC_MN._Index_col
DF_Output.columns=SparCC_MN._Index_col
#Fill NaN with zeros
DF_Output = DF_Output.fillna(0)
csv = convert_df(DF_Output)
st.download_button(label="Download correlation file",
data=csv,
file_name='SparCC_Output.csv',help='Downloads the correlation file')
#clean files
clean_previous_file(SparCC_MN.save_corr_file)
clean_previous_file(SparCC_MN.outfile_pvals)
clean_previous_file(name_file='temp_sample_output.csv')
def dashboar_app():
st.sidebar.header("Interactive Visualizer")
#Parameters
file_input=st.sidebar.file_uploader(label='Input file',type=['txt','csv'],
help='Upload the file to process')
taxa=st.sidebar.selectbox('Include Taxa',options=[True, False],index=1,
help='Does your file includes a column indicating taxa?')
abudance_filter=st.sidebar.selectbox('Filter low abundance',options=[True, False],index=1,
help='Do you want to filter low abundance (<5) OTUs?')
st.sidebar.markdown('---')
st.sidebar.header('UMAP parameters')
n_neighbors=st.sidebar.slider(label='n_neighbors',min_value=5,max_value=50,step=1,
value=15,help='Check UMAP documentation')
min_dist=st.sidebar.slider(label='min_dist',min_value=0.0,max_value=0.99,step=0.1,
value=0.1,help='Check UMAP documentation')
n_components=st.sidebar.slider(label='n_components',min_value=2,max_value=3,step=1,
value=2,help='Check UMAP documentation')
metric_umap=st.sidebar.selectbox('Select metric',options=METRIC,index=6,
help='Check UMAP documentation')
st.sidebar.markdown('---')
st.sidebar.header('HDBSCAN parameters')
metric_hdb=st.sidebar.selectbox('Select metric',options=METRIC_HDB,index=3,
help='Check HDBSCAN documentation for more information')
min_cluster_size=st.sidebar.slider(label='min_cluster_size',min_value=5,max_value=100,step=1,value=15,
help='Check HDBSCAN documentation for more information')
min_sample=st.sidebar.slider(label='min_sample',min_value=1,max_value=60,step=1,value=5,
help='Check HDBSCAN documentation for more information')
B=st.sidebar.button(label='Run estimation')
embedding_outliers=Embedding_Output(n_neighbors=n_neighbors,min_dist=min_dist,
n_components=n_components,metric_umap=metric_umap,metric_hdb=metric_hdb,min_cluster_size=min_cluster_size,min_sample=min_sample,output=True)
if file_input is not None and B==True:
if kind_file(file_input.name):
dataframe = pd.read_table(file_input)
else:
dataframe = pd.read_csv(file_input)
st.info("Data sample")
st.dataframe(dataframe.head())
if taxa:
X=dataframe.iloc[:,2:].copy()
X=X.astype('float').copy()
indx, X=filter_otus(X, abudance_filter)
Text=dataframe.iloc[indx,:2].copy()
Taxa=dataframe.iloc[indx,1].str.split(';').str.get(0)+'-'+\
dataframe.iloc[indx,1].str.split(';').str.get(1)+'-'+\
dataframe.iloc[indx,1].str.split(';').str.get(5)
TOOLTIPS=[("Name", "@Name"),("Taxa","@Taxa")]
else:
X=dataframe.iloc[:,1:].copy()
indx, X=filter_otus(X,abudance_filter)
Text=dataframe.iloc[indx,:1].copy()
X=X.astype('float').copy()
TOOLTIPS=[("Name", "@Name")]
with st.spinner("In progress"):
st.info("Embedding plot")
embedding_,o,l=embedding_outliers.fit(X)
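# the lines below project the embedding onto the unit disk (hyperboloid-to-Poincare-disk mapping,
# assuming the embedder returns hyperboloid coordinates) so every point falls inside the reference circle drawn later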
x = embedding_[:, 0]
y = embedding_[:, 1]
z = np.sqrt(1 + np.sum(embedding_**2, axis=1))
disk_x = x / (1 + z)
disk_y = y / (1 + z)
colors = ["#%02x%02x%02x" % (int(r), int(g), int(b)) \
for r, g, b, _ in 255*mpl.cm.viridis(mpl.colors.Normalize()(l))]
colors2 = [(int(r), int(g), int(b)) \
for r, g, b, _ in 255*mpl.cm.viridis(mpl.colors.Normalize()(l))]
tempd = dict(zip(l, colors2))
TOOLS="hover,crosshair,pan,wheel_zoom,zoom_in,zoom_out,box_zoom,undo,redo,reset,tap,save,box_select,poly_select,lasso_select,"
if taxa:
dataE=dict(x=disk_x.tolist(),y=disk_y.tolist(),Color=colors,Name=Text.iloc[:,0].tolist(),Taxa=Taxa.tolist())
else:
st.write(Text.head())
dataE=dict(x=disk_x.tolist(),y=disk_y.tolist(),Color=colors,Name=Text.iloc[:,0].tolist())
S=ColumnDataSource(dataE)
p = figure(title="Embedding", x_axis_label='x',y_axis_label='y', output_backend = "svg",
x_range=[-1,1],y_range=[-1,1],width=800, height=800,tools=TOOLS,tooltips=TOOLTIPS)
p.circle(x=0.0,y=0.0,fill_alpha=0.1,line_color='black',size=20,radius=1,fill_color=None,line_width=2,muted=False)
p.scatter(x='x',y='y',fill_color='Color', fill_alpha=0.3,line_color='black',radius=0.03,source=S)
p.hover.point_policy="none"
st.bokeh_chart(figure=p)
st.markdown("---")
st.markdown(f"""
## Description:
There is a total of {len(disk_x)} records in the file, of which {sum([otu == -1 for otu in l])} were considered noise (flagged as -1 in the output file), giving:
* Number of clusters: {len(set(l))-1}
* Number of outliers: {np.sum(o)}""")
#if name_file is not None:
name_file='Output_UMAP_HDBSCAN.csv'
DF=pd.DataFrame()
if taxa:
DF['Taxa']= Text.iloc[:,1]
DF['Outliers']=o
DF['Cluster']=l
csv = convert_df(DF)
st.download_button(
label="Download file",
data=csv,
file_name=name_file,
mime='text/csv',help='This file contains \
cluster belonging and outlier information')
def network_app():
st.title('Network analysis')
file_input=st.sidebar.file_uploader(label='Upload SparCC output file',type=['csv'],
help="If you don't have this file, please calculate it at SparCC section")
file_input2=st.sidebar.file_uploader(label='Upload HDBSCAN output file',type=['csv'],
help="If you don't have this file, please calculate it at UMAP/HDBSCAN section")
layout_kind=st.sidebar.selectbox(label='Plot layout',options=['Circular','Spring'],
help='For more information check layout in networkx')
# KindP=st.sidebar.selectbox(label='Node color',options=['HDBSCAN','Communities'])
B=st.sidebar.button(label='Run estimation')
if file_input is not None and file_input2 is not None and B==True:
sparcc_corr = pd.read_csv(file_input,header=0,index_col=0)
import pandas as pd
import json
import paramiko
from scp import SCPClient
"""
Function that fetches all the measurements from the controller back to MARC.
Args:
number_agents: The number of initiated 5G-EmPOWER agents
number_users: The number of users per each initiated 5G-EmPOWER agent
cpu_utilization_data: The data with respect to the cpu utilization measurements
mem_utilization_data: The data with respect to the memory consumption measurements
round_no: The run for which the measurements were performed
"""
def utilization_report(number_agents, number_users, cpu_utilization_data, mem_utilization_data, round_no):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.load_system_host_keys()
ssh.connect(hostname="controller's ip", port=22, username="controller's pc username",
password="<PASSWORD>")
# Fetch the measurements started at the controller for memory, cpu and packet utilization
with SCPClient(ssh.get_transport()) as scp:
scp.get('~/marc/measurement_tools/cpu_utilization', 'reports/5gempower_cpu_utilization')
scp.get('~/marc/measurement_tools/mem_utilization', 'reports/5gempower_mem_utilization')
scp.get('~/marc/measurement_tools/packet_rate_tx', '5gempower_measurements/packet_rate_tx/'
'agent-{}-users-{}-{}'.format(number_agents, number_users,
round_no))
scp.get('~/marc/measurement_tools/packet_rate_rx', '5gempower_measurements/packet_rate_rx/'
'agent-{}-users-{}-{}'.format(number_agents, number_users,
round_no))
cpu_utilization = list(line.rstrip() for line in open('reports/5gempower_cpu_utilization'))
mem_utilization = list(line.rstrip() for line in open('reports/5gempower_mem_utilization'))
cpu_df = pd.DataFrame(cpu_utilization)
mem_df = pd.DataFrame(mem_utilization)
|
#!/usr/bin/env python3.6
"""This module describes functions for analysis of the SNSS Dataset"""
import os
import pandas as pd
from sas7bdat import SAS7BDAT
import numpy as np
import subprocess
from datetime import datetime, date
from csv import DictReader
from shutil import rmtree
from json import load as jsonLoad
import functools
import itertools
from colour import Color
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.patches as patches
import seaborn as sb
import textwrap as txtwrp
import ast
import imageio as imgio
import tqdm
import pickle
import scipy.stats as stats
import statsmodels.stats.api as sms
import scipy.interpolate as interpolate
from statsmodels.stats.weightstats import CompareMeans, DescrStatsW
from statsmodels.discrete.discrete_model import Logit
from statsmodels.tools.tools import add_constant
from sklearn import preprocessing, decomposition, manifold
from sklearn.metrics import confusion_matrix, \
accuracy_score, roc_auc_score, roc_curve, \
classification_report, precision_score, recall_score, explained_variance_score, r2_score, f1_score
from scipy.stats import logistic
from scipy.optimize import curve_fit
import pydot
from tensorflow.keras.metrics import top_k_categorical_accuracy
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Dropout, AlphaDropout, LeakyReLU
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import CSVLogger, TensorBoard, Callback, EarlyStopping, ModelCheckpoint
from tensorflow.keras.backend import clear_session
import tensorflow.compat as tfCompat
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "Apache-2.0"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL> <EMAIL>"
""" Short title
Description
Args:
arg1: arg1 Description
Returns:
output1: output1 description.
Raises:
exception1: exception circumstances.
"""
def loadJSON(fname):
# Load configuration
f = open(fname) # Open config file...
cfg = jsonLoad(f) # Load data...
f.close() # Close config file...
return cfg
def moduleInit():
pd.options.display.max_columns = None
pd.options.display.max_rows = 20
tfCompat.v1.disable_eager_execution()
def rmws(strList):
stripList = []
for s in strList:
stripList.append(s.replace(" ", ""))
return stripList
def timeAppend(varList, T):
timeVarList = []
for v in varList:
timeVarList.append(T + '_' + v)
return timeVarList
def autoscale(x):
return (x-np.min(x))/np.max(x)
def normalise(x):
return (x-np.mean(x))/np.std(x)
def import_SNSS(usr, pwd, local_file=0):
""" Mount UoE CMVM smb and import SNSS as dataframe.
Note you must have access permissions to the specific share.
Keyword arguments:
usr = Edinburgh University matriculation number
pwd = <PASSWORD>
Location of data is specified in a JSON config file not included.
The SNSS dataset includes confidential patient information and must be
handled according to Caldicott principles.
"""
cfg = loadJSON("config.json")
if local_file:
print('Importing local data file...')
# Open and read SNSS data file
fp = '../../../../../Volumes/mount/SNSSFull.sas7bdat'
f = SAS7BDAT(fp)
rawDf = f.to_data_frame()
print('Dataframe loaded!')
else:
cmd = "mount_smbfs"
mountCmd = cmd+" //'"+cfg['dom']+";"+usr+":"+pwd+"'@"+cfg['shr']+" "+cfg['mnt']
uMountCmd = 'umount raw_data/mount/'
# Send smb mount command..
print('Mounting datashare...')
smbCall = subprocess.call(mountCmd, shell=True)
# Open and read SNSS data file
f = SAS7BDAT(cfg['fpath'])
print('Converting sas7bdat file to pd.dataframe...')
rawDf = f.to_data_frame()
print('Conversion completed! Closing file...')
f.close()
print('Attempting Unmount..')
try:
smbCall = subprocess.call(uMountCmd, shell=True)
print('dataShare Unmounted Successfully!')
except(OSError, EOFError):
print('Unmount failed...')
return rawDf
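# Hedged usage example (credentials are placeholders, not real values):
# rawDf = import_SNSS(usr='s1234567', pwd='********', local_file=0)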
def SNSSNullity(raw):
""" Assess nullity of raw data import
Takes the raw imported dataset, ensures index integrity, assigns new binary
variables for follow up at each study timepoint and computes attrition numbers
and ratios for each.
Args:
raw: Pandas DataFrame object from SAS7BDAT file.
Returns:
raw: The validated raw dataframe.
retentionTable: A pandas dataframe of counts for use in scripts if required.
Raises:
NONE
"""
# Assign nPatid as index variable.
raw = raw.set_index('nPatid', verify_integrity=True)
# Convert diagnostic nullity into binary variable in dataframe.
raw['All'] = raw.T0_PatID.notna()
raw['T1_HCData'] = raw.T1_HealthChange.notna()
raw['T2_HCData'] = raw.T2_HealthChange.notna()
raw['T1and2_HCData'] = (raw.T2_HealthChange.notna()) & (raw.T1_HealthChange.notna())
# Quantify diagnostic nullity and export
T = []
FULabels = ['T1_HCData', 'T2_HCData', 'T1and2_HCData']
for FU in FULabels:
T.append(raw.groupby(FU)['ExpGroups'].agg([('Total', 'count'),
('Label', lambda x:
tuple(np.unique(x[~np.isnan(x)],
return_counts=True)[0])),
('N(i)', lambda x:
tuple(np.unique(x[~np.isnan(x)],
return_counts=True)[1])),
('%', lambda x:
tuple((np.unique(x[~np.isnan(x)],
return_counts=True)[1]/sum(~np.isnan(x))*100).round(2)))]))
retentionTable = pd.concat(T, keys=FULabels, axis=0)
retentionTable.index = retentionTable.index.rename(['', 'FUDataAvailable'])
retentionTable.to_csv('output/0_SNSS_retention.tsv', sep='\t')
return raw, retentionTable
def SNSSCompoundVariables(df):
"""Produce variable compund measures e.g. SF12, HADS etc.
Adds the specified custom variables normally products or sums of other Variables
or binarisation etc to the provided dataframe. This function also undertakes
SIMD quintile mapping to patient postcodes.
Args:
df: Pandas dataframe.
Returns:
df: The dataframe with new variables added..
Raises:
KeyError, ValueError: If errors in postcode mapping.
"""
# Deactivate assignment warning which slows down SIMD processing.
pd.options.mode.chained_assignment = None
# Declare variable groups
varGroups = {'PHQ13': ['StomachPain', 'BackPain', 'Paininarmslegsjoints',
'Headaches', 'Chestpain', 'Dizziness',
'FaintingSpells', 'HeartPoundingorRacing', 'ShortnessofBreath',
'Constipation', 'NauseaorGas', 'Tired', 'Sleeping'],
'NeuroSymptoms': ['Lackofcoordination', 'MemorConcentration', 'LossofSensation',
'LossofVision', 'LossofHearing', 'Paralysisorweakness',
'DoubleorBlurredVision', 'DifficultySwallowing',
'DifficultySpeaking', 'SeizureorFit',
'AnxietyattackorPanicAttack', 'Littleinterestorpleasure',
'Feelingdownorhopeless', 'Nervesorfeelinganxious',
'Worryingalot'],
'IllnessWorry': ['Wworry', 'Wseriousworry', 'Wattention'],
'Satisfaction': ['Sat1', 'Sat2', 'Sat3', 'Sat4', 'Sat5', 'Sat6', 'Sat7', 'Sat8'],
'other': ['LossofHearing', 'Littleinterestorpleasure', 'Feelingdownorhopeless',
'Nervesorfeelinganxious', 'Worryingalot', 'AnxietyattackorPanicAttack']}
# Time specify certain groups into useful keysets.
T0IllnessWorryKeys = timeAppend(varGroups['IllnessWorry'], 'T0')
T0PHQ13Keys = timeAppend(varGroups['PHQ13'], 'T0')
T1PHQ13Keys = timeAppend(varGroups['PHQ13'], 'T1')
T2PHQ13Keys = timeAppend(varGroups['PHQ13'], 'T2')
T0PHQNeuro28Keys = timeAppend(varGroups['PHQ13'] + varGroups['NeuroSymptoms'], 'T0')
T1PHQNeuro28Keys = timeAppend(varGroups['PHQ13'] + varGroups['NeuroSymptoms'], 'T1')
T2PHQNeuro28Keys = timeAppend(varGroups['PHQ13'] + varGroups['NeuroSymptoms'], 'T2')
T0SatisfactionKeys = timeAppend(varGroups['Satisfaction'], 'T0')
T1SatisfactionKeys = timeAppend(varGroups['Satisfaction'], 'T1')
# Criterion used to define successful follow-up: any T1 satisfaction data available.
# df['T1_Satisfaction_Bool'] = df['T1_Satisfaction_Total'].notna() # Strict
df['T1_Satisfaction_Bool'] = df[T1SatisfactionKeys].notna().any(axis=1) # Loose
# Add binarised ExpGroups.
df['ExpGroups_bin'] = (df['ExpGroups']-2)*-1
# Add binarised gender.
df['Gender_bin'] = df['Gender']-1
# Adding summative compound measures
df['T0_PHQNeuro28_Total'] = df[T0PHQNeuro28Keys].sum(axis=1, skipna=False)
df['T1_PHQNeuro28_Total'] = df[T1PHQNeuro28Keys].sum(axis=1, skipna=False)
df['T2_PHQNeuro28_Total'] = df[T2PHQNeuro28Keys].sum(axis=1, skipna=False)
df['T0_PHQ13_Total'] = df[T0PHQ13Keys].sum(axis=1, skipna=False)
df['T1_PHQ13_Total'] = df[T1PHQ13Keys].sum(axis=1, skipna=False)
df['T2_PHQ13_Total'] = df[T2PHQ13Keys].sum(axis=1, skipna=False)
df['T0_IllnessWorry'] = df[T0IllnessWorryKeys].sum(axis=1, skipna=False)
df['T0_Satisfaction_Total'] = df[T0SatisfactionKeys].sum(axis=1, skipna=False)
df['T1_Satisfaction_Total'] = df[T1SatisfactionKeys].sum(axis=1, skipna=False)
df['T2_Satisfaction_Total'] = df[T1SatisfactionKeys].sum(axis=1, skipna=False)
# Adding boolean compound measures
df['T0_NegExpectation'] = (df['T0_IPQ1'] > 3).astype(int) # Define "Negative Expectation"
df['T0_NegExpectation'].loc[df['T0_IPQ1'].isna()] = np.nan # Boolean operator treats NaN as 0 so replace with NaNs
df['T0_PsychAttribution'] = ((df['T0_C7'] > 3) | (df['T0_C8'] > 3)).astype(int)
df['T0_PsychAttribution'].loc[(df['T0_C7'].isna()) | (df['T0_C8'].isna())] = np.nan
df['T0_LackofPsychAttribution'] = (df['T0_PsychAttribution']-1)*-1
for S in ['T0_Sat1', 'T0_Sat2', 'T0_Sat3',
'T0_Sat4', 'T0_Sat5', 'T0_Sat6', 'T0_Sat7', 'T0_Sat8']:
satNAIdx = df[S].isna()
df[S + '_Poor_Bin'] = df[S] <= 2 # Binarise Satisfaction into Poor/Fair or not
df[S + '_Poor_Bin'].loc[satNAIdx] = np.nan
# Add binned measures
df['T0_PHQ13_Binned'] = pd.cut(df['T0_PHQ13_Total'], [0, 2.1, 5.1, 8.1, 13.1],
labels=['0-2', '3-5', '6-8', '9-13'],
right=True, include_lowest=True)
df['T0_PHQ13_BinInt'] = pd.cut(df['T0_PHQ13_Total'], [0, 2.1, 5.1, 8.1, 13.1],
labels=False,
right=True, include_lowest=True)
df['T0_PHQNeuro28_Binned'] = pd.cut(df['T0_PHQNeuro28_Total'], [0, 5.1, 8.1, 13.1, 27.1],
labels=['0-5', '6-8', '9-13', '14-27'],
right=True, include_lowest=True)
df['T0_PHQNeuro28_BinInt'] = pd.cut(df['T0_PHQNeuro28_Total'], [0, 5.1, 8.1, 13.1, 27.1],
labels=False,
right=True, include_lowest=True)
df['AgeBins'] = pd.cut(df['Age'], [0, 36, 46, 56, max(df['Age'])+0.1],
labels=['<=35', '36-45', '46-55', '>=56'],
right=True, include_lowest=True)
df['AgeBinInt'] = pd.cut(df['Age'], [0, 36, 46, 56, max(df['Age'])+0.1],
labels=False,
right=True, include_lowest=True)
df['T0_HADS_Binned'] = pd.cut(df['T0_HADS'], [0, 7.1, 14.1, 21.1, max(df['T0_HADS'])+0.1],
labels=['0-7', '8-14', '15-21', '>=22'],
right=True, include_lowest=True)
df['T0_HADS_BinInt'] = pd.cut(df['T0_HADS'], [0, 7.1, 14.1, 21.1, max(df['T0_HADS'])+0.1],
labels=False,
right=True, include_lowest=True)
df['T0_SF12_PF_Binned'] = pd.cut(df['T0_SF12_PF'], [-0.1, 24.9, 49.9, 74.9, 99.9, 100.1],
labels=['0', '25', '50', '75', '100'],
right=True, include_lowest=True)
df['T0_SF12_PF_BinInt'] = pd.cut(df['T0_SF12_PF'], [-0.1, 24.9, 49.9, 74.9, 99.9, 100.1],
labels=False,
right=True, include_lowest=True)
# Add binarised outcomes
poorOutcomeDict = {0: 1, 1: 1, 2: 1, 3: 0, 4: 0}
strictPoorOutcomeDict = {0: 1, 1: 1, 2: 0, 3: 0, 4: 0}
ternaryPoorOutcomeDict = {0: 2, 1: 2, 2: 1, 3: 0, 4: 0}
df['T1_poorCGI'] = df['T1_HealthChange'].replace(poorOutcomeDict)
df['T1_poorIPS'] = df['T1_SymptomsChange'].replace(poorOutcomeDict)
df['T2_poorCGI'] = df['T2_HealthChange'].replace(poorOutcomeDict)
df['T2_poorIPS'] = df['T2_SymptomsChange'].replace(poorOutcomeDict)
df['T2_strictPoorCGI'] = df['T2_HealthChange'].replace(strictPoorOutcomeDict)
df['T2_strictPoorIPS'] = df['T2_SymptomsChange'].replace(strictPoorOutcomeDict)
df['T2_ternaryCGI'] = df['T2_HealthChange'].replace(ternaryPoorOutcomeDict)
df['T2_ternaryIPS'] = df['T2_SymptomsChange'].replace(ternaryPoorOutcomeDict)
# Add relative secondary outcomes
df['T0T1_SF12_NormedMCS'] = df['T1_SF12_NormedMCS'] - df['T0_SF12_NormedMCS']
df['T1T2_SF12_NormedMCS'] = df['T2_SF12_NormedMCS'] - df['T1_SF12_NormedMCS']
df['T0T2_SF12_NormedMCS'] = df['T2_SF12_NormedMCS'] - df['T0_SF12_NormedMCS']
df['T0T2_SF12_binaryNormedMCS'] = (df['T0T2_SF12_NormedMCS'] < 0).astype(int)
df['T0T2_SF12_binaryNormedMCS'].loc[df['T0T2_SF12_NormedMCS'].isna()] = np.nan
df['T0T1_SF12_NormedPCS'] = df['T1_SF12_NormedPCS'] - df['T0_SF12_NormedPCS']
df['T1T2_SF12_NormedPCS'] = df['T2_SF12_NormedPCS'] - df['T1_SF12_NormedPCS']
df['T0T2_SF12_NormedPCS'] = df['T2_SF12_NormedPCS'] - df['T0_SF12_NormedPCS']
df['T0T2_SF12_binaryNormedPCS'] = (df['T0T2_SF12_NormedPCS'] < 0).astype(int)
df['T0T2_SF12_binaryNormedPCS'].loc[df['T0T2_SF12_NormedPCS'].isna()] = np.nan
df['T0T1_HADS'] = df['T1_HADS'] - df['T0_HADS']
df['T1T2_HADS'] = df['T2_HADS'] - df['T1_HADS']
df['T0T2_HADS'] = df['T2_HADS'] - df['T0_HADS']
df['T0T2_binaryHADS'] = (df['T0T2_HADS'] < 0).astype(int)
df['T0T2_binaryHADS'].loc[df['T0T2_HADS'].isna()] = np.nan
df['T0T1_PHQNeuro28_Total'] = df['T1_PHQNeuro28_Total'] - df['T0_PHQNeuro28_Total']
df['T1T2_PHQNeuro28_Total'] = df['T2_PHQNeuro28_Total'] - df['T1_PHQNeuro28_Total']
df['T0T2_PHQNeuro28_Total'] = df['T2_PHQNeuro28_Total'] - df['T0_PHQNeuro28_Total']
df['T0T2_binaryPHQNeuro28_Total'] = (df['T0T2_PHQNeuro28_Total'] < 0).astype(int)
df['T0T2_binaryPHQNeuro28_Total'].loc[df['T0T2_PHQNeuro28_Total'].isna()] = np.nan
print('SIMD 2004 to 2006 Postcode conversion...')
SIMD04 = pd.read_csv('raw_data/SIMDData/postcode_2006_2_simd2004.csv', index_col=0)
nullIdx = SIMD04['simd2004rank'].str.contains(' ')
domains = ['inc', 'emp', 'hlth', 'educ', 'access', 'house']
for d in domains:
SIMD04['simd2004_' + d + '_quintile'] = 5-pd.qcut(SIMD04['simd2004_' + d + '_rank']
[~nullIdx].astype(float), 5,
retbins=False, labels=False)
SIMDDict = dict(zip([pc.replace(' ', '') for pc in SIMD04.sort_index().index.values.tolist()],
SIMD04[['simd2004_sc_quintile',
'simd2004score',
'simd2004_inc_score',
'simd2004_emp_score',
'simd2004_hlth_score',
'simd2004_educ_score',
'simd2004_access_score',
'simd2004_house_score',
'simd2004_inc_quintile',
'simd2004_emp_quintile',
'simd2004_hlth_quintile',
'simd2004_educ_quintile',
'simd2004_access_quintile',
'simd2004_house_quintile']].values))
# Initialising variables as NaN arrays
df['T0_SIMD04'] = np.nan
df['T0_SIMD04_score'] = np.nan
for d in domains:
df['T0_SIMD04_' + d + '_score'] = np.nan
df['T0_SIMD04_' + d + '_quintile'] = np.nan
print('Constructed SIMD quintiles and Initialised Panda Variables')
print('Iterating through postcodes')
i = 0
for p in df['Postcode']:
if (p == '') | pd.isnull(p):
df['Postcode'].iloc[i] = np.nan
df['T0_SIMD04'].iloc[i] = np.nan
i = i + 1
# print('No Postcode Data')
else:
try:
p = p.replace(' ', '')
# print(p)
df['T0_SIMD04'].iloc[i] = int(SIMDDict[p][0])
df['T0_SIMD04_score'].iloc[i] = float(SIMDDict[p][1])
dd = 2
for d in domains:
df['T0_SIMD04_' + d + '_score'].iloc[i] = float(SIMDDict[p][dd])
df['T0_SIMD04_' + d + '_quintile'].iloc[i] = int(SIMDDict[p][dd+len(domains)])
dd += 1
except (KeyError, ValueError) as err:
# print('%s: Error!' % (p))
df['T0_SIMD04'].iloc[i] = np.nan
# print('No SIMD04 postcode map')
i = i + 1
# Add most deprived binarisation
df['T0_SIMD04_bin'] = df['T0_SIMD04'] >= 4
# Add interaction variables
df['Diagnosis*T0_IncapacityBenefitorDLA'] = df['Diagnosis']*df['T0_IncapacityBenefitorDLA']
df['ExpGroups*T0_IncapacityBenefitorDLA'] = df['ExpGroups']*df['T0_IncapacityBenefitorDLA']
df['ExpGroups_bin*T0_IncapacityBenefitorDLA'] = df['ExpGroups_bin']*df['T0_IncapacityBenefitorDLA']
df['ExpGroups_bin*T0_LackofPsychAttribution'] = df['ExpGroups_bin']*df['T0_LackofPsychAttribution']
df['ExpGroups_bin*T0_SIMD04_bin'] = df['ExpGroups_bin']*df['T0_SIMD04_bin']
df['ExpGroups_bin*T0_SF12_PF_BinInt'] = df['ExpGroups_bin']*df['T0_SF12_PF_BinInt']
df['ExpGroups_bin*T0_NegExpectation'] = df['ExpGroups_bin']*df['T0_NegExpectation']
df['ExpGroups_bin*Gender_bin'] = df['ExpGroups_bin']*df['Gender_bin']
print('Complete!')
return df
def cohen_d(x, y):
stats = {}
nx = len(x); meanx = np.mean(x); stdx = np.std(x, ddof=1); semx = stdx/np.sqrt(nx);
ny = len(y); meany = np.mean(y); stdy = np.std(y, ddof=1); semy = stdy/np.sqrt(ny);
meancix = [meanx+(1.96*i*semx) for i in [-1, 1]]
meanciy = [meany+(1.96*i*semy) for i in [-1, 1]]
dof = nx + ny - 2
d = (meanx - meany) / np.sqrt(((nx-1)*stdx ** 2 +
(ny-1)*stdy ** 2) / dof)
vard = (((nx+ny)/(nx*ny))+((d**2)/(2*(nx+ny-2))))*((nx+ny)/(nx+ny-2))
sed = np.sqrt(vard)
cid = [d+(1.96*i*sed) for i in [-1, 1]]
stats['d'] = d
stats['cid'] = cid
stats['mean'] = [meanx, meany]
stats['std'] = [stdx, stdy]
stats['sem'] = [semx, semy]
return d, stats
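# Hedged example (column and group labels as used elsewhere in this module):
# d, dStats = cohen_d(df['T0_HADS'][df.ExpGroups == 1].dropna(),
#                     df['T0_HADS'][df.ExpGroups == 2].dropna())
# dStats['cid'] holds the 95% confidence interval for d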
def cramersV(nrows, ncols, chisquared, correct_bias=True):
nobs = nrows*ncols
if correct_bias is True:
phi = 0
else:
phi = chisquared/nobs
V = np.sqrt((phi**2)/(min(nrows-1, ncols-1)))
return V, phi
def partitionData(df, partitionRatio=0.7):
""" Partition data into training and evaluation sets
Takes a dataframe and returns two arrays with the proportion to use for
training declared as the partition ratio and the other as evaluation of
(1-partitionRatio) size.
Args:
df: Pandas DataFrame to be partitioned.
partitionRatio: Ratio of the data to be used for training.
Returns:
trainIdx: The indices of data asssigned to training set.
evalIdx: The indices of data asssigned to eval set.
Raises:
NONE
"""
randIdx = np.linspace(0, df.shape[0]-1, df.shape[0]).astype(int)
np.random.shuffle(randIdx)
trainIdx = randIdx[0:round(df.shape[0]*partitionRatio)]
evalIdx = randIdx[round(df.shape[0]*(partitionRatio)):len(randIdx)]
return trainIdx, evalIdx
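# Hedged usage sketch (assumes `df` is the SNSS dataframe already in memory):
# trainIdx, evalIdx = partitionData(df, partitionRatio=0.7)
# trainDf, evalDf = df.iloc[trainIdx], df.iloc[evalIdx]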
def FollowUpandBaselineComparison(df):
""" A group-wise and follow-up wise comparison of declared Vars
Takes a pandas dataframe and, for the declared variables of interest below,
compares between groups and between those lost to follow-up and those retained.
Args:
df: Pandas DataFrame to be assessed.
Returns:
NONE: All relevant tables are exported to CSV in the function.
Raises:
NONE
"""
def sigTest(G, varList, vType, df):
sigDict = {}
if vType == 'cat':
for v in varList:
T = pd.crosstab(index=df[G], columns=df[v],
margins=False, normalize=False)
chi2Stat, chi2p, _, _ = stats.chi2_contingency(T, correction=True)
cats = np.unique(df[v].dropna())
if len(cats) == 2:
LOR = np.log((T.iloc[0,0]*T.iloc[1,1])/(T.iloc[1,0]*T.iloc[0,1]))
SE = np.sqrt((1/T.iloc[0,0])+(1/T.iloc[1,0])+(1/T.iloc[0,1])+(1/T.iloc[1,1]))
CI = [np.exp(LOR-1.96*SE), np.exp(LOR+1.96*SE)]
OR = np.exp(LOR)
else:
OR = np.nan
CI = np.nan
sigDict[v] = [chi2p, chi2Stat, OR, CI]
elif vType == 'cont':
for v in varList:
if G == 'ExpGroups':
Gi = [1, 2]
elif G == 'T2_HCData':
Gi = [0, 1]
elif G == 'T2_poorCGI':
Gi = [0, 1]
cm = CompareMeans.from_data(df[v][(df[G] == Gi[0]) & (df[v].notna())],
df[v][(df[G] == Gi[1]) & (df[v].notna())])
tStat, tp, _ = cm.ttest_ind()
cohend, cohenstat = cohen_d(cm.d1.data, cm.d2.data)
sigDict[v] = [tp, tStat, cohend, cohenstat['cid']]
sigT = pd.DataFrame.from_dict(sigDict, orient='index', columns=['p', 'stat', 'effect', 'effectCI'])
return sigT
def varTables(G, varList, vType, df):
if vType == 'cont':
T = df.groupby(G)[varList].agg([('N', 'count'), ('Mean', 'mean'),
('SD', 'std')])
elif vType == 'cat':
T = df.groupby(G)[varList].\
agg([('N', 'count'),
('i', lambda x:
tuple(np.unique(x[~np.isnan(x)],
return_counts=True)[0])),
('N(i)', lambda x:
tuple(np.unique(x[~np.isnan(x)],
return_counts=True)[1])),
('%', lambda x:
tuple(np.unique(x[~np.isnan(x)],
return_counts=True)[1]/sum(~np.isnan(x))))])
return T
contVars = ['Age', 'T0_PHQ13_Total', 'T0_PHQNeuro28_Total', 'T0_HADS', 'T0_IllnessWorry', 'T0_SF12_PF']
catVars = ['AgeBins', 'Gender', 'ExpGroups', 'T0_PHQ13_Binned', 'T0_SF12_PF', 'T0_HADS_Binned',
'T0_NegExpectation', 'T0_PsychAttribution', 'T0_IllnessWorry', 'T0_IncapacityBenefitorDLA', 'T0_SIMD04_bin',
'ExpGroups_bin*T0_IncapacityBenefitorDLA', 'ExpGroups_bin*T0_LackofPsychAttribution','T0_Inemployment']
groupVar = 'T2_HCData'
catT = varTables(G=groupVar, varList=catVars, vType='cat', df=df)
catStats = sigTest(G=groupVar, varList=catVars, vType='cat', df=df)
catT.transpose().to_csv('output/0_FollowUpCategoricalTable.tsv', sep='\t')
catStats.transpose().to_csv('output/0_FollowUpCategoricalStats.tsv', sep='\t')
contT = varTables(G=groupVar, varList=contVars, vType='cont', df=df)
contStats = sigTest(G=groupVar, varList=contVars, vType='cont', df=df)
contT.transpose().to_csv('output/0_FollowUpContinuousTable.tsv', sep='\t')
contStats.transpose().to_csv('output/0_FollowUpContinuousStats.tsv', sep='\t')
groupVar = 'ExpGroups'
catT = varTables(G=groupVar, varList=catVars, vType='cat', df=df[df.T2_HCData == 1])
catStats = sigTest(G=groupVar, varList=catVars, vType='cat', df=df[df.T2_HCData == 1])
catT.transpose().to_csv('output/0_BaselineCategoricalTable.tsv', sep='\t')
catStats.transpose().to_csv('output/0_BaselineCategoricalStats.tsv', sep='\t')
contT = varTables(G=groupVar, varList=contVars, vType='cont', df=df[df.T2_HCData == 1])
contStats = sigTest(G=groupVar, varList=contVars, vType='cont', df=df[df.T2_HCData == 1])
contT.transpose().to_csv('output/0_BaselineContinuousTable.tsv', sep='\t')
contStats.transpose().to_csv('output/0_BaselineContinuousStats.tsv', sep='\t')
groupVar = 'T2_poorCGI'
catT = varTables(G=groupVar, varList=catVars, vType='cat', df=df[df.T2_HCData == 1])
catStats = sigTest(G=groupVar, varList=catVars, vType='cat', df=df[df.T2_HCData == 1])
catT.transpose().to_csv('output/0_OutcomeCategoricalTable.tsv', sep='\t')
catStats.transpose().to_csv('output/0_OutcomeCategoricalStats.tsv', sep='\t')
contT = varTables(G=groupVar, varList=contVars, vType='cont', df=df[df.T2_HCData == 1])
contStats = sigTest(G=groupVar, varList=contVars, vType='cont', df=df[df.T2_HCData == 1])
contT.transpose().to_csv('output/0_OutcomeContinuousTable.tsv', sep='\t')
contStats.transpose().to_csv('output/0_OutcomeContinuousStats.tsv', sep='\t')
return
def SNSSPrimaryOutcomeMeasures(df):
""" Compare IPS and CGI outcomes between functional groups.
This function compares CGI and IPS both in raw and pooled form between
functional groups. Outputs tables of counts and proportions of reported outcomes.
Args:
df: Pandas DataFrame to be assessed.
Returns:
NONE: All relevant tables are exported to CSV in the function.
Raises:
NONE
"""
outcomes = [['T2_HealthChange', 'T2_SymptomsChange'], ['T2_poorCGI', 'T2_poorIPS']]
outcomeTag = ['', 'Pool']
i = 0
for O in outcomes:
PrimaryOutcomeGroupT = []
PrimaryOutcomeGroupT.append(pd.crosstab(index=df.ExpGroups, columns=df[O[0]],
margins=False, normalize=False,
dropna=True))
PrimaryOutcomeGroupT.append(pd.crosstab(index=df.ExpGroups, columns=df[O[0]],
margins=False, normalize='index',
dropna=True))
PrimaryOutcomeGroupT.append(pd.crosstab(index=df.ExpGroups, columns=df[O[1]],
margins=False, normalize=False,
dropna=True))
PrimaryOutcomeGroupT.append(pd.crosstab(index=df.ExpGroups, columns=df[O[1]],
margins=False, normalize='index',
dropna=True))
PrimaryOutcomeGroupTExport = pd.concat(PrimaryOutcomeGroupT,
keys=['CGI_N', 'CGI_%',
'IPS_N', 'IPS_%'],
axis=0)
if i:
CGIchi2stat, CGIchi2p, _, _ = stats.chi2_contingency(PrimaryOutcomeGroupT[0],
correction=True)
CGIfisherOR, CGIfisherp = stats.fisher_exact(PrimaryOutcomeGroupT[0])
IPSchi2stat, IPSchi2p, _, _ = stats.chi2_contingency(PrimaryOutcomeGroupT[2],
correction=True)
IPSfisherOR, IPSfisherp = stats.fisher_exact(PrimaryOutcomeGroupT[2])
PrimaryOutcomeGroupTExport['chi2p'] = [CGIchi2p]*4 + [IPSchi2p]*4
PrimaryOutcomeGroupTExport['fisher2p'] = [CGIfisherp]*4 + [IPSfisherp]*4
PrimaryOutcomeGroupTExport.to_csv('output/1_PrimaryOutcome' + outcomeTag[i] + 'byGroup.tsv',
sep='\t')
i = i+1
return
def multi_text(ax, x, y, s, txt_params={}):
""" Matplotlib multi-line text plotting
Takes a matplotlib axes, set of strings and positions and plots.
Args:
ax: Matplotlib axes.
x: Array of x values
y: constant y value
s: Array of strings.
txt_params: Dict of text params.
Returns:
NONE: Text is plotted onto provided axes.
Raises:
NONE
"""
for i in range(len(s)):
ax.text(x[i], y, s[i], **txt_params)
def stackedBarPlot(x_var, y_vars, df, featMetaData):
""" Plots stacked bar charts as per declared variables.
    Takes variable names, a dataframe and feature metadata and plots a stacked bar
    chart with the X variable categories subdivided by the Y variables.
Args:
x_var: Names of variables on X_axis
y_vars: Names of variables with which to subdivide X variables.
df: Pandas dataframe to be used.
featMetaData: Variable meta data provided in JSON file.
Returns:
NONE: Figure is saved in function.
Raises:
NONE
"""
if not isinstance(y_vars, list):
y_vars = [y_vars]
fig_params={'num': 1,
'figsize': (6*len(y_vars), 6),
'dpi': 200,
'frameon': False}
txt_params={'fontsize': 6,
'ha': 'center',
'va': 'center'}
label_params={'fontsize': 10,
'ha': 'center',
'va': 'top'}
fig = plt.figure(**fig_params)
sp = 1
for y_var in y_vars:
data = df.dropna(subset=[y_var])
ax_params={'title': featMetaData[y_var]['label'],
'ylabel': 'Normalised Frequency',
'xlabel': y_var}
ax = fig.add_subplot(1, len(y_vars), sp, **ax_params)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
x_cats = np.unique(data[x_var])
x_cats = x_cats[~np.isnan(x_cats)]
x_var_meta = featMetaData[x_var]
y_cats = np.unique(data[y_var])
y_cats = y_cats[~np.isnan(y_cats)]
y_var_meta = featMetaData[y_var]
xMinorTicks = []
xMinorLabels = []
x = 0
bw = 0.8
y_bottom=0
for xc in x_cats:
for yc in y_cats:
y = np.nanmean(data[y_var][data[x_var] == xc] == yc)
t = str(int(round(y*100, 0)))+'%'
ax.bar(x=x, height=y, width=bw,
color=ast.literal_eval(y_var_meta['colors'][y_var_meta['values'].index(yc)]),
bottom=y_bottom)
ax.text(x, y_bottom+(y/2), t, **txt_params)
xMinorLabels.append(x_var_meta['truncvaluelabels'][x_var_meta['values'].index(xc)])
xMinorTicks.append(x)
y_bottom = y+y_bottom
y_bottom=0
x += 1
ax.set_xticks(xMinorTicks)
ax.set_xticklabels(xMinorLabels, **label_params)
sp+=1
fig.savefig('output/1_SNSSPrimaryOutcomeStackedBars.pdf', dpi=300,
format='pdf', pad_inches=0.1, bbox_inches='tight')
plt.close()
def subCatBarPlot(x_vars, x_sub_var, df, featMetaData):
""" Plots stacked bar charts as per declared variables.
Takes a matplotlib axes, set of strings and positions and plots a bar
chart with the X variables being subdivided by the X_sub variables and the
subdivisions being plotted side by side.
Args:
x_vars: Names of variables on X_axis
        x_sub_var: Name of the variable with which to subdivide the X variables.
df: Pandas dataframe to be used.
featMetaData: Variable meta data provided in JSON file.
Returns:
NONE: Figure is saved in function.
Raises:
NONE
"""
if not isinstance(x_vars, list):
x_vars = [x_vars]
print('is not list')
fig_params={'num': 1,
'figsize': (6*len(x_vars), 6),
'dpi': 200,
'frameon': False}
txt_params={'fontsize': 6,
'ha': 'center',
'va': 'bottom'}
label_params={'fontsize': 10,
'ha': 'center',
'va': 'top'}
fig = plt.figure(**fig_params)
sp = 1
for x_var in x_vars:
data = df.dropna(subset=[x_var])
ax_params={'title': featMetaData[x_var]['label'],
'ylabel': 'Normalised Frequency',
'xlabel': ''}
ax = fig.add_subplot(1, len(x_vars), sp, **ax_params)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
x_cats = np.unique(data[x_var])
x_cats = x_cats[~np.isnan(x_cats)]
x_var_meta = featMetaData[x_var]
x_sub_cats = np.unique(data[x_sub_var])
x_sub_cats = x_sub_cats[~np.isnan(x_sub_cats)]
x_sub_var_meta = featMetaData[x_sub_var]
xMinorTicks = []
xMajorTicks = []
xMinorLabels = []
xMajorLabels = []
x = 0
bw = 1
for xc in x_cats:
for xsc in x_sub_cats:
y = np.nanmean(data[x_var][data[x_sub_var] == xsc] == xc)
t = str(int(round(y*100, 0)))+'%'
ax.bar(x=x, height=y, width=bw,
color=x_sub_var_meta['colors'][x_sub_var_meta['values'].index(xsc)])
ax.text(x, y, t, **txt_params)
xMinorLabels.append(x_sub_var_meta['truncvaluelabels'][x_sub_var_meta['values'].index(xsc)])
xMinorTicks.append(x)
x += 1
xMajorLabels.append(x_var_meta['truncvaluelabels'][x_var_meta['values'].index(xc)])
xMajorTicks.append(x-1-((len(x_sub_cats)-1)/2))
x += 1
ax.set_xticks(xMinorTicks)
ax.set_xticklabels(xMinorLabels, **label_params)
multi_text(ax, xMajorTicks, ax.get_ylim()[1]*-0.1, xMajorLabels, label_params)
sp+=1
fig.savefig('output/1_SNSSPrimaryOutcomeBars.pdf', dpi=300,
format='pdf', pad_inches=0.1, bbox_inches='tight')
plt.close()
def primaryOutcomePlot(outcome, group_var, data, featMetaData, style='subCat'):
""" Plots bar charts of declared outcome and grouping var.
Takes declared variables and plots bar chart accordingly.
Args:
outcome: name of outcome variable
        group_var: Name of the grouping variable, i.e. the X variable to be used.
data: Pandas dataframe to be used.
featMetaData: Variable meta data provided in JSON file.
        style: 'subCat' (default, side-by-side bars) or 'stacked'.
Returns:
NONE: Figure is saved in respective function.
Raises:
NONE
"""
if style == 'subCat':
subCatBarPlot(outcome, group_var, data, featMetaData)
elif style == 'stacked':
stackedBarPlot(group_var, outcome, data, featMetaData)
def SNSSSecondaryOutcomeMeasures(df):
""" Plots line chart and produced table of secondary SNSS outcomes
Takes pandas dataframe and assesses between group differences over time
of secindary outcome measures including depressino scales and physical/mental
functioning.
Args:
df: Pandas dataframe
Returns:
outcomeT: Table of outcome measures grouped by functional diagnosis.
Raises:
NONE
"""
groupVar = 'ExpGroups'
SNSSVars = loadJSON('raw_data/SNSS_vars.json')
rowDict = dict(zip(SNSSVars[groupVar]['values'],
SNSSVars[groupVar]['valuelabels']))
outcomes = ['T0_SF12_NormedMCS', 'T1_SF12_NormedMCS', 'T2_SF12_NormedMCS',
'T0_SF12_NormedPCS', 'T1_SF12_NormedPCS', 'T2_SF12_NormedPCS',
'T0_PHQNeuro28_Total', 'T1_PHQNeuro28_Total', 'T2_PHQNeuro28_Total',
'T0_HADS', 'T1_HADS', 'T2_HADS',
'T0T1_SF12_NormedMCS', 'T1T2_SF12_NormedMCS',
'T0T1_SF12_NormedPCS', 'T1T2_SF12_NormedPCS',
'T0T1_PHQNeuro28_Total', 'T1T2_PHQNeuro28_Total',
'T0T1_HADS', 'T1T2_HADS']
outcomeT = df.groupby(groupVar)[outcomes].agg([('N', 'count'),
('Mean', 'mean'),
('SD', 'std'),
('CI', lambda x:
tuple(np.round(
DescrStatsW(x.dropna()).
tconfint_mean(), 2)))])
# Significance testing
for O in outcomes:
NE = (df.ExpGroups == 1) & (df[O].notna())
E = (df.ExpGroups == 2) & (df[O].notna())
cm = CompareMeans.from_data(df[O].loc[NE], df[O].loc[E])
outcomeT[O, 'tTestp'] = [cm.ttest_ind()[1]]*2
outcomeT[O, 'cohend'], _ = cohen_d(cm.d1.data, cm.d2.data)
outcomeT = outcomeT.sort_index(axis=1)
outcomeT.rename(index=rowDict).transpose().\
to_csv('output/2_SecondaryOutcomeMeasures.tsv', sep='\t')
return outcomeT
def plot_ci(ax, x, y, color, style='t'):
if style == 't':
for i in range(len(y)):
ax.plot([x[i], x[i]], [y[i][0], y[i][1]],
color=color, alpha=0.4,
marker='_', linewidth=2)
def lineTimeSeriesPlot(y_vars, groupVar, df, featMetaData):
fig_params={'num': 1,
'figsize': (6*4, 6),
'dpi': 200,
'frameon': False}
txt_params={'fontsize': 6,
'ha': 'center',
'va': 'center'}
label_params={'fontsize': 10,
'ha': 'center',
'va': 'top'}
fig = plt.figure(**fig_params)
grps = np.unique(df[groupVar])
grps = grps[~np.isnan(grps)]
groupVar_meta = featMetaData[groupVar]
sp = 1
time = [0, 3, 12]
for y_var_group in y_vars:
for y_var in y_var_group:
ax_params={'title': y_var[0],
'ylabel': 'Secondary Measure',
'xlabel': 'Time',
'xticks': [0, 3, 12],
'xticklabels': ['Baseline', '3 Months', '12 Months']}
ax = fig.add_subplot(1, 4, sp, **ax_params)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
grp_jitter = [0.1, 0.1, 0.1]
for grp in grps:
mean_array = []
ci_array = []
for T_var in y_var:
data = df.dropna(subset=[T_var])
mean_array.append(np.nanmean(data[T_var][data[groupVar] == grp]))
ci_array.append(DescrStatsW(data[T_var][data[groupVar] == grp]).tconfint_mean())
ax.plot(time, mean_array, color=groupVar_meta['colors'][groupVar_meta['values'].index(grp)],
alpha=0.9, linewidth=4)
plot_ci(ax, time, ci_array, groupVar_meta['colors'][groupVar_meta['values'].index(grp)],
't')
# ax.set_ylim([0, ax.get_ylim()[1]])
sp += 1
fig.subplots_adjust(wspace=0.3)
fig.savefig('output/2_SNSSSecondaryOutcomePlot.pdf', dpi=300,
format='pdf', pad_inches=0.1, bbox_inches='tight')
plt.close()
# color=groupVar_meta['colors'][groupVar_meta['values'].index(grp)]
def secondaryOutcomePlot(outcome, groupVar, df, featMetaData, style='line'):
if style == 'line':
lineTimeSeriesPlot(outcome, groupVar, df, featMetaData)
def SNSSSocioeconomicAssessment(df):
""" Multiple plots comparing SIMD quintile to functional diagnosis and outcome
Takes pandas dataframe and plots SIMD quintiles as per each functional Diagnosis
and primary and secondary outcomes.
Args:
df: Pandas dataframe
Returns:
NONE: All plots saved within function.
Raises:
NONE
"""
# Figure & Table 1: Are functional vs structural patients from different SIMD quintiles?
SIMDGroupT = []
SIMDGroupT.append(pd.crosstab(index=[df.ExpGroups], columns=df.T0_SIMD04,
margins=False, normalize='index',
dropna=True))
SIMDGroupT.append(pd.crosstab(index=[df.ExpGroups], columns=df.T0_SIMD04,
margins=False, normalize=False,
dropna=True))
SIMDGroupTExport = pd.concat(SIMDGroupT, keys=['N', '%'])
SIMDGroupTExport.to_csv('output/3_DeprivationGroups.tsv', sep='\t')
SIMDOutcomeT = []
SIMDOutcomeT.append(pd.crosstab(index=[df.ExpGroups, df.T2_poorCGI], columns=df.T0_SIMD04,
margins=False, normalize=False,
dropna=True))
SIMDOutcomeT.append(pd.crosstab(index=[df.ExpGroups, df.T2_poorCGI], columns=df.T0_SIMD04,
margins=False, normalize='index',
dropna=True))
SIMDOutcomeT.append(pd.crosstab(index=[df.ExpGroups, df.T2_poorIPS], columns=df.T0_SIMD04,
margins=False, normalize=False,
dropna=True))
SIMDOutcomeT.append(pd.crosstab(index=[df.ExpGroups, df.T2_poorIPS], columns=df.T0_SIMD04,
margins=False, normalize='index',
dropna=True))
SIMDOutcomeTExport = pd.concat(SIMDOutcomeT, keys=['CGI_N', 'CGI_%', 'IPS_N', 'IPS_%'])
SIMDOutcomeTExport.to_csv('output/3_DeprivationOutcomeAndGroup.tsv', sep='\t')
fig1 = plt.figure(num=1, figsize=(5, 5), dpi=200, frameon=False)
ax = fig1.add_subplot(111)
sb.distplot(df.T0_SIMD04[(df.T0_SIMD04.notna()) & (df.ExpGroups == 1)],
ax=ax, kde=False, norm_hist=True, bins=5,
kde_kws={'bw': 0.55}, hist_kws={'rwidth': 0.8},
color='xkcd:blood red')
sb.distplot(df.T0_SIMD04[(df.T0_SIMD04.notna()) & (df.ExpGroups == 2)],
ax=ax, kde=False, norm_hist=True, bins=5,
kde_kws={'bw': 0.55}, hist_kws={'rwidth': 0.8},
color='xkcd:ocean blue')
    # bar centres run from 1.4 to 4.6 (i.e. 1.4 + 0.8*4), matching the xticks set below
ax.set_xlabel('SIMD04 Quintile')
ax.set_xticks(np.linspace(start=1.4, stop=4.6, num=5))
ax.set_xticklabels(['1 (Least Deprived)',
'2', '3', '4',
'5 (Most Deprived)'],
rotation=45, ha='right', fontsize=8)
ax.set_ylabel('Proportion')
ax.set_xlim([1, 5])
ax.legend(labels=['Not Explained', 'Explained'],
bbox_to_anchor=(1.25, 1), loc=1)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
fig1.savefig('output/3_SNSSSocioeconomicGroups.pdf', dpi=300,
format='pdf', pad_inches=0.1, bbox_inches='tight')
plt.close()
# Figure 2: Does SIMD correlate with socioeconomic questions in SNSS and are outcomes different?
contOutcomes = ['T0_PHQNeuro28_Total',
'T0_HADS',
'T0_SF12_NormedMCS',
'T0_SF12_NormedPCS',
'T0T2_PHQNeuro28_Total',
'T0T2_HADS',
'T0T2_SF12_NormedMCS',
'T0T2_SF12_NormedPCS']
catOutcomes = ['T0_Inemployment',
'T0_IncapacityBenefitorDLA',
'T2_poorIPS',
'T2_poorCGI']
ylabels = ['Symptom Count (Baseline)',
'HADS Score (Baseline)',
'SF12 MCS (Baseline)',
'SF12 PCS (Baseline)',
'Symptom Count (12 Month Change)',
'HADS Score (12 Month Change)',
'SF12 MCS (12 Month Change)',
'SF12 PCS (12 Month Change)',
'% in Employment (Baseline)',
'% in Receipt of DLA (Baseline)',
'% Reporting Poor IPS (12 Months)',
'% Reporting Poor CGI (12 Months)']
fig2 = plt.figure(num=1, figsize=(16, 12), dpi=200, frameon=False)
i = 0
ax = []
for o in contOutcomes:
ax.append(fig2.add_subplot(3, 4, i+1))
sb.boxplot(x='ExpGroups', y=o, hue='T0_SIMD04',
data=df, ax=ax[i],
palette=sb.cubehelix_palette(5, start=0, reverse=False),
flierprops={'marker': '+'})
ax[i].set_xticklabels(labels=['Unexplained', 'Explained'])
ax[i].set_ylabel(ylabels[i])
if i == 3:
handles, _ = ax[i].get_legend_handles_labels()
ax[i].legend(handles=handles, labels=['1 (Least Deprived)',
'2', '3', '4',
'5 (Most Deprived)'],
bbox_to_anchor=(1.55, 1), loc=1)
else:
ax[i].legend_.remove()
i = i+1
for o in catOutcomes:
ax.append(fig2.add_subplot(3, 4, i+1))
sb.barplot(x='ExpGroups', y=o, hue='T0_SIMD04', data=df,
palette=sb.cubehelix_palette(5, start=0, reverse=False),
ax=ax[i])
ax[i].set_ylabel(ylabels[i])
ax[i].set_xticklabels(labels=['Unexplained', 'Explained'])
ax[i].set_ylim([0, 1])
ax[i].legend_.remove()
i = i+1
fig2.subplots_adjust(wspace=0.3, hspace=0.3)
fig2.savefig('output/3_SNSSSocioeconomicAssessment.pdf', dpi=300,
format='pdf', pad_inches=0.1, bbox_inches='tight')
plt.close()
# Figure 3: Do individual domains differ in outcome (!!! Not population weighted)
for Y in ['T0_Inemployment', 'T0_IncapacityBenefitorDLA', 'T2_poorCGI']:
fig3 = plt.figure(num=1, figsize=(9, 6), dpi=200, frameon=False)
i = 0
ax = []
domains = ['inc', 'emp', 'hlth', 'educ', 'access', 'house']
for d in domains:
ax.append(fig3.add_subplot(2, 3, i+1))
sb.barplot(x='ExpGroups', y=Y, hue='T0_SIMD04_' + d + '_quintile', data=df,
palette=sb.cubehelix_palette(5, start=0, reverse=False),
ax=ax[i])
# ax[i].set_ylabel(ylabels[i])
ax[i].set_xticklabels(labels=['Unexplained', 'Explained'])
ax[i].set_ylim([0, 1])
ax[i].legend_.remove()
ax[i].set_title(d)
i = i+1
fig3.subplots_adjust(wspace=0.3, hspace=0.3)
plt.close()
# sb.violinplot(x='ExpGroups', y='T0_SIMD04_access_score', hue='T2_SymptomsChange',
# palette=sb.cubehelix_palette(5, start=2, reverse=True), data=df)
fig3.savefig('output/3_SNSSSocioeconomicDomainsAssessment_' + Y + '.pdf', dpi=300,
format='pdf', pad_inches=0.1, bbox_inches='tight')
return
def performanceMetrics(trainDat, evalDat):
""" General function for assessing training vs eval performance
    Takes two two-element sequences, one for training and one for evaluation. Each
    is made up of an array of TRUE labels [0] and an array of PREDICTED scores [1].
    The function computes binary or multiclass performance metrics and outputs a dictionary.
Args:
trainDat: An Nx2 array of true labels and predicted scores for the training set.
evalDat: An Nx2 array of true labels and predicted scores for the eval set.
Returns:
perfDict: A dictionary which includes the original scores and labels as well as
all computed metrics.
Raises:
NONE
"""
perfDict = {}
nClasses = len(np.unique(trainDat[0]))
dLabels = ['train', 'eval']
i = 0
for d in [trainDat, evalDat]:
true = d[0]
score = d[1]
if nClasses == 2: # If binary classification problem...
perfDict['problemType'] = 'binaryProblem'
# Calculate 'optimal' ROC operating threshold to assign binary pred.
fpr, tpr, t = roc_curve(true, score)
optimalIdx = np.argmax(tpr - fpr)
optimalThreshold = t[optimalIdx]
pred = np.reshape((score >= optimalThreshold).astype(int), [len(score), ])
# Compute Accuracy Scores
Acc = accuracy_score(true, pred, normalize=True)
Auroc = roc_auc_score(true, score)
R2 = r2_score(true, pred)
f1 = f1_score(true, pred)
precision = precision_score(true, pred, average='binary')
recall = recall_score(true, pred, average='binary')
CM = confusion_matrix(true, pred)
TN = CM[0][0]
TP = CM[1][1]
FN = CM[1][0]
FP = CM[0][1]
Sens = TP/(TP+FN)
Spec = TN/(TN+FP)
perfDict[dLabels[i] + 'True'] = true
perfDict[dLabels[i] + 'Pred'] = pred
perfDict[dLabels[i] + 'Score'] = score
perfDict[dLabels[i] + 'Acc'] = Acc
perfDict[dLabels[i] + 'Auroc'] = Auroc
perfDict[dLabels[i] + 'R2'] = R2
perfDict[dLabels[i] + 'F1'] = f1
perfDict[dLabels[i] + 'Precision'] = precision
perfDict[dLabels[i] + 'Recall'] = recall
perfDict[dLabels[i] + 'CM'] = CM
perfDict[dLabels[i] + 'Sens'] = Sens
perfDict[dLabels[i] + 'Spec'] = Spec
perfDict[dLabels[i] + 'OperatingThreshold'] = optimalThreshold
i += 1
else: # If multiclass classification problem...
perfDict['problemType'] = 'multiClassProblem'
pred = np.argmax(score, axis=1)
Acc = accuracy_score(true, pred, normalize=True)
CM = confusion_matrix(true, pred)
microPrecision = precision_score(true, pred, average='micro')
microRecall = recall_score(true, pred, average='micro')
macroPrecision = precision_score(true, pred, average='macro')
macroRecall = recall_score(true, pred, average='macro')
# microAuroc = roc_auc_score(true, score, average='micro')
# macroAuroc = roc_auc_score(true, score, average='macro')
perfDict[dLabels[i] + 'True'] = true
perfDict[dLabels[i] + 'Pred'] = pred
perfDict[dLabels[i] + 'Score'] = score
perfDict[dLabels[i] + 'Acc'] = Acc
perfDict[dLabels[i] + 'CM'] = CM
perfDict[dLabels[i] + 'Precision'] = microPrecision
perfDict[dLabels[i] + 'Recall'] = microRecall
perfDict[dLabels[i] + 'MicroPrecision'] = microPrecision
perfDict[dLabels[i] + 'MicroRecall'] = microRecall
perfDict[dLabels[i] + 'MacroPrecision'] = macroPrecision
perfDict[dLabels[i] + 'MacroRecall'] = macroRecall
# perfDict[dLabels[i] + 'Auroc'] = microAuroc
# perfDict[dLabels[i] + 'MicroAuroc'] = microAuroc
# perfDict[dLabels[i] + 'MacroAuroc'] = macroAuroc
i += 1
return perfDict
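# Illustrative usage sketch (not part of the original analysis code): scoring a
# synthetic binary classifier with performanceMetrics. It relies on the same
# scikit-learn metric imports used by the function above; the labels and scores
# below are made up purely for demonstration.
def _demo_performanceMetrics():
    import numpy as np
    rng = np.random.default_rng(0)
    true = rng.integers(0, 2, size=200)
    score = np.clip(true*0.6 + rng.normal(0, 0.3, size=200), 0, 1)
    perf = performanceMetrics([true[:150], score[:150]],
                              [true[150:], score[150:]])
    print(perf['trainAuroc'], perf['evalAuroc'], perf['evalSens'], perf['evalSpec'])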
def UVLogisticRegression_v2(df, featureSet, outcomeVar, featMetaData, featDataTypeDict,
dummyExceptionDict, trainIdx=[], evalIdx=[]):
""" Conducts univariable logistic regression for every variable in the feature set.
Takes a dataframe, feature set and outcome variable and conducts univariable logistic modelling.
Args:
df: The pandas dataframe object
        featureSet: The names of columns to be assessed as inputs into the model. (Exog)
outcomeVar: The outcome variable. (Endog)
featMetaData: Feature meta data for constructing legible tables etc.
        featDataTypeDict: Feature data type dictionary, e.g. discrete vs continuous. NOTE: variables are treated
                          according to this in the models; discrete variables are dummy encoded automatically.
dummyExceptionDict: A dictionary of variables and the value to use as the dummy variable if not the first.
trainIdx: Dataframe indices for observations to be used for training. If empty all will be used.
evalIdx: Dataframe indices for observations to be used for evaluation. If empty all will be used.
Returns:
UVMdlExportT: Returns the model results in a table of OR CIs and p-values.
mdlArray: Returns an array of statsmodels regression objects.
modelSummaryInfoDict: A dict of dictionaries containing summary statistics about each model.
Raises:
NONE
"""
mdlExportTArray = []
mdlArray = []
modelSummaryInfoDict = {}
for P in featureSet: # For each feature construct k-1 dummy array and construct model.
# Exclude missing data from featureSet subset
rDat = df.dropna(subset=[P] + [outcomeVar])
# Initialise feat & outcome arrays
outcome = np.asarray(rDat[outcomeVar]).astype(int) # Initialise outcome array, MUST BE BINARY
feats = np.ones([len(rDat), 1]).astype(int) # Initialise dummy feat array with constant
featNames = ['constant'] # Initialise dummy featNames array with constant
featNameIndex = ['constant']
sigTestIdx = {}
modelSummaryInfo = {}
if featDataTypeDict[P] in ['nominal', 'binary']:
if rDat[P].dtype.name != 'category': # If not a categorical then convert...
rDat[P] = pd.Categorical(rDat[P])
# Drop single category as constant.
# Decision based on dummy exception dict, defaults to first category.
try:
X = pd.get_dummies(rDat[P], drop_first=False).drop(axis=1,
columns=dummyExceptionDict[P])
except (KeyError) as err:
X = pd.get_dummies(rDat[P], drop_first=True)
# Translate categorical series labels into SNSS var value labels..
varDict = dict(zip(featMetaData[P]['values'],
featMetaData[P]['valuelabels']))
for col in range(X.shape[1]): # Construct featNames array for output
try:
featNames.append(featMetaData[P]['label'] + ' - ' +
varDict[X.columns.
categories[X.columns.codes].values.tolist()[col]])
except (KeyError) as err:
# Convert int column names to str
featNames.append(featMetaData[P]['label'] + ' - ' +
str(X.columns.
categories[X.columns.codes].values.tolist()[col]))
elif featDataTypeDict[P] in ['continuous', 'ordinal']:
X = np.array(rDat[P]).reshape(len(rDat[P]),1)
# X = (X-min(X))/max(X) # Option to autoscale
featNames.append(featMetaData[P]['label'])
featNameIndex = featNameIndex + ([P]*len(range(X.shape[1]))) # Label for indexing in pandas export T
# Save column indices of each P in dict for significance testing later...
sigTestIdx[P] = range(feats.shape[1], feats.shape[1]+X.shape[1])
# Append dummy encoded variable to exog array...
feats = np.append(feats, X, axis=1)
        # NOTE: because missing data are dropped per-feature above, externally supplied
        # train/eval indices would not align with this subset, so the whole
        # feature-complete dataset is used for both training and evaluation here.
        trainIdx = np.linspace(0, len(outcome)-1, len(outcome)).astype(int)
        evalIdx = np.linspace(0, len(outcome)-1, len(outcome)).astype(int)
# Construct Logistic model from all variable array...
lr = Logit(endog=outcome[trainIdx], exog=feats[trainIdx])
mdl = lr.fit(disp=0)
# Export salient mdl features into table for writing...
mdlExportT = pd.DataFrame(mdl.params, index=[[P]*len(featNames), featNames],
columns=['coeff'])
mdlExportT['coeffLCI'] = mdl.conf_int()[:, 0]
mdlExportT['coeffUCI'] = mdl.conf_int()[:, 1]
mdlExportT['OR'] = np.exp(mdl.params)
mdlExportT['ORLCI'] = np.exp(mdl.conf_int())[:, 0]
mdlExportT['ORUCI'] = np.exp(mdl.conf_int())[:, 1]
mdlExportT['p'] = mdl.pvalues
pValArray = [1]
# Variable significance testing...
testLr = Logit(endog=outcome, exog=np.delete(feats, sigTestIdx[P], axis=1))
testMdl = testLr.fit(disp=0)
Chi2p = 1 - stats.chi2.cdf(2*(mdl.llf - testMdl.llf), df=len(sigTestIdx[P]))
pValArray = pValArray + [Chi2p]*len(sigTestIdx[P])
mdlExportT['llrp'] = pValArray
# Assess trained model predictive capacity
trainTrue = outcome[trainIdx]
trainScore = mdl.predict()
evalTrue = outcome[evalIdx]
evalScore = mdl.predict(feats[evalIdx])
modelSummaryInfo.update(performanceMetrics([trainTrue, trainScore], [evalTrue, evalScore]))
# Store common info and model objectin model summary output dict.
modelSummaryInfo['nTotal'] = len(outcome)
modelSummaryInfo['nTrain'] = len(trainIdx)
modelSummaryInfo['nEval'] = len(evalIdx)
modelSummaryInfo['partitionRatio'] = len(trainIdx)/(len(evalIdx)+len(trainIdx))
modelSummaryInfo['outcomeVar'] = outcome
modelSummaryInfo['outcomeLabels'] = featMetaData[outcomeVar]['truncvaluelabels']
modelSummaryInfo['modelType'] = 'logisticRegression'
modelSummaryInfo['featureSet'] = feats
        modelSummaryInfo['nFeatures'] = feats.shape[1]  # number of encoded predictor columns (incl. constant)
# Add to array of univariate models for export.
mdlExportTArray = mdlExportTArray + [mdlExportT]
mdlArray = mdlArray + [mdl]
modelSummaryInfoDict[P] = modelSummaryInfo
UVMdlExportT = pd.concat(mdlExportTArray, axis=0)
return UVMdlExportT, mdlArray, modelSummaryInfoDict
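# Illustrative sketch (not part of the original analysis code) of the dummy
# encoding used by the regression helpers above: a categorical predictor is
# expanded to k-1 indicator columns, dropping either the reference level named
# in dummyExceptionDict or, failing that, the first level. The variable name
# and levels below are synthetic.
def _demo_dummy_encoding():
    import pandas as pd
    rDat = pd.DataFrame({'T0_PHQ13_Binned': pd.Categorical([1, 2, 3, 2, 1, 3])})
    dummyExceptionDict = {'T0_PHQ13_Binned': 2}  # use level 2 as the reference
    P = 'T0_PHQ13_Binned'
    try:
        X = pd.get_dummies(rDat[P], drop_first=False).drop(axis=1,
                                                           columns=dummyExceptionDict[P])
    except KeyError:
        X = pd.get_dummies(rDat[P], drop_first=True)
    print(X.columns.tolist())  # [1, 3]: level 2 is the baseline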
def MVLogisticRegression_v2(df, featureSet, outcomeVar, featMetaData, featDataTypeDict,
dummyExceptionDict, trainIdx=[], evalIdx=[]):
""" Conducts multivariable logistic regression for all variables in the feature set.
Takes a dataframe, feature set and outcome variable and conducts multivariable logistic modelling.
Args:
df: The pandas dataframe object
        featureSet: The names of columns to be assessed as inputs into the model. (Exog)
outcomeVar: The outcome variable. (Endog)
featMetaData: Feature meta data for constructing legible tables etc.
        featDataTypeDict: Feature data type dictionary, e.g. discrete vs continuous. NOTE: variables are treated
                          according to this in the models; discrete variables are dummy encoded automatically.
dummyExceptionDict: A dictionary of variables and the value to use as the dummy variable if not the first.
trainIdx: Dataframe indices for observations to be used for training. If empty all will be used.
evalIdx: Dataframe indices for observations to be used for evaluation. If empty all will be used.
Returns:
mdlExportT: Returns the model results in a table of OR CIs and p-values.
mdl: Returns the statsmodels regression object.
        modelSummaryInfo: A dictionary containing summary statistics for the model.
Raises:
NONE
"""
# Exclude missing data from featureSet subset
rDat = df.dropna(subset=featureSet + [outcomeVar])
# Initialise feat & outcome arrays
outcome = np.asarray(rDat[outcomeVar]).astype(int) # Initialise outcome array, MUST BE BINARY
feats = np.ones([len(rDat), 1]).astype(int) # Initialise dummy feat array with constant
featNames = ['constant'] # Initialise dummy featNames array with constant
featNameIndex = ['constant']
sigTestIdx = {}
modelSummaryInfo = {}
for P in featureSet: # For each feature construct k-1 dummy array and add to feats array.
if featDataTypeDict[P] in ['nominal', 'binary']:
if rDat[P].dtype.name != 'category': # If not a categorical then convert...
rDat[P] =
|
pd.Categorical(rDat[P])
|
pandas.Categorical
|
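# Illustrative sketch of the pandas.Categorical call shown in the completion
# above (not part of the original dataset row): converting a numerically coded
# column to a categorical dtype before dummy encoding. Data are synthetic.
def _demo_pd_categorical():
    import pandas as pd
    rDat = pd.DataFrame({'Gender': [1, 2, 2, 1]})
    if rDat['Gender'].dtype.name != 'category':
        rDat['Gender'] = pd.Categorical(rDat['Gender'])
    print(rDat['Gender'].dtype)  # category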
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull)
from pandas.compat import lrange
from pandas import compat
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesApply(TestData, tm.TestCase):
def test_apply(self):
with np.errstate(all='ignore'):
assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))
# elementwise-apply
import math
assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))
# how to handle Series result, #2316
result = self.ts.apply(lambda x: Series(
[x, x ** 2], index=['x', 'x^2']))
expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})
tm.assert_frame_equal(result, expected)
# empty series
s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
self.assertIsNot(s, rs)
self.assertIs(s.index, rs.index)
self.assertEqual(s.dtype, rs.dtype)
self.assertEqual(s.name, rs.name)
# index but no data
s = Series(index=[1, 2, 3])
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
f = lambda x: (x, x + 1)
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
f = lambda x: x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
self.assertEqual(result.dtype, object)
def test_apply_args(self):
s = Series(['foo,bar'])
result = s.apply(str.split, args=(',', ))
self.assertEqual(result[0], ['foo', 'bar'])
tm.assertIsInstance(result[0], list)
def test_apply_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns]')
# boxed value must be Timestamp instance
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_None', 'Timestamp_2_None'])
tm.assert_series_equal(res, exp)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns, US/Eastern]')
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_US/Eastern', 'Timestamp_2_US/Eastern'])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'timedelta64[ns]')
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__, x.days))
exp = pd.Series(['Timedelta_1', 'Timedelta_2'])
tm.assert_series_equal(res, exp)
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'object')
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__,
x.freqstr))
exp = pd.Series(['Period_M', 'Period_M'])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz(self):
values = pd.date_range('2011-01-01', '2011-01-02',
freq='H').tz_localize('Asia/Tokyo')
s = pd.Series(values, name='XX')
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range('2011-01-02', '2011-01-03',
freq='H').tz_localize('Asia/Tokyo')
exp = pd.Series(exp_values, name='XX')
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.apply(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name='XX', dtype=np.int64)
|
tm.assert_series_equal(result, exp)
|
pandas.util.testing.assert_series_equal
|
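# Illustrative sketch of the assert helper named in the completion above (not
# part of the original test suite). Note: pandas.util.testing was the location
# used by older pandas; newer releases expose the same helper as
# pandas.testing.assert_series_equal, which is used here.
def _demo_assert_series_equal():
    import pandas as pd
    from pandas.testing import assert_series_equal
    left = pd.Series([1, 2, 3], name='x')
    right = pd.Series([1, 2, 3], name='x')
    assert_series_equal(left, right)  # raises AssertionError on any mismatch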
__author__ = 'lucabasa'
__version__ = '5.1.0'
__status__ = 'development'
import pandas as pd
import numpy as np
from source.aggregated_stats import process_details, full_stats, rolling_stats
from source.add_info import add_seed, add_rank, highlow_seed, add_stage, add_quality
def make_teams_target(data, league):
'''
Take the playoff compact data and double the dataframe by inverting W and L
It also creates the ID column
data: playoff compact results
league: men or women, useful to know when to cut the data
'''
if league == 'men':
limit = 2003
else:
limit = 2010
df = data[data.Season >= limit].copy()
df['Team1'] = np.where((df.WTeamID < df.LTeamID), df.WTeamID, df.LTeamID)
df['Team2'] = np.where((df.WTeamID > df.LTeamID), df.WTeamID, df.LTeamID)
df['target'] = np.where((df['WTeamID'] < df['LTeamID']), 1, 0)
df['target_points'] = np.where((df['WTeamID'] < df['LTeamID']), df.WScore - df.LScore, df.LScore - df.WScore)
df.loc[df.WLoc == 'N', 'LLoc'] = 'N'
df.loc[df.WLoc == 'H', 'LLoc'] = 'A'
df.loc[df.WLoc == 'A', 'LLoc'] = 'H'
df['T1_Loc'] = np.where((df.WTeamID < df.LTeamID), df.WLoc, df.LLoc)
df['T2_Loc'] = np.where((df.WTeamID > df.LTeamID), df.WLoc, df.LLoc)
df['T1_Loc'] = df['T1_Loc'].map({'H': 1, 'A': -1, 'N': 0})
df['T2_Loc'] = df['T2_Loc'].map({'H': 1, 'A': -1, 'N': 0})
reverse = data[data.Season >= limit].copy()
reverse['Team1'] = np.where((reverse.WTeamID > reverse.LTeamID), reverse.WTeamID, reverse.LTeamID)
reverse['Team2'] = np.where((reverse.WTeamID < reverse.LTeamID), reverse.WTeamID, reverse.LTeamID)
reverse['target'] = np.where((reverse['WTeamID'] > reverse['LTeamID']),1,0)
reverse['target_points'] = np.where((reverse['WTeamID'] > reverse['LTeamID']),
reverse.WScore - reverse.LScore,
reverse.LScore - reverse.WScore)
reverse.loc[reverse.WLoc == 'N', 'LLoc'] = 'N'
reverse.loc[reverse.WLoc == 'H', 'LLoc'] = 'A'
reverse.loc[reverse.WLoc == 'A', 'LLoc'] = 'H'
reverse['T1_Loc'] = np.where((reverse.WTeamID > reverse.LTeamID), reverse.WLoc, reverse.LLoc)
reverse['T2_Loc'] = np.where((reverse.WTeamID < reverse.LTeamID), reverse.WLoc, reverse.LLoc)
reverse['T1_Loc'] = reverse['T1_Loc'].map({'H': 1, 'A': -1, 'N': 0})
reverse['T2_Loc'] = reverse['T2_Loc'].map({'H': 1, 'A': -1, 'N': 0})
df = pd.concat([df, reverse], ignore_index=True)
to_drop = ['WScore','WTeamID', 'LTeamID', 'LScore', 'WLoc', 'LLoc', 'NumOT']
for col in to_drop:
del df[col]
df.loc[:,'ID'] = df.Season.astype(str) + '_' + df.Team1.astype(str) + '_' + df.Team2.astype(str)
return df
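# Illustrative usage sketch (not part of the original pipeline): a two-game
# synthetic compact-results frame passed through make_teams_target. Each game
# appears twice, once per team ordering, and ID takes the form Season_Team1_Team2.
def _demo_make_teams_target():
    import pandas as pd
    compact = pd.DataFrame({'Season': [2019, 2019],
                            'DayNum': [136, 137],
                            'WTeamID': [1112, 1438],
                            'LTeamID': [1438, 1211],
                            'WScore': [78, 65],
                            'LScore': [60, 58],
                            'WLoc': ['N', 'N'],
                            'NumOT': [0, 0]})
    out = make_teams_target(compact, league='men')
    print(out[['ID', 'Team1', 'Team2', 'target', 'target_points']])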
def make_training_data(details, targets):
'''
details: seasonal stats by team
targets: result of make_teams_target with each playoff game present twice
Add the prefix T1_ and T2_ to the seasonal stats and add it to the playoff game
This creates the core training set where we use seasonal stats to predict the playoff games
Add the delta_ statistics, given by the difference between T1_ and T2_
'''
tmp = details.copy()
tmp.columns = ['Season', 'Team1'] + \
['T1_'+col for col in tmp.columns if col not in ['Season', 'TeamID']]
total = pd.merge(targets, tmp, on=['Season', 'Team1'], how='left')
tmp = details.copy()
tmp.columns = ['Season', 'Team2'] + \
['T2_'+col for col in tmp.columns if col not in ['Season', 'TeamID']]
total = pd.merge(total, tmp, on=['Season', 'Team2'], how='left')
if total.isnull().any().any():
print(total.columns[total.isnull().any()])
raise ValueError('Something went wrong')
stats = [col[3:] for col in total.columns if 'T1_' in col and 'region' not in col]
for stat in stats:
total['delta_'+stat] = total['T1_'+stat] - total['T2_'+stat]
try:
total['delta_off_edge'] = total['T1_off_rating'] - total['T2_def_rating']
total['delta_def_edge'] = total['T2_off_rating'] - total['T1_def_rating']
except KeyError:
pass
return total
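# Illustrative sketch (not part of the original pipeline) of a simplified version
# of the prefix / merge / delta pattern used by make_training_data above: seasonal
# stats are attached once as T1_ columns and once as T2_ columns, then differenced.
# Column names and values are synthetic.
def _demo_prefix_merge_delta():
    import pandas as pd
    details = pd.DataFrame({'Season': [2019, 2019],
                            'TeamID': [1112, 1438],
                            'off_rating': [110.0, 104.0]})
    games = pd.DataFrame({'Season': [2019], 'Team1': [1112], 'Team2': [1438]})
    t1 = details.rename(columns={'TeamID': 'Team1', 'off_rating': 'T1_off_rating'})
    t2 = details.rename(columns={'TeamID': 'Team2', 'off_rating': 'T2_off_rating'})
    total = games.merge(t1, on=['Season', 'Team1']).merge(t2, on=['Season', 'Team2'])
    total['delta_off_rating'] = total['T1_off_rating'] - total['T2_off_rating']
    print(total)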
def prepare_data(league):
save_loc = 'processed_data/' + league + '/'
if league == 'women':
regular_season = 'data/raw_women/WDataFiles_Stage2/WRegularSeasonDetailedResults.csv'
playoff = 'data/raw_women/WDataFiles_Stage2/WNCAATourneyDetailedResults.csv'
playoff_compact = 'data/raw_women/WDataFiles_Stage2/WNCAATourneyCompactResults.csv'
seed = 'data/raw_women/WDataFiles_Stage2/WNCAATourneySeeds.csv'
rank = None
stage2 = 'data/raw_women/WDataFiles_Stage2/WSampleSubmissionStage2.csv'
stage2_yr = 2021
save_loc = 'data/processed_women/'
else:
regular_season = 'data/raw_men/MDataFiles_Stage2/MRegularSeasonDetailedResults.csv'
playoff = 'data/raw_men/MDataFiles_Stage2/MNCAATourneyDetailedResults.csv'
playoff_compact = 'data/raw_men/MDataFiles_Stage2/MNCAATourneyCompactResults.csv'
seed = 'data/raw_men/MDataFiles_Stage2/MNCAATourneySeeds.csv'
rank = 'data/raw_men/MDataFiles_Stage2/MMasseyOrdinals.csv'
stage2 = 'data/raw_men/MDataFiles_Stage2/MSampleSubmissionStage2.csv'
stage2_yr = 2021
save_loc = 'data/processed_men/'
# Season stats
reg = pd.read_csv(regular_season)
reg = process_details(reg, rank)
reg.to_csv(save_loc + 'game_details_regular_extended.csv', index=False)
regular_stats = full_stats(reg)
# Last 2 weeks stats
last2weeks = reg[reg.DayNum >= 118].copy()
last2weeks = full_stats(last2weeks)
last2weeks.columns = ['L2W_' + col for col in last2weeks]
last2weeks.rename(columns={'L2W_Season': 'Season', 'L2W_TeamID': 'TeamID'}, inplace=True)
regular_stats = pd.merge(regular_stats, last2weeks, on=['Season', 'TeamID'], how='left')
regular_stats = add_seed(seed, regular_stats)
# Playoff stats
play = pd.read_csv(playoff)
play = process_details(play)
play.to_csv(save_loc + 'game_details_playoff_extended.csv', index=False)
playoff_stats = full_stats(play)
playoff_stats = add_seed(seed, playoff_stats)
if rank:
regular_stats = add_rank(rank, regular_stats)
playoff_stats = add_rank(rank, playoff_stats)
# Target data generation
target_data = pd.read_csv(playoff_compact)
target_data = make_teams_target(target_data, league)
# Add high and low seed wins perc
regular_stats = highlow_seed(regular_stats, reg, seed)
all_reg = make_training_data(regular_stats, target_data)
all_reg = all_reg[all_reg.DayNum >= 136] # remove pre tourney
all_reg = add_stage(all_reg)
all_reg = add_quality(all_reg, reg)
all_reg.to_csv(save_loc + 'training_data.csv', index=False)
playoff_stats.to_csv(save_loc + 'playoff_stats.csv', index=False)
if stage2:
test_data_reg = regular_stats[regular_stats.Season == stage2_yr].copy()
sub = pd.read_csv(stage2)
sub['Team1'] = sub['ID'].apply(lambda x: int(x[5:9]))
sub['Team2'] = sub['ID'].apply(lambda x: int(x[10:]))
tmp = sub.copy()
tmp = tmp.rename(columns={'Team1': 'Team2', 'Team2': 'Team1'})
tmp = tmp[['Team1', 'Team2', 'Pred']]
sub = pd.concat([sub[['Team1', 'Team2', 'Pred']], tmp], ignore_index=True)
sub['Season'] = stage2_yr
test_data = make_training_data(test_data_reg, sub)
test_data = add_stage(test_data)
test_data = add_quality(test_data, reg[reg.Season == stage2_yr])
test_data.to_csv(save_loc + f'{stage2_yr}_test_data.csv', index=False)
return all_reg, test_data
return all_reg
def prepare_competitive(league):
if league == 'women':
regular_season = 'data/raw_women/WDataFiles_Stage2/WRegularSeasonDetailedResults.csv'
playoff = 'data/raw_women/WDataFiles_Stage2/WNCAATourneyDetailedResults.csv'
rank = None
season_info = 'data/raw_women/WDataFiles_Stage2/WSeasons.csv'
events_data = 'data/processed_women/events.csv'
save_loc = 'data/processed_women/'
else:
regular_season = 'data/raw_men/MDataFiles_Stage2/MRegularSeasonDetailedResults.csv'
playoff = 'data/raw_men/MDataFiles_Stage2/MNCAATourneyDetailedResults.csv'
playoff_compact = 'data/raw_men/MDataFiles_Stage2/MNCAATourneyCompactResults.csv'
rank = 'data/raw_men/MDataFiles_Stage2/MMasseyOrdinals.csv'
season_info = 'data/raw_men/MDataFiles_Stage2/MSeasons.csv'
events_data = 'data/processed_men/events.csv'
save_loc = 'data/processed_men/'
reg = pd.read_csv(regular_season)
reg = process_details(reg, rank)
play = pd.read_csv(playoff)
play = process_details(play)
full = pd.concat([reg, play])
events = pd.read_csv(events_data)
to_use = [col for col in events if not col.endswith('_game') and
'FinalScore' not in col and
'n_OT' not in col and
'_difference' not in col]
full = pd.merge(full, events[to_use], on=['Season', 'DayNum', 'WTeamID', 'LTeamID'])
full.to_csv(save_loc + 'events_extended.csv', index=False)
rolling = rolling_stats(full, season_info)
rolling.to_csv(save_loc + 'rolling_stats.csv', index=False)
competitive = events[['Season', 'DayNum', 'WTeamID', 'LTeamID',
'tourney', 'Final_difference', 'Halftime_difference', '3mins_difference',
'game_lc', 'half2_lc', 'crunchtime_lc', 'competitive']].copy()
tmp = rolling.copy()
tmp.columns = ['Season'] + \
['W'+col for col in tmp.columns if col not in ['Season', 'DayNum']] + ['DayNum']
competitive = pd.merge(competitive, tmp, on=['Season', 'DayNum', 'WTeamID'])
tmp = rolling.copy()
tmp.columns = ['Season'] + \
['L'+col for col in tmp.columns if col not in ['Season', 'DayNum']] + ['DayNum']
competitive =
|
pd.merge(competitive, tmp, on=['Season', 'DayNum', 'LTeamID'])
|
pandas.merge
|
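# Illustrative sketch of the pandas.merge call shown in the completion above
# (not part of the original pipeline): an inner merge on several key columns.
# Data are synthetic.
def _demo_multi_key_merge():
    import pandas as pd
    competitive = pd.DataFrame({'Season': [2019, 2019], 'DayNum': [10, 11],
                                'LTeamID': [1211, 1438], 'competitive': [1, 0]})
    rolling = pd.DataFrame({'Season': [2019, 2019], 'DayNum': [10, 11],
                            'LTeamID': [1211, 1438], 'LScore_rolling': [61.0, 66.5]})
    merged = pd.merge(competitive, rolling, on=['Season', 'DayNum', 'LTeamID'])
    print(merged.shape)  # (2, 5)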
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
}
)
df = DataFrame(
{
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
}
)
for idx, b in enumerate([1, 2, 3]):
df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64", "right")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_setitem_extension_types(self, obj, dtype):
# GH: 34832
expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)})
df = DataFrame({"idx": [1, 2, 3]})
df["obj"] = obj
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"ea_name",
[
dtype.name
for dtype in ea_registry.dtypes
# property would require instantiation
if not isinstance(dtype.name, property)
]
# mypy doesn't allow adding lists of different types
# https://github.com/python/mypy/issues/5492
+ ["datetime64[ns, UTC]", "period[D]"], # type: ignore[list-item]
)
def test_setitem_with_ea_name(self, ea_name):
# GH 38386
result = DataFrame([0])
result[ea_name] = [1]
expected = DataFrame({0: [0], ea_name: [1]})
tm.assert_frame_equal(result, expected)
def test_setitem_dt64_ndarray_with_NaT_and_diff_time_units(self):
# GH#7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = Series(data_ns).to_frame()
result["new"] = data_ns
expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = DataFrame({0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into a not-yet-existing column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
assert df[unit].dtype == np.dtype("M8[ns]")
assert (df[unit].values == ex_vals).all()
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_existing_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into an already-existing dt64 column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view("M8[ns]")
# We overwrite existing dt64 column with new, non-nano dt64 vals
df["dates"] = vals
assert (df["dates"].values == ex_vals).all()
def test_setitem_dt64tz(self, timezone_frame):
df = timezone_frame
idx = df["B"].rename("foo")
# setitem
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# assert that A & C are not sharing the same base (e.g. they
# are copies)
v1 = df._mgr.arrays[1]
v2 = df._mgr.arrays[2]
tm.assert_extension_array_equal(v1, v2)
v1base = v1._data.base
v2base = v2._data.base
assert v1base is None or (id(v1base) != id(v2base))
# with nan
df2 = df.copy()
df2.iloc[1, 1] = NaT
df2.iloc[1, 2] = NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(df2.dtypes, df.dtypes)
def test_setitem_periodindex(self):
rng = period_range("1/1/2000", periods=5, name="index")
df = DataFrame(np.random.randn(5, 3), index=rng)
df["Index"] = rng
rs = Index(df["Index"])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == "Index"
assert rng.name == "index"
rs = df.reset_index().set_index("index")
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_setitem_complete_column_with_array(self):
# GH#37954
df = DataFrame({"a": ["one", "two", "three"], "b": [1, 2, 3]})
arr = np.array([[1, 1], [3, 1], [5, 1]])
df[["c", "d"]] = arr
expected = DataFrame(
{
"a": ["one", "two", "three"],
"b": [1, 2, 3],
"c": [1, 3, 5],
"d": [1, 1, 1],
}
)
expected["c"] = expected["c"].astype(arr.dtype)
expected["d"] = expected["d"].astype(arr.dtype)
assert expected["c"].dtype == arr.dtype
assert expected["d"].dtype == arr.dtype
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
def test_setitem_bool_with_numeric_index(self, dtype):
# GH#36319
cols = Index([1, 2, 3], dtype=dtype)
df = DataFrame(np.random.randn(3, 3), columns=cols)
df[False] = ["a", "b", "c"]
expected_cols = Index([1, 2, 3, False], dtype=object)
if dtype == "f8":
expected_cols = Index([1.0, 2.0, 3.0, False], dtype=object)
tm.assert_index_equal(df.columns, expected_cols)
@pytest.mark.parametrize("indexer", ["B", ["B"]])
def test_setitem_frame_length_0_str_key(self, indexer):
# GH#38831
df = DataFrame(columns=["A", "B"])
other = DataFrame({"B": [1, 2]})
df[indexer] = other
expected = DataFrame({"A": [np.nan] * 2, "B": [1, 2]})
expected["A"] = expected["A"].astype("object")
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns(self, using_array_manager):
# GH#15695
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
df.loc[0, "A"] = (0, 3)
df.loc[:, "B"] = (1, 4)
df["C"] = (2, 5)
expected = DataFrame(
[
[0, 1, 2, 3, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
],
dtype="object",
)
if using_array_manager:
# setitem replaces column so changes dtype
expected.columns = cols
expected["C"] = expected["C"].astype("int64")
# TODO(ArrayManager) .loc still overwrites
expected["B"] = expected["B"].astype("int64")
else:
# set these with unique columns to be extra-unambiguous
expected[2] = expected[2].astype(np.int64)
expected[5] = expected[5].astype(np.int64)
expected.columns = cols
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns_size_mismatch(self):
# GH#39510
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
with pytest.raises(ValueError, match="Columns must be same length as key"):
df[["A"]] = (0, 3, 5)
df2 = df.iloc[:, :3] # unique columns
with pytest.raises(ValueError, match="Columns must be same length as key"):
df2[["A"]] = (0, 3, 5)
@pytest.mark.parametrize("cols", [["a", "b", "c"], ["a", "a", "a"]])
def test_setitem_df_wrong_column_number(self, cols):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=cols)
rhs = DataFrame([[10, 11]], columns=["d", "e"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df["a"] = rhs
def test_setitem_listlike_indexer_duplicate_columns(self):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
df[["a", "b"]] = rhs
expected = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
tm.assert_frame_equal(df, expected)
df[["c", "b"]] = rhs
expected = DataFrame([[10, 11, 12, 10]], columns=["a", "b", "b", "c"])
tm.assert_frame_equal(df, expected)
def test_setitem_listlike_indexer_duplicate_columns_not_equal_length(self):
# GH#39403
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11]], columns=["a", "b"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df[["a", "b"]] = rhs
def test_setitem_intervals(self):
df = DataFrame({"A": range(10)})
ser = cut(df["A"], 5)
assert isinstance(ser.cat.categories, IntervalIndex)
# B & D end up as Categoricals
# the remainder are converted to in-line objects
# containing an IntervalIndex.values
df["B"] = ser
df["C"] = np.array(ser)
df["D"] = ser.values
df["E"] = np.array(ser.values)
df["F"] = ser.astype(object)
assert is_categorical_dtype(df["B"].dtype)
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"].dtype)
assert is_interval_dtype(df["D"].cat.categories)
# These go through the Series constructor and so get inferred back
# to IntervalDtype
assert is_interval_dtype(df["C"])
assert is_interval_dtype(df["E"])
# But the Series constructor doesn't do inference on Series objects,
# so setting df["F"] doesn't get cast back to IntervalDtype
assert is_object_dtype(df["F"])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B))
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
tm.assert_index_equal(c(df.C), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df["B"], df["B"])
tm.assert_series_equal(df["B"], df["D"], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df["C"], df["C"])
tm.assert_series_equal(df["C"], df["E"], check_names=False)
def test_setitem_categorical(self):
# GH#35369
df = DataFrame({"h": Series(list("mn")).astype("category")})
df.h = df.h.cat.reorder_categories(["n", "m"])
expected = DataFrame(
{"h": Categorical(["m", "n"]).reorder_categories(["n", "m"])}
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_empty_listlike(self):
# GH#17101
index = Index([], name="idx")
result = DataFrame(columns=["A"], index=index)
result["A"] = []
expected = DataFrame(columns=["A"], index=index)
tm.assert_index_equal(result.index, expected.index)
@pytest.mark.parametrize(
"cols, values, expected",
[
(["C", "D", "D", "a"], [1, 2, 3, 4], 4), # with duplicates
(["D", "C", "D", "a"], [1, 2, 3, 4], 4), # mixed order
(["C", "B", "B", "a"], [1, 2, 3, 4], 4), # other duplicate cols
(["C", "B", "a"], [1, 2, 3], 3), # no duplicates
(["B", "C", "a"], [3, 2, 1], 1), # alphabetical order
(["C", "a", "B"], [3, 2, 1], 2), # in the middle
],
)
def test_setitem_same_column(self, cols, values, expected):
# GH#23239
df = DataFrame([values], columns=cols)
df["a"] = df["a"]
result = df["a"].values[0]
assert result == expected
def test_setitem_multi_index(self):
# GH#7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ["jim", "joe", "jolie"], ["first", "last"], ["left", "center", "right"]
cols = MultiIndex.from_product(it)
index = date_range("20141006", periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df["jim"] = df["jolie"].loc[i, ::-1]
tm.assert_frame_equal(df["jim"], df["jolie"])
np.random.shuffle(j)
df[("joe", "first")] = df[("jolie", "last")].loc[i, j]
tm.assert_frame_equal(df[("joe", "first")], df[("jolie", "last")])
np.random.shuffle(j)
df[("joe", "last")] = df[("jolie", "first")].loc[i, j]
tm.assert_frame_equal(df[("joe", "last")], df[("jolie", "first")])
@pytest.mark.parametrize(
"columns,box,expected",
[
(
["A", "B", "C", "D"],
7,
DataFrame(
[[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "D"],
[7, 8],
DataFrame(
[[1, 2, 7, 8], [3, 4, 7, 8], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "B", "C"],
np.array([7, 8, 9], dtype=np.int64),
DataFrame([[7, 8, 9], [7, 8, 9], [7, 8, 9]], columns=["A", "B", "C"]),
),
(
["B", "C", "D"],
[[7, 8, 9], [10, 11, 12], [13, 14, 15]],
DataFrame(
[[1, 7, 8, 9], [3, 10, 11, 12], [5, 13, 14, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "A", "D"],
np.array([[7, 8, 9], [10, 11, 12], [13, 14, 15]], dtype=np.int64),
DataFrame(
[[8, 2, 7, 9], [11, 4, 10, 12], [14, 6, 13, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "C"],
DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]),
DataFrame(
[[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=["A", "B", "C"]
),
),
],
)
def test_setitem_list_missing_columns(self, columns, box, expected):
# GH#29334
df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"])
df[columns] = box
tm.assert_frame_equal(df, expected)
def test_setitem_list_of_tuples(self, float_frame):
tuples = list(zip(float_frame["A"], float_frame["B"]))
float_frame["tuples"] = tuples
result = float_frame["tuples"]
expected = Series(tuples, index=float_frame.index, name="tuples")
tm.assert_series_equal(result, expected)
def test_setitem_iloc_generator(self):
# GH#39614
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer] = 1
expected = DataFrame({"a": [1, 1, 1], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
def test_setitem_iloc_two_dimensional_generator(self):
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer, 1] = 1
expected = DataFrame({"a": [1, 2, 3], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
class TestSetitemTZAwareValues:
@pytest.fixture
def idx(self):
naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B")
idx = naive.tz_localize("US/Pacific")
return idx
@pytest.fixture
def expected(self, idx):
expected = Series(np.array(idx.tolist(), dtype="object"), name="B")
assert expected.dtype == idx.dtype
return expected
def test_setitem_dt64series(self, idx, expected):
# convert to utc
df = DataFrame(np.random.randn(2, 1), columns=["A"])
df["B"] = idx
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = df["B"]
comp = Series(idx.tz_convert("UTC").tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
def test_setitem_datetimeindex(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
def test_setitem_object_array_of_tzaware_datetimes(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# object array of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
class TestDataFrameSetItemWithExpansion:
# TODO(ArrayManager) update parent (_maybe_update_cacher)
@td.skip_array_manager_not_yet_implemented
def test_setitem_listlike_views(self):
# GH#38148
df = DataFrame({"a": [1, 2, 3], "b": [4, 4, 6]})
# get one column as a view of df
ser = df["a"]
# add columns with list-like indexer
df[["c", "d"]] = np.array([[0.1, 0.2], [0.3, 0.4], [0.4, 0.5]])
# edit in place the first column to check view semantics
df.iloc[0, 0] = 100
expected = Series([100, 2, 3], name="a")
tm.assert_series_equal(ser, expected)
def test_setitem_string_column_numpy_dtype_raising(self):
# GH#39010
df = DataFrame([[1, 2], [3, 4]])
df["0 - Name"] = [5, 6]
expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"])
tm.assert_frame_equal(df, expected)
def test_setitem_empty_df_duplicate_columns(self):
# GH#38521
df = DataFrame(columns=["a", "b", "b"], dtype="float64")
df.loc[:, "a"] = list(range(2))
expected = DataFrame(
[[0, np.nan, np.nan], [1, np.nan, np.nan]], columns=["a", "b", "b"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_expansion_categorical_dtype(self):
# assignment
df = DataFrame(
{"value": np.array(np.random.randint(0, 10000, 100), dtype="int32")}
)
labels = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
df = df.sort_values(by=["value"], ascending=True)
ser = cut(df.value, range(0, 10500, 500), right=False, labels=labels)
cat = ser.values
# setting with a Categorical
df["D"] = cat
str(df)
result = df.dtypes
expected = Series(
[np.dtype("int32"), CategoricalDtype(categories=labels, ordered=False)],
index=["value", "D"],
)
tm.assert_series_equal(result, expected)
# setting with a Series
df["E"] = ser
str(df)
result = df.dtypes
expected = Series(
[
np.dtype("int32"),
CategoricalDtype(categories=labels, ordered=False),
CategoricalDtype(categories=labels, ordered=False),
],
index=["value", "D", "E"],
)
tm.assert_series_equal(result, expected)
result1 = df["D"]
result2 = df["E"]
tm.assert_categorical_equal(result1._mgr.array, cat)
# sorting
ser.name = "E"
tm.assert_series_equal(result2.sort_index(), ser.sort_index())
def test_setitem_scalars_no_index(self):
# GH#16823 / GH#17894
df = DataFrame()
df["foo"] = 1
expected = DataFrame(columns=["foo"]).astype(np.int64)
tm.assert_frame_equal(df, expected)
def test_setitem_newcol_tuple_key(self, float_frame):
assert (
"A",
"B",
) not in float_frame.columns
float_frame["A", "B"] = float_frame["A"]
assert ("A", "B") in float_frame.columns
result = float_frame["A", "B"]
expected = float_frame["A"]
tm.assert_series_equal(result, expected, check_names=False)
def test_frame_setitem_newcol_timestamp(self):
# GH#2155
columns = date_range(start="1/1/2012", end="2/1/2012", freq=BDay())
data = DataFrame(columns=columns, index=range(10))
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works, mostly a smoke-test
assert np.isnan(data[ts]).all()
class TestDataFrameSetItemSlicing:
def test_setitem_slice_position(self):
# GH#31469
df = DataFrame(np.zeros((100, 1)))
df[-4:] = 1
arr = np.zeros((100, 1))
arr[-4:] = 1
expected = DataFrame(arr)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.setitem, tm.iloc])
@pytest.mark.parametrize("box", [Series, np.array, list])
@pytest.mark.parametrize("n", [1, 2, 3])
def test_setitem_broadcasting_rhs(self, n, box, indexer):
# GH#40440
# TODO: Add pandas array as box after GH#40933 is fixed
df = DataFrame([[1, 3, 5]] + [[2, 4, 6]] * n, columns=["a", "b", "c"])
indexer(df)[1:] = box([10, 11, 12])
expected = DataFrame([[1, 3, 5]] + [[10, 11, 12]] * n, columns=["a", "b", "c"])
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.setitem, tm.iloc])
@pytest.mark.parametrize("box", [Series, np.array, list])
@pytest.mark.parametrize("n", [1, 2, 3])
def test_setitem_broadcasting_rhs_mixed_dtypes(self, n, box, indexer):
# GH#40440
# TODO: Add pandas array as box after GH#40933 is fixed
df = DataFrame(
[[1, 3, 5], ["x", "y", "z"]] + [[2, 4, 6]] * n, columns=["a", "b", "c"]
)
indexer(df)[1:] = box([10, 11, 12])
expected = DataFrame(
[[1, 3, 5]] + [[10, 11, 12]] * (n + 1),
columns=["a", "b", "c"],
dtype="object",
)
tm.assert_frame_equal(df, expected)
class TestDataFrameSetItemCallable:
def test_setitem_callable(self):
# GH#12533
df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]})
df[lambda x: "A"] = [11, 12, 13, 14]
exp = DataFrame({"A": [11, 12, 13, 14], "B": [5, 6, 7, 8]})
tm.assert_frame_equal(df, exp)
def test_setitem_other_callable(self):
# GH#13299
def inc(x):
return x + 1
df = DataFrame([[-1, 1], [1, -1]])
df[df > 0] = inc
expected = DataFrame([[-1, inc], [inc, -1]])
|
tm.assert_frame_equal(df, expected)
|
pandas._testing.assert_frame_equal
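# Illustrative sketch: a minimal standalone reproduction of the duplicate-column
# setitem behaviour asserted in the tests above; the frame and values simply
# mirror those expectations.
import pandas as pd

df = pd.DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = pd.DataFrame([[10, 11, 12]], columns=["a", "b", "b"])

# A list-like key writes into every position that matches a label,
# so both "b" columns are replaced in one assignment.
df[["a", "b"]] = rhs
print(df)  # -> one row: 10, 11, 12 under columns a, b, b

# A key containing a new label ("c") expands the frame on the right.
df[["c", "b"]] = rhs
print(df)  # -> one row: 10, 11, 12, 10 under columns a, b, b, c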
|
import matplotlib.pyplot as plt
import pandas
import csv
import os
import re
pattern = re.compile(r'.+#.+\.csv')
for filename in os.listdir('results_csv/plot_target'):
if pattern.match(filename):
csv_name = re.search('#.+', filename).group(0)[1:-4]
x_label = 'epochs'
train_label_acc = 'train_acc'
val_label_acc = 'val_acc'
train_label_loss = 'train_loss'
val_label_loss = 'val_loss'
df = pandas.read_csv(f"results_csv/plot_target/{filename}")
df[train_label_acc] = pandas.to_numeric(df[train_label_acc])
df[val_label_acc] = pandas.to_numeric(df[val_label_acc])
df[train_label_loss] =
|
pandas.to_numeric(df[train_label_loss])
|
pandas.to_numeric
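# Illustrative sketch: a plausible, self-contained continuation of the truncated
# plotting loop above. The column names mirror the labels defined in the snippet;
# the data and the exact plotting style are assumptions.
import matplotlib.pyplot as plt
import pandas as pd

history = pd.DataFrame({
    "train_acc": [0.60, 0.72, 0.81],
    "val_acc": [0.55, 0.66, 0.70],
    "train_loss": [1.00, 0.70, 0.52],
    "val_loss": [1.10, 0.85, 0.74],
})

fig, (ax_acc, ax_loss) = plt.subplots(1, 2, figsize=(10, 4))
history[["train_acc", "val_acc"]].plot(ax=ax_acc, title="accuracy")
history[["train_loss", "val_loss"]].plot(ax=ax_loss, title="loss")
ax_acc.set_xlabel("epochs")
ax_loss.set_xlabel("epochs")
plt.show()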
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/11/2 21:08
Desc: Tonghuashun (10jqka) - Data Center - Technical Stock Screening
http://data.10jqka.com.cn/rank/cxg/
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from akshare.datasets import get_ths_js
def _get_file_content_ths(file: str = "ths.js") -> str:
"""
Get the content of the bundled JS file
:param file: JS file name
:type file: str
:return: file content
:rtype: str
"""
setting_file_path = get_ths_js(file)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
def stock_rank_cxg_ths(symbol: str = "创月新高") -> pd.DataFrame:
"""
Tonghuashun (10jqka) - Data Center - Technical Stock Screening - New Highs
http://data.10jqka.com.cn/rank/cxg/
:param symbol: choice of {"创月新高", "半年新高", "一年新高", "历史新高"}, i.e. new monthly / half-year / one-year / all-time high
:type symbol: str
:return: new-high data
:rtype: pandas.DataFrame
"""
symbol_map = {
"创月新高": "4",
"半年新高": "3",
"一年新高": "2",
"历史新高": "1",
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxg/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxg/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = ["序号", "股票代码", "股票简称", "涨跌幅", "换手率", "最新价", "前期高点", "前期高点日期"]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].str.strip("%")
big_df["换手率"] = big_df["换手率"].str.strip("%")
big_df["前期高点日期"] = pd.to_datetime(big_df["前期高点日期"]).dt.date
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["前期高点"] = pd.to_numeric(big_df["前期高点"])
return big_df
def stock_rank_cxd_ths(symbol: str = "创月新低") -> pd.DataFrame:
"""
Tonghuashun (10jqka) - Data Center - Technical Stock Screening - New Lows
http://data.10jqka.com.cn/rank/cxd/
:param symbol: choice of {"创月新低", "半年新低", "一年新低", "历史新低"}, i.e. new monthly / half-year / one-year / all-time low
:type symbol: str
:return: new-low data
:rtype: pandas.DataFrame
"""
symbol_map = {
"创月新低": "4",
"半年新低": "3",
"一年新低": "2",
"历史新低": "1",
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxd/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxd/board/{symbol_map[symbol]}/field/stockcode/order/asc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = ["序号", "股票代码", "股票简称", "涨跌幅", "换手率", "最新价", "前期低点", "前期低点日期"]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].str.strip("%")
big_df["换手率"] = big_df["换手率"].str.strip("%")
big_df["前期低点日期"] = pd.to_datetime(big_df["前期低点日期"]).dt.date
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["前期低点"] = pd.to_numeric(big_df["前期低点"])
return big_df
def stock_rank_lxsz_ths() -> pd.DataFrame:
"""
Tonghuashun (10jqka) - Data Center - Technical Stock Screening - Consecutive Rises
http://data.10jqka.com.cn/rank/lxsz/
:return: consecutive-rise data
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxsz/field/lxts/order/desc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxsz/field/lxts/order/desc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"收盘价",
"最高价",
"最低价",
"连涨天数",
"连续涨跌幅",
"累计换手率",
"所属行业",
]
big_df["连续涨跌幅"] = big_df["连续涨跌幅"].str.strip("%")
big_df["累计换手率"] = big_df["累计换手率"].str.strip("%")
big_df["连续涨跌幅"] = pd.to_numeric(big_df["连续涨跌幅"])
big_df["累计换手率"] = pd.to_numeric(big_df["累计换手率"])
big_df["收盘价"] = pd.to_numeric(big_df["收盘价"])
big_df["最高价"] = pd.to_numeric(big_df["最高价"])
big_df["最低价"] = pd.to_numeric(big_df["最低价"])
big_df["连涨天数"] = pd.to_numeric(big_df["连涨天数"])
return big_df
def stock_rank_lxxd_ths() -> pd.DataFrame:
"""
Tonghuashun (10jqka) - Data Center - Technical Stock Screening - Consecutive Declines
http://data.10jqka.com.cn/rank/lxxd/
:return: consecutive-decline data
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxxd/field/lxts/order/desc/page/1/ajax/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/lxxd/field/lxts/order/desc/page/{page}/ajax/1/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"收盘价",
"最高价",
"最低价",
"连涨天数",
"连续涨跌幅",
"累计换手率",
"所属行业",
]
big_df["连续涨跌幅"] = big_df["连续涨跌幅"].str.strip("%")
big_df["累计换手率"] = big_df["累计换手率"].str.strip("%")
big_df["连续涨跌幅"] = pd.to_numeric(big_df["连续涨跌幅"])
big_df["累计换手率"] = pd.to_numeric(big_df["累计换手率"])
big_df["收盘价"] = pd.to_numeric(big_df["收盘价"])
big_df["最高价"] = pd.to_numeric(big_df["最高价"])
big_df["最低价"] = pd.to_numeric(big_df["最低价"])
big_df["连涨天数"] = pd.to_numeric(big_df["连涨天数"])
return big_df
def stock_rank_cxfl_ths() -> pd.DataFrame:
"""
Tonghuashun (10jqka) - Data Center - Technical Stock Screening - Sustained Volume Expansion
http://data.10jqka.com.cn/rank/cxfl/
:return: sustained-volume-expansion data
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxfl/field/count/order/desc/ajax/1/free/1/page/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxfl/field/count/order/desc/ajax/1/free/1/page/{page}/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"涨跌幅",
"最新价",
"成交量",
"基准日成交量",
"放量天数",
"阶段涨跌幅",
"所属行业",
]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].astype(str).str.strip("%")
big_df["阶段涨跌幅"] = big_df["阶段涨跌幅"].astype(str).str.strip("%")
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["阶段涨跌幅"] = pd.to_numeric(big_df["阶段涨跌幅"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["放量天数"] = pd.to_numeric(big_df["放量天数"])
return big_df
def stock_rank_cxsl_ths() -> pd.DataFrame:
"""
Tonghuashun (10jqka) - Data Center - Technical Stock Screening - Sustained Volume Contraction
http://data.10jqka.com.cn/rank/cxsl/
:return: sustained-volume-contraction data
:rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxsl/field/count/order/desc/ajax/1/free/1/page/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/cxsl/field/count/order/desc/ajax/1/free/1/page/{page}/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"涨跌幅",
"最新价",
"成交量",
"基准日成交量",
"缩量天数",
"阶段涨跌幅",
"所属行业",
]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].astype(str).str.strip("%")
big_df["阶段涨跌幅"] = big_df["阶段涨跌幅"].astype(str).str.strip("%")
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["阶段涨跌幅"] = pd.to_numeric(big_df["阶段涨跌幅"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
big_df["缩量天数"] = pd.to_numeric(big_df["缩量天数"])
return big_df
def stock_rank_xstp_ths(symbol: str = "500日均线") -> pd.DataFrame:
"""
Tonghuashun (10jqka) - Data Center - Technical Stock Screening - Upward Breakout
http://data.10jqka.com.cn/rank/xstp/
:param symbol: choice of {"5日均线", "10日均线", "20日均线", "30日均线", "60日均线", "90日均线", "250日均线", "500日均线"}, i.e. the N-day moving average being broken above
:type symbol: str
:return: upward-breakout data
:rtype: pandas.DataFrame
"""
symbol_map = {
"5日均线": 5,
"10日均线": 10,
"20日均线": 20,
"30日均线": 30,
"60日均线": 60,
"90日均线": 90,
"250日均线": 250,
"500日均线": 500,
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/xstp/board/{symbol_map[symbol]}/order/asc/ajax/1/free/1/page/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/xstp/board/{symbol_map[symbol]}/order/asc/ajax/1/free/1/page/{page}/free/1/"
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text, converters={"股票代码": str})[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
"序号",
"股票代码",
"股票简称",
"最新价",
"成交额",
"成交量",
"涨跌幅",
"换手率",
]
big_df["股票代码"] = big_df["股票代码"].astype(str).str.zfill(6)
big_df["涨跌幅"] = big_df["涨跌幅"].astype(str).str.strip("%")
big_df["换手率"] = big_df["换手率"].astype(str).str.strip("%")
big_df["涨跌幅"] = pd.to_numeric(big_df["涨跌幅"])
big_df["换手率"] = pd.to_numeric(big_df["换手率"])
big_df["最新价"] = pd.to_numeric(big_df["最新价"])
return big_df
def stock_rank_xxtp_ths(symbol: str = "500日均线") -> pd.DataFrame:
"""
Tonghuashun (10jqka) - Data Center - Technical Stock Screening - Downward Breakout
http://data.10jqka.com.cn/rank/xxtp/
:param symbol: choice of {"5日均线", "10日均线", "20日均线", "30日均线", "60日均线", "90日均线", "250日均线", "500日均线"}, i.e. the N-day moving average being broken below
:type symbol: str
:return: downward-breakout data
:rtype: pandas.DataFrame
"""
symbol_map = {
"5日均线": 5,
"10日均线": 10,
"20日均线": 20,
"30日均线": 30,
"60日均线": 60,
"90日均线": 90,
"250日均线": 250,
"500日均线": 500,
}
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/xxtp/board/{symbol_map[symbol]}/order/asc/ajax/1/free/1/page/1/free/1/"
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
total_page = soup.find("span", attrs={"class": "page_info"}).text.split("/")[1]
except AttributeError as e:
total_page = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page) + 1), leave=False):
v_code = js_code.call("v")
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"Cookie": f"v={v_code}",
}
url = f"http://data.10jqka.com.cn/rank/xxtp/board/{symbol_map[symbol]}/order/asc/ajax/1/free/1/page/{page}/free/1/"
r = requests.get(url, headers=headers)
temp_df =
|
pd.read_html(r.text, converters={"股票代码": str})
|
pandas.read_html
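# Illustrative usage sketch for the screeners above, assuming network access,
# the bundled ths.js, and that the functions above are importable in the
# current session; the turnover threshold is an arbitrary example value.
df_high = stock_rank_cxg_ths(symbol="创月新高")
print(df_high.head())

# Columns such as 涨跌幅 and 换手率 are numeric after the cleaning above,
# so ordinary pandas filtering and sorting apply.
active = df_high[df_high["换手率"] > 5].sort_values("涨跌幅", ascending=False)
print(active[["股票代码", "股票简称", "涨跌幅"]].head())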
|
import os
import glob
import psycopg2
import pandas as pd
from sql_queries import *
def process_song_file(cur, filepath):
"""
This procedure processes a song file whose filepath has been provided as an argument.
It extracts the song information in order to store it into the songs table.
Then it extracts the artist information in order to store it into the artists table.
INPUTS:
* cur the cursor variable
* filepath the file path to the song file
"""
# open song file
df = pd.read_json(filepath, lines=True)
# insert song record
song_data = df[['song_id','title','artist_id','year','duration']].values[0].tolist()
cur.execute(song_table_insert, song_data)
# insert artist record
artist_data = df[['artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude']].values[0].tolist()
cur.execute(artist_table_insert, artist_data)
def process_log_file(cur, filepath):
"""
This procedure processes the log files with the provided path, creating a pandas dataframe with the data and filtering by the 'page' = 'NextSong' attribute.
From that dataframe, the following tables are created: 'time', 'users', and 'songplays'.
INPUTS:
* cur the cursor variable
* filepath the file path to the log file
"""
# open log file
df = pd.read_json(filepath, lines=True)
# filter by NextSong action
df = df[df['page']=='NextSong']
# convert timestamp column to datetime
t = pd.to_datetime(df['ts'], unit='ms')
df['ts'] =
|
pd.to_datetime(df['ts'], unit='ms')
|
pandas.to_datetime
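# Illustrative sketch: the same millisecond-timestamp conversion, shown
# standalone on made-up epoch values.
import pandas as pd

ts = pd.Series([1541903636796, 1541903770796], name="ts")
t = pd.to_datetime(ts, unit="ms")
print(t)
# Accessors like t.dt.hour, t.dt.day and t.dt.month are what a time-dimension
# table is typically derived from at this point in such an ETL.
print(t.dt.hour.tolist(), t.dt.day.tolist())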
|
import dash_tabulator
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
from dash_extensions.javascript import Namespace
import pandas as pd
from faker import Faker
import random
fake = Faker()
external_scripts = ['https://oss.sheetjs.com/sheetjs/xlsx.full.min.js']
external_stylesheets = ['https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css',
'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.1/css/all.min.css']
app = dash.Dash(__name__, external_scripts=external_scripts, external_stylesheets=external_stylesheets)
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
# The assets folder is where client-side JS methods can be declared;
# a reference can be passed using Namespace, which then gets mapped client side.
# see https://github.com/preftech/dash-tabulator/pull/11
# The namespace here must match the name space of the JavaScript asset.
# See https://github.com/pjaol/dash-tabulator-playground/blob/main/assets/playground.js
ns = Namespace("myNamespace", "tabulator")
columns = [
{ "title": "Name", "field": "name", "width": 150, "headerFilter":True, "editor":"input"},
{ "title": "Age", "field": "age", "hozAlign": "left", "formatter": "progress" },
{ "title": "Favourite Color", "field": "col", "headerFilter":True },
{ "title": "Date Of Birth", "field": "dob", "hozAlign": "center" },
{ "title": "Rating", "field": "rating", "hozAlign": "center", "formatter": "star" },
{ "title": "Passed?", "field": "passed", "hozAlign": "center", "formatter": "tickCross" }
]
# Options and Headers is where a lot of tabulator magic happens
options = {
# groupBy is one of the most popular tabulator functions
# You can do it by field, JS Functions etc. http://tabulator.info/docs/4.8/group
"groupBy": "col",
# selectable allows you to interact with the data
"selectable":"false",
# An example of passing a client side JS function using NameSpace
# See https://github.com/pjaol/dash-tabulator-playground/blob/main/assets/playground.js
#
# These callbacks have a slightly modified signature compared to the Tabulator callbacks
# http://tabulator.info/docs/4.8/callbacks
# Where the table being referenced is added as the last argument to the callback
# eg.
# Tabulator's cellClick => function (e, cell) {}
# dash-tabulator cellClick => function (e, cell, table){}
'cellClick': ns('cellLog'),
"clipBoard" : "true",
# adding a row freeze / double click example
"rowDblClick" : ns("rowDblClick")
}
downloadButtonType = {"css": "btn btn-primary", "text":"Export", "type":"csv"}
clearFilterButtonType = {"css": "btn btn-outline-dark", "text":"Clear Filters"}
initialHeaderFilter = [{"field":"col", "value":"blue"}]
app.layout = html.Div([
dash_tabulator.DashTabulator(
id='tabulator',
options=options,
downloadButtonType=downloadButtonType,
clearFilterButtonType=clearFilterButtonType,
),
html.Div(id='output'),
dcc.Interval(
id='interval-component-iu',
interval=1*10, # in milliseconds
n_intervals=0,
max_intervals=0
)
])
@app.callback([ Output('tabulator', 'columns'),
Output('tabulator', 'data'),
Output('tabulator', 'initialHeaderFilter')],
[Input('interval-component-iu', 'n_intervals')])
def initialize(val):
# Here we're going to test what to do with Nones / nulls.
# Nones will appear as "null" in Excel exports, so it's easier to convert them to "".
# This could be done client side, but it is faster to do here with a dataframe.
d = [[1, "<NAME>",None,None,None,None],[2,"<NAME>", 1, None, None, "foo"]]
for i in range(3, 100):
p = [i, fake.name(), random.randint(10, 80), fake.color_name(), fake.date_of_birth().strftime("%m/%d/%Y"), random.randint(0, 5), "foo"]
d.append(p)
df =
|
pd.DataFrame(d, columns=["id", "name", "age", "col","dob","rating","print" ])
|
pandas.DataFrame
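# Illustrative sketch, following the comment about Nones above: one way the frame
# could be cleaned and converted for the table component. The fillna("") choice and
# the records conversion are assumptions, not taken from the original callback.
import pandas as pd

frame = pd.DataFrame(
    [[1, None, None], [2, 30, "blue"]],
    columns=["id", "age", "col"],
)
frame = frame.fillna("")            # export as empty cells instead of "null"
records = frame.to_dict("records")  # list-of-dicts shape for a table data prop
print(records)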
|
from flask import Flask, render_template, redirect, url_for, request, send_file
import pandas as pd
import numpy as np
from pandas import DataFrame, read_csv
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import matplotlib.pyplot as plt
import jinja2
plt.style.use('ggplot')
from io import BytesIO
import seaborn as sns
app = Flask(__name__)
#df = pd.read_csv("movie_metadataproject.csv")
#df["budget"] = df["budget"].fillna(0)
#df["gross"] = df["gross"].fillna(0)
#df['Profit'] = df['gross'] - df['budget']
@app.route("/", methods=['GET', 'POST'])
def home():
return render_template("home.html")
@app.route("/input", methods = ['POST','GET'])
def input():
if request.method == 'POST':
moviename = request.form["moviename"]
directorname = request.form["dname"]
actor1 = request.form["a1name"]
actor2 = request.form["a2name"]
actor3 = request.form["a3name"]
genres = request.form.getlist("genre")
language = request.form.get("lang")
genred = concatenate_list(genres)
iparray = [language,directorname,actor1,actor2,actor3,moviename,genred,0,0,0]
df = pd.read_csv("movie_metadataproject.csv")
#print(df.shape)
df = pd.read_csv("movie_metadataproject.csv")
df["budget"] = df["budget"].fillna(0)
df["gross"] = df["gross"].fillna(0)
df['Profit'] = df['gross'] - df['budget']
df = df.drop(['aspect_ratio','movie_imdb_link','plot_keywords'],axis =1)
df = df.dropna()
#print(df.shape)
df= df[df['language'] != "Telugu"]
df= df[df['language'] != "Arabic"]
df= df[df['language'] != "Aramaic"]
df= df[df['language'] != "Bosnian"]
df= df[df['language'] != "Czech"]
df= df[df['language'] != "Dzongkha"]
df= df[df['language'] != "Filipino"]
df= df[df['language'] != "Hungarian"]
df= df[df['language'] != "Icelandic"]
df= df[df['language'] != "Kazakh"]
df= df[df['language'] != "Maya"]
df= df[df['language'] != "Mongolian"]
df= df[df['language'] != "None"]
df= df[df['language'] != "Romanian"]
df= df[df['language'] != "Russian"]
df= df[df['language'] != "Swedish"]
df= df[df['language'] != "Vietnamese"]
df= df[df['language'] != "Zulu"]
df_usefuldata = df[['language','director_name','actor_1_name','actor_2_name','actor_3_name','movie_title','genres','gross','budget','Profit']]
df_usefuldata = df_usefuldata.dropna()
df_appendedlang = df_usefuldata.append(pd.Series([iparray[0],iparray[1],iparray[2],iparray[3],iparray[4],iparray[5],iparray[6],iparray[7],iparray[8],iparray[9]], index=df_usefuldata.columns), ignore_index=True)
#print(df_appendedlang.shape)
df_appendedlang1 = df_appendedlang[df_appendedlang['language'] != 'None']
df_appendedlang1 = df_appendedlang1.dropna()
#print(df_appendedlang1.shape)
column_values1 = df_appendedlang1["language"].unique().tolist()
#print(column_values1)
column_values2 = df_appendedlang1["director_name"].unique().tolist()
df_appendedlang2 = df_appendedlang1
#df_appendedlang3 = df_appendedlang1
for value in column_values1:
df_appendedlang2 = pd.concat([df_appendedlang2,pd.get_dummies(value)], axis=1)
for value in column_values1:
df_appendedlang2[value] = 0
for value in column_values1:
df_appendedlang2.loc[df_appendedlang2['language'] == value,value] = 1
drop_cols = ['language','genres','movie_title','director_name','actor_1_name','actor_2_name','actor_3_name','gross','budget','Profit']
for dropCol in drop_cols:
df_appendedlang2 = df_appendedlang2.drop(dropCol,axis=1)
df_appendedlang2 = df_appendedlang2.dropna()
#print(df_appendedlang2)
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters = 29)
kmeans = kmeans.fit(df_appendedlang2)
df_appendedlang2['cluster'] = kmeans.labels_
df_appendedlang3 = pd.concat([df_appendedlang1,df_appendedlang2], axis=1, ignore_index=False)
df_appendedlang3 = df_appendedlang3.dropna()
df_appendedlang3 = df_appendedlang3.loc[df_appendedlang3['language'] == iparray[0]]
df_appendedlang3 = df_appendedlang3.drop('cluster',axis=1)
for value in column_values1:
df_appendedlang3 = df_appendedlang3.drop(value,axis=1)
# print(df_appendedlang3.shape)
df_appendedlang4 = df_appendedlang3
for value in column_values2:
df_appendedlang4 = pd.concat([df_appendedlang4,pd.get_dummies(value)], axis=1)
for value in column_values2:
df_appendedlang4[value] = 0
for value in column_values2:
df_appendedlang4.loc[df_appendedlang4['director_name'] == value,value] = 1
for dropCol in drop_cols:
df_appendedlang4 = df_appendedlang4.drop(dropCol,axis=1)
# print(df_appendedlang4.shape)
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters = 3)
kmeans = kmeans.fit(df_appendedlang4)
df_appendedlang4['cluster'] = kmeans.labels_
df_appendedlang5 = pd.concat([df_appendedlang3,df_appendedlang4], axis=1, ignore_index=False)
df_appendedlang5 = df_appendedlang5.dropna()
cnum = df_appendedlang5.loc[df_appendedlang5['director_name'] == iparray[1],'cluster']
print(cnum.size)
if cnum.size == 1:
cnumb = cnum.item()
else:
cnumb = cnum.iloc[0]  # first match, positional to avoid label-based lookup errors
df_appendedlang5 = df_appendedlang5.loc[df_appendedlang5['cluster'] == cnumb]
df_appendedlang5 = df_appendedlang5.drop('cluster',axis=1)
for value in column_values2:
df_appendedlang5 = df_appendedlang5.drop(value,axis=1)
df_appendedlang6 = df_appendedlang5
column_values6 = df_appendedlang6["actor_1_name"].unique().tolist()
#column_values6
column_values7 = df_appendedlang6["actor_2_name"].unique().tolist()
column_values8 = df_appendedlang6["actor_3_name"].unique().tolist()
column678 = column_values6+column_values7+column_values8
unique_values678 = pd.unique(column678)
# for v in unique_values678:
# print(v)
for value in unique_values678:
df_appendedlang6 = pd.concat([df_appendedlang6,pd.get_dummies(value)], axis=1)
df_appendedlang6[value] = 0
df_appendedlang6.loc[df_appendedlang6['actor_1_name'] == value,value] = 1
df_appendedlang6.loc[df_appendedlang6['actor_2_name'] == value,value] = 1
df_appendedlang6.loc[df_appendedlang6['actor_3_name'] == value,value] = 1
drop_cols = ['language','director_name','genres','movie_title','gross','budget','Profit','actor_1_name','actor_2_name','actor_3_name']
for value in drop_cols:
df_appendedlang6 = df_appendedlang6.drop(value,axis=1)
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters = 2)
kmeans = kmeans.fit(df_appendedlang6)
df_appendedlang6['cluster'] = kmeans.labels_
df_final = pd.concat([df_appendedlang5,df_appendedlang6], axis=1, ignore_index=False)
df_final = df_final.dropna()
min_budget = df_final['budget'].where(df_final['budget'].gt(0)).min(0)
print(df_final['budget'].unique().tolist())
print(min_budget)
max_budget = df_final['budget'].max()
print(max_budget)
print(df_final['Profit'].unique().tolist())
min_profit = df_final['Profit'].min()
print(min_profit)
max_profit = df_final['Profit'].max()
print(max_profit)
return redirect(url_for('result', min_budget= min_budget, max_budget= max_budget, min_profit= min_profit,max_profit=max_profit))
return render_template('input.html')
def concatenate_list(items):
result = ''
for element in items:
if result == '':
result = element
else:
result = result + "|" + element
return result
#-----------------------------------------DATA VISUALIZATION--------------------------------------
def split(x):
df =
|
pd.read_csv("movie_metadataproject.csv")
|
pandas.read_csv
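# Design note: the long run of per-language exclusions above can be written more
# compactly with Series.isin. A sketch of the equivalent filter, assuming the same
# CSV is present; the list is copied from the exclusions above, and rows with
# other or missing languages are kept either way.
import pandas as pd

excluded_languages = [
    "Telugu", "Arabic", "Aramaic", "Bosnian", "Czech", "Dzongkha",
    "Filipino", "Hungarian", "Icelandic", "Kazakh", "Maya", "Mongolian",
    "None", "Romanian", "Russian", "Swedish", "Vietnamese", "Zulu",
]
df = pd.read_csv("movie_metadataproject.csv")
df = df[~df["language"].isin(excluded_languages)]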
|
import sys
import os
import logging
import datetime
import pandas as pd
from job import Job, Trace
from policies import ShortestJobFirst, FirstInFirstOut, ShortestRemainingTimeFirst, QuasiShortestServiceFirst
sys.path.append('..')
def simulate_vc(trace, vc, placement, log_dir, policy, logger, start_ts, *args):
if policy == 'sjf':
scheduler = ShortestJobFirst(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'fifo':
scheduler = FirstInFirstOut(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'srtf':
scheduler = ShortestRemainingTimeFirst(
trace, vc, placement, log_dir, logger, start_ts)
elif policy == 'qssf':
scheduler = QuasiShortestServiceFirst(
trace, vc, placement, log_dir, logger, start_ts, args[0])
scheduler.simulate()
logger.info(f'Finish {vc.vc_name}')
return True
def get_available_schedulers():
return ['fifo', 'sjf', 'srtf', 'qssf']
def get_available_placers():
return ['random', 'consolidate', 'consolidateFirst']
def trace_process(dir, date_range):
start = '2020-04-01 00:00:00'
df = pd.read_csv(dir+'/cluster_log.csv', parse_dates=['submit_time'], usecols=['job_id', 'user', 'vc', 'jobname', 'gpu_num',
'cpu_num', 'state', 'submit_time', 'duration'])
# Consider gpu jobs only
df = df[df['gpu_num'] > 0]
# VC filter
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vc_list = vc_dict.keys()
df = df[df['vc'].isin(vc_list)]
df = df[df['submit_time'] >= pd.Timestamp(start)]
df['submit_time'] = df['submit_time'].apply(
lambda x: int(datetime.datetime.timestamp(pd.Timestamp(x))))
# Normalizing
df['submit_time'] = df['submit_time'] - df.iloc[0]['submit_time']
df['remain'] = df['duration']
df[['start_time', 'end_time']] = sys.maxsize
df[['ckpt_times', 'queue', 'jct']] = 0
df['status'] = None
# Slicing simulation part
begin = (pd.Timestamp(date_range[0])-pd.Timestamp(start)).total_seconds()
end = (pd.Timestamp(date_range[1])-pd.Timestamp(start)).total_seconds()
df = df[(df['submit_time'] >= begin) & (df['submit_time'] <= end)]
df.sort_values(by='submit_time', inplace=True)
df.reset_index(inplace=True, drop=True)
return df, begin
def trace_philly_process(dir, date_range):
start = '2017-10-01 00:00:00'
df = pd.read_csv(dir+'/cluster_log.csv', parse_dates=['submit_time'], usecols=['user', 'vc', 'jobname', 'gpu_num',
'state', 'submit_time', 'duration'])
# Consider gpu jobs only
df = df[df['gpu_num'] > 0]
# VC filter
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vc_list = vc_dict.keys()
df = df[df['vc'].isin(vc_list)]
df = df[df['submit_time'] >= pd.Timestamp(start)]
df['submit_time'] = df['submit_time'].apply(
lambda x: int(datetime.datetime.timestamp(pd.Timestamp(x))))
df['state'] = df['state'].replace('Pass', 'COMPLETED')
df['state'] = df['state'].replace('Failed', 'FAILED')
df['state'] = df['state'].replace('Killed', 'CANCELLED')
# Normalizing
df['submit_time'] = df['submit_time'] - df.iloc[0]['submit_time']
df['remain'] = df['duration']
df[['start_time', 'end_time']] = sys.maxsize
df[['ckpt_times', 'queue', 'jct']] = 0
df['status'] = None
# Slicing simulation part
begin = (pd.Timestamp(date_range[0])-pd.Timestamp(start)).total_seconds()
end = (pd.Timestamp(date_range[1])-pd.Timestamp(start)).total_seconds()
df = df[(df['submit_time'] >= begin) & (df['submit_time'] <= end)]
df.sort_values(by='submit_time', inplace=True)
df.reset_index(inplace=True, drop=True)
return df, begin
def trace_parser(df):
trace = Trace()
for _, series in df.iterrows():
trace.append_job(Job(series))
trace.sort_jobs('submit_time')
return trace
def logger_init(file):
logger = logging.getLogger()
handler_file = logging.FileHandler(f'{file}.log', 'w')
handler_stream = logging.StreamHandler() # sys.stdout
logger.setLevel(logging.INFO)
handler_file.setLevel(logging.INFO)
handler_stream.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s | %(processName)s | %(message)s', datefmt='%Y %b %d %H:%M:%S')
handler_file.setFormatter(formatter)
handler_stream.setFormatter(formatter)
logger.addHandler(handler_file)
logger.addHandler(handler_stream)
return logger
def cluster_concatenate(policy, placer, log_dir, dir):
prefix = f'{policy}_{placer}'
if not os.path.exists(log_dir+'/all'):
os.mkdir(log_dir+'/all')
vc_dict = pd.read_pickle(dir+'/vc_dict_homo.pkl')
vcs = list(vc_dict.keys())
'''Log'''
cluster_log = pd.DataFrame()
for vc in vcs:
vc_log =
|
pd.read_csv(f'{log_dir}/{vc}/{prefix}_{vc}_log.csv')
|
pandas.read_csv
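# Illustrative sketch: a plausible continuation of cluster_concatenate, stacking
# per-VC logs into one cluster-wide frame. The toy frames stand in for the CSVs
# read in the loop above; the real columns come from the scheduler's log writer.
import pandas as pd

vc_logs = [
    pd.DataFrame({"job_id": [1, 2], "vc": ["vc1", "vc1"], "jct": [10, 20]}),
    pd.DataFrame({"job_id": [3], "vc": ["vc2"], "jct": [5]}),
]
cluster_log = pd.concat(vc_logs, ignore_index=True)
cluster_log = cluster_log.sort_values(by="job_id").reset_index(drop=True)
print(cluster_log)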
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
from collections import defaultdict  # used by total_count below
def clean_df(df, col1, col2, look_for):
'''
INPUT:
df - the pandas dataframe you want to search
col1 - the column name you want to look through
col2 - the column you want to count values from
look_for - a list of strings you want to search for in each row of df[col]
OUTPUT:
new_df - a dataframe of each look_for with col2 values
'''
new_df = pd.DataFrame(columns=[col1,col2])
#loop through list of ed types
for val in look_for:
#loop through rows
for idx in range(df.shape[0]):
#skip rows where either value is NaN (NaN != NaN), then keep matching rows
if df[col1][idx] == df[col1][idx] and df[col2][idx] == df[col2][idx]:
if val in df[col1][idx]:
x2 = df[col2][idx]
x1 = val
new_df = new_df.append(pd.DataFrame([[x1,x2]],columns=[col1,col2]))
# new_df = pd.DataFrame(pd.Series(new_df)).reset_index()
# new_df.columns = [col1, col2]
# new_df.sort_values('count', ascending=False, inplace=True)
return new_df
##Function to group "At least once each week" and "At least once each day" together as 1
def frequency(row):
if row['StackOverflowFoundAnswer'] == "At least once each week" or row['StackOverflowFoundAnswer'] == "At least once each day":
val = 1
else:
val = 0
return val
def total_count(df, col1, col2, look_for):
'''
INPUT:
df - the pandas dataframe you want to search
col1 - the column name you want to look through
col2 - the column you want to count values from
look_for - a list of strings you want to search for in each row of df[col]
OUTPUT:
new_df - a dataframe of each look_for with the count of how often it shows up
'''
new_df = defaultdict(int)
#loop through list of ed types
for val in look_for:
#loop through rows
for idx in range(df.shape[0]):
#if the ed type is in the row add 1
if val in df[col1][idx]:
new_df[val] += int(df[col2][idx])
new_df = pd.DataFrame(
|
pd.Series(new_df)
|
pandas.Series
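# Illustrative sketch: the commented-out lines in clean_df above hint at how
# total_count probably finishes; this is that reshaping step on made-up counts,
# with illustrative column names.
import pandas as pd
from collections import defaultdict

counts = defaultdict(int)
counts["Bachelor's degree"] += 3
counts["Master's degree"] += 2

summary = pd.DataFrame(pd.Series(counts)).reset_index()
summary.columns = ["FormalEducation", "count"]
summary = summary.sort_values("count", ascending=False)
print(summary)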
|
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
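# Illustrative sketch: a standalone look at what the repr helpers above scan for.
# A frame wider than display.width wraps its repr and continues lines with a
# trailing backslash; whether wrapping actually triggers depends on option
# defaults and terminal detection, so treat this as orientation only.
import numpy as np
import pandas as pd

wide = pd.DataFrame(np.zeros((3, 20)))
with pd.option_context("display.width", 40, "display.expand_frame_repr", True):
    r = repr(wide)
wrapped = any(line.endswith("\\") for line in r.splitlines())
print(wrapped)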
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out of max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# out of vertical bounds cannot result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [NaT] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [NaT] * 5 + [Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join([re.sub(r"\s+", " ", x).strip() for x in lines[1:]])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
|
tm.assert_series_equal(recons["B"], biggie["B"])
|
pandas._testing.assert_series_equal
|
import pandas as pd
class Basic:
    def __init__(self, data):
        self.data = data
    def get_shape(self):
        return self.data.shape
    def get_missing_value(self):
        return self.data.isnull()
    def get_count_missing_value(self):
        return self.data.isnull().sum()
    def get_percentage_missing_values(self, missing_value_count):
        return missing_value_count / len(self.data)
    # This function is used to get the column names
def get_columns_names(self, df):
column =
|
pd.DataFrame(df.columns)
|
pandas.DataFrame
|
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import altair_transform
from altair_transform.transform.aggregate import AGG_REPLACEMENTS
AGGREGATES = [
"argmax",
"argmin",
"average",
"count",
"distinct",
"max",
"mean",
"median",
"min",
"missing",
"q1",
"q3",
"ci0",
"ci1",
"stderr",
"stdev",
"stdevp",
"sum",
"valid",
"values",
"variance",
"variancep",
]
AGG_SKIP = ["ci0", "ci1", "values"] # These require scipy.
@pytest.fixture
def data():
rand = np.random.RandomState(42)
return pd.DataFrame(
{
"x": rand.randint(0, 100, 12),
"y": rand.randint(0, 100, 12),
"t": pd.date_range("2012-01-15", freq="M", periods=12),
"i": range(12),
"c": list("AAABBBCCCDDD"),
"d": list("ABCABCABCABC"),
}
)
def test_calculate_transform(data):
transform = {"calculate": "datum.x + datum.y", "as": "z"}
out1 = altair_transform.apply(data, transform)
out2 = data.copy()
out2["z"] = data.x + data.y
assert_frame_equal(out1, out2)
@pytest.mark.parametrize("groupby", [True, False])
@pytest.mark.parametrize("op", set(AGGREGATES) - set(AGG_SKIP))
def test_joinaggregate_transform(data, groupby, op):
field = "x"
col = "z"
group = "c"
transform = {"joinaggregate": [{"op": op, "field": field, "as": col}]}
if groupby:
transform["groupby"] = [group]
op = AGG_REPLACEMENTS.get(op, op)
out = altair_transform.apply(data, transform)
def validate(group):
return np.allclose(group[field].aggregate(op), group[col])
if groupby:
assert out.groupby(group).apply(validate).all()
else:
assert validate(out)
def test_quantile_values():
np.random.seed(0)
data = pd.DataFrame(
{"x": np.random.randn(12), "C": np.random.choice(["A", "B"], 12)}
)
transform = {"quantile": "x", "groupby": ["C"], "as": ["p", "v"], "step": 0.1}
# Copied from vega editor for above data/transform
expected = pd.DataFrame(
[
["A", 0.05, -0.853389779139604],
["A", 0.15, -0.6056135776659901],
["A", 0.25, -0.3578373761923762],
["A", 0.35, -0.12325942278589436],
["A", 0.45, 0.04532729028492671],
["A", 0.55, 0.21391400335574778],
["A", 0.65, 0.38250071642656897],
["A", 0.75, 0.7489619629456958],
["A", 0.85, 1.1549981161544833],
["A", 0.95, 1.5610342693632706],
["B", 0.05, -0.016677003759505288],
["B", 0.15, 0.15684925302119532],
["B", 0.25, 0.336128799065637],
["B", 0.35, 0.6476262524884882],
["B", 0.45, 0.9543858525126119],
["B", 0.55, 0.9744405491187167],
["B", 0.65, 1.2402825216772193],
["B", 0.75, 1.5575946277597235],
["B", 0.85, 1.8468937659906184],
["B", 0.95, 2.1102258760334363],
],
columns=["C", "p", "v"],
)
out = altair_transform.apply(data, transform)
|
assert_frame_equal(out, expected)
|
pandas.testing.assert_frame_equal
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# # Simulate dummy data
# Code to generate some dummy data that can be used for development and tests.
#
# **Important note**: No data files are committed to this repo. Any "data" used in this repo is entirely dummy data, i.e. it has been randomised and all names have been masked so they can be used for training purposes. This notebook is for training purposes only.
# + [markdown] _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# <!-- This table of contents is updated *manually* -->
# # Contents
# 1. [Setup](#Setup): Import packages, Configuration variables
# 1. [Typical workflow](#Typical-workflow)
# 1. [Manual specifications](#Manual-specifications)
# 1. [Automated generate function](#Automated-generate-function)
# 1. [Generated dummy data](#Generated-dummy-data)
# -
# <div align="right" style="text-align: right"><a href="#Contents">Back to Contents</a></div>
#
# # Setup
# Set warning messages
import warnings
# Show all warnings in IPython
warnings.filterwarnings('always')
# Ignore specific numpy warnings (as per <https://github.com/numpy/numpy/issues/11788#issuecomment-422846396>)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
# +
# Import built-in modules
import sys
import platform
import os
from pathlib import Path
import io
# Import external modules
from IPython import __version__ as IPy_version
import numpy as np
import pandas as pd
# Import project modules
from pyprojroot import here
root_dir_path = here()
# Allow modules to be imported relative to the project root directory
if not sys.path[0] == root_dir_path:
sys.path.insert(0, str(root_dir_path))
import proj_config
# Check they have loaded and the versions are as expected
assert platform.python_version_tuple() == ('3', '6', '6')
print(f"Python version:\t\t\t{sys.version}")
assert IPy_version == '7.13.0'
print(f'IPython version:\t\t{IPy_version}')
assert np.__version__ == '1.18.2'
print(f'numpy version:\t\t\t{np.__version__}')
assert pd.__version__ == '0.25.3'
print(f'pandas version:\t\t\t{pd.__version__}')
# -
# Configuration variables
raw_data_folder_path = proj_config.example_data_dir_path
assert raw_data_folder_path.is_dir()
print("Correct: All locations are available as expected")
# <div align="right" style="text-align: right"><a href="#Contents">Back to Contents</a></div>
#
# # Typical workflow
# Define some minimal dummy data that can be used for development and unit tests. Specifically, we define some `in_row_sers`, each of which is a Series representation of a row in the input data, i.e. it includes the *whole* row (not just the truncated version that will be read in). From this we can create (a small sketch of the round trip follows the list below):
# - The input data CSV, saved at a specified location.
# - The DataFrame `df_raw` that we expect will be the result of loading the input CSV (i.e. after truncation).
# - The expected DataFrame result of the conversion. We specify this in an ad hoc way, and use it to check the actual result of the conversion is as expected. There may be various outputs, depending on which of the `in_row_sers` are used and in what order.
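# A minimal, illustrative sketch of the round trip described above (an added example, not
# the package implementation): build one raw row as text, truncate it at the stop marker,
# and read it back with pandas. The row layout here is illustrative and simply mirrors the
# specifications defined further below.
# +
import io
import pandas as pd
_example_raw_row = '1,"Ok",96.95,,,9,Peril1 Base Premium,0.0,91.95,91.95,Total Peril Premium,[more text]\n'
# Truncate everything from the stop marker onwards, as the loader is expected to do
_truncated_row = _example_raw_row.split(',Total Peril Premium')[0] + '\n'
_df_raw_sketch = pd.read_csv(io.StringIO(_truncated_row), header=None, index_col=0)
print(_df_raw_sketch.shape)  # (1, 9): one row, nine retained data columns
# -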
# <div align="right" style="text-align: right"><a href="#Contents">Back to Contents</a></div>
#
# # Manual specifications
# ## Variables and utility functions
# We put these in a simple class, so they can be accessed with the same syntax that will be used once they are incorporated into the package.
# +
class premierconverter:
"""Class to hold constants that will actually form part of the package module"""
def __init__(self):
"""Define constants that are needed for this script"""
self.RAW_STRUCT = {
'stop_row_at': 'Total Peril Premium',
'stem': {
'ncols': 5,
'chosen_cols': [0, 1],
'col_names': ['Premier_Test_Status', 'Total_Premium'],
'col_types': [np.dtype('object'), np.dtype('float')],
},
'f_set': {
'ncols': 4,
'col_names': ['Peril_Factor', 'Relativity', 'Premium_increment', 'Premium_cumulative'],
'col_types': [np.dtype('object')] + [np.dtype('float')] * 3,
},
'bp_name': 'Base Premium',
}
self.TRUNC_AFTER_REGEX = r",\s*{}.*".format(self.RAW_STRUCT['stop_row_at'])
self.ROW_ID_NAME = "Ref_num"
self.OUTPUT_DEFAULTS = {
'pf_sep': ' ',
'file_delimiter': ','
}
# Get an object for use in the rest of the script
PCon = premierconverter()
# -
# Utility functions
def add_one_to_index(df):
"""Add 1 to the index values of a Series of DataFrame"""
df.index += 1
return df
# ## `in_row_sers`
# Typical raw rows as Series
# +
# Usual data rows
in_row_sers_01 = pd.Series([
'Ok', 96.95, np.nan, np.nan, 9,
'Peril1 Base Premium', 0.0, 91.95, 91.95,
'AnotherPrlBase Premium', 0.0, 5.17, 5.17,
'Peril1Factor1', 0.99818, -0.17, 91.78,
'Total Peril Premium', '[some more text]',
]).pipe(add_one_to_index)
in_row_sers_02 = pd.Series([
'Ok', 170.73, np.nan, np.nan, 11,
'AnotherPrlBase Premium', 0.0, 101.56, 101.56,
'AnotherPrlFactor1', 1.064887, 6.59, 108.15,
'Peril1 Base Premium', 0.0, 100.55, 100.55,
'AnotherPrlSomeFact', 0.648875, -37.97, 70.18,
'Total Peril Premium', 2, 'extra text and figures',
]).pipe(add_one_to_index)
in_row_sers_03 = pd.Series([
'Ok', 161.68, np.nan, np.nan, 5,
'Peril1NewFact', 0.999998, 0.0, 110.34,
'Peril1Factor1', 1.2, 18.39, 110.34,
np.nan, np.nan, np.nan, np.nan,
'AnotherPrlBase Premium', 0, 51.34, 51.34,
'Peril1 Base Premium', 0.0, 91.95, 91.95,
'Total Peril Premium', np.nan,
]).pipe(add_one_to_index)
# An error row
in_row_sers_error = pd.Series([
'Error: Some text, that indicates an error.', 0.0, np.nan, np.nan, 4,
]).pipe(add_one_to_index)
# A declined row
in_row_sers_declined = pd.Series([
'Declined', np.nan, np.nan, np.nan, 4,
'Some more text on a declined row', 'even, more. text', np.nan, 0, 0,
]).pipe(add_one_to_index)
# -
# ## Expected results from conversion
# +
# Set up and utility function
df_expected_tests = dict()
def get_output_col_names(perils, factors):
"""Column names of the output data frame that contains `perils` and `factors`"""
return (
PCon.RAW_STRUCT['stem']['col_names'] +
[per + PCon.OUTPUT_DEFAULTS['pf_sep'] + fac
for per, fac in pd.MultiIndex.from_product(
[perils, [PCon.RAW_STRUCT['bp_name']] + factors]
)]
)
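# For example (an illustrative check added here, not part of the package), the helper
# should produce the two stem columns followed by one "<Peril> <Factor>" column per
# combination, with 'Base Premium' always listed first within each peril:
assert get_output_col_names(['Peril1'], ['Factor1']) == [
    'Premier_Test_Status', 'Total_Premium', 'Peril1 Base Premium', 'Peril1 Factor1'
]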
# +
# Output from 4 rows
df_expected_tests[4] = pd.DataFrame(
columns=get_output_col_names(
perils=['AnotherPrl', 'Peril1'],
factors=['Factor1', 'NewFact', 'SomeFact']
),
data=[
(in_row_sers_01[[1, 2, 5+4*2]].to_list() + [1.] * 3 +
in_row_sers_01[[5+4*1, 5+4*2+2]].to_list() + [1.] * 2),
(in_row_sers_02[[1, 2, 5+4*1, 5+4*1+2]].to_list() + [1.] +
in_row_sers_02[[5+4*3+2, 5+4*3]].to_list() + [1.] * 3),
in_row_sers_error[[1]].to_list() + [0.] * 9,
(in_row_sers_03[[1, 2, 5+4*4]].to_list() + [1.] * 3 +
in_row_sers_03[[5+4*5, 5+4*1+2, 5+2]].to_list() + [1.])
],
).pipe(add_one_to_index).rename_axis(index=PCon.ROW_ID_NAME)
# Output including the additional 5th 'declined' row
df_expected_tests[5] = df_expected_tests[4].append(
pd.Series({PCon.RAW_STRUCT['stem']['col_names'][0]: in_row_sers_declined[1]}, name=5)
).fillna(0.)
# Output from 2 rows
df_expected_tests[2] = pd.DataFrame(
columns=get_output_col_names(
perils=['AnotherPrl', 'Peril1'],
factors=['Factor1', 'SomeFact']
),
data=[
(in_row_sers_01[[1, 2, 5+4*2]].to_list() + [1.] * 2 +
in_row_sers_01[[5+4*1, 5+4*2+2]].to_list() + [1.]),
in_row_sers_02[[1, 2, 5+4*1, 5+4*1+2, 5+4*3+2, 5+4*3]].to_list() + [1.] * 2,
],
).pipe(add_one_to_index).rename_axis(index=PCon.ROW_ID_NAME)
# Output from 2 rows but including additional factor
df_expected_tests['2_all_facts'] = pd.DataFrame(
columns=get_output_col_names(
perils=['AnotherPrl', 'Peril1'],
factors=['Factor1', 'NewFact', 'SomeFact']
),
data=[
(in_row_sers_01[[1, 2, 5+4*2]].to_list() + [1.] * 3 +
in_row_sers_01[[5+4*1, 5+4*2+2]].to_list() + [1.] * 2),
(in_row_sers_02[[1, 2, 5+4*1, 5+4*1+2]].to_list() + [1.] +
in_row_sers_02[[5+4*3+2, 5+4*3]].to_list() + [1.] * 3),
],
).pipe(add_one_to_index).rename_axis(index=PCon.ROW_ID_NAME)
# -
# <div align="right" style="text-align: right"><a href="#Contents">Back to Contents</a></div>
#
# # Automated generate function
# ## Generate input CSVs
def simulate_row_str(row_id, in_row_sers):
"""
Convert an `in_row_sers` into a string that looks like an input file row.
row_id: The index column value that you want for the row.
"""
return(
str(row_id) + ',"' + in_row_sers[1] + '",' +
|
pd.DataFrame([in_row_sers[1:]])
|
pandas.DataFrame
|
# Functions related to missing values
# Handling missing values in a DataFrame
# In pandas, missing values are represented by NaN and None
# NaN: the float missing-value marker (e.g. what you get when loading data with gaps)
# None: Python's built-in null object (also treated as missing by pandas)
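# A quick added illustration (not part of the original notes): pandas treats both
# None and np.nan as missing, and isnull()/notnull() detect them uniformly.
import numpy as np
import pandas as pd
_s = pd.Series([1.0, None, np.nan])
print(_s.isnull())        # False, True, True -- both None and NaN count as missing
print(_s.isnull().sum())  # 2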
# import pandas as pd
# from pandas import DataFrame as df
# df_left = df({
# 'a':['a0','a1','a2','a3'],
# 'b':[0.5, 2.2, 3.6, 4.0],
# 'key':['<KEY>']})
# df_right = df({
# 'c':['c0','c1','c2','c3'],
# 'd':['d0','d1','d2','d3'],
# 'key':['<KEY>']})
#
# df_all=pd.merge(df_left,df_right,how='outer',on='key')
# print(df_all)
# # a b key c d
# # 0 a0 0.5 k0 NaN NaN
# # 1 a1 2.2 k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
#
#
# #null 판별
# print(pd.isnull(df_all))
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(df_all.isnull())
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(pd.notnull(df_all))
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# print(df_all.notnull())
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# # Insert missing values at specific positions: None ==> the reserved word meaning "missing value"
# df_all.ix[[0,1],['a','b']]=None
# print(df_all)
# # a b key c d
# # 0 None NaN k0 NaN NaN
# # 1 None NaN k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
# #
# # column a (string) = None, column b (float) = NaN
#
#
# print(df_all[['a','b']].isnull())
# # a b
# # 0 True True
# # 1 True True
# # 2 False False
# # 3 False False
# # 4 True True
# # 5 True True
#
# # count of missing values per column
# print(df_all.isnull().sum())
# # a 4
# # b 4
# # key 0
# # c 2
# # d 2
# # dtype: int64
#
# # count of missing values in a single column
# print(df_all['a'].isnull().sum())
# # 4
#
# # count of non-missing values per column
# print(df_all.notnull().sum())
# # a 2
# # b 2
# # key 6
# # c 4
# # d 4
# # dtype: int64
#
# print('='*50)
# print(df_all)
# # number of missing values per row
# print(df_all.isnull().sum(1))
# # 0 4
# # 1 4
# # 2 0
# # 3 0
# # 4 2
# # 5 2
# # dtype: int64
#
# df_all['NaN_cnt']=df_all.isnull().sum(1)
# df_all['NotNaN_cnt']=df_all.notnull().sum(1)
# print(df_all)
#
# # Is a value missing? isnull(), notnull()
# # Missing-value count per column: df.isnull().sum()
# # Missing-value count per row: df.isnull().sum(1)
#
# import numpy as np
#
# df=df(np.arange(10).reshape(5,2),
# index=['a','b','c','d','e'],
# columns=['c1','c2'])
# print(df)
# # c1 c2
# # a 0 1
# # b 2 3
# # c 4 5
# # d 6 7
# # e 8 9
#
# df.ix[['b','e'],['c1']]=None
# df.ix[['b','c'],['c2']]=None
# print(df)
#
# print(df.sum()) # sum(): NaN is treated as 0 in the calculation
# # c1 10.0
# # c2 17.0
# # dtype: float64
#
# print(df['c1'].sum()) # sum of a single column
# # 10.0
#
# print(df['c1'].cumsum()) # cumsum(): cumulative sum
# # a 0.0
# # b NaN
# # c 4.0
# # d 10.0
# # e NaN
# # Name: c1, dtype: float64
#
# print(df.mean()) # column-wise mean: (0+4+6)/3, NaN excluded
# # c1 3.333333
# # c2 5.666667
# # dtype: float64
#
# print(df.mean(1)) # row-wise mean
# # a 0.5
# # b NaN
# # c 4.0
# # d 6.5
# # e 9.0
# # dtype: float64
#
#
# print(df.std()) # column-wise standard deviation
# # c1 3.055050
# # c2 4.163332
# # dtype: float64
#
#
#
# # operations between DataFrame columns: the result is NaN if any operand is NaN
# df['c3'] = df['c1']+df['c2']
# print(df)
# # c1 c2 c3
# # a 0.0 1.0 1.0
# # b NaN NaN NaN
# # c 4.0 NaN NaN
# # d 6.0 7.0 13.0
# # e NaN 9.0 NaN
import pandas as pd
import numpy as np
from pandas import DataFrame as df
from pandas import DataFrame
df=DataFrame(np.arange(10).reshape(5,2),
index=['a','b','c','d','e'],
columns=['c1','c2'])
df2=DataFrame({'c1':[1,1,1,1,1],
'c4': [1, 1, 1, 1, 1]},
index=['a','b','c','d','e'],
columns=['c1','c2'])
df['c3'] = df['c1']+df['c2']
print(df)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(df2)
#    c1  c2
# a   1 NaN
# b   1 NaN
# c   1 NaN
# d   1 NaN
# e   1 NaN
print(df+df2)
# c1 c2 c3
# a 1 NaN NaN
# b 3 NaN NaN
# c 5 NaN NaN
# d 7 NaN NaN
# e 9 NaN NaN
df = DataFrame(np.random.randn(5,3),columns=['c1','c2','c3'])
print(df)
# c1 c2 c3
# 0 -0.362802 1.035479 2.200778
# 1 -0.793058 -1.171802 -0.936723
# 2 -0.033139 0.972850 -0.098105
# 3 0.744415 -1.121513 0.230542
# 4 -1.206089 2.206393 -0.166863
df.loc[0, 'c1'] = None
df.loc[1, ['c1', 'c3']] = np.nan
df.loc[2, 'c2'] = np.nan
df.loc[3, 'c2'] = np.nan
df.loc[4, 'c3'] = np.nan
print(df)
# c1 c2 c3
# 0 NaN -2.337590 0.416905
# 1 NaN -0.115824 NaN
# 2 0.402954 NaN -1.126641
# 3 0.348493 NaN -0.671719
# 4 1.613053 -0.799295 NaN
df_0=df.fillna(0)
print(df_0)
# c1 c2 c3
# 0 0.000000 -0.020379 -0.234493
# 1 0.000000 2.103582 0.000000
# 2 -1.271259 0.000000 -2.098903
# 3 -0.030064 0.000000 -0.984602
# 4 0.083863 -0.811207 0.000000
df_missing =
|
df.fillna('missing')
|
pandas.DataFrame.fillna
|
#!python
#--coding:utf-8 --
"""
getIntDensity.py
Get the interaction density for a region.
"""
__date__ = "2019-10-08"
__modified__ = ""
__email__ = "<EMAIL>"
#general library
import os
import sys
import json
import argparse
from glob import glob
from datetime import datetime
from argparse import RawTextHelpFormatter
#3rd library
import numpy as np
import pandas as pd
from tqdm import tqdm
from joblib import Parallel, delayed
from scipy.stats import hypergeom, binom, poisson
#cLoops2
from cLoops2.ds import XY
from cLoops2.io import parseTxt2Loops, ixy2pet
from cLoops2.callCisLoops import getPerRegions, estAnchorSig
from cLoops2.settings import *
def help():
"""
    Create the command line interface for the script getIntDensity.py.
"""
description = """
Get the interaction density for regions.
"""
parser = argparse.ArgumentParser(description=description,
formatter_class=RawTextHelpFormatter)
parser.add_argument("-d",
dest="predir",
required=True,
type=str,
help="Directory for cLoops2 pre generated.")
parser.add_argument(
"-b",
dest="fbed",
required=True,
type=str,
help=
"The .bed file which contains regions to get the interaction density.")
parser.add_argument("-o",
dest="output",
required=True,
type=str,
help="Output prefix.")
parser.add_argument(
"-pcut",
dest="pcut",
type=int,
default=0,
help=
"Distance cutoff for PETs to filter, default is 0. Can be set as the estimated self-ligation distance cutoff."
)
parser.add_argument('-p',
dest="cpu",
required=False,
default=1,
type=int,
help="Number of CPUs to run the job, default is 1.")
op = parser.parse_args()
return op
def quantifyRegions(key, rs, fixy, pcut=0, pseudo=1):
"""
@param key: str, such as chr21-chr21
@param loops: list of Loop object
@param fixy: cLoops2 pre generated .ixy file
"""
print("%s\t quantify interaction density of %s regions in %s." %
(datetime.now(), len(rs), key))
xy = ixy2pet(fixy, cut=pcut)
N = xy.number
ds = {}
for r in tqdm(rs):
local = xy.queryPeakBoth(int(r[1]), int(r[2]))
a = xy.queryPeak(int(r[1]), int(r[2]))
distal = a.difference(local)
ds["|".join(r)] = {
"chrom":
r[0],
"start":
r[1],
"end":
r[2],
"name":
r[3],
"allPETs":
len(local) * 2 + len(distal),
"localPETs":
len(local) * 2,
"distalPETs":
len(distal),
"allRPKM": (len(local) * 2 + len(distal)) /
(int(r[2]) - int(r[1])) / N / 2 * 10**9,
"localRPKM":
len(local) * 2 / (int(r[2]) - int(r[1])) / N / 2 * 10**9,
"distalRPKM":
len(distal) * 2 / (int(r[2]) - int(r[1])) / N / 2 * 10**9,
}
return ds
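# Added note on the values above: the *RPKM fields appear to follow an RPKM-style
# normalisation, i.e. PETs / region_length(bp) / (2 * N) * 1e9, where N is the total PET
# count loaded from the .ixy file for this chromosome pair
# (1e9 = 1e3 for "per kilobase" x 1e6 for "per million PETs").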
def parseBed(f):
regions = {}
for line in open(f):
line = line.split("\n")[0].split("\t")
key = line[0] + "-" + line[0]
if key not in regions:
regions[key] = []
regions[key].append(line)
return regions
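# For example (illustrative), a BED line "chr21<TAB>100<TAB>200<TAB>regionA" is stored as
#   {"chr21-chr21": [["chr21", "100", "200", "regionA"]]}
# so that region keys can be intersected with the chromosome-pair keys read from
# petMeta.json in main() below.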
def main():
op = help()
regions = parseBed(op.fbed)
metaf = op.predir + "/petMeta.json"
meta = json.loads(open(metaf).read())
keys = list(meta["data"]["cis"].keys())
keys = list(set(keys).intersection(set(regions.keys())))
ds = Parallel(n_jobs=op.cpu,backend="multiprocessing")(delayed(quantifyRegions)(
key,
regions[key],
meta["data"]["cis"][key]["ixy"],
pcut=op.pcut,
) for key in keys)
data = {}
for d in ds:
for k, v in d.items():
data[k] = v
data =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# _get_numeric_data includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
# numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise NotImplementedError
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when receives seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4) == 4))
self.assertTrue(len(o.sample(frac=0.34) == 3))
self.assertTrue(len(o.sample(frac=0.36) == 4))
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have empty values to be filled with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with tm.assertRaises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_describe(self):
self.series.describe()
self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count': 7, 'unique': 4,
'top': 'a', 'freq': 3}, index=result.index)
assert_series_equal(result, expected)
dt = list(self.ts.index)
dt.append(dt[0])
ser = Series(dt)
rs = ser.describe()
min_date = min(dt)
max_date = max(dt)
xp = Series({'count': len(dt),
'unique': len(self.ts.index),
'first': min_date, 'last': max_date, 'freq': 2,
'top': min_date}, index=rs.index)
assert_series_equal(rs, xp)
def test_describe_empty(self):
result = pd.Series().describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
nanSeries = Series([np.nan])
nanSeries.name = 'NaN'
result = nanSeries.describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
def test_describe_none(self):
noneSeries = Series([None])
noneSeries.name = 'None'
expected = Series([0, 0], index=['count', 'unique'], name='None')
assert_series_equal(noneSeries.describe(), expected)
class TestDataFrame(tm.TestCase, Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame([
11, 21, 31
], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]))
df.rename(str.lower)
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
self.assertTrue(df.bool())
df = DataFrame([[False]])
self.assertFalse(df.bool())
df = DataFrame([[False, False]])
self.assertRaises(ValueError, lambda: df.bool())
self.assertRaises(ValueError, lambda: bool(df))
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.ix[2, 'A'] = 3
expected.ix[5, 'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
import scipy
result = df.interpolate(method='pchip')
expected.ix[2, 'A'] = 3
if LooseVersion(scipy.__version__) >= '0.17.0':
expected.ix[5, 'A'] = 6.0
else:
expected.ix[5, 'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1]})
df.interpolate(axis=0)
def test_interp_leading_nans(self):
df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0],
"B": [np.nan, -3, -3.5, np.nan, -4]})
result = df.interpolate()
expected = df.copy()
expected['B'].loc[3] = -3.75
assert_frame_equal(result, expected)
tm._skip_if_no_scipy()
result = df.interpolate(method='polynomial', order=1)
assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': ['a', 'b', 'c', 'd'],
'C': [np.nan, 2, 5, 7],
'D': [np.nan, np.nan, 9, 9],
'E': [1, 2, 3, 4]})
with tm.assertRaises(TypeError):
df.interpolate(axis=1)
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
result = df.copy()
result['a'].interpolate(inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
def test_interp_inplace_row(self):
# GH 10395
result = DataFrame({'a': [1., 2., 3., 4.],
'b': [np.nan, 2., 3., 4.],
'c': [3, 2, 2, 2]})
expected = result.interpolate(method='linear', axis=1, inplace=False)
result.interpolate(method='linear', axis=1, inplace=True)
assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
expected = DataFrame({'A': np.array(
[1, 2, 3, 4], dtype='float64'),
'B': np.array(
[1, 2, 3, 4], dtype='int64'),
'C': np.array(
[1., 2., 3, 4.], dtype='float64'),
'D': np.array(
[1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
def test_describe(self):
tm.makeDataFrame().describe()
tm.makeMixedDataFrame().describe()
tm.makeTimeDataFrame().describe()
def test_describe_percentiles_percent_or_raw(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
df = tm.makeDataFrame()
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[10, 50, 100])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[2])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[-2])
def test_describe_percentiles_equivalence(self):
df = tm.makeDataFrame()
d1 = df.describe()
d2 = df.describe(percentiles=[.25, .75])
assert_frame_equal(d1, d2)
def test_describe_percentiles_insert_median(self):
df = tm.makeDataFrame()
d1 = df.describe(percentiles=[.25, .75])
d2 = df.describe(percentiles=[.25, .5, .75])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('75%' in d2.index)
# none above
d1 = df.describe(percentiles=[.25, .45])
d2 = df.describe(percentiles=[.25, .45, .5])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('45%' in d2.index)
# none below
d1 = df.describe(percentiles=[.75, 1])
d2 = df.describe(percentiles=[.5, .75, 1])
assert_frame_equal(d1, d2)
self.assertTrue('75%' in d1.index)
self.assertTrue('100%' in d2.index)
# edge
d1 = df.describe(percentiles=[0, 1])
d2 = df.describe(percentiles=[0, .5, 1])
assert_frame_equal(d1, d2)
self.assertTrue('0%' in d1.index)
self.assertTrue('100%' in d2.index)
def test_describe_no_numeric(self):
df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8,
'B': ['a', 'b', 'c', 'd'] * 6})
desc = df.describe()
expected = DataFrame(dict((k, v.describe())
for k, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(desc, expected)
ts = tm.makeTimeSeries()
df = DataFrame({'time': ts.index})
desc = df.describe()
self.assertEqual(desc.time['first'], min(ts.index))
def test_describe_empty_int_columns(self):
df = DataFrame([[0, 1], [1, 2]])
desc = df[df[0] < 0].describe() # works
assert_series_equal(desc.xs('count'),
Series([0, 0], dtype=float, name='count'))
self.assertTrue(isnull(desc.ix[1:]).all().all())
def test_describe_objects(self):
df = DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']})
result = df.describe()
expected = DataFrame({"C1": [3, 2, 'a', 2], "C2": [3, 2, 'd', 2]},
index=['count', 'unique', 'top', 'freq'])
assert_frame_equal(result, expected)
df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D')
})
df.loc[4] = pd.Timestamp('2010-01-04')
result = df.describe()
expected = DataFrame({"C1": [5, 4, pd.Timestamp('2010-01-04'), 2,
pd.Timestamp('2010-01-01'),
pd.Timestamp('2010-01-04')]},
index=['count', 'unique', 'top', 'freq',
'first', 'last'])
assert_frame_equal(result, expected)
# mix time and str
df['C2'] = ['a', 'a', 'b', 'c', 'a']
result = df.describe()
expected['C2'] = [5, 3, 'a', 3, np.nan, np.nan]
assert_frame_equal(result, expected)
# just str
expected = DataFrame({'C2': [5, 3, 'a', 4]},
index=['count', 'unique', 'top', 'freq'])
result = df[['C2']].describe()
# mix of time, str, numeric
df['C3'] = [2, 4, 6, 8, 2]
result = df.describe()
expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
assert_frame_equal(result, expected)
assert_frame_equal(df.describe(), df[['C3']].describe())
assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe())
assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe())
def test_describe_typefiltering(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24, dtype='int64'),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
descN = df.describe()
expected_cols = ['numC', 'numD', ]
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descN, expected)
desc = df.describe(include=['number'])
assert_frame_equal(desc, descN)
desc = df.describe(exclude=['object', 'datetime'])
assert_frame_equal(desc, descN)
desc = df.describe(include=['float'])
assert_frame_equal(desc, descN.drop('numC', 1))
descC = df.describe(include=['O'])
expected_cols = ['catA', 'catB']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descC, expected)
descD = df.describe(include=['datetime'])
assert_series_equal(descD.ts, df.ts.describe())
desc = df.describe(include=['object', 'number', 'datetime'])
assert_frame_equal(desc.loc[:, ["numC", "numD"]].dropna(), descN)
assert_frame_equal(desc.loc[:, ["catA", "catB"]].dropna(), descC)
descDs = descD.sort_index() # the index order change for mixed-types
assert_frame_equal(desc.loc[:, "ts":].dropna().sort_index(), descDs)
desc = df.loc[:, 'catA':'catB'].describe(include='all')
assert_frame_equal(desc, descC)
desc = df.loc[:, 'numC':'numD'].describe(include='all')
assert_frame_equal(desc, descN)
desc = df.describe(percentiles=[], include='all')
cnt = Series(data=[4, 4, 6, 6, 6],
index=['catA', 'catB', 'numC', 'numD', 'ts'])
assert_series_equal(desc.count(), cnt)
self.assertTrue('count' in desc.index)
self.assertTrue('unique' in desc.index)
self.assertTrue('50%' in desc.index)
self.assertTrue('first' in desc.index)
desc = df.drop("ts", 1).describe(percentiles=[], include='all')
assert_series_equal(desc.count(), cnt.drop("ts"))
self.assertTrue('first' not in desc.index)
desc = df.drop(["numC", "numD"], 1).describe(percentiles=[],
include='all')
assert_series_equal(desc.count(), cnt.drop(["numC", "numD"]))
self.assertTrue('50%' not in desc.index)
def test_describe_typefiltering_category_bool(self):
df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8),
'B_str': ['a', 'b', 'c', 'd'] * 6,
'C_bool': [True] * 12 + [False] * 12,
'D_num': np.arange(24.) + .5,
'E_ts': tm.makeTimeSeries()[:24].index})
# bool is considered numeric in describe, although not an np.number
desc = df.describe()
expected_cols = ['C_bool', 'D_num']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(desc, expected)
desc = df.describe(include=["category"])
self.assertTrue(desc.columns.tolist() == ["A_cat"])
# 'all' includes numpy-dtypes + category
desc1 = df.describe(include="all")
desc2 = df.describe(include=[np.generic, "category"])
assert_frame_equal(desc1, desc2)
def test_describe_timedelta(self):
df = DataFrame({"td": pd.to_timedelta(np.arange(24) % 20, "D")})
self.assertTrue(df.describe().loc["mean"][0] == pd.to_timedelta(
"8d4h"))
def test_describe_typefiltering_dupcol(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
s = df.describe(include='all').shape[1]
df = pd.concat([df, df], axis=1)
s2 = df.describe(include='all').shape[1]
self.assertTrue(s2 == 2 * s)
def test_describe_typefiltering_groupby(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
G = df.groupby('catA')
self.assertTrue(G.describe(include=['number']).shape == (16, 2))
self.assertTrue(G.describe(include=['number', 'object']).shape == (22,
3))
self.assertTrue(G.describe(include='all').shape == (26, 4))
def test_describe_multi_index_df_column_names(self):
""" Test that column names persist after the describe operation."""
df = pd.DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# GH 11517
# test for hierarchical index
hierarchical_index_df = df.groupby(['A', 'B']).mean().T
self.assertTrue(hierarchical_index_df.columns.names == ['A', 'B'])
self.assertTrue(hierarchical_index_df.describe().columns.names ==
['A', 'B'])
# test for non-hierarchical index
non_hierarchical_index_df = df.groupby(['A']).mean().T
self.assertTrue(non_hierarchical_index_df.columns.names == ['A'])
self.assertTrue(non_hierarchical_index_df.describe().columns.names ==
['A'])
def test_no_order(self):
tm._skip_if_no_scipy()
s = Series([0, 1, np.nan, 3])
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial')
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
def test_spline(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
def test_spline_extrapolate(self):
tm.skip_if_no_package(
'scipy', '0.15',
'setting ext on scipy.interpolate.UnivariateSpline')
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
def test_spline_smooth(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
self.assertNotEqual(s.interpolate(method='spline', order=3, s=0)[5],
s.interpolate(method='spline', order=3)[5])
def test_spline_interpolation(self):
tm._skip_if_no_scipy()
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
# GH #10633
def test_spline_error(self):
tm._skip_if_no_scipy()
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
with tm.assertRaises(ValueError):
s.interpolate(method='spline', order=0)
def test_metadata_propagation_indiv(self):
# groupby
df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
result = df.groupby('A').sum()
self.check_metadata(df, result)
# resample
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20130101', periods=1000, freq='s'))
result = df.resample('1T')
self.check_metadata(df, result)
# merging with override
# GH 6923
_metadata = DataFrame._metadata
_finalize = DataFrame.__finalize__
np.random.seed(10)
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b'])
df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd'])
DataFrame._metadata = ['filename']
df1.filename = 'fname1.csv'
df2.filename = 'fname2.csv'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'merge':
left, right = other.left, other.right
value = getattr(left, name, '') + '|' + getattr(right,
name, '')
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, ''))
return self
DataFrame.__finalize__ = finalize
result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner')
self.assertEqual(result.filename, 'fname1.csv|fname2.csv')
# concat
# GH 6927
DataFrame._metadata = ['filename']
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab'))
df1.filename = 'foo'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
DataFrame.__finalize__ = finalize
result = pd.concat([df1, df1])
self.assertEqual(result.filename, 'foo+foo')
# reset
DataFrame._metadata = _metadata
DataFrame.__finalize__ = _finalize
def test_tz_convert_and_localize(self):
l0 = date_range('20140701', periods=5, freq='D')
# TODO: l1 should be a PeriodIndex for testing
# after GH2106 is addressed
with tm.assertRaises(NotImplementedError):
period_range('20140701', periods=1).tz_convert('UTC')
with tm.assertRaises(NotImplementedError):
period_range('20140701', periods=1).tz_localize('UTC')
# l1 = period_range('20140701', periods=5, freq='D')
l1 = date_range('20140701', periods=5, freq='D')
int_idx = Index(range(5))
for fn in ['tz_localize', 'tz_convert']:
if fn == 'tz_convert':
l0 = l0.tz_localize('UTC')
l1 = l1.tz_localize('UTC')
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)('US/Pacific')
l1_expected = getattr(idx, fn)('US/Pacific')
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)('US/Pacific')
self.assertTrue(df1.index.equals(l0_expected))
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
df3 = getattr(df2, fn)('US/Pacific', level=0)
self.assertFalse(df3.index.levels[0].equals(l0))
self.assertTrue(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1))
self.assertFalse(df3.index.levels[1].equals(l1_expected))
df3 = getattr(df2, fn)('US/Pacific', level=1)
self.assertTrue(df3.index.levels[0].equals(l0))
self.assertFalse(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1_expected))
self.assertFalse(df3.index.levels[1].equals(l1))
df4 = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
# TODO: untested
df5 = getattr(df4, fn)('US/Pacific', level=1) # noqa
self.assertTrue(df3.index.levels[0].equals(l0))
self.assertFalse(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1_expected))
self.assertFalse(df3.index.levels[1].equals(l1))
# Bad Inputs
for fn in ['tz_localize', 'tz_convert']:
# Not DatetimeIndex / PeriodIndex
with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):
df = DataFrame(index=int_idx)
df = getattr(df, fn)('US/Pacific')
# Not DatetimeIndex / PeriodIndex
with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):
df = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
df = getattr(df, fn)('US/Pacific', level=0)
# Invalid level
with tm.assertRaisesRegexp(ValueError, 'not valid'):
df = DataFrame(index=l0)
df = getattr(df, fn)('US/Pacific', level=1)
def test_set_attribute(self):
# Test for consistent setattr behavior when an attribute and a column
# have the same name (Issue #8994)
df = DataFrame({'x': [1, 2, 3]})
df.y = 2
df['y'] = [2, 4, 6]
df.y = 5
assert_equal(df.y, 5)
assert_series_equal(df['y'], Series([2, 4, 6], name='y'))
def test_pct_change(self):
# GH 11150
pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(
0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
mask = pnl.isnull()
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
expected[mask] = np.nan
result = pnl.pct_change(axis=axis, fill_method='pad')
self.assert_frame_equal(result, expected)
class TestPanel(tm.TestCase, Generic):
_typ = Panel
_comparator = lambda self, x, y: assert_panel_equal(x, y)
class TestNDFrame(tm.TestCase):
# tests that don't fit elsewhere
def test_squeeze(self):
# noop
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries()]:
tm.assert_series_equal(s.squeeze(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.squeeze(), df)
for p in [tm.makePanel()]:
tm.assert_panel_equal(p.squeeze(), p)
for p4d in [tm.makePanel4D()]:
tm.assert_panel4d_equal(p4d.squeeze(), p4d)
# squeezing
df = tm.makeTimeDataFrame().reindex(columns=['A'])
tm.assert_series_equal(df.squeeze(), df['A'])
p = tm.makePanel().reindex(items=['ItemA'])
tm.assert_frame_equal(p.squeeze(), p['ItemA'])
p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A'])
tm.assert_series_equal(p.squeeze(), p.ix['ItemA', :, 'A'])
p4d = tm.makePanel4D().reindex(labels=['label1'])
tm.assert_panel_equal(p4d.squeeze(), p4d['label1'])
        p4d = tm.makePanel4D()
# -*- coding: utf-8 -*-
"""Data Presentation Script.
Utilizes the pandas library -
See https://pandas.pydata.org/pandas-docs/stable/api.html
Utilizes the matplotlib library -
See https://matplotlib.org/
Created by: <NAME>
Created on: 08/02/2018
Last Updated: 02/27/2019
"""
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PyQt5 import QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import \
NavigationToolbar2QT as NavigationToolbar
class ScrollableWindow(QtWidgets.QMainWindow):
"""Found online, not my own creation. Thank you to whoever wrote this!
"""
def __init__(self, fig):
self.qapp = QtWidgets.QApplication([])
QtWidgets.QMainWindow.__init__(self)
self.widget = QtWidgets.QWidget()
self.setCentralWidget(self.widget)
self.widget.setLayout(QtWidgets.QVBoxLayout())
self.widget.layout().setContentsMargins(0, 0, 0, 0)
self.widget.layout().setSpacing(0)
self.fig = fig
self.canvas = FigureCanvas(self.fig)
self.canvas.draw()
self.scroll = QtWidgets.QScrollArea(self.widget)
self.scroll.setWidget(self.canvas)
self.nav = NavigationToolbar(self.canvas, self.widget)
self.widget.layout().addWidget(self.nav)
self.widget.layout().addWidget(self.scroll)
self.show()
exit(self.qapp.exec_())
def multiple_plots_scrollable(groups, nplots=5, layout=None):
"""Plots groups.
:param groups:
:param nplots:
:param layout:
:return:
"""
cids = iter(list(groups.groups.keys())) # Iterable list of Group keys
fig, axes = plt.subplots(nrows=nplots,
ncols=1,
figsize=(16, nplots * 5),
gridspec_kw=layout)
for ax in axes:
cid = next(cids)
group = groups.get_group(cid)
# deltas = group.index.to_series().diff().dt.days.values[1:]
ax.set_title(f'Character ID: {cid}')
ax.plot(group.index, group.kd_ratio)
fig.text(x=0.5, y=0.01, s='Killmail Date (year-month)', ha='center',
fontsize=14)
fig.text(x=0.015, y=0.5, s='Kill/Death Ratio',
va='center', rotation='vertical', fontsize=14)
fig.suptitle('Investment and Performance Series Using Combat-Based Frigate',
fontsize=16, y=0.99)
a = ScrollableWindow(fig) # Pass the figure to the custom window
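# Usage sketch for multiple_plots_scrollable() (hedged: the CSV path and the 'character_id' /
# 'killmail_date' names below are illustrative assumptions; the function itself only needs a
# GroupBy whose groups carry a date-like index and a 'kd_ratio' column):
#   df = pd.read_csv('players.csv', parse_dates=['killmail_date'], index_col='killmail_date')
#   groups = df.groupby('character_id')
#   multiple_plots_scrollable(groups, nplots=5, layout={'hspace': 0.5})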
def create_poster_graphic(groups, cids, layout=None):
"""Poster graphic for BI&A Conference, Fall 2018, Stevens Inst. of Tech.
    :param groups: pandas GroupBy of the player time series, keyed by character id
    :param cids: a single character id or a numpy array of ids to render
    :param layout: optional dict of subplots_adjust keyword arguments
    :return:
    """
from matplotlib.font_manager import FontProperties
def plot(groups, cid, layout=None):
"""TODO
:param groups:
:param cid:
:param layout:
:return:
"""
# Create Unique Fonts for Title, Labels, and Legend
title_font = FontProperties(family='Arial', style='italic',
variant='small-caps', weight='roman',
size=42)
xlabel_font = FontProperties(family='Arial', weight='demi', size=32)
ylabel_font = FontProperties(family='Arial', weight='demi', size=32)
legend_font = FontProperties(family='Arial', weight='medium', size=24)
# Get data
df = groups.get_group(cid)[['kd_ratio', 'hi_slot', 'mid_slot',
'lo_slot', 'od_prct']]
# Plot each axis
ax1 = df['kd_ratio'].plot(c='#00bbff', ls='--', lw=3, marker='.',
mfc='#0033ff', ms=10, mew=0)
ax2 = ax1.twinx()
# Only fill if the next y value is the same sign as the current y value
# mask = (abs(df['od_prct'] + df['od_prct'].shift(-1)) ==
# abs(df['od_prct']) + abs(df['od_prct'].shift(-1)))
ax2.fill_between(df.index, 0, df['od_prct'], where=None,
step='post', alpha=0.5, color='#00c900')
# ax3 = ax1.twinx()
# df[['slot_hi', 'slot_mid', 'slot_lo']].plot(ax=ax3)
# Adjust axes spines to include tertiary plot
# ax2.spines['right'].set_position(('axes', 1.0))
# ax3.spines['right'].set_position(('axes', 1.1))
# Add a legend and a grid
lns1 = ax1.get_lines()
lns2 = mpatches.Patch(color='#00c900', alpha=0.5)
# lns3 = ax3.get_lines()
hndls = lns1
hndls.append(lns2)
# hndls.extend(lns3)
lbls = ['K/D Ratio', 'Offensive Investment']
# lbls.extend([l.get_label() for l in lns3])
ax1.legend(handles=hndls, labels=lbls, loc=4, framealpha=0.7,
prop=legend_font
)
ax1.grid(True)
# Style the axes
ax1.set_title(f'CHARACTER ID: {cid}', pad=30,
font_properties=title_font
)
ax1.set_xlabel('Date (Year-Month)', labelpad=30,
font_properties=xlabel_font
)
ax1.set_ylabel('Kill-to-Death Ratio (Kills/Deaths)', labelpad=30,
font_properties=ylabel_font
)
ax1.tick_params(length=8, width=2, labelsize=18, grid_color='#00bbff',
grid_alpha=0.3, grid_linestyle=':'
)
ax2.set_ylabel('% Investment in Offense', labelpad=30,
font_properties=ylabel_font
)
ax2.set_yticklabels([f'{x/100:.0%}' for x in ax2.get_yticks()],
fontsize=18)
ax2.tick_params(length=8, width=2, labelsize=18)
# ax3.set_ylabel('Slot Value (ISK)', fontsize=16)
# Set the figure size
ax1.figure.set_size_inches(15, 15)
if layout is not None:
plt.subplots_adjust(left=layout['left'],
bottom=layout['bottom'],
right=layout['right'],
top=layout['top'],
wspace=layout['wspace'],
hspace=layout['hspace'])
plt.savefig(f'../docs/graphs/player_strategy_graphs/cid-{cid}.png')
if type(cids) is np.ndarray:
for cid in cids:
plot(groups, cid, layout)
plt.clf()
else:
plot(groups, cids, layout)
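# Usage sketch for create_poster_graphic() (the id values are made up; the layout keys mirror
# the subplots_adjust call inside plot()):
#   create_poster_graphic(groups, cids=np.array([90000001, 90000002]),
#                         layout={'left': 0.1, 'bottom': 0.12, 'right': 0.9,
#                                 'top': 0.9, 'wspace': 0.2, 'hspace': 0.2})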
def plot_investment_area_performance(groups, cid, layout=None):
    """Plots to examine relationship b/w investment and performance.
    :param groups: pandas GroupBy of the player time series, keyed by character id
    :param cid: single character id to plot (the body uses one id, not an array)
    :param layout: optional dict of subplots_adjust keyword arguments
    :return:
    """
from matplotlib.font_manager import FontProperties
title_font = FontProperties(family='Arial',
variant='small-caps', weight='roman',
size=54)
xlabel_font = FontProperties(family='Arial', weight='demi', size=40)
ylabel_font = FontProperties(family='Arial', weight='demi', size=40)
legend_font = FontProperties(family='Arial', weight='medium', size=24)
gp = groups.get_group(cid)[['kd_ratio', 'kd_prct', 'hi_slot', 'mid_slot',
'lo_slot']]
hi, mid, lo = gp.hi_slot, gp.mid_slot, gp.lo_slot
total = hi + mid + lo
hi_slot = (hi / total) * 100
mid_slot = (mid / total) * 100
lo_slot = (lo / total) * 100
percentages = pd.concat([hi_slot, mid_slot, lo_slot], axis=1)
ax1 = percentages.plot(kind='area')
# ax1 = gp[['hi_slot', 'mid_slot', 'lo_slot']].plot(kind='area')
ax2 = ax1.twinx()
ax2.plot(gp.index, gp.kd_ratio, c='white', linewidth=4)
colors = [line.get_color() for line in ax1.get_lines()]
lns1 = [mpatches.Patch(color=color, alpha=0.5) for color in colors]
lns1.reverse()
lns2 = mpatches.Patch(color='#ffffff', alpha=0.5)
hndls = lns1
hndls.append(lns2)
lbls = ['High Slot', 'Mid Slot', 'Low Slot', 'Performance (K/D)']
ax1.legend(handles=hndls, labels=lbls, loc=4, framealpha=0.7,
prop=legend_font
)
# Style the axes
ax1.set_title(f'Comparing Trajectory and Success of Sample Player', pad=30,
font_properties=title_font
)
ax1.set_xlabel('Datetime', labelpad=30,
font_properties=xlabel_font
)
ax1.set_ylabel('Percent of Total Investment', labelpad=30,
font_properties=ylabel_font
)
ax1.tick_params(length=8, width=2, labelsize=18, grid_color='#00bbff',
grid_alpha=0.3, grid_linestyle=':'
)
ax1.set_yticklabels([f'{x/100:.0%}' for x in ax1.get_yticks()],
fontsize=18)
ax1.margins(0)
ax2.set_ylabel('Performance (K/D Ratio)', labelpad=30,
font_properties=ylabel_font
)
ax2.tick_params(length=8, width=2, labelsize=18)
ax1.figure.set_size_inches(19.2, 10.8)
if layout is not None:
plt.subplots_adjust(left=layout['left'],
bottom=layout['bottom'],
right=layout['right'],
top=layout['top'],
wspace=layout['wspace'],
hspace=layout['hspace'])
plt.savefig(f'../visuals/graphs/filled_cid{cid}.png')
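# Note on plot_investment_area_performance(): the hi/mid/lo slot values are rescaled to percent
# of total investment per observation, e.g. hi=200, mid=100, lo=100 becomes 50% / 25% / 25%,
# so the stacked area always sums to 100% and the white K/D line is read against that scale.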
def plot_correlation_distributions(dfc: pd.DataFrame, sharey=True, layout=None):
"""Frequency plot of r values for BI&A Conference, Fall 2018, Stevens Inst.
of Tech.
    :param dfc: DataFrame of per-player Pearson r values with 'kd_od_ratio_corr' and
        'kd_od_prct_corr' columns
    :param sharey: whether the two histograms share a y axis
    :param layout: optional dict of gridspec keyword arguments
    :return:
    """
from matplotlib.font_manager import FontProperties
title_font = FontProperties(family='Arial', style='italic',
variant='small-caps', weight='roman',
size=54)
xlabel_font = FontProperties(family='Arial', weight='demi', size=40)
ylabel_font = FontProperties(family='Arial', weight='demi', size=40)
fig, axes = plt.subplots(nrows=1,
ncols=2,
sharey=sharey,
figsize=(32, 16),
gridspec_kw=layout)
columns = iter(['kd_od_ratio_corr', 'kd_od_prct_corr'])
measures = iter(['Ratios', 'Percentage'])
for ax in axes:
col = next(columns)
measure = next(measures)
dfc[col].hist(ax=ax, alpha=0.9, color='blue', bins=50)
# Style the axes
ax.grid(False)
ax.set_title(f'Correlations Between {measure}', pad=30,
font_properties=title_font
)
ax.set_xlabel('Pearson Correlation Coefficient (r)', labelpad=30,
font_properties=xlabel_font
)
ax.set_ylabel('Frequency', labelpad=30,
font_properties=ylabel_font
)
ax.tick_params(length=8, width=2, labelsize=18, grid_color='#00bbff',
grid_alpha=0.3, grid_linestyle=':'
)
a = ScrollableWindow(fig) # Pass the figure to the custom window
def plot_tseries(groups, cid, layout=None):
    """Plots a 3x3 grid of time series for a single character id (cid).
    :param groups: pandas GroupBy of the player time series, keyed by character id
    :param cid: character id of the group to plot
    :param layout: optional dict of gridspec keyword arguments
    :return:
    """
group = groups.get_group(cid)
fig, axes = plt.subplots(nrows=3, ncols=3,
figsize=(18, 20),
gridspec_kw=layout)
group.kd_ratio.plot(ax=axes[0][0], title='KD_RATIO')
group.kd_prct.plot(ax=axes[1][0], title='KD_PRCT')
group.kd_diff.plot(ax=axes[2][0], title='KD_DIFF')
group.hi_slot.plot(ax=axes[0][1], title='HI_SLOT')
group.mid_slot.plot(ax=axes[1][1], title='MID_SLOT')
group.lo_slot.plot(ax=axes[2][1], title='LO_SLOT')
group[['hi_slot', 'mid_slot', 'lo_slot']].plot(ax=axes[0][2],
title='ALL_SLOTS')
group.od_ratio.plot(ax=axes[1][2], title='OD_RATIO')
group.od_prct.plot(ax=axes[2][2], title='OD_PRCT')
a = ScrollableWindow(fig)
# ============================================================================ #
# Use the Command Line or a Terminal to do basic pre-filtering!
dfpd = pd.read_csv('../data/Series/players_frig_actv_ts-prd.csv', header=0)
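# A pandas-only alternative to shell pre-filtering (hedged sketch: the 'character_id' column
# name is an assumption; the remaining column names are the ones the plotting helpers read):
#   dfpd = pd.read_csv('../data/Series/players_frig_actv_ts-prd.csv', header=0,
#                      usecols=['character_id', 'kd_ratio', 'kd_prct', 'kd_diff', 'hi_slot',
#                               'mid_slot', 'lo_slot', 'od_ratio', 'od_prct'])
#   groups = dfpd.groupby('character_id')
#   plot_tseries(groups, cid=next(iter(groups.groups)))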
#!/usr/bin/env python
import requests
import os
import string
import random
import json
import datetime
import pandas as pd
import numpy as np
import moment
from operator import itemgetter
class IdsrAppServer:
def __init__(self):
self.dataStore = "ugxzr_idsr_app"
self.period = "LAST_7_DAYS"
self.ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.ID_LENGTH = 11
self.today = moment.now().format('YYYY-MM-DD')
print("Epidemic/Outbreak Detection script started on %s" %self.today)
self.path = os.path.abspath(os.path.dirname(__file__))
newPath = self.path.split('/')
newPath.pop(-1)
newPath.pop(-1)
self.fileDirectory = '/'.join(newPath)
self.url = ""
self.username = ''
self.password = ''
# programs
self.programUid = ''
self.outbreakProgram = ''
# TE Attributes
self.dateOfOnsetUid = ''
self.conditionOrDiseaseUid = ''
self.patientStatusOutcome = ''
self.regPatientStatusOutcome = ''
self.caseClassification = ''
self.testResult=''
self.testResultClassification=''
self.epidemics = {}
self.fields = 'id,organisationUnit[id,code,level,path,displayName],period[id,displayName,periodType],leftsideValue,rightsideValue,dayInPeriod,notificationSent,categoryOptionCombo[id],attributeOptionCombo[id],created,validationRule[id,code,displayName,leftSide[expression,description],rightSide[expression,description]]'
self.eventEndPoint = 'analytics/events/query/'
# Get Authentication details
def getAuth(self):
with open(os.path.join(self.fileDirectory,'.idsr.json'),'r') as jsonfile:
auth = json.load(jsonfile)
return auth
def getIsoWeek(self,d):
ddate = datetime.datetime.strptime(d,'%Y-%m-%d')
return datetime.datetime.strftime(ddate, '%YW%W')
def formatIsoDate(self,d):
return moment.date(d).format('YYYY-MM-DD')
def getDateDifference(self,d1,d2):
if d1 and d2 :
delta = moment.date(d1) - moment.date(d2)
return delta.days
else:
return ""
def addDays(self,d1,days):
if d1:
newDay = moment.date(d1).add(days=days)
return newDay.format('YYYY-MM-DD')
else:
return ""
# create aggregate threshold period
# @param n number of years
# @param m number of periods
# @param type seasonal (SEASONAL) or Non-seasonal (NON_SEASONAL) or case based (CASE_BASED)
def createAggThresholdPeriod(self,m,n,type):
periods = []
currentDate = moment.now().format('YYYY-MM-DD')
currentYear = self.getIsoWeek(currentDate)
if(type == 'SEASONAL'):
for year in range(0,n,1):
currentYDate = moment.date(currentDate).subtract(months=((year +1)*12)).format('YYYY-MM-DD')
for week in range(0,m,1):
currentWDate = moment.date(currentYDate).subtract(weeks=week).format('YYYY-MM-DD')
pe = self.getIsoWeek(currentWDate)
periods.append(pe)
elif(type == 'NON_SEASONAL'):
for week in range(0,(m+1),1):
currentWDate = moment.date(currentDate).subtract(weeks=week).format('YYYY-MM-DD')
pe = self.getIsoWeek(currentWDate)
periods.append(pe)
else:
pe = 'LAST_7_DAYS'
periods.append(pe)
return periods
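    # Illustrative example (supposing "today" falls in week 2019W10): with m=3, n=2 and
    # type='SEASONAL' this returns the same three-week window of each of the two previous
    # years, roughly ['2018W10', '2018W09', '2018W08', '2017W10', '2017W09', '2017W08'];
    # with type='NON_SEASONAL' it returns the current week plus the m preceding ones,
    # roughly ['2019W10', '2019W09', '2019W08', '2019W07']; any other type gives ['LAST_7_DAYS'].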
def getHttpData(self,url,fields,username,password,params):
url = url+fields+".json"
data = requests.get(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
def getHttpDataWithId(self,url,fields,idx,username,password,params):
url = url + fields + "/"+ idx + ".json"
data = requests.get(url, auth=(username, password),params=params)
if(data.status_code == 200):
return data.json()
else:
return 'HTTP_ERROR'
# Post data
def postJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data)
return submittedData
# Post data with parameters
def postJsonDataWithParams(self,url,endPoint,username,password,data,params):
url = url+endPoint
submittedData = requests.post(url, auth=(username, password),json=data,params=params)
return submittedData
# Update data
def updateJsonData(self,url,endPoint,username,password,data):
url = url+endPoint
submittedData = requests.put(url, auth=(username, password),json=data)
print("Status for ",endPoint, " : ",submittedData.status_code)
return submittedData
# Get array from Object Array
def getArrayFromObject(self,arrayObject):
arrayObj = []
for obj in arrayObject:
arrayObj.append(obj['id'])
return arrayObj
    # Check datastore existence
def checkDataStore(self,url,fields,username,password,params):
url = url+fields+".json"
storesValues = {"exists": "false", "stores": []}
httpData = requests.get(url, auth=(username, password),params=params)
if(httpData.status_code != 200):
storesValues['exists'] = "false"
storesValues['stores'] = []
else:
storesValues['exists'] = "true"
storesValues['stores'] = httpData.json()
return storesValues
# Get orgUnit
def getOrgUnit(self,detectionOu,ous):
ou = []
if((ous !='undefined') and len(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors']
else:
return ou
# Get orgUnit value
# @param type = { id,name,code}
def getOrgUnitValue(self,detectionOu,ous,level,type):
ou = []
if((ous !='undefined') and len(ous) > 0):
for oux in ous:
if(oux['id'] == detectionOu):
return oux['ancestors'][level][type]
else:
return ou
# Generate code
def generateCode(self,row=None,column=None,prefix='',sep=''):
size = self.ID_LENGTH
chars = string.ascii_uppercase + string.digits
code = ''.join(random.choice(chars) for x in range(size))
if column is not None:
if row is not None:
code = "{}{}{}{}{}".format(prefix,sep,row[column],sep,code)
else:
code = "{}{}{}{}{}".format(prefix,sep,column,sep,code)
else:
code = "{}{}{}".format(prefix,sep,code)
return code
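    # Illustrative output (ID_LENGTH is 11): generateCode(prefix='E', sep='_') produces
    # something like 'E_4XKQ9ZB2M7T', and the df.apply(...) call further below yields
    # 'E_<orgUnitCode>_<11-char random code>' for the epidemic code column.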
def createMessage(self,outbreak=None,usergroups=[],type='EPIDEMIC'):
message = []
organisationUnits = []
        users = usergroups if usergroups is not None else []
subject = ""
text = ""
if type == 'EPIDEMIC':
subject = outbreak['disease'] + " outbreak in " + outbreak['orgUnitName']
text = "Dear all," + type.lower() + " threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
elif type == 'ALERT':
subject = outbreak['disease'] + " alert"
text = "Dear all, Alert threshold for " + outbreak['disease'] + " is reached at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " on " + self.today
else:
subject = outbreak['disease'] + " reminder"
text = "Dear all," + outbreak['disease'] + " outbreak at " + outbreak['orgUnitName'] + " of " + outbreak['reportingOrgUnitName'] + " is closing in 7 days"
organisationUnits.append({"id": outbreak['orgUnit']})
organisationUnits.append({"id": outbreak['reportingOrgUnit']})
message.append(subject)
message.append(text)
message.append(users)
message.append(organisationUnits)
message = tuple(message)
return pd.Series(message)
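    # The returned Series is positional: [subject, text, users, organisationUnits]; this is
    # the payload that sendSmsAndEmailMessage() posts to the messageConversations endpoint below.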
def sendSmsAndEmailMessage(self,message):
messageEndPoint = "messageConversations"
sentMessages = self.postJsonData(self.url,messageEndPoint,self.username,self.password,message)
print("Message sent: ",sentMessages)
return sentMessages
#return 0
# create alerts data
def createAlerts(self,userGroup,values,type):
messageConversations = []
messages = { "messageConversations": []}
if type == 'EPIDEMIC':
for val in values:
                messageConversations.append(self.createMessage(val,userGroup,type))
messages['messageConversations'] = messageConversations
elif type == 'ALERT':
for val in values:
                messageConversations.append(self.createMessage(val,userGroup,type))
messages['messageConversations'] = messageConversations
elif type == 'REMINDER':
for val in values:
                messageConversations.append(self.createMessage(val,userGroup,type))
messages['messageConversations'] = messageConversations
else:
pass
for message in messageConversations:
msgSent = self.sendSmsAndEmailMessage(message)
print("Message Sent status",msgSent)
return messages
# create columns from event data
def createColumns(self,headers,type):
cols = []
for header in headers:
if(type == 'EVENT'):
if header['name'] == self.dateOfOnsetUid:
cols.append('onSetDate')
elif header['name'] == self.conditionOrDiseaseUid:
cols.append('disease')
elif header['name'] == self.regPatientStatusOutcome:
cols.append('immediateOutcome')
elif header['name'] == self.patientStatusOutcome:
cols.append('statusOutcome')
elif header['name'] == self.testResult:
cols.append('testResult')
elif header['name'] == self.testResultClassification:
cols.append('testResultClassification')
elif header['name'] == self.caseClassification:
cols.append('caseClassification')
else:
cols.append(header['name'])
elif (type == 'DATES'):
cols.append(header['name'])
else:
cols.append(header['column'])
return cols
# Get start and end date
def getStartEndDates(self,year, week):
d = moment.date(year,1,1).date
if(d.weekday() <= 3):
d = d - datetime.timedelta(d.weekday())
else:
d = d + datetime.timedelta(7-d.weekday())
dlt = datetime.timedelta(days = (week-1)*7)
return [d + dlt, d + dlt + datetime.timedelta(days=6)]
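    # Illustrative example: getStartEndDates(2019, 10) gives the Monday-to-Sunday pair bounding
    # week 10 of 2019, which works out to 2019-03-04 through 2019-03-10 for this implementation;
    # it is effectively the inverse of getIsoWeek() and is used to turn a detection period into
    # onset and close dates.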
    # create pandas DataFrame from event data
def createDataFrame(self,events,type=None):
if type is None:
if events is not None:
#pd.DataFrame.from_records(events)
dataFrame = pd.io.json.json_normalize(events)
else:
dataFrame = pd.DataFrame()
else:
cols = self.createColumns(events['headers'],type)
dataFrame = pd.DataFrame.from_records(events['rows'],columns=cols)
return dataFrame
# Detect using aggregated indicators
    # Confirmed, Deaths, Suspected
def detectOnAggregateIndicators(self,aggData,diseaseMeta,epidemics,ou,periods,mPeriods,nPeriods):
dhis2Events = pd.DataFrame()
detectionLevel = int(diseaseMeta['detectionLevel'])
reportingLevel = int(diseaseMeta['reportingLevel'])
m=mPeriods
n=nPeriods
if(aggData != 'HTTP_ERROR'):
if((aggData != 'undefined') and (aggData['rows'] != 'undefined') and len(aggData['rows']) >0):
df = self.createDataFrame(aggData,'AGGREGATE')
dfColLength = len(df.columns)
df1 = df.iloc[:,(detectionLevel+4):dfColLength]
df.iloc[:,(detectionLevel+4):dfColLength] = df1.apply(pd.to_numeric,errors='coerce').fillna(0).astype(np.int64)
# print(df.iloc[:,(detectionLevel+4):(detectionLevel+4+m)]) # cases, deaths
### Make generic functions for math
if diseaseMeta['epiAlgorithm'] == "NON_SEASONAL":
# No need to do mean for current cases or deaths
df['mean_current_cases'] = df.iloc[:,(detectionLevel+4)]
df['mean_mn_cases'] = df.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].mean(axis=1)
df['stddev_mn_cases'] = df.iloc[:,(detectionLevel+5):(detectionLevel+4+m)].std(axis=1)
df['mean20std_mn_cases'] = (df.mean_mn_cases + (2*df.stddev_mn_cases))
df['mean15std_mn_cases'] = (df.mean_mn_cases + (1.5*df.stddev_mn_cases))
df['mean_current_deaths'] = df.iloc[:,(detectionLevel+5+m)]
df['mean_mn_deaths'] = df.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].mean(axis=1)
df['stddev_mn_deaths'] = df.iloc[:,(detectionLevel+6+m):(detectionLevel+6+(2*m))].std(axis=1)
df['mean20std_mn_deaths'] = (df.mean_mn_deaths + (2*df.stddev_mn_deaths))
df['mean15std_mn_deaths'] = (df.mean_mn_deaths + (1.5*df.stddev_mn_deaths))
# periods
df['period']= periods[0]
startOfMidPeriod = periods[0].split('W')
startEndDates = self.getStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
df['dateOfOnSetWeek'] = moment.date(startEndDates[0]).format('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
df['firstCaseDate'] = moment.date(startEndDates[0]).format('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
df['lastCaseDate'] = moment.date(startEndDates[1]).format('YYYY-MM-DD')
df['endDate'] = ""
df['closeDate'] = moment.date(startEndDates[1]).add(days=int(diseaseMeta['incubationDays'])).format('YYYY-MM-DD')
if diseaseMeta['epiAlgorithm'] == "SEASONAL":
df['mean_current_cases'] = df.iloc[:,(detectionLevel+4):(detectionLevel+3+m)].mean(axis=1)
df['mean_mn_cases'] = df.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].mean(axis=1)
df['stddev_mn_cases'] = df.iloc[:,(detectionLevel+3+m):(detectionLevel+3+m+(m*n))].std(axis=1)
df['mean20std_mn_cases'] = (df.mean_mn_cases + (2*df.stddev_mn_cases))
df['mean15std_mn_cases'] = (df.mean_mn_cases + (1.5*df.stddev_mn_cases))
df['mean_current_deaths'] = df.iloc[:,(detectionLevel+3+m+(m*n)):(detectionLevel+3+(2*m)+(m*n))].mean(axis=1)
df['mean_mn_deaths'] = df.iloc[:,(detectionLevel+3+(2*m)+(m*n)):dfColLength-1].mean(axis=1)
df['stddev_mn_deaths'] = df.iloc[:,(detectionLevel+3+(2*m)+(m*n)):dfColLength-1].std(axis=1)
df['mean20std_mn_deaths'] = (df.mean_mn_deaths + (2*df.stddev_mn_deaths))
df['mean15std_mn_deaths'] = (df.mean_mn_deaths + (1.5*df.stddev_mn_deaths))
# Mid period for seasonal = mean of range(1,(m+1)) where m = number of periods
midPeriod = int(np.median(range(1,(m+1))))
df['period']= periods[midPeriod]
startOfMidPeriod = periods[midPeriod].split('W')
startEndDates = self.getStartEndDates(int(startOfMidPeriod[0]),int(startOfMidPeriod[1]))
df['dateOfOnSetWeek'] = moment.date(startEndDates[0]).format('YYYY-MM-DD')
# First case date is the start date of the week where outbreak was detected
df['firstCaseDate'] = moment.date(startEndDates[0]).format('YYYY-MM-DD')
# Last case date is the end date of the week boundary.
startOfEndPeriod = periods[(m+1)].split('W')
endDates = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).format('YYYY-MM-DD')
df['lastCaseDate'] = moment.date(startEndDates[0] + datetime.timedelta(days=(m-1)*(7/2))).format('YYYY-MM-DD')
df['endDate'] = ""
df['closeDate'] = moment.date(startEndDates[0]).add(days=(m-1)*(7/2)+ int(diseaseMeta['incubationDays'])).format('YYYY-MM-DD')
df['reportingOrgUnitName'] = df.iloc[:,reportingLevel-1]
df['reportingOrgUnit'] = df.iloc[:,detectionLevel].apply(self.getOrgUnitValue,args=(ou,(reportingLevel-1),'id'))
df['orgUnit'] = df.iloc[:,detectionLevel]
df['orgUnitName'] = df.iloc[:,detectionLevel+1]
df['orgUnitCode'] = df.iloc[:,detectionLevel+2]
dropColumns = [col for idx,col in enumerate(df.columns.values.tolist()) if idx > (detectionLevel+4) and idx < (detectionLevel+4+(3*m))]
df.drop(columns=dropColumns,inplace=True)
df['confirmedValue'] = df.loc[:,'mean_current_cases']
df['deathValue'] = df.loc[:,'mean_current_deaths']
df['suspectedValue'] = df.loc[:,'mean_current_cases']
df['disease'] = diseaseMeta['disease']
df['incubationDays'] = diseaseMeta['incubationDays']
checkEpidemic = "mean_current_cases >= mean20std_mn_cases & mean_current_cases != 0 & mean20std_mn_cases != 0"
df.query(checkEpidemic,inplace=True)
if df.empty is True:
df['alert'] = "false"
if df.empty is not True:
df['epidemic'] = 'true'
# Filter out those greater or equal to threshold
df = df[df['epidemic'] == 'true']
df['active'] = "true"
df['alert'] = "true"
df['reminder'] = "false"
#df['epicode']=df['orgUnitCode'].str.cat('E',sep="_")
df['epicode'] = df.apply(self.generateCode,args=('orgUnitCode','E','_'), axis=1)
closedQuery = "epidemic == 'true' & active == 'true' & reminder == 'false'"
closedVigilanceQuery = "epidemic == 'true' & active == 'true' & reminder == 'true'"
df[['status','active','closeDate','reminderSent','dateReminderSent']] = df.apply(self.getEpidemicDetails,axis=1)
else:
# No data for cases found
pass
return df
else:
print("No outbreaks/epidemics for " + diseaseMeta['disease'])
return dhis2Events
# Replace all values with standard text
def replaceText(self,df):
df.replace(to_replace='Confirmed case',value='confirmedValue',regex=True,inplace=True)
df.replace(to_replace='Suspected case',value='suspectedValue',regex=True,inplace=True)
df.replace(to_replace='Confirmed',value='confirmedValue',regex=True,inplace=True)
df.replace(to_replace='Suspected',value='suspectedValue',regex=True,inplace=True)
df.replace(to_replace='confirmed case',value='confirmedValue',regex=True,inplace=True)
df.replace(to_replace='suspected case',value='suspectedValue',regex=True,inplace=True)
df.replace(to_replace='died',value='deathValue',regex=True,inplace=True)
df.replace(to_replace='Died case',value='deathValue',regex=True,inplace=True)
return df
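# Note (editorial sketch): with regex=True these are substring replacements, so the more
# specific labels ('Confirmed case', 'Suspected case') are listed before the bare
# 'Confirmed'/'Suspected' forms; an illustrative call is simply df = self.replaceText(df).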
# Get Confirmed,suspected cases and deaths
def getCaseStatus(self,row=None,columns=None,caseType='CONFIRMED'):
if caseType == 'CONFIRMED':
# if all(elem in columns.values for elem in ['confirmedValue']):
if set(['confirmedValue']).issubset(columns.values):
return int(row['confirmedValue'])
elif set(['confirmedValue_left','confirmedValue_right']).issubset(columns.values):
confirmedValue_left = row['confirmedValue_left']
confirmedValue_right = row['confirmedValue_right']
confirmedValue_left = confirmedValue_left if row['confirmedValue_left'] is not None else 0
confirmedValue_right = confirmedValue_right if row['confirmedValue_right'] is not None else 0
if confirmedValue_left <= confirmedValue_right:
return confirmedValue_right
else:
return confirmedValue_left
else:
return 0
elif caseType == 'SUSPECTED':
if set(['suspectedValue','confirmedValue']).issubset(columns.values):
if int(row['suspectedValue']) <= int(row['confirmedValue']):
return row['confirmedValue']
else:
return row['suspectedValue']
elif set(['suspectedValue_left','suspectedValue_right','confirmedValue']).issubset(columns.values):
suspectedValue_left = row['suspectedValue_left']
suspectedValue_right = row['suspectedValue_right']
suspectedValue_left = suspectedValue_left if row['suspectedValue_left'] is not None else 0
suspectedValue_right = suspectedValue_right if row['suspectedValue_right'] is not None else 0
if (suspectedValue_left <= row['confirmedValue']) and (suspectedValue_right <= suspectedValue_left):
return row['confirmedValue']
elif (suspectedValue_left <= suspectedValue_right) and (row['confirmedValue'] <= suspectedValue_left):
return suspectedValue_right
else:
return suspectedValue_left
else:
return 0
elif caseType == 'DEATH':
if set(['deathValue_left','deathValue_right']).issubset(columns.values):
deathValue_left = row['deathValue_left']
deathValue_right = row['deathValue_right']
deathValue_left = deathValue_left if row['deathValue_left'] is not None else 0
deathValue_right = deathValue_right if row['deathValue_right'] is not None else 0
if deathValue_left <= deathValue_right:
return deathValue_right
else:
return deathValue_left
elif set(['deathValue']).issubset(columns.values):
return row['deathValue']
else:
return 0
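# Editorial sketch (assumed usage, not from the original source): getCaseStatus() is designed
# for row-wise application, with the frame's columns passed through `args`, e.g.
#   df['confirmedValue'] = df.apply(self.getCaseStatus, args=(df.columns, 'CONFIRMED'), axis=1)
#   df['deathValue'] = df.apply(self.getCaseStatus, args=(df.columns, 'DEATH'), axis=1)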
# Check if epidemic is active or ended
def getStatus(self,row=None,status=None):
currentStatus = 'false'
if status == 'active':
if pd.to_datetime(self.today) < pd.to_datetime(row['endDate']):
currentStatus='active'
elif pd.to_datetime(row['endDate']) == (pd.to_datetime(self.today)):
currentStatus='true'
else:
currentStatus='false'
elif status == 'reminder':
if row['reminderDate'] == pd.to_datetime(self.today):
from timeseries_preparation.preparation import TimeseriesPreparator
import pandas as pd
import pytest
def test_duplicate_dates():
df = pd.DataFrame(
{
"date": [
"2021-01-01 12:12:00",
"2021-01-01 17:35:00",
"2021-01-02 14:55:00",
],
"id": [1, 1, 1],
}
)
frequency = "D"
time_column_name = "date"
timeseries_identifiers_names = ["id"]
df[time_column_name] = pd.to_datetime(df[time_column_name]).dt.tz_localize(tz=None)
preparator = TimeseriesPreparator(
time_column_name=time_column_name,
frequency=frequency,
)
with pytest.raises(ValueError):
dataframe_prepared = preparator._truncate_dates(df)
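# Hypothetical extra case (sketch only, not part of the original tests): by analogy with
# test_minutes_truncation below, an hourly frequency ("H") would be expected to truncate
# "2021-01-01 12:17:42" down to pd.Timestamp("2021-01-01 12:00:00").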
def test_minutes_truncation():
df = pd.DataFrame(
{
"date": [
"2021-01-01 12:17:42",
"2021-01-01 12:30:00",
"2021-01-01 12:46:00",
],
"id": [1, 1, 1],
}
)
frequency = "15min"
time_column_name = "date"
timeseries_identifiers_names = ["id"]
df[time_column_name] = pd.to_datetime(df[time_column_name]).dt.tz_localize(tz=None)
preparator = TimeseriesPreparator(
time_column_name=time_column_name,
frequency=frequency,
timeseries_identifiers_names=timeseries_identifiers_names,
)
dataframe_prepared = preparator._truncate_dates(df)
dataframe_prepared = preparator._sort(dataframe_prepared)
preparator._check_regular_frequency(dataframe_prepared)
assert dataframe_prepared[time_column_name][0] == pd.Timestamp("2021-01-01 12:15:00")
#####################################################################
## Functions related to water chemistry and dissolution of Calcite ##
#####################################################################
import numpy as np
import pandas
from .general import *
from scipy.optimize import brentq#, fminbound
from scipy.optimize import fsolve
from scipy.interpolate import LinearNDInterpolator, interp1d
#Define some useful constants
R = 8.3145 #J / (mol * K)
#H2OmolPerL = 55.5
def PCO2FromSolution(sol):
"""
Calculate partial pressure of CO2 from a solution object.
Parameters
----------
sol : solution object, numpy.ndarray of solution objects, or pandas Series of solution objects
Returns
-------
pCO2 : float, numpy.ndarray, or pandas series
partial pressure(s) of CO2 for the solution(s)
Notes
-----
Assumes an H2O-CO2-CaCO3 system. Uses equation 2.30 from Dreybrodt (1988) and assumes an open system.
"""
def calc_PCO2(this_sol):
Ca_conc = this_sol.ions['Ca']['conc_mol']
gamma_H = this_sol.activity_coef('H')
gamma_HCO3 = this_sol.activity_coef('HCO3')
gamma_Ca = this_sol.activity_coef('Ca')
pH = this_sol.pH
H_conc = 10.0**(-pH)/gamma_H
#calculate mass action constants
T_K = this_sol.T_K
K_c = calc_K_c(T_K)
K_2 = calc_K_2(T_K)
K_1 = calc_K_1(T_K)
K_0 = calc_K_0(T_K)
K_H = calc_K_H(T_K)
#pCO2 derived from equation 2.30 in Dreybrodt 1988 and assuming an
#open system, where f approaches infty. See notebook for details.
pCO2 = (gamma_H * gamma_HCO3 / (K_1*K_H*(1+1/K_0)) ) * \
(H_conc**2. + 2.0*H_conc*Ca_conc)
return pCO2
is_series = (type(sol)==pandas.core.series.Series)
#or (type(sol)==pandas.core.series.TimeSeries)
if (type(sol)==np.ndarray) or is_series:
pCO2 = np.zeros(np.size(sol))
for i, single_sol in enumerate(sol):
pCO2[i] = calc_PCO2(single_sol)
if is_series:
pCO2 = pandas.Series(pCO2,index=sol.index)
else:
pCO2 = calc_PCO2(sol)
return pCO2
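#Usage sketch (hedged; assumes `sol` is an olm solution object and `sol_series` a pandas
#Series of such objects, names chosen for illustration only):
# pCO2 = PCO2FromSolution(sol) # single solution -> float
# pCO2_series = PCO2FromSolution(sol_series) # Series of solutions -> Series of PCO2 values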
def concCaEqFromSolution(sol):
"""
Calculates the equilibrium concentration of calcium for a solution object.
First calculates the partial pressure of CO2, and then uses PCO2 and temperature to calculate equilibrium Ca.
Parameters
----------
sol : solution object, numpy.ndarray of solution objects, or pandas Series of solution objects
Returns
-------
CaEq : float, numpy.ndarray, or pandas Series
Equilibrium concentration(s) of CaEq for the given solution(s) in mol/L.
Notes
-----
Assumes an H2O-CO2-CaCO3 system.
"""
PCO2 = PCO2FromSolution(sol)
is_series = (type(sol)==pandas.core.series.Series)
#or (type(sol)==pandas.core.series.TimeSeries)
if (type(sol)==np.ndarray) or is_series:
T_C = np.zeros(np.size(sol))
for i, single_sol in enumerate(sol):
T_C[i] = single_sol.T_C
else:
T_C = sol.T_C
CaEq = concCaEqFromPCO2(PCO2, T_C = T_C)
return CaEq
#Calculate the equilibrium concentration of Ca using PCO2 and T_C
def concCaEqFromPCO2(PCO2, T_C = 25.):
"""
Calculates the equilibrium concentration of calcium using PCO2 and temp.
Iteratively solves for the equilibrium concentration of calcium from PCO2 and temp. First guesses that activity coefficients are 1, and then iterates to solution using scipy's brentq function.
Parameters
----------
PCO2 : float, numpy.ndarray, or pandas Series
partial pressure of CO2 (atm)
T_C : float, numpy.ndarray, or pandas Series (optional)
temperature of solution in degrees Celsius (default = 25 C)
Returns
-------
CaEq : float, numpy.ndarray, or pandas Series
Equilibrium concentration(s) of calcium in mol/L
Notes
-----
Assumes an H2O-CO2-CaCO3 system.
If a numpy array or pandas Series object are passed in as PCO2 arguments, then equilibrium concentrations will be found iteratively in a for-loop and returned as the same data type given in the argument.
"""
def Ca_minimize(Ca,T_C_func,K_c,K_2,K_1,K_H,PCO2_func):
if Ca<0:
return 10.
I = approxI(Ca)
properties = getProperties()
z_Ca = properties['Ca']['charge']
r_Ca = properties['Ca']['radius']
gamma_Ca = DebyeHuckel(I,z_Ca,r_Ca,T=T_C_func)
z_HCO3 = properties['HCO3']['charge']
r_HCO3 = properties['HCO3']['radius']
gamma_HCO3 = DebyeHuckel(I,z_HCO3,r_HCO3,T=T_C_func)
return Ca - (PCO2_func*K_1*K_c*K_H/(4.*K_2*gamma_Ca*gamma_HCO3**2.))**(1./3.)
#If there is only one value for T_C, but multiple PCO2 values, make an array of equal values
if (np.size(PCO2)>1) and (np.size(T_C) == 1):
T_C = T_C + np.zeros(np.size(PCO2))
T_K = CtoK(T_C)
K_c = calc_K_c(T_K)
K_2 = calc_K_2(T_K)
K_1 = calc_K_1(T_K)
K_H = calc_K_H(T_K)
#make a guess assuming activities are = 1
guess_Ca = (PCO2*K_1*K_c*K_H/(4.*K_2))**(1./3.)
maxCa = 10.*guess_Ca
minCa = guess_Ca
is_series = (type(PCO2)==pandas.core.series.Series)
#or (type(PCO2)==pandas.core.series.TimeSeries)
if (type(PCO2)==np.ndarray) or is_series:
#We have a numpy array or pandas Series. Loop through solutions.
CaEq = np.zeros(np.size(PCO2))
for i, single_PCO2 in enumerate(PCO2):
try:
CaEq[i] = brentq(Ca_minimize, guess_Ca[i], 10.*guess_Ca[i], args=(T_C[i],K_c[i],K_2[i],K_1[i],K_H[i], PCO2[i]))
except RuntimeError:
CaEq[i] = np.nan
if is_series:
#Create a pandas series object from the CaEq array
CaEq = pandas.Series(CaEq, index=PCO2.index)
else: #We only have a single value
try:
CaEq = brentq(Ca_minimize, guess_Ca, 10.*guess_Ca, args=(T_C,K_c,K_2,K_1,K_H,PCO2))
except RuntimeError:
CaEq = np.nan
return CaEq
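#Usage sketch (illustrative values only):
# CaEq = concCaEqFromPCO2(0.01, T_C=10.) # equilibrium Ca (mol/L) at PCO2 = 0.01 atm, 10 C
# CaEq_arr = concCaEqFromPCO2(np.array([0.001, 0.01, 0.05]), T_C=10.) # looped brentq solves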
#Calculate the equilibrium concentration of Ca using PCO2 and T_C
def PCO2EqFromCa(Ca, T_C = 25., I=None):
"""
Calculates the equilibrium PCO2 for a given concentration of calcium and temp.
Parameters
----------
Ca : float, numpy.ndarray, or pandas Series
Concentration of calcium in mol/L.
T_C : float, numpy.ndarray, or pandas Series (optional)
temperature of solution in degrees Celsius (default = 25 C)
I : float, numpy.ndarray, or pandas Series (optional)
Ionic strength of solution. If not provided, will be calculated from Ca alone.
Returns
-------
PCO2Eq : float, numpy.ndarray, or pandas Series
Partial pressure of CO2 (atm) at which the solution would be in equilibrium w.r.t. calcite.
Notes
-----
Assumes an H2O-CO2-CaCO3 system.
If a numpy array or pandas Series object are passed in as Ca arguments, then equilibrium PCO2s will be found iteratively in a for-loop and returned as the same data type given in the argument.
"""
#If there is only one value for T_C, but multiple PCO2 values, make an array of equal values
if (np.size(Ca)>1) and (np.size(T_C) == 1):
T_C = T_C + np.zeros(np.size(Ca))
#Calculate equilibrium constants as a function of T
T_K = CtoK(T_C)
K_c = calc_K_c(T_K)
K_2 = calc_K_2(T_K)
K_1 = calc_K_1(T_K)
K_H = calc_K_H(T_K)
#Calculate ionic strength
if I is None:
I = approxI(Ca)#Neglects other metals in the solution (might change this)
#Calculate activity coefficients
properties = getProperties()
z_Ca = properties['Ca']['charge']
r_Ca = properties['Ca']['radius']
gamma_Ca = DebyeHuckel(I,z_Ca,r_Ca,T=T_C)
z_HCO3 = properties['HCO3']['charge']
r_HCO3 = properties['HCO3']['radius']
gamma_HCO3 = DebyeHuckel(I,z_HCO3,r_HCO3,T=T_C)
#Calculate PCO2 from eqn 2.35c in Dreybrodt (1988)
PCO2 = Ca**3. * 4.*K_2*gamma_Ca*gamma_HCO3**2. / (K_1*K_c*K_H)
return PCO2
#Calculates equilibrium activity of H+ given PCO2
# - uses relaxed charge balance assumption
def activityHFromPCO2(PCO2, T_C = 25., CaEq = None):
"""
Calculates equilibrium activity of H+ given PCO2 using relaxed charge balance.
Calculates hydrogen activity at equilibrium given PCO2, temperature, and (optionally) equilibrium calcium concentration (mol/L). Assumes a relaxed charge balance (see 2.18a in Dreybrodt [1988]). If keyword CaEq is not given, then it is iteratively calculated using concCaEqFromPCO2().
Parameters
----------
PCO2 : float
partial pressure of CO2 (atm)
T_C : float, optional
temperature of solution in degrees Celsius (default = 25 C)
CaEq : float
Equilibrium calcium concentration (mol/L), optional
Returns
-------
aHeq : float
equilibrium activity of hydrogen ion (mol/L)
Notes
-----
Assumes an H2O-CO2-CaCO3 system.
"""
if CaEq is None:
CaEq = concCaEqFromPCO2(PCO2, T_C=T_C)
I = approxI(CaEq)
properties = getProperties()
z_Ca = properties['Ca']['charge']
r_Ca = properties['Ca']['radius']
gamma_Ca = DebyeHuckel(I,z_Ca,r_Ca,T=T_C)
z_HCO3 = properties['HCO3']['charge']
r_HCO3 = properties['HCO3']['radius']
gamma_HCO3 = DebyeHuckel(I,z_HCO3,r_HCO3,T=T_C)
T_K = CtoK(T_C)
K_c = calc_K_c(T_K)
K_2 = calc_K_2(T_K)
K_1 = calc_K_1(T_K)
K_H = calc_K_H(T_K)
a_Heq = ( ((K_1*K_H*PCO2)**2.)*K_2*gamma_Ca / (2*K_c*gamma_HCO3) )**(1./3.)
return a_Heq
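#Usage sketch: the equilibrium pH implied by a given PCO2 follows directly from the activity
#returned here (illustrative value; pH = -log10(a_H) by definition).
# a_Heq = activityHFromPCO2(0.01, T_C=15.)
# pH_eq = -np.log10(a_Heq)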
#Calculate H+ concentration from Ca and PCO2 assuming relaxed charge balance
def concHFromCaPCO2Relaxed(Ca, PCO2, T_C = 25.):
"""
Calculates concentration of H+ from calcium concentration and PCO2 using relaxed charge balance. Uses equation 2.30a from Dreybrodt (1988).
Parameters
----------
Ca : float
concentration of calcium in mol/L
PCO2 : float
partial pressure of CO2 (atm)
T_C : float, optional
temperature of solution in degrees Celsius (default = 25 C)
Returns
-------
concH : float
concentration of hydrogen ions
Notes
-----
Assumes an H2O-CO2-CaCO3 system.
"""
#from eqn 2.30a in Dreybrodt 1988
T_K = CtoK(T_C)
I = 3.*Ca
gamma_H = gamma('H', I, T_C=T_C)
gamma_HCO3 = gamma('HCO3', I, T_C=T_C)
K_1 = calc_K_1(T_K)
K_H = calc_K_H(T_K)
K_0 = calc_K_0(T_K)
HCO3_squared = K_1*K_H*PCO2*(1.+1./K_0)/(gamma_H*gamma_HCO3)
concH = -Ca + np.sqrt(Ca**2. + HCO3_squared)
return concH
#Calculate H+ concentration given Ca and PCO2, makes no relaxed charge
#balance assumption
def solutionFromCaPCO2(Ca, PCO2, T_C = 25., per_tol = 0.001, max_iter=1000):
"""
Creates a solution object from a given concentration of calcium and PCO2.
Parameters
----------
Ca : float, numpy.ndarray, or pandas Series
concentration of calcium in mol/L
PCO2 : float, numpy.ndarray, or pandas Series
partial pressure of CO2 (atm)
T_C : float, , numpy.ndarray, or pandas Series (optional)
temperature of solution in degrees Celsius (default = 25 C)
per_tol : float
the fractional change in H concentration between iterations upon which the iteration is terminated
max_iter : int
the number of iterations allowed in the solution. Returns None if solution does not converge in max_iter.
Returns
-------
sol : solution object, numpy.ndarray of solution objects, or pandas Series of solution objects
Notes
-----
Assumes an H2O-CO2-CaCO3 system. Guesses concentration of H using relaxed charge balance assumption, and then iterates to full solution.
"""
def calc_sol(Ca_in,PCO2_in,T_C_in,per_tol=0.001):
I_guess = 3.*Ca_in
T_K = CtoK(T_C_in)
H_guess = concHFromCaPCO2Relaxed(Ca_in,PCO2_in, T_C=T_C_in)
K_W = calc_K_W(T_K)
K_H = calc_K_H(T_K)
K_0 = calc_K_0(T_K)
K_1 = calc_K_1(T_K)
K_2 = calc_K_2(T_K)
K_6 = K_1*(1.+1./K_0)
found=False
niter = 0
while not(found):
#estimate activity coefficients
gamma_H = gamma('H', I_guess, T_C=T_C_in)
gamma_OH = gamma('OH', I_guess, T_C=T_C_in)
gamma_HCO3 = gamma('HCO3', I_guess, T_C=T_C_in)
gamma_CO3 = gamma('CO3', I_guess, T_C=T_C_in)
H_new= fsolve(lambda H: 2.*Ca_in + H - K_W/(gamma_H*gamma_OH*H)\
- K_6*K_H*PCO2_in/(gamma_HCO3*gamma_H*H)*\
(1. + 2.*K_2*gamma_HCO3/(gamma_CO3*gamma_H*H)),\
H_guess)[0]
#calculate ion concentrations from guess H+ concentration
OH = K_W/(gamma_OH*gamma_H*H_new)
CO3 = K_2*K_6*K_H*PCO2_in/((gamma_H*H_new)**2)/gamma_CO3
HCO3 = K_6*K_H*PCO2_in/(gamma_HCO3*gamma_H*H_new)
I_new = 0.5*(H_new + OH + HCO3 + 4.*CO3 + 4.*Ca_in)
if (np.abs(H_new - H_guess)/H_guess < per_tol):
found = True
else:
H_guess = H_new
I_guess = I_new
#calculate non-charge ions
niter += 1
if niter > max_iter:
return None
CO2 = K_H*PCO2_in
H2CO3 = (K_H/K_0)*PCO2_in
H2CO3s = H2CO3 + CO2
pH = -np.log10(H_new*gamma_H)
#create solution object with these species
sol = solution(['H', 'OH', 'CO3', 'HCO3', 'Ca', 'CO2', 'H2CO3', 'H2CO3s'],
[H_new, OH, CO3, HCO3, Ca_in, CO2, H2CO3, H2CO3s],
"mol/L", T=T_C_in, pH = pH)
return sol
is_series = (type(Ca)==pandas.core.series.Series)
#or (type(Ca)==pandas.core.series.TimeSeries)
if (type(Ca)==np.ndarray) or is_series:
sol_arr = np.empty(np.size(Ca),dtype=object)
for i, this_Ca in enumerate(Ca):
if np.size(T_C)==1:
sol_arr[i] = calc_sol(Ca[i],PCO2[i],T_C,per_tol=per_tol)
else:
sol_arr[i] = calc_sol(Ca[i],PCO2[i],T_C[i],per_tol=per_tol)
if is_series:
sol_arr = pandas.Series(sol_arr,index=Ca.index)
return sol_arr
else:
return calc_sol(Ca,PCO2,T_C,per_tol=per_tol)
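#Usage sketch (illustrative values; Ca in mol/L, PCO2 in atm):
# sol = solutionFromCaPCO2(1.5e-3, 0.01, T_C=12.)
# sol is None when the iteration fails to converge; otherwise sol.pH and
# sol.ions['Ca']['conc_mol'] are available as used elsewhere in this module.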
# Function to calculate H+ concentration from calcium concentration and pCO2
# using approximation in equation 2.30a (with an additional assumption that
# chi -> infty, as for an open system) from Dreybrodt (1988), which assumes
# pH < 8 such that carbonate and OH- species can be neglected
# Ca = Calcium concentration mol/L
# PCO2 = partial pressure of CO2
def solutionFromCaPCO2Relaxed(Ca, PCO2, T_C = 25.):
"""
Creates a solution object from a given concentration of calcium, PCO2, and optional temperature. Uses the approximate charge balance assumption (equation 2.30a in Dreybrodt [1988]). This is valid when pH < 8, such that CO3- and OH- species can be neglected.
Parameters
----------
Ca : float
concentration of calcium in mol/L
PCO2 : float
partial pressure of CO2 (atm)
T_C : float, optional
temperature of solution in degrees Celsius (default = 25 C)
Returns
-------
sol : solution object
Notes
-----
Assumes an H2O-CO2-CaCO3 system. Calculates concentration of H using relaxed charge balance assumption.
"""
T_K = CtoK(T_C)
H2CO3s = H2CO3sfromPCO2(PCO2, T_K=T_K)
H2CO3 = H2CO3fromPCO2(PCO2, T_K=T_K)
properties = getProperties()
I = approxI(Ca)
gamma_H = DebyeHuckel(I,
properties['H']['charge'],
properties['H']['radius'],
T = T_C)
gamma_HCO3 = DebyeHuckel(I,
properties['HCO3']['charge'],
properties['HCO3']['radius'],
T = T_C)
gamma_CO3 = DebyeHuckel(I,
properties['CO3']['charge'],
properties['CO3']['radius'],
T = T_C)
K_0 = calc_K_0(T_K)
K_1 = calc_K_1(T_K)
K_2 = calc_K_2(T_K)
K_H = calc_K_H(T_K)
K_6 = K_1*(1.+1./K_0)
v_over_w = K_1*H2CO3s/(gamma_H*gamma_HCO3)
#equation 2.30a with chi-->infty
H = -Ca + 0.5*np.sqrt(4.*(Ca**2.)+ 4.*v_over_w)
#from equation 2.24 with OH and CO3 neglected
HCO3 = 2.*Ca + H
CO3 = K_2*K_6*K_H*PCO2/((gamma_H*H)**2)/gamma_CO3
#calculate activity of H and pH (neglecting other ions)
H_activity = gamma_H*H
pH = -np.log10(H_activity)
sol = solution(['H', 'Ca', 'HCO3', 'H2CO3', 'H2CO3s', 'CO3'], [H, Ca, HCO3, H2CO3, H2CO3s, CO3], units="mol/L", T=T_C, T_units='C', pH=pH)
return sol
# Function to calculate solution from Calcium concentration and pH
# using approximation in equation 2.30a (with an additional assumption that
# chi -> infty, as for an open system) from Dreybrodt (1988), which assumes
# pH < 8 such that carbonate and OH- species can be neglected
# Ca = Calcium concentration mol/L
# pH = pH of the solution
def solutionFrompHCaRelaxed(Ca, pH, T_C = 25.):
"""
Creates a solution object from a given concentration of calcium and pH.
Creates a solution object from a given concentration of calcium, pH, and optional temperature. Uses the approximate charge balance assumption (equation 2.30a in Dreybrodt [1988]). This is valid when pH < 8, such that CO3- and OH- species can be neglected.
Parameters
----------
Ca : float
concentration of calcium in mol/L
pH : float
pH
T_C : float, optional
temperature of solution in degrees Celsius (default = 25 C)
Returns
-------
sol : solution object
Notes
-----
Assumes an H2O-CO2-CaCO3 system. Calculates concentration of H using relaxed charge balance assumption.
"""
T_K = CtoK(T_C)
properties = getProperties()
I = approxI(Ca)
gamma_H = DebyeHuckel(I,
properties['H']['charge'],
properties['H']['radius'],
T = T_C)
gamma_HCO3 = DebyeHuckel(I,
properties['HCO3']['charge'],
properties['HCO3']['radius'],
T = T_C)
gamma_CO3 = DebyeHuckel(I,
properties['CO3']['charge'],
properties['CO3']['radius'],
T = T_C)
gamma_Ca = DebyeHuckel(I,
properties['Ca']['charge'],
properties['Ca']['radius'],
T = T_C)
H = 10.0**(-pH)/gamma_H
#calculate mass action constants
K_c = calc_K_c(T_K)
K_2 = calc_K_2(T_K)
K_1 = calc_K_1(T_K)
K_0 = calc_K_0(T_K)
K_H = calc_K_H(T_K)
K_6 = K_1*(1.+1./K_0)
#pCO2 derived from equation 2.30 in Dreybrodt 1988 and assuming an
#open system, where f approaches infty. See notebook for details.
pCO2 = (gamma_H * gamma_HCO3 / (K_1*K_H*(1+1/K_0)) ) * \
(H**2. + 2.0*H*Ca)
H2CO3 = H2CO3fromPCO2(pCO2, T_K=T_K)
H2CO3s = H2CO3sfromPCO2(pCO2, T_K=T_K)
CO3 = K_2*K_6*K_H*pCO2/((gamma_H*H)**2)/gamma_CO3
K_1 = calc_K_1(T_K)
#equation 2.30a with chi-->infty
HCO3 = 2.*Ca + H
is_series = (type(Ca)==pandas.core.series.Series)
#or (type(Ca)==pandas.core.series.TimeSeries)
if (type(Ca)==np.ndarray) or is_series:
sol_arr = np.empty(np.size(Ca),dtype=object)
for i in range(np.size(Ca)):
if np.size(T_C)==1:
sol_arr[i] = solution(['H', 'Ca', 'HCO3', 'H2CO3', 'H2CO3s', 'CO3'], [H[i], Ca[i], HCO3[i], H2CO3[i], H2CO3s[i], CO3[i]], units="mol/L", T=T_C, T_units='C', pH=pH[i])
else:
sol_arr[i] = solution(['H', 'Ca', 'HCO3', 'H2CO3', 'H2CO3s', 'CO3'], [H[i], Ca[i], HCO3[i], H2CO3[i], H2CO3s[i], CO3[i]], units="mol/L", T=T_C[i], T_units='C', pH=pH[i])
if is_series:
sol_arr = pandas.Series(sol_arr, index=Ca.index)
return sol_arr
else:
sol = solution(['H', 'Ca', 'HCO3', 'H2CO3', 'H2CO3s', 'CO3'], [H, Ca, HCO3, H2CO3, H2CO3s, CO3], units="mol/L", T=T_C, T_units='C', pH=pH)
return sol
#Calculate concentration of Carbonic acid in equilibrium with a certain pCO2
def H2CO3fromPCO2(PCO2, T_K = 273.15 + 25., T_C = None):
"""
Calculate concentration of carbonic acid in equilibrium with a certain PCO2.
Parameters
----------
PCO2 : float
partial pressure of CO2
T_K : float, optional
temperature in degrees Kelvin (default = 273.15 + 25)
T_C : float, optional
temperature in degrees Celsius (default = None). If None, function assumes T_K was given or uses default value. If T_C is given in function call, then function uses T_C value to calculate T_K.
Returns
-------
H2CO3 : float
concentration of carbonic acid in mol/L
"""
if T_C is not None:
T_K = CtoK(T_C)
K_H = calc_K_H(T_K)
K_0 = calc_K_0(T_K)
H2CO3 = K_H*PCO2/K_0
return H2CO3
#Calculate concentration of Carbonic acid in equilibrium with a certain pCO2
def H2CO3sfromPCO2(PCO2, T_K = 273.15 + 25., T_C = None):
"""
Calculate concentration of carbonic acid + aqueous CO2 in equilibrium with a certain PCO2. [H2CO3s] = [H2CO3] + [CO2]
Parameters
----------
PCO2 : float
partial pressure of CO2
T_K : float, optional
temperature in degrees Kelvin (default = 273.15 + 25)
T_C : float, optional
temperature in degrees Celsius (default = None). If None, function assumes T_K was given or uses default value. If T_C is given in function call, then function uses T_C value to calculate T_K.
Returns
-------
H2CO3s : float
concentration of carbonic acid + aqueous CO2 in mol/L
"""
if T_C is not None:
T_K = CtoK(T_C)
K_H = calc_K_H(T_K)
K_0 = calc_K_0(T_K)
H2CO3s = K_H*PCO2*(1+1/K_0)
return H2CO3s
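#Usage sketch (illustrative, roughly atmospheric PCO2):
# H2CO3s_atm = H2CO3sfromPCO2(0.0004, T_C=25.)
#With the constants above K_0 is large, so H2CO3s is dominated by dissolved CO2 rather than true H2CO3.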
def pwpFromSolution(sol, PCO2=None, method='theory'):
"""
Calculates the PWP dissolution rate from a solution object.
Parameters
----------
sol : solution object, numpy.ndarray, or pandas Series
An olm solution object for which the calcite dissolution rate will be calculated.
PCO2 : float
The partial pressure of CO2 for the solution. If not given, it will be calculated from the solution object using PCO2FromSolution().
method : string
A string that is equal to 'theory', 'pascal', or 'franci' that specifies the version of the PWP equation to use.
Returns
-------
R : float, numpy.ndarray, or pandas Series
calcite dissolution rate according to the PWP equation (mmol/cm^2/s)
"""
if PCO2 is None:
PCO2 = PCO2FromSolution(sol)
def calc_rate(sol_in,PCO2_in):
#Check whether all necessary ions are present
if ('Ca' in sol_in.ions) and ('H' in sol_in.ions) and ('HCO3' in sol_in.ions):
#Check whether H2CO3s is present, and calculate if necessary
if not 'H2CO3s' in sol_in.ions:
K_H = calc_K_H(sol_in.T_K)
CO2 = K_H*PCO2_in
if not 'H2CO3' in sol_in.ions:
K_0 = calc_K_0(sol_in.T_K)
a_H2CO3s = CO2*(1.+1./K_0)
else:
a_H2CO3s = CO2 + sol_in.activity('H2CO3')
else: #If we have it already, just read it
a_H2CO3s = sol_in.activity('H2CO3s')
#Pull out other ion concentrations
a_Ca = sol_in.activity('Ca')
a_H = sol_in.activity('H')
a_HCO3 = sol_in.activity('HCO3')
T_K = sol_in.T_K
if method=='theory':
R = pwpRateTheory(a_Ca=a_Ca, a_H2CO3s=a_H2CO3s, a_H=a_H, a_HCO3=a_HCO3, T_K=T_K,PCO2=PCO2_in)
elif method=='pascal':
R = pwpRatePascal(a_Ca=a_Ca, a_H2CO3s=a_H2CO3s, a_H=a_H, a_HCO3=a_HCO3, T_K=T_K,PCO2=PCO2_in)
elif method=='franci':
R = pwpRateFranci(a_Ca=a_Ca, a_H2CO3s=a_H2CO3s, a_H=a_H, a_HCO3=a_HCO3, T_K=T_K,PCO2=PCO2_in)
else:
print("method must be set to 'theory', 'pascal', or 'franci'")
return -1
return R
else:
print("Not all necessary ions are present in the solution object.")
return -1
is_series = (type(sol)==pandas.core.series.Series)
#or (type(sol)==pandas.core.series.TimeSeries)
if (type(sol)==np.ndarray) or is_series:
rate_arr = np.empty(np.size(sol))
for i, this_sol in enumerate(sol):
rate_arr[i] = calc_rate(this_sol,PCO2[i])
if is_series:
rate_arr = pandas.Series(rate_arr, index=sol.index)
return rate_arr
else:
return calc_rate(sol,PCO2)
#Calculate dissolution rate from PWP equation using an input concentrations
# kappa4 is calculated using relation from Dreybrodt's PASCAL code.
def pwpRatePascal(a_Ca=0., a_H2CO3s=0., a_H=0., a_HCO3=0., T_K=25.+273.15,PCO2=0.):
"""
Calculates PWP rate using relation for kappa4 found in PASCAL code.
Calculates PWP dissolution rate for calcite using the relation for kappa4 that is found in the PASCAL code in Dreybrodt (1988). This is also given in equation 3 from Buhmann and Dreybrodt (1985), The kinetics of calcite dissolution and precipitation in geologically relevant situations of karst areas: 1. Open system. They say that it is a fit to the experimental data of Plummer et al. for values of PCO2 < 0.05 atm.
Parameters
----------
a_Ca : float
activity of calcium (mol/L)
a_H2CO3s : float
activity of carbonic acid (mol/L)
a_H : float
activity of hydrogen (mol/L)
a_HCO3 : float
activity of bicarbonate (mol/L)
T_K : float
temperature degrees Kelvin
PCO2 : float
partial pressure of CO2 (atm)
Returns
-------
R : float
calcite dissolution rate according to the PWP equation (mmol/cm^2/s)
"""
kappa1 = calc_kappa1(T_K)
kappa2 = calc_kappa2(T_K)
kappa3 = calc_kappa3(T_K)
kappa4 = calc_kappa4Pascal(T_K,PCO2)
R = kappa1*a_H + kappa2*a_H2CO3s + kappa3 - kappa4*a_Ca*a_HCO3
return R
#Calculate dissolution rate from PWP equation using an input
# concentrations kappa4 is calculated using theoretical relation with
# a_H equal to current value, as done in Franci's code
def pwpRateFranci(a_Ca=0., a_H2CO3s=0., a_H=0., a_HCO3=0., T_K=25.+273.15,PCO2=0.):
"""
Calculates PWP rate using relation for kappa4 used in Franci Gabrovsek's code.
Calculates PWP rate using relation for kappa4 used in Franci Gabrovsek's speleogenesis code (pers. commun.). This slight difference was discovered during testing of this code against Franci's calculations. In this case, a_H in the equation for kappa4 is the bulk value and not the equilibrium surface value for the given carbonic acid concentration.
Parameters
----------
a_Ca : float
activity of calcium (mol/L)
a_H2CO3s : float
activity of carbonic acid (mol/L)
a_H : float
activity of hydrogen (mol/L)
a_HCO3 : float
activity of bicarbonate (mol/L)
T_K : float
temperature degrees Kelvin
PCO2 : float
partial pressure of CO2 (atm)
Returns
-------
R : float
calcite dissolution rate according to the PWP equation (mmol/cm^2/s)
"""
kappa1 = calc_kappa1(T_K)
kappa2 = calc_kappa2(T_K)
kappa3 = calc_kappa3(T_K)
kappa4 = calc_kappa4Franci(T_K, a_H, a_H2CO3s)
R = kappa1*a_H + kappa2*a_H2CO3s + kappa3 - kappa4*a_Ca*a_HCO3
return R
def pwpRateTheory(a_Ca=0., a_H2CO3s=0., a_H=0., a_HCO3=0., T_K=25.+273.15,PCO2=0.):
"""
Calculates PWP rate using theoretical relation for kappa4 from PWP.
Calculates PWP rate using theoretical relation for kappa4 from PWP (as described by equation 25 in Plummer, Wigley, and Parkhurst (1978) and in Dreybrodt [1988] equation 6.22b). In this case, a_H in the equation for kappa4 is the equilibrium surface value for the given carbonic acid concentration, as specified in the theory.
Parameters
----------
a_Ca : float
activity of calcium (mol/L)
a_H2CO3s : float
activity of carbonic acid (mol/L)
a_H : float
activity of hydrogen (mol/L)
a_HCO3 : float
activity of bicarbonate (mol/L)
T_K : float
temperature degrees Kelvin
PCO2 : float
partial pressure of CO2 (atm)
Returns
-------
R : float
calcite dissolution rate according to the PWP equation (mmol/cm^2/s)
"""
kappa1 = calc_kappa1(T_K)
kappa2 = calc_kappa2(T_K)
kappa3 = calc_kappa3(T_K)
is_series = (type(a_Ca)==pandas.core.series.Series)
#or (type(a_Ca)==pandas.core.series.TimeSeries)
if (type(a_Ca)==np.ndarray) or is_series:
#We have a numpy array or pandas Series. Loop through and calculate rates individually
R = np.zeros(np.size(a_Ca))
for i, single_R in enumerate(R):
kappa4 = calc_kappa4Theory(T_K[i], PCO2[i], a_H2CO3s[i])
R[i] = kappa1[i]*a_H[i] + kappa2[i]*a_H2CO3s[i] + kappa3[i] - kappa4*a_Ca[i]*a_HCO3[i]
if is_series:
#Create a pandas series object from the R array
R = pandas.Series(R, index=a_Ca.index)
else:
kappa4 = calc_kappa4Theory(T_K, PCO2, a_H2CO3s)
R = kappa1*a_H + kappa2*a_H2CO3s + kappa3 - kappa4*a_Ca*a_HCO3
return R
def pwp_to_mm_yr(R, rho=2.6):
"""
Converts the PWP dissolution rates from mmol/cm^2/s to mm/year.
Parameters
----------
R : float
Dissolution rate as provided by PWP rate functions in units of mmol/cm^2/s.
rho : float
Density of rock in g/cm^3. Default is 2.6 g/cm^3, a typical value for limestone.
Returns
-------
E : float
Erosion rate in mm/year
"""
#First convert from mmol to mol.
R_mol = R*10.**(-3)
properties = getProperties()
CaCO3_weight = properties['Ca']['weight'] + properties['CO3']['weight']
#Convert to grams
R_g = R_mol*CaCO3_weight
#R in cm/s
R_cm_s = R_g/rho
#Convert to mm
R_mm_s = R_cm_s*10.
#Convert from seconds to years
E = R_mm_s*365.*24.*60.*60.
return E
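#Worked example of the conversion chain above (illustrative rate; molar mass of CaCO3 ~ 100 g/mol):
# R = 1e-6 mmol/cm^2/s -> 1e-9 mol/cm^2/s -> ~1e-7 g/cm^2/s
# /rho (2.6 g/cm^3) -> ~3.8e-8 cm/s -> ~3.8e-7 mm/s -> ~12 mm/yr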
#Functions to calculate rate constants in PWP Equation from Equations
# 6.13, 6.14, and 6.22b in Dreybrodt 1988
def calc_kappa1(T_K):
"""
Calculates kappa1 in the PWP equation.
Calculates kappa1 in the PWP equation, according to Equation 5 in Plummer, Wigley, and Parkhurst (1978) or Equation 6.13 of Dreybrodt (1988).
Parameters
----------
T_K : float
temperature Kelvin
Returns
-------
kappa1 : float
constant kappa1 in the PWP equation (cm/s)
"""
kappa1 = 10.**(0.198 - 444./T_K)
return kappa1
def calc_kappa2(T_K):
"""
Calculates kappa2 in the PWP equation.
Calculates kappa2 in the PWP equation, according to Equation 7 in Plummer, Wigley, and Parkhurst (1978) or Equation 6.14 of Dreybrodt (1988).
Parameters
----------
T_K : float
temperature Kelvin
Returns
-------
kappa2 : float
constant kappa2 in the PWP equation (cm/s)
"""
kappa2 = 10.**(2.84 - 2177./T_K)
return kappa2
def calc_kappa3(T_K):
"""
Calculates kappa3 in the PWP equation.
Calculates kappa3 in the PWP equation, according to Equations 8 and 9 in Plummer, Wigley, and Parkhurst (1978) or Equations 6.14a and 6.14b of Dreybrodt (1988).
Parameters
----------
T_K : float
temperature Kelvin
Returns
-------
kappa3 : float
constant kappa3 in the PWP equation (mmol/cm^2/s)
"""
if (np.size(T_K)>1) or (type(T_K)==pandas.core.series.Series):
kappa3 = np.zeros(np.size(T_K))
for i, this_temp in enumerate(T_K):
if (this_temp < 273.15+25):
kappa3[i] = 10.**(-5.86 - 317./T_K[i])
else:
kappa3[i] = 10.**(-1.10 - 1737./T_K[i])
else:
if (T_K < 273.15+25):
kappa3 = 10.**(-5.86 - 317./T_K)
else:
kappa3 = 10.**(-1.10 - 1737./T_K)
return kappa3
def calc_kappa4Kaufmann(T_K, PCO2):
"""
Calculates kappa4 in the PWP equation using the relation from Kaufmann and Dreybrodt (2007).
Parameters
----------
T_K : float
temperature Kelvin
PCO2 : float
partial pressure of CO2 (atm)
Returns
-------
kappa4 : float
constant kappa4 in the PWP equation (cm^4/mmol/s)
Notes
-----
"""
T_C = KtoC(T_K)
if PCO2>0.05:
kappa4 = 10.**(-2.375+0.025*T_C)
else:
kappa4 = 10.**(-2.375+0.025*T_C + 0.56*(-np.log10(PCO2)-1.3))
return kappa4
def calc_kappa4Pascal(T_K,PCO2):
"""
Calculates kappa4 in the PWP equation using fit from Buhmann and Dreybrodt (1985).
Parameters
----------
T_K : float
temperature Kelvin
PCO2 : float
partial pressure of CO2 (atm)
Returns
-------
kappa4 : float
constant kappa4 in the PWP equation (cm^4/mmol/s)
Notes
-----
See more info under documentation for pwpRatePascal().
"""
T_C = KtoC(T_K)
B = 3.077-0.0146*T_C
kappa4 = 10.**(-B)*(1/PCO2)**0.611
return kappa4
def calc_kappa4Franci(T_K, a_H, a_H2CO3s):
"""
Calculates kappa4 in the PWP equation using approach from Franci's code.
Parameters
----------
T_K : float
temperature Kelvin
a_H : float
activity of hydrogen (mol/L)
a_H2CO3s : float
activity of carbonic acid (mol/L)
Returns
-------
kappa4 : float
constant kappa4 in the PWP equation (cm^4/mmol/s)
Notes
-----
See more info under documentation for pwpRateFranci().
"""
K_2 = calc_K_2(T_K)
K_c = calc_K_c(T_K)
kappa1 = calc_kappa1(T_K)
kappa2 = calc_kappa2(T_K)
kappa3 = calc_kappa3(T_K)
kappa4 = (K_2/K_c)*(kappa1 + 1/a_H*(kappa2*a_H2CO3s + kappa3) )
return kappa4
def calc_kappa4Theory(T_K, PCO2, a_H2CO3s):
"""
Calculates kappa4 in the PWP equation using the theoretical relation for kappa4 from Plummer, Wigley, and Parkhurst (1978) Equation 25 (as described in Dreybrodt [1988] equation 6.22b). In this case, a_H in the equation for kappa4 is the equilibrium surface value for the given carbonic acid concentration, as specified in the theory.
Parameters
----------
T_K : float
temperature Kelvin
PCO2 : float
partial pressure of CO2 (atm)
a_H2CO3s : float
activity of carbonic acid (mol/L)
Returns
-------
kappa4 : float
constant kappa4 in the PWP equation (cm/s)
Notes
-----
See more info under documentation for pwpRateTheory().
"""
T_C = KtoC(T_K)
K_2 = calc_K_2(T_K)
K_c = calc_K_c(T_K)
kappa1 = calc_kappa1(T_K)
kappa2 = calc_kappa2(T_K)
kappa3 = calc_kappa3(T_K)
#calculate equilibrium activity of H at surface
a_Heq = activityHFromPCO2(PCO2, T_C=T_C)
kappa4 = (K_2/K_c)*(kappa1 + 1/a_Heq*(kappa2*a_H2CO3s + kappa3) )
return kappa4
#Functions for calculating mass action constants given temperature (K)
# from Dreybrodt, 1988
def calc_K_c(T_K):
"""
Calculates equilibrium constant for calcite.
Calculates equilibrium constant for calcite using equation from Table 2.2 in Dreybrodt (1988), originally reported in Plummer and Busenberg (1982).
Parameters
----------
T_K : float
temperature Kelvin
Returns
-------
K_c : float
equilibrium constant for calcite
"""
K_c = 10.**(-171.9065 - 0.077993*T_K + 2839.319/T_K + 71.595*np.log10(T_K))
return K_c
def calc_K_2(T_K):
"""
Calculates mass action constant for dissociation of bicarbonate.
Calculates mass action constant for dissociation of bicarbonate using equation from Table 2.2 in Dreybrodt (1988), originally reported in Plummer and Busenberg (1982).
Parameters
----------
T_K : float
temperature Kelvin
Returns
-------
K_2 : float
mass action constant for dissociation of bicarbonate
"""
K_2 = 10.**(-107.8871 - 0.03252849*T_K + 5151.79/T_K + 38.92561*np.log10(T_K) - 563713.9/T_K/T_K)
return K_2
def calc_K_1(T_K):
"""
Calculates mass action constant for dissociation of carbonic acid.
Calculates mass action constant for dissociation of carbonic acid using equation from Table 2.2 in Dreybrodt (1988), originally reported in Plummer and Busenberg (1982).
Parameters
----------
T_K : float
temperature Kelvin
Returns
-------
K_1 : float
mass action constant for dissociation of carbonic acid
"""
K_1 = 10.**(-356.3094 - 0.06091964*T_K + 21834.37/T_K + 126.8339*np.log10(T_K) - 1684915.0/T_K/T_K)
return K_1
def calc_K_0(T_K):
"""
Calculates mass action constant for conversion of CO2 to carbonic acid.
Calculates mass action constant for conversion of CO2 to carbonic acid using equation from Table 2.2 in Dreybrodt (1988), originally reported in Plummer and Busenberg (1982).
Parameters
----------
T_K : float
temperature Kelvin
Returns
-------
K_0 : float
mass action constant for conversion of CO2 to carbonic acid
"""
K_1 = 10.**(-356.3094 - 0.06091964*T_K + 21834.37/T_K + 126.8339*np.log10(T_K) - 1684915.0/T_K/T_K)
K_0 = 1.7*0.0001/K_1
return K_0
def calc_K_H(T_K):
"""
Calculates Henry's law constant for CO2.
Calculates Henry's law constant for CO2 using equation from Table 2.2 in Dreybrodt (1988), originally reported in Plummer and Busenberg (1982).
Parameters
----------
T_K : float
temperature Kelvin
Returns
-------
K_H : float
Henry's law constant for CO2.
"""
K_H = 10.**(108.3865 + 0.01985076*T_K - 6919.53/T_K - 40.45154*np.log10(T_K) + 669365./T_K/T_K)
return K_H
def calc_K_W(T_K):
"""
Calculates mass action constant for dissociation water.
Calculates mass action constant for dissociation of water using equation from Table 2.2 in Dreybrodt (1988), originally Harned and Hamer (1933).
Parameters
----------
T_K : float
temperature Kelvin
Returns
-------
K_W : float
mass action constant for dissociation of water
"""
K_W = 10.**(22.801 - 4787.3/T_K - 0.010365*T_K - 7.1321*np.log10(T_K))
return K_W
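#Sanity-check sketch (illustrative, 25 C): the constants above can be evaluated directly, e.g.
# T_K = 273.15 + 25.
# K_1, K_2, K_H = calc_K_1(T_K), calc_K_2(T_K), calc_K_H(T_K)
# -np.log10 of these gives pK values of roughly 6.3, 10.3, and 1.5 respectively.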
def calc_k1(T_K):
"""
Calculates k1+ kinetic constant from Table 1 of Kaufmann and Dreybrodt (2007).
Parameters
----------
T_K : float
temperature Kelvin
Returns
-------
k1 : float
kinetic constant k1+
Notes
-----
Related to the rate of CO2 conversion.
"""
k1 = 10.**(329.850 - 110.54*np.log10(T_K) - 17265.4/T_K)
# k1 = (10.**-3) * np.exp(34.69 - 9252./T_K)
return k1
def calc_k2(T_K):
"""
Calculates k2+ kinetic constant from Table 1 of Kaufmann and Dreybrodt (2007).
Parameters
----------
T_K : float
temperature Kelvin
Returns
-------
k2 : float
kinetic constant k2+
Notes
-----
Related to the rate of CO2 conversion.
"""
# k2 = 10.**(14.072 - 3025./T_K)
k2 = 10.**(13.635 - 2895./T_K)
return k2
def calc_k_neg1(T_K):
"""
Calculates k1- kinetic constant from Table 1 of Kaufmann and Dreybrodt (2007).
Parameters
----------
T_K : float
temperature Kelvin
Returns
-------
k_neg1 : float
kinetic constant k1-
Notes
-----
Related to the rate of CO2 conversion.
"""
k_neg1 = 10.**(13.558 - 3617.1/T_K)
return k_neg1
def calc_k_neg2(T_K):
"""
Calculates k2- kinetic constant from Table 1 in Kaufmann and Dreybrodt (2007).
Parameters
----------
T_K : float
temperature Kelvin
Returns
-------
k_neg2 : float
kinetic constant k2-
Notes
-----
Related to the rate of CO2 conversion.
"""
k_neg2 = 10.**(14.09 - 5308./T_K)
return k_neg2
#################
### Palmer Dissolution
#################
def createPalmerInterpolationFunctions(impure=True):
"""
Creates interpolation functions for kinetic rate constants using linear interpolation from Table from Palmer (1991). PCO2 is interpolated linearly in log space, since it spans several orders of magnitude. Primarily intended for internal use by palmerRate() and palmerFromSolution().
Parameters
----------
impure : boolean
Whether to use the table values for impure calcite (True) or pure calcite (False). Impure calcite is more representative of typical limestone. (default = True)
Returns
-------
(palmer_k1, palmer_C_Cs_T, palmer_n) : tuple
Interpolation functions for k1, C_Cs_T, and n.
"""
#Construct linear interpolation functions for Table from Palmer (1991)
T_arr = np.array([5.,15.,25.])
logPCO2_arr = np.log10(np.array([1.0,0.3,0.03,0.003]))
T_grid, CO2_grid = np.meshgrid(T_arr,logPCO2_arr)
if impure:
k1_grid = np.array([[0.07,0.09,0.12],
[0.03,0.035,0.04],
[0.009,0.015,0.02],
[0.006,0.01,0.015]])
else:
k1_grid = np.array([[0.11,0.14,0.18],
[0.044,0.055,0.065],
[0.014,0.018,0.028],
[0.01,0.015,0.02]])
C_Cs_T_grid = np.array([[0.8,0.85,0.9],
[0.65,0.7,0.8],
[0.6,0.7,0.8],
[0.6,0.7,0.8]])
n_arr = np.array([1.5,1.6,1.7,2.2])
palmer_k1 = LinearNDInterpolator((T_grid.ravel(), CO2_grid.ravel()), k1_grid.ravel())
palmer_C_Cs_T = LinearNDInterpolator((T_grid.ravel(), CO2_grid.ravel()), C_Cs_T_grid.ravel())
palmer_n = interp1d(logPCO2_arr, n_arr)
return (palmer_k1, palmer_C_Cs_T, palmer_n)
#Dissolution rate function from Palmer (1991)
def palmerRate(T_C, PCO2, Sat_Ratio, rho=2.6, impure=True, interp_funcs=np.array([])):
"""
Calculates the calcite/limestone dissolution rate given temperature, PCO2, and a calcium saturation ratio using relationship from Palmer (1991).
Parameters
----------
T_C : float
Temperature in degrees Celsius.
PCO2 : float
The partial pressure of CO2.
Sat_Ratio : float
The ratio of calcium concentration to the calcium concentration at equilibrium ([Ca]/[Ca]_eq).
rho : float
Density of rock in g/cm^3. (default=2.6)
impure : boolean
Whether to use the table values for impure calcite (True) or pure calcite (False). Impure calcite is more representative of typical limestone. (default = True)
interp_funcs : tuple
Primarily for internal use by palmerFromSolution(). Contains interpolation functions for kinetic rate constants. Automatically calculated if not passed.
Returns
-------
R : float
calcite dissolution rate according to the Palmer (1991) equation (mm/yr)
"""
if np.size(interp_funcs)!=3:
interp_funcs = createPalmerInterpolationFunctions(impure)
#look whether we are saturated
if Sat_Ratio>1:
return 0.
#Test whether we are outside of interpolation range
logPCO2 = np.log10(PCO2)
logPCO2_min = np.log10(0.003)
logPCO2_max = np.log10(1.0)
T_min = 5.
T_max = 25.
if logPCO2<logPCO2_min:
print("Warning! Low PCO2 outside of interpolation range is set to minimum from table.")
logPCO2=logPCO2_min
if logPCO2>logPCO2_max:
print("Warning! High PCO2 outside of interpolation range is set to maximum from table.")
logPCO2=logPCO2_max
if T_C<T_min:
print("Warning! Low temp outside of interpolation range is set to minimum from table.")
T_C=T_min
if T_C>T_max:
print("Warning! High temp outside of interpolation range is set to maximum from table.")
T_C = T_max
palmer_k1, palmer_C_Cs_T, palmer_n = interp_funcs
k1 = palmer_k1(T_C, logPCO2)
C_Cs_T = palmer_C_Cs_T(T_C, logPCO2)
n1 = palmer_n(logPCO2)
if Sat_Ratio>C_Cs_T:
#in non-linear kinetics regime, n=4
n2=4.
k2 = k1*(1.-C_Cs_T)**(n1-n2)
k=k2
n=n2
else:
n = n1
k = k1
#Calculate rate in mm/yr
return 10.*31.56*k*(1.-Sat_Ratio)**n/rho
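#Usage sketch (illustrative values): a half-saturated solution at 10 C under PCO2 = 0.01 atm.
# R_mm_yr = palmerRate(10., 0.01, 0.5)
#Inputs outside the tabulated T/PCO2 range are clipped to the table limits with a printed warning.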
def palmerFromSolution(sol, PCO2=np.array([]), rho=2.6, impure=True):
"""
Calculates the calcite/limestone dissolution rate from a solution object using relationship from Palmer (1991).
Parameters
----------
sol : solution object, numpy.ndarray, or pandas Series
An olm solution object for which the calcite dissolution rate will be calculated.
PCO2 : float, numpy.ndarray, or pandas Series
The partial pressure of CO2 for the solution(s). If not given, it will be calculated from the solution object using PCO2FromSolution().
rho : float
Density of rock in g/cm^3. (default=2.6)
impure : boolean
Whether to use the table values for impure calcite (True) or pure calcite (False). Impure calcite is more representative of typical limestone. (default = True)
Returns
-------
R : float, numpy.ndarray, or pandas Series
calcite dissolution rate according to the Palmer (1991) equation (mm/yr)
"""
interp_funcs = createPalmerInterpolationFunctions(impure=impure)
#Process solution
def calc_rate(sol, PCO2, rho):
T = sol.T_C
Ca_eq = concCaEqFromSolution(sol)
Ca = sol.ions['Ca']['conc_mol']
Sat_Ratio = Ca/Ca_eq
return palmerRate(T, PCO2, Sat_Ratio, rho, impure=impure, interp_funcs=interp_funcs)
#Loop through series or array if we have one
is_series = (type(sol)==pandas.core.series.Series)
#or (type(sol)==pandas.core.series.TimeSeries)
if (type(sol)==np.ndarray) or is_series:
rate_arr = np.empty(np.size(sol))
for i, this_sol in enumerate(sol):
if np.size(PCO2)==0:
this_PCO2 = PCO2FromSolution(this_sol)
elif np.size(PCO2)==1:
this_PCO2 = PCO2
elif np.size(PCO2)>1:
this_PCO2=PCO2[i]
rate_arr[i] = calc_rate(this_sol, this_PCO2, rho)
if is_series:
rate_arr = pandas.Series(rate_arr, index=sol.index)
return rate_arr
else:
if np.size(PCO2)==0:
PCO2 = PCO2FromSolution(sol)
return calc_rate(sol, PCO2, rho)
def dissRateFromCaPCO2(Ca, PCO2, T_C, rho=2.6, method=None, impure=True, per_tol=0.001, error=False, error_num=100, Ca_err=None, PCO2_err=None, molL=False, confidence=0., return_samples = False):
"""
Calculates the calcite/limestone dissolution rate from given calcium concentration and PCO2. Optionally uses Monte Carlo error propagation to calculate uncertainty in rates.
Parameters
----------
Ca : float, numpy.ndarray or pandas Series
Calcium concentration, default units are mg/L. Change to mol/L by setting keyword molL=True.
PCO2 : float, numpy.ndarray, or pandas Series
The partial pressure of CO2 for the solution(s).
T_C : float, numpy.ndarray, or pandas Series
The temperature of the water in degrees Celsius.
rho : float
Density of rock in g/cm^3. (default=2.6)
method : string
Determines method used to calculate dissolution rates. Set to either 'PWP' or 'Palmer'. This keyword is required.
impure : boolean
Used when calculating Palmer rates. Determines whether to use the table values for impure calcite (True) or pure calcite (False). Impure calcite is more representative of typical limestone. (default = True)
per_tol : float
the fractional change in H concentration between iterations upon which the iteration is terminated (see solutionFromCaPCO2). default=0.001
error: boolean
Set to true if you want to use Monte Carlo Error propagation to estimate error in dissolution rate. Requires values for Ca_err and PCO2_err. (default=False)
error_num : integer
Size of random sample used in Monte Carlo Error propagation. default=100
Ca_err: float, numpy.ndarray, or pandas Series
Percent error in calcium concentration(s) (1=100%)
PCO2_err: float, numpy.ndarray, or pandas Series
Percent error in PCO2 values (1=100%)
molL : boolean
Are Ca units in mol/L. If so, set to true. Otherwise, units assumed are mg/L. (default=False, i.e. mg/L)
confidence : float
If non-zero then confidence intervals will be used in error estimation (e.g. 90 = 90% confidence). Default is 0.
return_samples : boolean
If true (default is false), then return entire random samples within error arrays.
Returns
-------
R : float, numpy.ndarray, or pandas Series
calcite dissolution rate in mm/yr
R_err : float, numpy.ndarray, or pandas Series
error in dissolution rate (returned with R if keyword error=True)
"""
#Function for Monte Carlo error estimation
def err_est(Ca,PCO2,T_C,Ca_err,PCO2_err):
rate_sample = np.zeros(error_num)
Ca_factor = 1. + Ca_err*np.random.randn(error_num)
Ca_sample = Ca*Ca_factor
PCO2_factor = 1. + PCO2_err*np.random.randn(error_num)
PCO2_sample = PCO2*PCO2_factor
for j in np.arange(error_num):
# print j, Ca_sample[j], PCO2_sample[j]
#Create solution object
found = False
while not found:
rand_sol = solutionFromCaPCO2(Ca_sample[j], PCO2_sample[j], T_C=T_C, per_tol=per_tol)
if rand_sol is not None:
found=True
else:
new_Ca_factor = 1. + Ca_err*np.random.randn(1)
Ca_sample[j] = Ca*new_Ca_factor
print('Solution did not converge for this error calculation.')
print('Ca_sample=',Ca_sample[j], ' PCO2_sample=',PCO2_sample[j])
#Calculate dissolution rate
if method=='PWP':
rate_sample[j] = pwp_to_mm_yr(pwpFromSolution(rand_sol, PCO2=PCO2_sample[j]), rho=rho)
elif method=='Palmer':
rate_sample[j] = palmerFromSolution(rand_sol, PCO2_sample[j], rho=rho, impure=impure)
else:
print( "Invalid method keyword!")
return None
if return_samples:
return rate_sample
if confidence==0:
#Estimated error is standard deviation from random sample
return np.std(rate_sample)
else:
#Error estimated using confidence intervals
lower = np.percentile(rate_sample, 100.-confidence)
upper = np.percentile(rate_sample, confidence)
return [upper, lower]
if not molL:
#convert Ca units to mol/L
Ca = mgL_to_molL(Ca, 'Ca')
is_series = (type(Ca)==pandas.core.series.Series)
if (type(Ca)==np.ndarray) or is_series:
rate_arr = np.empty(np.size(Ca), dtype=float)
if error:
if return_samples:
err_arr = np.empty((np.size(Ca),error_num), dtype=float)
elif confidence == 0:
err_arr = np.empty(np.size(Ca), dtype=float)
else:
err_arr = np.empty((np.size(Ca),2), dtype=float)
for i, this_Ca in enumerate(Ca):
if (i % 100)==0:
print( "Solution number "+str(i))
#print( "Ca = ", this_Ca, " CO2 = ", PCO2[i], ' Ca_err = ', Ca_err[i], ' CO2_err = ', PCO2_err)
#Create solution object
if np.size(T_C)==1:
sol = solutionFromCaPCO2(this_Ca, PCO2[i], T_C=T_C, per_tol=per_tol)
else:
sol = solutionFromCaPCO2(this_Ca, PCO2[i], T_C=T_C[i], per_tol=per_tol)
#Calculate dissolution rate
if method=='PWP':
rate_arr[i] = pwp_to_mm_yr(pwpFromSolution(sol, PCO2=PCO2[i]), rho=rho)
elif method=='Palmer':
rate_arr[i] = palmerFromSolution(sol, PCO2[i],rho=rho,impure=impure)
else:
print("Invalid method keyword!")
return None
#Monte Carlo error estimate on rate
if error:
if np.size(T_C)==1:
if confidence == 0 and not return_samples:
err_arr[i] = err_est(this_Ca, PCO2[i], T_C, Ca_err, PCO2_err)
else:
err_arr[i,:] = err_est(this_Ca, PCO2[i], T_C, Ca_err, PCO2_err)
else:
if np.size(Ca_err)==1:
this_Ca_err = Ca_err
else:
this_Ca_err = Ca_err[i]
if np.size(PCO2_err)==1:
this_PCO2_err = PCO2_err
else:
this_PCO2_err = PCO2_err[i]
if confidence == 0 and not return_samples:
err_arr[i] = err_est(this_Ca, PCO2[i], T_C[i], this_Ca_err, this_PCO2_err)
else:
err_arr[i,:] = err_est(this_Ca, PCO2[i], T_C[i], this_Ca_err, this_PCO2_err)
if is_series:
rate_arr = pandas.Series(rate_arr, index=Ca.index)
import json
import pandas as pd
import os
import fiona
import geopandas as gpd
import numpy as np
from copy import deepcopy
from pathlib import Path
from flatten_dict import flatten
from poi_conflation_tool import POIConflationTool
# load config file
with open(Path(os.path.dirname(os.path.realpath(__file__)), '../config.json')) as f:
config = json.load(f)
class DataProcessor:
"""
Perform processing on the HVP dataset by combining verified stop information, data from
operational survey and vehicle information.
"""
def __init__(self):
"""
Initialises the class object with an empty dataframe to store the combined data for each batch.
"""
self.combined_trip_data = pd.DataFrame()
self.combined_stop_data = gpd.GeoDataFrame()
self.conflation_tool = POIConflationTool()
print('Loading vehicle type, place type, land use, and activity type mapping data...')
vehicletype_mapping = pd.read_excel(os.path.join(os.path.dirname(__file__), config['vehicletype_mapping']))
self.vehicletype_mapping = dict(zip(vehicletype_mapping['OriginalVehicleType'],
vehicletype_mapping['MappedVehicleType']))
placetype_mapping = pd.read_excel(os.path.join(os.path.dirname(__file__), config['placetype_mapping']))
self.placetype_mapping = dict(zip(placetype_mapping['OriginalPlaceType'],
placetype_mapping['NewPlaceType']))
landusetype_mapping = pd.read_excel(os.path.join(os.path.dirname(__file__), config['landusetype_mapping']))
self.landusetype_mapping = dict(zip(landusetype_mapping['OriginalLandUseType'],
landusetype_mapping['MappedLandUseType']))
activitytype_mapping = pd.read_excel(os.path.join(os.path.dirname(__file__), config['activitytype_mapping']))
self.activitytype_mapping = dict(zip(activitytype_mapping['OriginalActivityType'],
activitytype_mapping['MappedActivityType']))
print('Loading SLA land use data...')
self.landuse_data = self._load_landuse_data()
def load_batch_data(self, batch_num):
"""
Loads the batch stop data from local directory.
Return:
batch_data: geopandas.GeoDataFrame
Contains the processed stop data for a particular batch.
"""
batch_data = gpd.read_file(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] +
'batch_stop_data_{}.shp'.format(batch_num)),
encoding='utf-8')
self.combined_stop_data = pd.concat([self.combined_stop_data, batch_data], ignore_index=True)
return batch_data
def load_combined_data(self):
"""
Loads the combined stop data of all batches from local directory.
Return:
combined_stop_data: geopandas.GeoDataFrame
Contains the processed stop data for all batches.
"""
self.combined_stop_data = gpd.read_file(os.path.join(os.path.dirname(__file__),
config['processed_data_directory'] +
'combined_stop_data.shp'),
encoding='utf-8')
return self.combined_stop_data
def _vehicle_type_mapping(self, vehicle_type):
"""
Performs a vehicle type mapping to merge similar vehicle types together.
Parameters:
vehicle_type: str
Contains the original vehicle type.
Return:
self.vehicle_mapping[vehicle_type]: str
Contains the mapped vehicle type. Returns "Unknown" if vehicle_type is None.
"""
if (vehicle_type is None) or (vehicle_type == 'Nil') or (vehicle_type == ''):
return "Unknown"
if vehicle_type in self.vehicletype_mapping:
return self.vehicletype_mapping[vehicle_type]
else:
return "Unknown"
def _load_verified_trips(self, batch_num):
"""
Loads the verified trips data for a particular batch and removes the irrelevant columns.
Parameters:
batch_num: int
Contains the batch number.
Return:
verified_trips: pandas.DataFrame
Contains the verified trips information for a particular batch.
"""
with open(os.path.join(os.path.dirname(__file__),
config['verified_stop_directory'].format(batch_num=batch_num))) as f:
verified_trips = json.load(f)
verified_trips = pd.json_normalize(verified_trips)
# filter important features
retained_columns = ['DriverID', 'VehicleType', 'Stops', 'Travels', 'YMD', 'Timeline', 'DayOfWeekStr']
verified_trips = verified_trips[retained_columns]
# perform mapping for vehicle type information
verified_trips['VehicleType'] = verified_trips['VehicleType'].apply(self._vehicle_type_mapping)
return verified_trips
def _load_operation_survey(self, batch_num):
"""
Loads the operation survey for a particular batch and removes the irrelevant columns.
Parameters:
batch_num: int
Contains the batch number.
Return:
operation_data: pandas.DataFrame
Contains the operation survey data for a particular batch.
"""
# load operational survey
with open(os.path.join(os.path.dirname(__file__),
config['operation_survey_directory'].format(batch_num=batch_num))) as f:
operation_data = json.load(f)
operation_data = pd.json_normalize(operation_data)
# filter important features
important_features = ['Commodity', 'SpecialCargo', 'Company.Type', 'Industry',
'Driver.ID']
retained_columns = [column
for column in operation_data.columns
for feature in important_features
if feature in column]
retained_columns.remove('Commodity.OtherStr')
operation_data = operation_data[retained_columns]
return operation_data
def _generate_trip_id(self, verified_trips, batch_num):
"""
Assigns a unique ID to each trip that contains the batch number as well.
Parameters:
verified_trips: pandas.DataFrame
Contains the trip information for a particular batch.
batch_num: int
Contains the batch number.
Return:
verified_trips: pandas.DataFrame
Contains the trip information for a particular batch with unique ID for each trip.
"""
verified_trips = verified_trips.rename_axis('TripID').reset_index()
verified_trips['TripID'] = 'B{}_'.format(batch_num) + verified_trips['TripID'].astype(str)
return verified_trips
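# Illustrative note (editorial): for batch_num=1 the index-based IDs become 'B1_0', 'B1_1', ...,
# so trip IDs stay unique after batches are concatenated.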
def _process_timeline(self, timeline):
"""
Process the timeline information of a particular trip to extract the stop information.
Parameters:
timeline: list of dictionaries
Contains the stops made during a particular trip.
Return:
stops_df: pandas.DataFrame
Contains the stops made during a particular trip, concatenated and formatted as a single Dataframe.
"""
timeline_list = []
for i in range(len(timeline)):
for j in range(len(timeline.loc[i, 'Timeline'])):
stop_dict = flatten(timeline.loc[i, 'Timeline'][j], reducer='dot')
stop_dict['TripID'] = timeline.loc[i, 'TripID']
timeline_list.append(stop_dict)
# filter out stops and travel
timeline_df = pd.DataFrame(timeline_list)
stops_df = timeline_df[timeline_df['Type'] == 'Stop'].reset_index(drop=True)
# drop redundant columns
stops_df.rename(columns={'ID': 'StopID'}, inplace=True)
interested_columns = ['Attribute.PlaceType.', 'Attribute.Address', 'Attribute.StopLon', 'Attribute.StopLat',
'Attribute.Activity.', 'StartTime', 'EndTime', 'Duration', 'StopID', 'TripID']
retained_columns = [column
for column in stops_df.columns
for interested_column in interested_columns
if interested_column in column]
retained_columns.remove('Attribute.PlaceType.Applicable')
retained_columns.remove('Attribute.Activity.OtherStr')
stops_df = stops_df[retained_columns]
# remove 'Attribute_' from column name
stops_df.columns = [col_name.replace('Attribute.', '') for col_name in stops_df.columns]
return stops_df
def _activity_type_mapping(self, verified_stops):
"""
Performs an activity type mapping to merge similar activity types together.
Parameters:
verified_stops: pd.DataFrame
Contains the verified stops information with original activity types.
Return:
verified_stops: pd.DataFrame
Contains the verified stops information with the newly mapped activity types.
"""
activity_types = ['DeliverCargo', 'PickupCargo', 'Other', 'Shift', 'ProvideService',
'OtherWork', 'Meal', 'DropoffTrailer', 'PickupTrailer', 'Fueling',
'Personal', 'Passenger', 'Resting', 'Queuing', 'DropoffContainer',
'PickupContainer', 'Fail', 'Maintenance']
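# Note on the loop below: every Activity.* indicator that maps onto the same
# MappedActivity.* column is summed and then clipped to 1, so the merged column stays a
# binary flag (e.g. two trailer-related activities mapping to one category; the exact
# grouping is an assumption here, since it is defined by self.activitytype_mapping).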
for activity in activity_types:
if 'MappedActivity.{}'.format(self.activitytype_mapping[activity]) not in verified_stops.columns:
verified_stops['MappedActivity.{}'.format(self.activitytype_mapping[activity])] = deepcopy(
verified_stops['Activity.{}'.format(activity)]
)
else:
verified_stops['MappedActivity.{}'.format(self.activitytype_mapping[activity])] = \
verified_stops['MappedActivity.{}'.format(self.activitytype_mapping[activity])] + \
verified_stops['Activity.{}'.format(activity)]
idx = verified_stops[verified_stops['MappedActivity.{}'.format(
self.activitytype_mapping[activity])] > 0].index.tolist()
verified_stops.loc[idx, 'MappedActivity.{}'.format(self.activitytype_mapping[activity])] = 1
return verified_stops
def _extract_verified_stops(self, verified_trips, batch_num):
"""
Extracts the verified stop information based on the verified trips.
Parameters:
verified_trips: pandas.DataFrame
Contains the verified trip information for a particular batch.
batch_num: int
Contains the batch number.
Return:
verified_stops: pandas.DataFrame
Contains the verified stop information for a particular batch.
"""
# extract stop information and frequent places
verified_trips = self._generate_trip_id(verified_trips, batch_num)
timeline = verified_trips[['Timeline', 'TripID']]
other_trip_info = verified_trips.drop(columns=['Timeline'])
timeline_info = self._process_timeline(timeline)
# merge with other trip information
verified_stops = timeline_info.merge(other_trip_info, how='left', on='TripID')
# extract stop start time
verified_stops['StartHour'] = verified_stops['StartTime'].apply(lambda x: int(x.split(' ')[1].split('-')[0]))
# perform mapping of activity types
verified_stops = self._activity_type_mapping(verified_stops)
return verified_stops
def _remove_bus_data(self, trip_data):
"""
Removes all trip and stop data collected for buses.
Parameters:
trip_data: pandas.DataFrame
Contains the trip data for a particular batch.
Return:
filtered_trip_data: pandas.DataFrame
Contains the filtered trip data for a particular batch without any bus-related trips.
"""
filtered_trip_data = trip_data[trip_data['VehicleType'] != 'Bus']
return filtered_trip_data
def _landuse_type_mapping(self, landuse_type):
"""
Performs a land use type mapping to merge similar land use types together.
Parameters:
landuse_type: str
Contains the original landuse type from URA.
Return:
self.landuse_mapping[landuse_type]: str
Contains the mapped landuse type.
"""
if (landuse_type is None) or (landuse_type == 'Nil') or (landuse_type == '') or \
(landuse_type not in self.landusetype_mapping):
raise ValueError('Land use type {} is invalid'.format(landuse_type))
else:
return self.landusetype_mapping[landuse_type]
def _load_landuse_data(self):
""""
Loads the URA 2019 land use data.
Return:
landuse_data: pd.DataFrame
Contains the land use information from URA.
"""
fiona.drvsupport.supported_drivers['KML'] = 'rw'
landuse_data = gpd.read_file(os.path.join(os.path.dirname(__file__), config['ura_landuse']),
driver='KML')
landuse_data['LandUseType'] = landuse_data['Description'].apply(lambda x:
|
pd.read_html(x)
|
pandas.read_html
|
# Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training history."""
from functools import reduce
from typing import Dict, List, Tuple
import logging
import json
import pandas as pd
from flwr.common.typing import Scalar
class History:
"""History class for training and/or evaluation metrics collection."""
def __init__(self) -> None:
self.losses_distributed: List[Tuple[int, float]] = []
self.losses_centralized: List[Tuple[int, float]] = []
self.metrics_distributed: Dict[str, List[Tuple[int, Scalar]]] = {}
self.metrics_centralized: Dict[str, List[Tuple[int, Scalar]]] = {}
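# Sketch of the structure built up over rounds (metric names here are assumed examples):
#   self.metrics_distributed = {"accuracy": [(1, 0.72), (2, 0.78)], "loss": [(1, 0.61), (2, 0.55)]}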
def add_loss_distributed(self, rnd: int, loss: float) -> None:
"""Add one loss entry (from distributed evaluation)."""
self.losses_distributed.append((rnd, loss))
def add_loss_centralized(self, rnd: int, loss: float) -> None:
"""Add one loss entry (from centralized evaluation)."""
self.losses_centralized.append((rnd, loss))
def add_metrics_distributed(self, rnd: int, metrics: Dict[str, Scalar]) -> None:
"""Add metrics entries (from distributed evaluation)."""
logging.debug("add_metrics_distributed function")
logging.debug("PRINTING METRICS:")
logging.debug(metrics)
logging.debug("DONE PRINTING METRICS")
for key in metrics:
logging.debug("KEY:")
logging.debug(key)
# if not (isinstance(metrics[key], float) or isinstance(metrics[key], int)):
# continue # ignore non-numeric key/value pairs
if key not in self.metrics_distributed:
logging.debug("KEY NOT IN SELF.METRICS")
self.metrics_distributed[key] = []
logging.debug("GOING TO APPEND")
self.metrics_distributed[key].append((rnd, metrics[key]))
logging.debug("APPENDED")
# Dictionary to DataFrame
logging.debug("Metrics distributed Dict to DataFrame")
metrics = {k:[v] for k,v in metrics.items()} # WORKAROUND
metrics_df = pd.DataFrame(metrics)
filename = 'Round_' + str(rnd) + '_distributed_metrics.csv'
metrics_df.to_csv(filename)
def add_metrics_centralized(self, rnd: int, metrics: Dict[str, Scalar]) -> None:
"""Add metrics entries (from centralized evaluation)."""
for key in metrics:
# if not (isinstance(metrics[key], float) or isinstance(metrics[key], int)):
# continue # ignore non-numeric key/value pairs
if key not in self.metrics_centralized:
self.metrics_centralized[key] = []
self.metrics_centralized[key].append((rnd, metrics[key]))
# Dictionary to DataFrame
logging.debug("Metrics cenrtralized Dict to DataFrame")
metrics = {k:[v] for k,v in metrics.items()} # WORKAROUND
metrics_df =
|
pd.DataFrame(metrics)
|
pandas.DataFrame
|
"""
Created on Fri Feb 17 15:26:03 2017
@author: hum094
"""
"""
Functions related to the loading and processing of CCNC data from DMT
version: 1.1
date: 2017-03-23
Search for "xkcd" to find sections of the code that need attention
Things to do:
- simplify the execution functions (breaking up into readable chunks)
- write netcdf writing function
- write csv writing function
- ccnc status window
KJ - unused functions are marked. Removed references to RVI_Underway.
"""
import os
import sys
import pandas as pd
import numpy as np
import glob
import pickle
import importlib.util
import datetime
import argparse
import scipy
from itertools import compress
from scipy.interpolate import interp1d
# import libraries for chart
import tkinter as tk
from tkinter import ttk
import matplotlib
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import atmoscripts
pd.set_option('io.hdf.default_format', 'table')
def main():
'''
Collection of scripts to concatenate, QA/QC and perform flow calibrations
on raw data coming from the CCNC-100 instrument made by Droplet Measurement
Technologies.
Usage:
python CCNC.py raw_path output_path output_filetype output_time_resolution
filterBool flow_cal_file
where:
raw_path (str) - path where raw data files exist
output_path (str) - path where output data files are written
output_filetype (str) - either 'hdf', 'h5' or 'netcdf'
output_time_resolution (str) - resolution of output data. Must be in
the form '--#U' where # is a numeral and U is replaced with either
"S" for seconds, "M" for minutes, "H" for hours, or "D" for days
filterBool (bool) - apply filtering or just concatenate raw data
flow_cal_file (str) - file containing datetimes and flow data for flow
calibrations. This file must be in the same folder as the raw data
output_file_frequency (str) - describes how the output data is broken
up for memory management. Options are 'monthly','weekly','daily' or
'all'
'''
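# Example invocation (paths and values are hypothetical, shown only to illustrate the
# flags defined below):
#   python CCNC.py /data/ccn/raw -o /data/ccn/processed -ext h5 -q True --output_file_frequency monthly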
# If no input given, show the docstring only
if len(sys.argv[0:]) <= 1:
print(main.__doc__)
return
parser = argparse.ArgumentParser()
parser.add_argument("raw_path", help="path where raw data exists")
parser.add_argument("-o", "--output_path", help="path where output data\
files are written")
parser.add_argument("-ext", "--output_file_extension", help="Extension of \
the output filetype. Options include 'hdf', \
'h5' or 'netcdf'", default='hdf')
parser.add_argument("-res", "--output_time_resolution", help="time \
resolution of output data. Default 1 second",
default='1S')
parser.add_argument("-q", "--QCdata", help="Boolean specifying whether to \
perform QC actions on data. Default false",
default=False, type=bool)
parser.add_argument("--flow_cal_file", help="file containing datetimes and\
flow data for flow calibrations. This file \
must be in the same folder as the raw data.")
parser.add_argument("--output_file_frequency", help="describes how output \
data is broken up for memory management. \
Options are 'monthly','weekly','daily' or 'all'",
default='all')
parser.add_argument("-r", "--reload_from_source", help="forces reloading\
data from source csv files rather than loading\
from concatenated data. Boolean", default=True)
parser.add_argument("--atmos_press", help="specify atmospheric pressure of \
measurement location for supersaturation correction. ",
default=1010)
parser.add_argument("--cal_press", help="specify atmospheric pressure of \
calibration location for supersaturation correction. ",
default=830)
parser.add_argument("--log_filt_file", help="file containing datetimes for\
removal. This file must be in the same folder as the \
raw data.")
args = parser.parse_args()
print(args)
# Interpret the user input arguments
ccn_raw_data_path = args.raw_path
ccn_output_data_path = args.output_path
ccn_output_filetype = args.output_file_extension
output_time_resolution = args.output_time_resolution
QC = args.QCdata
flow_cal_file = args.flow_cal_file
output_file_frequency = args.output_file_frequency
reload_from_source = args.reload_from_source
atmos_press = args.atmos_press
cal_press = args.cal_press
log_filt_file = args.log_filt_file
# Test that the inputs are the correct format
if not os.path.exists(ccn_raw_data_path):
print('Raw data path does not exist. Exiting')
return
if (ccn_output_data_path is None) or (not os.path.exists(ccn_output_data_path)):
ccn_output_data_path = create_temp_output_directory()
print('Output data path does not exist. Creating new folder in:')
print(ccn_output_data_path)
assert ccn_output_filetype.lower() in ['netcdf', 'h5', 'hdf'], \
"output filetype invalid! Please use either 'netcdf', 'h5', or 'hdf'"
if output_time_resolution not in ['--1S', '--5S', '--10S', '--15S', '--30S',
'--1M', '--2M', '--5M', '--10M', '--15M',
'--20M', '--30M', '--1H', '--2H', '--3H',
'--6H', '--8H', '--12H', '--1D']:
output_time_resolution = '--1S'
print('No valid output time resolution given, no time resampling applied')
output_time_resolution = output_time_resolution[2:] # remove the '--'
assert isinstance(QC, bool), 'QC must be a boolean value. Exiting'
if flow_cal_file is not None:
assert os.path.isfile(flow_cal_file), "Can't find flow cal file!"
# Load and process the data
LoadAndProcess(ccn_raw_data_path,
ccn_output_data_path,
filename_base='CCN',
ccn_output_filetype=ccn_output_filetype,
output_time_resolution=output_time_resolution,
concat_file_frequency=output_file_frequency,
QC=QC,
flow_cal_file=flow_cal_file,
mask_period_file=log_filt_file,
force_reload_from_source=reload_from_source,
press_meas=atmos_press,
press_cal=cal_press)
return
# end main
def LoadAndProcess(ccn_raw_path=None,
ccn_output_path=None,
ccn_output_filetype='hdf',
load_from_filetype='csv',
filename_base='CCN',
force_reload_from_source=False,
QC=False,
output_time_resolution='1S',
concat_file_frequency='all',
mask_period_file=None,
mask_period_timestamp_df=None,
flow_cal_file=None,
flow_cal_df=None,
flow_setpt=500,
flow_polyDeg=2,
calibrate_for_pressure=False,
press_cal=1010,
press_meas=1010,
split_by_supersaturation=True,
input_filelist=None,
gui_mode=False,
gui_mainloop=None):
'''
Loads CCNC data from raw csv files, concatenates it, then saves it to output
files in either hdf or netcdf format.
Data can then be quality controlled using parameters output by the
instrument.
If a file containing flow calibration values is provided, it will then do a
flow calibration.
If a file is provided containing logged events for filtering, it will
remove these periods
If requested, it will perform exhaust removal (assuming it's on the RVI).
'''
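# Minimal usage sketch (paths and filenames are assumed for illustration only):
#   LoadAndProcess(ccn_raw_path='/data/ccn/raw/',
#                  QC=True,
#                  flow_cal_file='flow_cal.csv',
#                  output_time_resolution='5S',
#                  concat_file_frequency='monthly')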
print('ccn_raw_path is ', ccn_raw_path)
if ccn_output_path is None:
ccn_output_path = ccn_raw_path
if ccn_raw_path is None:
input_str_list = input_filelist[0].split('/')
ccn_raw_path = '/'.join(input_str_list[:-1])+'/'
if load_from_filetype == "csv":
# Concatenate csv files
concatenate_from_csv(ccn_raw_path,
ccn_output_path,
filename_base,
None, # Don't resample timebase at this point
concat_file_frequency,
ccn_output_filetype,
force_reload_from_source,
input_filelist=input_filelist,
gui_mode=gui_mode,
gui_mainloop=gui_mainloop)
raw_filelist = get_raw_filelist(ccn_output_path,
ccn_output_filetype,
substring='raw')
elif load_from_filetype in ['h5', 'hdf']:
if input_filelist is None:
raw_filelist = get_raw_filelist(ccn_raw_path,
load_from_filetype,
substring='.')
else:
raw_filelist = list(input_filelist)
for file in raw_filelist:
# Load data
if os.path.isfile(file):
ccn_data = load_ccn(ccn_raw_path,
load_from_filetype,
filepath=file)
else:
if load_from_filetype == "csv":
ccn_data = load_ccn(ccn_output_path,
ccn_output_filetype,
substring=file)
else:
ccn_data = load_ccn(ccn_raw_path,
load_from_filetype,
substring=file)
# plot_me(ccn_data, plot_each_step, 'CCN Number Conc', 'raw')
# Calculate CCN counting uncertainty
ccn_data = uncertainty_calc(ccn_data, 1, np.sqrt(ccn_data['CCN Number Conc']))
# QC data for internal parameters and for changes in SS
if QC:
ccn_data = DataQC(ccn_data)
save_as(ccn_data, ccn_output_path, 'QC', ccn_output_filetype, file)
# plot_me(ccn_data, plot_each_step,'CCN Number Conc', 'QC')
# Perform flow calibration if data is provided
if flow_cal_file is not None:
ccn_data = flow_cal(ccn_data,
flow_cal_file,
ccn_raw_path,
set_flow_rate=flow_setpt,
polydeg=flow_polyDeg)
save_as(ccn_data, ccn_output_path, 'flowCal', ccn_output_filetype, file)
# plot_me(ccn_data, plot_each_step,'CCN Number Conc','flow cal')
elif flow_cal_df is not None:
ccn_data = flow_cal(ccn_data,
measured_flows_df=flow_cal_df,
set_flow_rate=flow_setpt,
polydeg=flow_polyDeg)
save_as(ccn_data, ccn_output_path, 'flowCal', ccn_output_filetype, file)
# plot_me(ccn_data, plot_each_step,'CCN Number Conc','flow cal')
# Calibrate supersaturation
ccn_data = ss_cal(ccn_data, press_meas, press_cal)
save_as(ccn_data, ccn_output_path, 'ssCal', ccn_output_filetype, file)
# Correct for inlet losses #xkcd
# ccn_data = inlet_corrections(ccn_data, IE)
# save_as(ccn_data,ccn_output_data_path,'IE',ccn_output_filetype)
# plot_me(ccn_data, plot_each_step,'CCN Number Conc', 'IE')
# Filter for logged events
if mask_period_file is not None:
ccn_data = atmoscripts.log_filter(ccn_data, ccn_raw_path, mask_period_file)
save_as(ccn_data, ccn_output_path, 'logFilt', ccn_output_filetype, file)
# plot_me(ccn_data, plot_each_step,'CCN Number Conc','log filter')
elif mask_period_timestamp_df is not None:
ccn_data = atmoscripts.log_filter(ccn_data, log_mask_df=mask_period_timestamp_df)
save_as(ccn_data, ccn_output_path, 'logFilt', ccn_output_filetype, file)
# plot_me(ccn_data, plot_each_step,'CCN Number Conc','log filter')
# Filter for exhaust #xkcd
# save_as(ccn_data,ccn_output_path,'exhaustfilt',ccn_output_filetype)
# Separate into different supersaturations
ccn_data = ss_split(ccn_data, split_by_supersaturation)
save_as(ccn_data, ccn_output_path, 'ssSplit', ccn_output_filetype, file)
# plot_me(ccn_data, plot_each_step,None,'SS Split')
# Resample timebase and calculate uncertainties
ccn_data = timebase_resampler(ccn_data,
time_int=output_time_resolution,
split_by_supersaturation=split_by_supersaturation,
input_h5_filename=file,
output_filetype=ccn_output_filetype,
gui_mode=gui_mode,
gui_mainloop=gui_mainloop)
if os.path.isfile('netcdf_global_attributes.temp'):
os.remove('netcdf_global_attributes.temp')
return
# end LoadAndProcess
# unused
def plot_me(ccn_data, plot_each_step, var=None, title=''):
if plot_each_step:
if var is None:
# Plot everything
plt.plot(ccn_data)
else:
plt.plot(ccn_data[var])
plt.title(title)
plt.show()
return
def get_raw_filelist(ccn_output_path, output_filetype, substring='raw'):
'''
Retrieves a list of the raw files so that processing can be done on all
of them, not just the last one.
'''
os.chdir(ccn_output_path)
flist = glob.glob('*.'+output_filetype)
raw_filelist = [f for f in flist if substring in f]
raw_filelist = [check_file(f) for f in raw_filelist]
raw_filelist = [f for f in raw_filelist if f is not None]
raw_filelist.sort()
return raw_filelist
def check_file(fname):
try:
int(fname.split('.')[0][-1])
return fname
except:
return None
###############################################################################
### File IO
###############################################################################
def load_ccn(data_path=None,
filetype=None,
substring=None,
filepath=None):
'''
Loads data from a concatenated data file.
If substring is not None, only those files whose names contain the specified
substring are selected. This helps with processing when the data file is
split into monthly, weekly or daily files.
'''
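# For example (hypothetical call): load_ccn('/data/ccn/processed', 'h5', substring='2017_wk12')
# would pick the weekly file whose name contains '2017_wk12'.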
if filepath is not None:
if os.path.isfile(filepath):
fname = filepath
else:
os.chdir(data_path)
# Get most recently updated file:
filelist = glob.glob('*.'+filetype)
if substring is not None:
fname = [f for f in filelist if substring in f]
fname = fname[0]
else:
fname = max(filelist, key=os.path.getctime)
# Check that filetype is what is being asked for
ftype = fname.split('.')[1]
if ftype != filetype:
filetype = ftype
print('Load filetype coerced in load_ccn function')
if filetype in ['hdf', 'h5']:
data =
|
pd.read_hdf(fname, key='ccn')
|
pandas.read_hdf
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 02_WRMSSE_metric.ipynb (unless otherwise specified).
__all__ = ['get_agg', 'get_df_weights', 'combine_cols', 'append_df_unique_id', 'WRMSSE']
# Cell
#export
import os
from time import time
import gc
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
plt.rcParams['figure.figsize'] = (14,6)
plt.rcParams['font.size'] = 16
# Cell
def get_agg(df_stv):
"""Gets a sparse aggregaion matrix and index to align weights and scales."""
# Take the transpose of each dummy matrix to correctly orient the matrix
dummy_frames = [
pd.DataFrame({'Total': np.ones((df_stv.shape[0],)).astype('int8')}, index=df_stv.index).T,
pd.get_dummies(df_stv.state_id, dtype=np.int8).T,
pd.get_dummies(df_stv.store_id, dtype=np.int8).T,
pd.get_dummies(df_stv.cat_id, dtype=np.int8).T,
pd.get_dummies(df_stv.dept_id, dtype=np.int8).T,
pd.get_dummies(df_stv.state_id + '_' + df_stv.cat_id, dtype=np.int8).T,
|
pd.get_dummies(df_stv.state_id + '_' + df_stv.dept_id, dtype=np.int8)
|
pandas.get_dummies
|
import numpy as np
import scipy.stats as sp
import os
import pandas as pd
import h5py
import bokeh.io as bkio
import bokeh.layouts as blay
import bokeh.models as bmod
import bokeh.plotting as bplt
from bokeh.palettes import Category20 as palette
from bokeh.palettes import Category20b as paletteb
import plot_results as plt_res
import frequency_analysis as fan
colrs = palette[20] + paletteb[20] + palette[20] + paletteb[20]
def save_data_to_hdf5(data_folder_path, hdf5_file_path):
d_paths = [f_file for f_file in os.listdir(data_folder_path) if f_file.endswith('axgt')]
with pd.HDFStore(hdf5_file_path) as h5file:
for d_path in d_paths:
f_path = os.path.join(data_folder_path, d_path)
d_arr = np.loadtxt(f_path, dtype={'names': ('time', 'Potential', 'Im', 'ACh'),
'formats': ('float', 'float', 'float', 'float')},
skiprows=1)
d_df = pd.DataFrame(d_arr)
d_name = d_path.split('.')[0].replace(' ', '_').replace('(', '').replace(')', '')
print(d_name)
h5file.put('{}/data'.format(d_name), d_df, format='table', data_columns=True)
print(h5file)
def plot_spike_data(in_h5_file, exclude_list=[]):
sp_fig = bplt.figure(title='Membrane Potential vs Time')
sp_fig.xaxis.axis_label = 'time (sec)'
sp_fig.yaxis.axis_label = 'potential (mV)'
print('Plotting Potential values from {}'.format(in_h5_file))
my_lines = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'data' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
my_lines.append(sp_fig.line(h5_data[f_name]['time'], 1000.0*h5_data[f_name]['Potential'],
line_width=3, color=colrs[f_i])
)
legend_items.append((leg_name, [my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
sp_fig.add_layout(my_legend, 'right')
return sp_fig
def plot_selected_ach_data(in_h5_file, select_list):
sp_fig = bplt.figure(title='Acetylcholine vs Time')
sp_fig.xaxis.axis_label = 'time (sec)'
sp_fig.yaxis.axis_label = 'ACh'
print('Plotting ACh values from {}'.format(in_h5_file))
my_lines = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'data' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name in select_list:
my_lines.append(sp_fig.line(h5_data[f_name]['time'], h5_data[f_name]['ACh'],
line_width=1, color=colrs[f_i])
)
legend_items.append((leg_name, [my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
sp_fig.add_layout(my_legend, 'right')
return sp_fig
def plot_selected_spike_data(in_h5_file, select_list):
sp_fig = bplt.figure(title='Membrane Potential vs Time')
sp_fig.xaxis.axis_label = 'time (sec)'
sp_fig.yaxis.axis_label = 'potential (mV)'
print('Plotting Potential values from {}'.format(in_h5_file))
my_lines = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'data' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name in select_list:
my_lines.append(sp_fig.line(h5_data[f_name]['time'], 1000.0*h5_data[f_name]['Potential'],
line_width=1, color=colrs[f_i])
)
legend_items.append((leg_name, [my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
sp_fig.add_layout(my_legend, 'right')
return sp_fig
def plot_spike_raster(in_h5_file, exclude_list=[]):
rast_fig = bplt.figure(title='Spike Raster vs Time')
rast_fig.xaxis.axis_label = 'time (sec)'
print('Plotting Spike Raster values from {}'.format(in_h5_file))
my_circles = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 1
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_times' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
y_vals = f_i*np.ones(h5_data[f_name].shape)
if leg_name not in exclude_list:
my_circles.append(rast_fig.circle(h5_data[f_name], y_vals,
line_width=3, color=colrs[f_i-1])
)
legend_items.append((leg_name, [my_circles[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
rast_fig.add_layout(my_legend, 'right')
return rast_fig
def plot_instantaneous_spike_rate(in_h5_file, exclude_list=[], t_start=0):
isr_fig = bplt.figure(title='Instantaneous Spike Rate vs Time')
isr_fig.xaxis.axis_label = 'time (sec)'
isr_fig.yaxis.axis_label = 'spike rate (Hz)'
print('Plotting instantaneous spike rate from {}'.format(in_h5_file))
my_lines = []
my_circles = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
my_lines.append(isr_fig.line(h5_data[f_name]['time'], h5_data[f_name]['ISR'],
line_width=3, color=colrs[f_i])
)
my_circles.append(isr_fig.circle(h5_data[f_name]['time'], h5_data[f_name]['ISR'],
size=6, color=colrs[f_i])
)
legend_items.append((leg_name, [my_circles[-1], my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
isr_fig.add_layout(my_legend, 'right')
isr_fig.x_range.start = t_start
return isr_fig
def plot_spike_accel(in_h5_file, exclude_list=[], normalize=False, t_start=0):
if normalize:
acc_fig = bplt.figure(title='Normalized Spike Acceleration vs Time')
else:
acc_fig = bplt.figure(title='Spike Acceleration vs Time')
acc_fig.xaxis.axis_label = 'time (sec)'
acc_fig.yaxis.axis_label = 'spike acceleration (%)'
print('Plotting spike acceleration from {}'.format(in_h5_file))
my_lines = []
my_circles = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
if normalize:
max_accel = np.max(h5_data[f_name]['Spike_Accel'])
my_lines.append(acc_fig.line(h5_data[f_name]['time'], h5_data[f_name]['Spike_Accel']/max_accel,
line_width=3, color=colrs[f_i])
)
my_circles.append(acc_fig.circle(h5_data[f_name]['time'],
h5_data[f_name]['Spike_Accel']/max_accel,
size=6, color=colrs[f_i])
)
else:
my_lines.append(acc_fig.line(h5_data[f_name]['time'], h5_data[f_name]['Spike_Accel'],
line_width=3, color=colrs[f_i])
)
my_circles.append(acc_fig.circle(h5_data[f_name]['time'], h5_data[f_name]['Spike_Accel'],
size=6, color=colrs[f_i])
)
legend_items.append((leg_name, [my_circles[-1], my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
acc_fig.add_layout(my_legend, 'right')
acc_fig.x_range.start = t_start
return acc_fig
def plot_spike_accel_aligned(in_h5_file, exclude_list=[], normalize=False):
if normalize:
acc_fig = bplt.figure(title='Normalized Spike Acceleration vs Time')
else:
acc_fig = bplt.figure(title='Spike Acceleration vs Time')
acc_fig.xaxis.axis_label = 'time (sec)'
acc_fig.yaxis.axis_label = 'spike acceleration (%)'
print('Plotting spike acceleration from {}'.format(in_h5_file))
my_lines = []
my_circles = []
legend_items = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
ach_time = h5_data[name + '/ach_times'][0] + 0.5
acc_spikes = h5_data[name + '/spike_times'].loc[h5_data[name+'/spike_times'] > ach_time].to_numpy()
acc_isr = 1.0 / np.diff(acc_spikes)
acc_t = acc_spikes[:-1]
sp0 = acc_spikes[0]
freq_i = h5_data['frequency_table'].index[h5_data['frequency_table']['Filename'] == name]
freq_val = h5_data['frequency_table']['Frequency'][freq_i].values[0]
sp_accel = (acc_isr - freq_val)/freq_val*100
if normalize:
max_accel = np.max(sp_accel)
my_lines.append(
acc_fig.line(acc_t-sp0, sp_accel / max_accel,
line_width=2, color=colrs[f_i])
)
my_circles.append(acc_fig.circle(acc_t-sp0, sp_accel / max_accel,
size=6, color=colrs[f_i])
)
else:
my_lines.append(acc_fig.line(acc_t-sp0, sp_accel,
line_width=3, color=colrs[f_i])
)
my_circles.append(acc_fig.circle(acc_t-sp0, sp_accel,
size=6, color=colrs[f_i])
)
legend_items.append((leg_name, [my_circles[-1], my_lines[-1]]))
f_i += 1
my_legend = bmod.Legend(items=legend_items, location='center')
acc_fig.add_layout(my_legend, 'right')
return acc_fig
def plot_spike_cessation(in_h5_file, exclude_list=[], add_mean=True):
cess_names = []
cess_vals = []
with pd.HDFStore(in_h5_file) as h5_data:
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name_parts = f_name.split('/')[1].split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
cess_names.append(leg_name)
cess_vals.append(1.0/np.min(h5_data[f_name]['ISR']))
if add_mean:
mean_cess = np.mean(cess_vals)
cess_vals.append(mean_cess)
all_names = cess_names
mean_name = 'Mean: {0:.2f} sec'.format(mean_cess)
all_names.append(mean_name)
else:
all_names = cess_names
cess_fig = bplt.figure(x_range=all_names, title='Duration of Spike Cessation after ACh')
cess_fig.yaxis.axis_label = 'duration (sec)'
cess_fig.vbar(x=cess_names, top=cess_vals, width=0.9, color=colrs[0])
if add_mean:
cess_fig.vbar(x=[mean_name], top=[mean_cess], width=0.9, color='red')
cess_fig.xaxis.major_label_orientation = np.pi / 2
cess_fig.y_range.start = 0.0
return cess_fig
def plot_average_ifr(in_h5_file, exclude_list=[]):
with pd.HDFStore(in_h5_file) as h5_data:
h5_df = pd.DataFrame(h5_data['frequency_table'])
h5_df = h5_df.sort_values(by=['Filename'])
sel_tab = h5_data['frequency_table'][~h5_data['frequency_table']['Legend'].isin(exclude_list)]
sel_tab.sort_values('Legend', inplace=True)
x_names = sel_tab['Legend'].tolist()
x_names.append('Average')
cess_fig = bplt.figure(x_range=x_names,
title='Average Pre-ACh Frequency and ISR')
cess_fig.vbar(x=sel_tab['Legend'],
top=sel_tab['Frequency'],
width=0.9, color='blue', alpha=0.6, legend='Frequency')
cess_fig.vbar(x=sel_tab['Legend'],
top=sel_tab['ISR_Mean'],
width=0.6, color='red', alpha=0.6, legend='ISR')
mean_isr = np.mean(sel_tab['ISR_Mean'])
mean_freq = np.mean(sel_tab['Frequency'])
cess_fig.vbar(x=['Average'], top=[mean_freq], width=0.9, color='navy', alpha=0.6)
cess_fig.vbar(x=['Average'], top=[mean_isr], width=0.6, color='maroon', alpha=0.6)
cess_fig.xaxis.major_label_orientation = np.pi / 2
cess_fig.yaxis.axis_label = 'frequency (Hz)'
cess_fig.y_range.start = 0.0
cess_fig.legend.location = 'top_right'
return cess_fig
def plot_average_curve(in_h5_file, time_start=8.5, time_bin_size=0.1, exclude_list=[], spike_acceleration=False,
return_curve=False):
long_time = 0
with pd.HDFStore(in_h5_file) as h5_data:
name_sort = list(h5_data.keys())
name_sort.sort()
# get longest recorded time
for f_name in name_sort:
if 'data' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
e_time = np.max(h5_data[f_name]['time'])
if e_time > long_time:
long_time = e_time
# make array of time bins
t_bins = np.arange(time_start, long_time+time_bin_size, time_bin_size)
isr_avg = np.zeros((t_bins.size - 1,))
acc_avg = np.zeros((t_bins.size - 1,))
c_count = np.zeros((t_bins.size - 1,))
for f_name in name_sort:
if 'spike_times' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
acc_spikes = h5_data[name + '/spike_times'].loc[h5_data[name + '/spike_times'] > time_start].to_numpy()
acc_isrs = 1.0 / np.diff(acc_spikes)
acc_t = acc_spikes[:-1]
freq_i = h5_data['frequency_table'].index[h5_data['frequency_table']['Filename'] == name]
freq_val = h5_data['frequency_table']['Frequency'][freq_i].values[0]
sp_accels = (acc_isrs - freq_val) / freq_val * 100
sp_is = np.digitize(acc_t, t_bins)
for sp_i, sp_acc, sp_isr in zip(sp_is, sp_accels, acc_isrs):
isr_avg[sp_i] += sp_isr
acc_avg[sp_i] += sp_acc
c_count[sp_i] += 1
isr_avg = np.divide(isr_avg, c_count, where=np.greater(c_count, 0))
acc_avg = np.divide(acc_avg, c_count, where=np.greater(c_count, 0))
if spike_acceleration:
avg_fig = bplt.figure(title='Average Acceleration Versus Time')
avg_fig.yaxis.axis_label = 'spike acceleration (%)'
avg_fig.line(t_bins[:-1], acc_avg, line_width=3, color=colrs[0])
avg_fig.circle(t_bins[:-1], acc_avg, size=12, color=colrs[0])
else:
avg_fig = bplt.figure(title='Average Instantaneous Spike Rate Versus Time')
avg_fig.yaxis.axis_label = 'ISR (Hz)'
avg_fig.line(t_bins[:-1], isr_avg, line_width=3, color=colrs[0])
avg_fig.circle(t_bins[:-1], isr_avg, size=12, color=colrs[0])
avg_fig.xaxis.axis_label = 'time (sec)'
if return_curve:
if spike_acceleration:
return avg_fig, t_bins[:-1], acc_avg
else:
return avg_fig, t_bins[:-1], isr_avg
else:
return avg_fig
def plot_spike_cessation_vs_isr_variance(in_h5_file, exclude_list=[]):
cess_names = []
cess_vals = []
ifr_vars = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
cess_names.append(name)
cess_vals.append(1.0 / np.min(h5_data[f_name]['ISR']))
c_i = h5_data['frequency_table'].index[h5_data['frequency_table']['Filename'] == name]
ifr_vars.append(h5_data['frequency_table']['ISR_Var'][c_i].values[0])
cess_fig = bplt.figure(title='Spike Cessation vs ISR Variance')
cess_fig.circle(cess_vals, ifr_vars, size=12, color=colrs[0])
cess_fig.xaxis.axis_label = 'duration of spike cessation (sec)'
cess_fig.yaxis.axis_label = 'variance of ISR (Hz)'
return cess_fig
def plot_peak_acceleration_vs_spike_cessation(in_h5_file, exclude_list=[]):
fail_acc = []
fail_cess = []
fail_names = []
succ_acc = []
succ_cess = []
succ_names = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
ach_name = name + '/ach_times'
ach_start = h5_data[ach_name][0]
cess_val = 1.0 / np.min(h5_data[f_name]['ISR'])
acc_i = np.where(h5_data[f_name]['time'] < ach_start)
max_acc_pre = np.max(h5_data[f_name].loc[h5_data[f_name]['time'] < ach_start, 'Spike_Accel'].tolist())
max_acc = np.max(h5_data[f_name]['Spike_Accel'])
if max_acc <= 1.1*max_acc_pre:
fail_acc.append(max_acc)
fail_cess.append(cess_val)
fail_names.append(leg_name)
else:
succ_acc.append(max_acc)
succ_cess.append(cess_val)
succ_names.append(leg_name)
acc_fig = bplt.figure(title='Peak Spike Acceleration vs Duration of Spike Cessation')
acc_fig.circle(fail_cess, fail_acc, size=12, color='red', legend='no acceleration')
acc_fig.circle(succ_cess, succ_acc, size=12, color='green', legend='acceleration')
acc_fig.xaxis.axis_label = 'duration of spike cessation (sec)'
acc_fig.yaxis.axis_label = 'peak acceleration (%)'
print('Failed to Demonstrate Spike Acceleration')
print(fail_names)
print('Demonstrated at least 10% increase in ISR')
print(succ_names)
return acc_fig
def plot_peak_acceleration_vs_isr_variance(in_h5_file, exclude_list=[]):
acc_vals = []
var_vals = []
names = []
with pd.HDFStore(in_h5_file) as h5_data:
f_i = 0
name_sort = list(h5_data.keys())
name_sort.sort()
for f_name in name_sort:
if 'spike_rates' in f_name:
name = f_name.split('/')[1]
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
if leg_name not in exclude_list:
ach_name = name + '/ach_times'
ach_start = h5_data[ach_name][0]
c_i = h5_data['frequency_table'].index[h5_data['frequency_table']['Filename'] == name]
var_vals.append(h5_data['frequency_table']['ISR_Var'][c_i].values[0])
max_acc = np.max(h5_data[f_name]['Spike_Accel'])
acc_vals.append(max_acc)
names.append(leg_name)
acc_fig = bplt.figure(title='Peak Spike Acceleration vs ISR Variance')
acc_fig.circle(var_vals, acc_vals, size=12, color=colrs[0])
acc_fig.xaxis.axis_label = 'variance of ISR (Hz)'
acc_fig.yaxis.axis_label = 'peak acceleration (%)'
return acc_fig
def print_average_table(in_h5_file):
with pd.HDFStore(in_h5_file) as h5_data:
h5_df = pd.DataFrame(h5_data['frequency_table'])
print(h5_df)
def analyze_spike_data_from_hdf5(in_h5_file):
avg_freqs = []
avg_isrs = []
var_isrs = []
cell_names = []
legend_names = []
with pd.HDFStore(in_h5_file) as h5_data:
for f_i, f_name in enumerate(h5_data.keys()):
if '/data' in f_name:
print(f_name)
name = f_name.split('/')[1]
sp_name = '{}/spike_times'.format(name)
isr_name = '{}/spike_rates'.format(name)
ach_name = '{}/ach_times'.format(name)
name_parts = name.split('_')
leg_name = ' '.join(name_parts[:name_parts.index('CA1')])
# Calculate ACh Times
ach_i = np.where(h5_data[f_name]['ACh'] > 1e-5)
ach_times = pd.Series([h5_data[f_name]['time'][ach_i[0][0]], h5_data[f_name]['time'][ach_i[0][-1]]])
h5_data.put(ach_name, ach_times)
# Get spike times
sp_times = fan.get_spike_times(h5_data[f_name]['Potential'], h5_data[f_name]['time'])
h5_data.put(sp_name,
|
pd.Series(sp_times)
|
pandas.Series
|
import collections
import logging
import numpy as np
import pandas as pd
import core.event_study as esf
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class TestBuildLocalTimeseries(hut.TestCase):
def test_minutely1(self) -> None:
np.random.seed(42)
n_periods = 10
freq = "T"
start_date = pd.Timestamp("2009-09-29 10:00:00")
relative_grid_indices = list(range(-10, 10)) + [14]
idx =
|
pd.date_range(start_date, periods=n_periods, freq=freq)
|
pandas.date_range
|
"""
Functions to load datasets for the traffic project
"""
import os
import pandas as pd
def load_dataset(dataset_path):
"""
Load the dataset at the given path. Corrects the formatting and calculates a congestion ratio = 1 - expedite ratio.
:param str dataset_path: Path to dataset
:return: Pandas DataFrame containing the processed data
"""
assert os.path.exists(dataset_path), 'Dataset file doesn\'t exist!'
df = pd.read_csv(dataset_path,
names=['year', 'month', 'day', 'hour', 'minute', 'exp', 'cong', 'block', 'unknown'])
date_cols = ['year', 'month', 'day', 'hour', 'minute']
df['date'] = pd.to_datetime(df[date_cols])
df = df.set_index('date').drop(date_cols, axis=1)
for col in df.columns:
df[col] = df[col].str.rstrip('%').astype('float') / 100.0 # Convert percentage into 0.xx
df = df.resample('10Min').mean().dropna()
df['congestion'] = 1 - df['exp'] # Calculate congestion ratio
return df
def load_rain_data(dataset_path, table_index):
"""
Read rainfall dataset.
:param str dataset_path: Path to the rainfall dataset file
:param int table_index: Index of the table to read from
:return: Pandas DataFrame containing rainfall data
"""
import datetime
assert os.path.exists(dataset_path), 'Dataset file doesn\'t exist!'
translate_dict = {'区站号(字符)': 'station', '年(年)': 'year', '月(月)': 'month', '日(日)': 'day',
'时(时)': 'hour', '过去1小时降水量(毫米)': 'rain'}
df = pd.read_excel(pd.ExcelFile(dataset_path), table_index)
df = df.rename(translate_dict, axis=1).drop('station', axis=1)
df.index =
|
pd.to_datetime(df[['year', 'month', 'day', 'hour']])
|
pandas.to_datetime
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import seaborn as sns
# Load the dataset and set the column names
df = pd.read_csv('data/wine.csv')
df.columns = ['fixed acidity','volatile acidity','citric acid','residual sugar','chlorides','free sulfur dioxide','total sulfur dioxide','density','pH','sulphates','alcohol','quality']
print(df.head(5))
# Define a Principal Component Analysis object
pca = PCA(n_components=4)
# Standardise the dataset
df_scaled = StandardScaler().fit_transform(df)
# Obtain the principal components
pmod = pca.fit_transform(df_scaled)
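# The share of variance captured by each of the 4 components can be inspected via
# pca.explained_variance_ratio_ (standard scikit-learn attribute).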
df_final =
|
pd.DataFrame(pca.components_, columns=df.columns)
|
pandas.DataFrame
|
from datetime import datetime
from decimal import Decimal
from io import StringIO
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv
import pandas._testing as tm
from pandas.core.base import SpecificationError
import pandas.core.common as com
def test_repr():
# GH18203
result = repr(pd.Grouper(key="A", level="B"))
expected = "Grouper(key='A', level='B', axis=0, sort=False)"
assert result == expected
@pytest.mark.parametrize("dtype", ["int64", "int32", "float64", "float32"])
def test_basic(dtype):
data = Series(np.arange(9) // 3, index=np.arange(9), dtype=dtype)
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
for k, v in grouped:
assert len(v) == 3
agged = grouped.aggregate(np.mean)
assert agged[1] == 1
tm.assert_series_equal(agged, grouped.agg(np.mean)) # shorthand
tm.assert_series_equal(agged, grouped.mean())
tm.assert_series_equal(grouped.agg(np.sum), grouped.sum())
expected = grouped.apply(lambda x: x * x.sum())
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
tm.assert_series_equal(transformed, expected)
value_grouped = data.groupby(data)
tm.assert_series_equal(
value_grouped.aggregate(np.mean), agged, check_index_type=False
)
# complex agg
agged = grouped.aggregate([np.mean, np.std])
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate({"one": np.mean, "two": np.std})
group_constants = {0: 10, 1: 20, 2: 30}
agged = grouped.agg(lambda x: group_constants[x.name] + x.mean())
assert agged[1] == 21
# corner cases
msg = "Must produce aggregated value"
# exception raised is type Exception
with pytest.raises(Exception, match=msg):
grouped.aggregate(lambda x: x * 2)
def test_groupby_nonobject_dtype(mframe, df_mixed_floats):
key = mframe.index.codes[0]
grouped = mframe.groupby(key)
result = grouped.sum()
expected = mframe.groupby(key.astype("O")).sum()
tm.assert_frame_equal(result, expected)
# GH 3911, mixed frame non-conversion
df = df_mixed_floats.copy()
df["value"] = range(len(df))
def max_value(group):
return group.loc[group["value"].idxmax()]
applied = df.groupby("A").apply(max_value)
result = applied.dtypes
expected = Series(
[np.dtype("object")] * 2 + [np.dtype("float64")] * 2 + [np.dtype("int64")],
index=["A", "B", "C", "D", "value"],
)
tm.assert_series_equal(result, expected)
def test_groupby_return_type():
# GH2893, return a reduced type
df1 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 2, "val2": 27},
{"val1": 2, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df1.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
df2 = DataFrame(
[
{"val1": 1, "val2": 20},
{"val1": 1, "val2": 19},
{"val1": 1, "val2": 27},
{"val1": 1, "val2": 12},
]
)
def func(dataf):
return dataf["val2"] - dataf["val2"].mean()
with tm.assert_produces_warning(FutureWarning):
result = df2.groupby("val1", squeeze=True).apply(func)
assert isinstance(result, Series)
# GH3596, return a consistent type (regression in 0.11 from 0.10.1)
df = DataFrame([[1, 1], [1, 1]], columns=["X", "Y"])
with tm.assert_produces_warning(FutureWarning):
result = df.groupby("X", squeeze=False).count()
assert isinstance(result, DataFrame)
def test_inconsistent_return_type():
# GH5592
# inconsistent return type
df = DataFrame(
dict(
A=["Tiger", "Tiger", "Tiger", "Lamb", "Lamb", "Pony", "Pony"],
B=Series(np.arange(7), dtype="int64"),
C=date_range("20130101", periods=7),
)
)
def f(grp):
return grp.iloc[0]
expected = df.groupby("A").first()[["B"]]
result = df.groupby("A").apply(f)[["B"]]
tm.assert_frame_equal(result, expected)
def f(grp):
if grp.name == "Tiger":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Tiger"] = np.nan
tm.assert_frame_equal(result, e)
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["B"]]
e = expected.copy()
e.loc["Pony"] = np.nan
tm.assert_frame_equal(result, e)
# 5592 revisited, with datetimes
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0]
result = df.groupby("A").apply(f)[["C"]]
e = df.groupby("A").first()[["C"]]
e.loc["Pony"] = pd.NaT
tm.assert_frame_equal(result, e)
# scalar outputs
def f(grp):
if grp.name == "Pony":
return None
return grp.iloc[0].loc["C"]
result = df.groupby("A").apply(f)
e = df.groupby("A").first()["C"].copy()
e.loc["Pony"] = np.nan
e.name = None
tm.assert_series_equal(result, e)
def test_pass_args_kwargs(ts, tsframe):
def f(x, q=None, axis=0):
return np.percentile(x, q, axis=axis)
g = lambda x: np.percentile(x, 80, axis=0)
# Series
ts_grouped = ts.groupby(lambda x: x.month)
agg_result = ts_grouped.agg(np.percentile, 80, axis=0)
apply_result = ts_grouped.apply(np.percentile, 80, axis=0)
trans_result = ts_grouped.transform(np.percentile, 80, axis=0)
agg_expected = ts_grouped.quantile(0.8)
trans_expected = ts_grouped.transform(g)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
agg_result = ts_grouped.agg(f, q=80)
apply_result = ts_grouped.apply(f, q=80)
trans_result = ts_grouped.transform(f, q=80)
tm.assert_series_equal(agg_result, agg_expected)
tm.assert_series_equal(apply_result, agg_expected)
tm.assert_series_equal(trans_result, trans_expected)
# DataFrame
df_grouped = tsframe.groupby(lambda x: x.month)
agg_result = df_grouped.agg(np.percentile, 80, axis=0)
apply_result = df_grouped.apply(DataFrame.quantile, 0.8)
expected = df_grouped.quantile(0.8)
tm.assert_frame_equal(apply_result, expected, check_names=False)
tm.assert_frame_equal(agg_result, expected)
agg_result = df_grouped.agg(f, q=80)
apply_result = df_grouped.apply(DataFrame.quantile, q=0.8)
tm.assert_frame_equal(agg_result, expected)
tm.assert_frame_equal(apply_result, expected, check_names=False)
def test_len():
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
assert len(grouped) == len(df)
grouped = df.groupby([lambda x: x.year, lambda x: x.month])
expected = len({(x.year, x.month) for x in df.index})
assert len(grouped) == expected
# issue 11016
df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3]))
assert len(df.groupby(("a"))) == 0
assert len(df.groupby(("b"))) == 3
assert len(df.groupby(["a", "b"])) == 3
def test_basic_regression():
# regression
result = Series([1.0 * x for x in list(range(1, 10)) * 10])
data = np.random.random(1100) * 10.0
groupings = Series(data)
grouped = result.groupby(groupings)
grouped.mean()
@pytest.mark.parametrize(
"dtype", ["float64", "float32", "int64", "int32", "int16", "int8"]
)
def test_with_na_groups(dtype):
index = Index(np.arange(10))
values = Series(np.ones(10), index, dtype=dtype)
labels = Series(
[np.nan, "foo", "bar", "bar", np.nan, np.nan, "bar", "bar", np.nan, "foo"],
index=index,
)
# this SHOULD be an int
grouped = values.groupby(labels)
agged = grouped.agg(len)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
# assert issubclass(agged.dtype.type, np.integer)
# explicitly return a float from my function
def f(x):
return float(len(x))
agged = grouped.agg(f)
expected = Series([4, 2], index=["bar", "foo"])
tm.assert_series_equal(agged, expected, check_dtype=False)
assert issubclass(agged.dtype.type, np.dtype(dtype).type)
def test_indices_concatenation_order():
# GH 2808
def f1(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(levels=[[]] * 2, codes=[[]] * 2, names=["b", "c"])
res = DataFrame(columns=["a"], index=multiindex)
return res
else:
y = y.set_index(["b", "c"])
return y
def f2(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
return DataFrame()
else:
y = y.set_index(["b", "c"])
return y
def f3(x):
y = x[(x.b % 2) == 1] ** 2
if y.empty:
multiindex = MultiIndex(
levels=[[]] * 2, codes=[[]] * 2, names=["foo", "bar"]
)
res = DataFrame(columns=["a", "b"], index=multiindex)
return res
else:
return y
df = DataFrame({"a": [1, 2, 2, 2], "b": range(4), "c": range(5, 9)})
df2 = DataFrame({"a": [3, 2, 2, 2], "b": range(4), "c": range(5, 9)})
# correct result
result1 = df.groupby("a").apply(f1)
result2 = df2.groupby("a").apply(f1)
tm.assert_frame_equal(result1, result2)
# should fail (not the same number of levels)
msg = "Cannot concat indices that do not have the same number of levels"
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f2)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f2)
# should fail (incorrect shape)
with pytest.raises(AssertionError, match=msg):
df.groupby("a").apply(f3)
with pytest.raises(AssertionError, match=msg):
df2.groupby("a").apply(f3)
def test_attr_wrapper(ts):
grouped = ts.groupby(lambda x: x.weekday())
result = grouped.std()
expected = grouped.agg(lambda x: np.std(x, ddof=1))
tm.assert_series_equal(result, expected)
# this is pretty cool
result = grouped.describe()
expected = {name: gp.describe() for name, gp in grouped}
expected = DataFrame(expected).T
tm.assert_frame_equal(result, expected)
# get attribute
result = grouped.dtype
expected = grouped.agg(lambda x: x.dtype)
# make sure raises error
msg = "'SeriesGroupBy' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
getattr(grouped, "foo")
def test_frame_groupby(tsframe):
grouped = tsframe.groupby(lambda x: x.weekday())
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == 5
assert len(aggregated.columns) == 4
# by string
tscopy = tsframe.copy()
tscopy["weekday"] = [x.weekday() for x in tscopy.index]
stragged = tscopy.groupby("weekday").aggregate(np.mean)
tm.assert_frame_equal(stragged, aggregated, check_names=False)
# transform
grouped = tsframe.head(30).groupby(lambda x: x.weekday())
transformed = grouped.transform(lambda x: x - x.mean())
assert len(transformed) == 30
assert len(transformed.columns) == 4
# transform propagate
transformed = grouped.transform(lambda x: x.mean())
for name, group in grouped:
mean = group.mean()
for idx in group.index:
tm.assert_series_equal(transformed.xs(idx), mean, check_names=False)
# iterate
for weekday, group in grouped:
assert group.index[0].weekday() == weekday
# groups / group_indices
groups = grouped.groups
indices = grouped.indices
for k, v in groups.items():
samething = tsframe.index.take(indices[k])
assert (samething == v).all()
def test_frame_groupby_columns(tsframe):
mapping = {"A": 0, "B": 0, "C": 1, "D": 1}
grouped = tsframe.groupby(mapping, axis=1)
# aggregate
aggregated = grouped.aggregate(np.mean)
assert len(aggregated) == len(tsframe)
assert len(aggregated.columns) == 2
# transform
tf = lambda x: x - x.mean()
groupedT = tsframe.T.groupby(mapping, axis=0)
tm.assert_frame_equal(groupedT.transform(tf).T, grouped.transform(tf))
# iterate
for k, v in grouped:
assert len(v.columns) == 2
def test_frame_set_name_single(df):
grouped = df.groupby("A")
result = grouped.mean()
assert result.index.name == "A"
result = df.groupby("A", as_index=False).mean()
assert result.index.name != "A"
result = grouped.agg(np.mean)
assert result.index.name == "A"
result = grouped.agg({"C": np.mean, "D": np.std})
assert result.index.name == "A"
result = grouped["C"].mean()
assert result.index.name == "A"
result = grouped["C"].agg(np.mean)
assert result.index.name == "A"
result = grouped["C"].agg([np.mean, np.std])
assert result.index.name == "A"
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"foo": np.mean, "bar": np.std})
def test_multi_func(df):
col1 = df["A"]
col2 = df["B"]
grouped = df.groupby([col1.get, col2.get])
agged = grouped.mean()
expected = df.groupby(["A", "B"]).mean()
# TODO groupby get drops names
tm.assert_frame_equal(
agged.loc[:, ["C", "D"]], expected.loc[:, ["C", "D"]], check_names=False
)
# some "groups" with no data
df = DataFrame(
{
"v1": np.random.randn(6),
"v2": np.random.randn(6),
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
},
index=["one", "two", "three", "four", "five", "six"],
)
# only verify that it works for now
grouped = df.groupby(["k1", "k2"])
grouped.agg(np.sum)
def test_multi_key_multiple_functions(df):
grouped = df.groupby(["A", "B"])["C"]
agged = grouped.agg([np.mean, np.std])
expected = DataFrame({"mean": grouped.agg(np.mean), "std": grouped.agg(np.std)})
tm.assert_frame_equal(agged, expected)
def test_frame_multi_key_function_list():
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
grouped = data.groupby(["A", "B"])
funcs = [np.mean, np.std]
agged = grouped.agg(funcs)
expected = pd.concat(
[grouped["D"].agg(funcs), grouped["E"].agg(funcs), grouped["F"].agg(funcs)],
keys=["D", "E", "F"],
axis=1,
)
assert isinstance(agged.index, MultiIndex)
assert isinstance(expected.index, MultiIndex)
tm.assert_frame_equal(agged, expected)
@pytest.mark.parametrize("op", [lambda x: x.sum(), lambda x: x.mean()])
def test_groupby_multiple_columns(df, op):
data = df
grouped = data.groupby(["A", "B"])
result1 = op(grouped)
keys = []
values = []
for n1, gp1 in data.groupby("A"):
for n2, gp2 in gp1.groupby("B"):
keys.append((n1, n2))
values.append(op(gp2.loc[:, ["C", "D"]]))
mi = MultiIndex.from_tuples(keys, names=["A", "B"])
expected = pd.concat(values, axis=1).T
expected.index = mi
# a little bit crude
for col in ["C", "D"]:
result_col = op(grouped[col])
pivoted = result1[col]
exp = expected[col]
tm.assert_series_equal(result_col, exp)
tm.assert_series_equal(pivoted, exp)
# test single series works the same
result = data["C"].groupby([data["A"], data["B"]]).mean()
expected = data.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
def test_as_index_select_column():
# GH 5764
df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"])
result = df.groupby("A", as_index=False)["B"].get_group(1)
expected = pd.Series([2, 4], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby("A", as_index=False)["B"].apply(lambda x: x.cumsum())
expected = pd.Series(
[2, 6, 6], name="B", index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 2)])
)
tm.assert_series_equal(result, expected)
def test_groupby_as_index_select_column_sum_empty_df():
# GH 35246
df = DataFrame(columns=["A", "B", "C"])
left = df.groupby(by="A", as_index=False)["B"].sum()
assert type(left) is DataFrame
assert left.to_dict() == {"A": {}, "B": {}}
def test_groupby_as_index_agg(df):
grouped = df.groupby("A", as_index=False)
# single-key
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
grouped = df.groupby("A", as_index=True)
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped["C"].agg({"Q": np.sum})
# multi-key
grouped = df.groupby(["A", "B"], as_index=False)
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
result2 = grouped.agg({"C": np.mean, "D": np.sum})
expected2 = grouped.mean()
expected2["D"] = grouped.sum()["D"]
tm.assert_frame_equal(result2, expected2)
expected3 = grouped["C"].sum()
expected3 = DataFrame(expected3).rename(columns={"C": "Q"})
result3 = grouped["C"].agg({"Q": np.sum})
tm.assert_frame_equal(result3, expected3)
# GH7115 & GH8112 & GH8582
df = DataFrame(np.random.randint(0, 100, (50, 3)), columns=["jim", "joe", "jolie"])
ts = Series(np.random.randint(5, 10, 50), name="jim")
gr = df.groupby(ts)
gr.nth(0) # invokes set_selection_from_grouper internally
tm.assert_frame_equal(gr.apply(sum), df.groupby(ts).apply(sum))
for attr in ["mean", "max", "count", "idxmax", "cumsum", "all"]:
gr = df.groupby(ts, as_index=False)
left = getattr(gr, attr)()
gr = df.groupby(ts.values, as_index=True)
right = getattr(gr, attr)().reset_index(drop=True)
tm.assert_frame_equal(left, right)
def test_ops_not_as_index(reduction_func):
# GH 10355, 21090
# Using as_index=False should not modify grouped column
if reduction_func in ("corrwith",):
pytest.skip("Test not applicable")
if reduction_func in ("nth", "ngroup",):
pytest.skip("Skip until behavior is determined (GH #5755)")
df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"])
expected = getattr(df.groupby("a"), reduction_func)()
if reduction_func == "size":
expected = expected.rename("size")
expected = expected.reset_index()
g = df.groupby("a", as_index=False)
result = getattr(g, reduction_func)()
tm.assert_frame_equal(result, expected)
result = g.agg(reduction_func)
tm.assert_frame_equal(result, expected)
result = getattr(g["b"], reduction_func)()
tm.assert_frame_equal(result, expected)
result = g["b"].agg(reduction_func)
tm.assert_frame_equal(result, expected)
def test_as_index_series_return_frame(df):
grouped = df.groupby("A", as_index=False)
grouped2 = df.groupby(["A", "B"], as_index=False)
result = grouped["C"].agg(np.sum)
expected = grouped.agg(np.sum).loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].agg(np.sum)
expected2 = grouped2.agg(np.sum).loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
result = grouped["C"].sum()
expected = grouped.sum().loc[:, ["A", "C"]]
assert isinstance(result, DataFrame)
tm.assert_frame_equal(result, expected)
result2 = grouped2["C"].sum()
expected2 = grouped2.sum().loc[:, ["A", "B", "C"]]
assert isinstance(result2, DataFrame)
tm.assert_frame_equal(result2, expected2)
def test_as_index_series_column_slice_raises(df):
# GH15072
grouped = df.groupby("A", as_index=False)
msg = r"Column\(s\) C already selected"
with pytest.raises(IndexError, match=msg):
grouped["C"].__getitem__("D")
def test_groupby_as_index_cython(df):
data = df
# single-key
grouped = data.groupby("A", as_index=False)
result = grouped.mean()
expected = data.groupby(["A"]).mean()
expected.insert(0, "A", expected.index)
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
# multi-key
grouped = data.groupby(["A", "B"], as_index=False)
result = grouped.mean()
expected = data.groupby(["A", "B"]).mean()
arrays = list(zip(*expected.index.values))
expected.insert(0, "A", arrays[0])
expected.insert(1, "B", arrays[1])
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_series_scalar(df):
grouped = df.groupby(["A", "B"], as_index=False)
# GH #421
result = grouped["C"].agg(len)
expected = grouped.agg(len).loc[:, ["A", "B", "C"]]
tm.assert_frame_equal(result, expected)
def test_groupby_as_index_corner(df, ts):
msg = "as_index=False only valid with DataFrame"
with pytest.raises(TypeError, match=msg):
ts.groupby(lambda x: x.weekday(), as_index=False)
msg = "as_index=False only valid for axis=0"
with pytest.raises(ValueError, match=msg):
df.groupby(lambda x: x.lower(), as_index=False, axis=1)
def test_groupby_multiple_key(df):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day])
agged = grouped.sum()
tm.assert_almost_equal(df.values, agged.values)
grouped = df.T.groupby(
[lambda x: x.year, lambda x: x.month, lambda x: x.day], axis=1
)
agged = grouped.agg(lambda x: x.sum())
tm.assert_index_equal(agged.index, df.columns)
tm.assert_almost_equal(df.T.values, agged.values)
agged = grouped.agg(lambda x: x.sum())
tm.assert_almost_equal(df.T.values, agged.values)
def test_groupby_multi_corner(df):
# test that having an all-NA column doesn't mess you up
df = df.copy()
df["bad"] = np.nan
agged = df.groupby(["A", "B"]).mean()
expected = df.groupby(["A", "B"]).mean()
expected["bad"] = np.nan
tm.assert_frame_equal(agged, expected)
def test_omit_nuisance(df):
grouped = df.groupby("A")
result = grouped.mean()
expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()
tm.assert_frame_equal(result, expected)
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
df = df.loc[:, ["A", "C", "D"]]
df["E"] = datetime.now()
grouped = df.groupby("A")
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_frame_equal(result, expected)
# won't work with axis = 1
grouped = df.groupby({"A": 0, "C": 0, "D": 1, "E": 1}, axis=1)
msg = "reduction operation 'sum' not allowed for this dtype"
with pytest.raises(TypeError, match=msg):
grouped.agg(lambda x: x.sum(0, numeric_only=False))
def test_omit_nuisance_python_multiple(three_group):
grouped = three_group.groupby(["A", "B"])
agged = grouped.agg(np.mean)
exp = grouped.mean()
tm.assert_frame_equal(agged, exp)
def test_empty_groups_corner(mframe):
# handle empty groups
df = DataFrame(
{
"k1": np.array(["b", "b", "b", "a", "a", "a"]),
"k2": np.array(["1", "1", "1", "2", "2", "2"]),
"k3": ["foo", "bar"] * 3,
"v1": np.random.randn(6),
"v2": np.random.randn(6),
}
)
grouped = df.groupby(["k1", "k2"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped = mframe[3:5].groupby(level=0)
agged = grouped.apply(lambda x: x.mean())
agged_A = grouped["A"].apply(np.mean)
tm.assert_series_equal(agged["A"], agged_A)
assert agged.index.name == "first"
def test_nonsense_func():
df = DataFrame([0])
msg = r"unsupported operand type\(s\) for \+: 'int' and 'str'"
with pytest.raises(TypeError, match=msg):
df.groupby(lambda x: x + "foo")
def test_wrap_aggregated_output_multindex(mframe):
df = mframe.T
df["baz", "two"] = "peekaboo"
keys = [np.array([0, 0, 1]), np.array([0, 0, 1])]
agged = df.groupby(keys).agg(np.mean)
assert isinstance(agged.columns, MultiIndex)
def aggfun(ser):
if ser.name == ("foo", "one"):
raise TypeError
else:
return ser.sum()
agged2 = df.groupby(keys).aggregate(aggfun)
assert len(agged2.columns) + 1 == len(df.columns)
def test_groupby_level_apply(mframe):
result = mframe.groupby(level=0).count()
assert result.index.name == "first"
result = mframe.groupby(level=1).count()
assert result.index.name == "second"
result = mframe["A"].groupby(level=0).count()
assert result.index.name == "first"
def test_groupby_level_mapper(mframe):
deleveled = mframe.reset_index()
mapper0 = {"foo": 0, "bar": 0, "baz": 1, "qux": 1}
mapper1 = {"one": 0, "two": 0, "three": 1}
result0 = mframe.groupby(mapper0, level=0).sum()
result1 = mframe.groupby(mapper1, level=1).sum()
mapped_level0 = np.array([mapper0.get(x) for x in deleveled["first"]])
mapped_level1 = np.array([mapper1.get(x) for x in deleveled["second"]])
expected0 = mframe.groupby(mapped_level0).sum()
expected1 = mframe.groupby(mapped_level1).sum()
expected0.index.name, expected1.index.name = "first", "second"
tm.assert_frame_equal(result0, expected0)
tm.assert_frame_equal(result1, expected1)
def test_groupby_level_nonmulti():
# GH 1313, GH 13901
s = Series([1, 2, 3, 10, 4, 5, 20, 6], Index([1, 2, 3, 1, 4, 5, 2, 6], name="foo"))
expected = Series([11, 22, 3, 4, 5, 6], Index(range(1, 7), name="foo"))
result = s.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[0]).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=-1).sum()
tm.assert_series_equal(result, expected)
result = s.groupby(level=[-1]).sum()
tm.assert_series_equal(result, expected)
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=1)
with pytest.raises(ValueError, match=msg):
s.groupby(level=-2)
msg = "No group keys passed!"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[])
msg = "multiple levels only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 0])
with pytest.raises(ValueError, match=msg):
s.groupby(level=[0, 1])
msg = "level > 0 or level < -1 only valid with MultiIndex"
with pytest.raises(ValueError, match=msg):
s.groupby(level=[1])
def test_groupby_complex():
# GH 12902
a = Series(data=np.arange(4) * (1 + 2j), index=[0, 0, 1, 1])
expected = Series((1 + 2j, 5 + 10j))
result = a.groupby(level=0).sum()
tm.assert_series_equal(result, expected)
result = a.sum(level=0)
tm.assert_series_equal(result, expected)
def test_groupby_series_indexed_differently():
s1 = Series(
[5.0, -9.0, 4.0, 100.0, -5.0, 55.0, 6.7],
index=Index(["a", "b", "c", "d", "e", "f", "g"]),
)
s2 = Series(
[1.0, 1.0, 4.0, 5.0, 5.0, 7.0], index=Index(["a", "b", "d", "f", "g", "h"])
)
grouped = s1.groupby(s2)
agged = grouped.mean()
exp = s1.groupby(s2.reindex(s1.index).get).mean()
tm.assert_series_equal(agged, exp)
def test_groupby_with_hier_columns():
tuples = list(
zip(
*[
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
)
)
index = MultiIndex.from_tuples(tuples)
columns = MultiIndex.from_tuples(
[("A", "cat"), ("B", "dog"), ("B", "cat"), ("A", "dog")]
)
df = DataFrame(np.random.randn(8, 4), index=index, columns=columns)
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).mean()
tm.assert_index_equal(result.index, df.index)
result = df.groupby(level=0).agg(np.mean)
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0).apply(lambda x: x.mean())
tm.assert_index_equal(result.columns, columns)
result = df.groupby(level=0, axis=1).agg(lambda x: x.mean(1))
tm.assert_index_equal(result.columns, Index(["A", "B"]))
tm.assert_index_equal(result.index, df.index)
# add a nuisance column
sorted_columns, _ = columns.sortlevel(0)
df["A", "foo"] = "bar"
result = df.groupby(level=0).mean()
tm.assert_index_equal(result.columns, df.columns[:-1])
def test_grouping_ndarray(df):
grouped = df.groupby(df["A"].values)
result = grouped.sum()
expected = df.groupby("A").sum()
tm.assert_frame_equal(
result, expected, check_names=False
) # Note: no names when grouping by value
def test_groupby_wrong_multi_labels():
data = """index,foo,bar,baz,spam,data
0,foo1,bar1,baz1,spam2,20
1,foo1,bar2,baz1,spam3,30
2,foo2,bar2,baz1,spam2,40
3,foo1,bar1,baz2,spam1,50
4,foo3,bar1,baz2,spam1,60"""
data = read_csv(StringIO(data), index_col=0)
grouped = data.groupby(["foo", "bar", "baz", "spam"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_groupby_series_with_name(df):
result = df.groupby(df["A"]).mean()
result2 = df.groupby(df["A"], as_index=False).mean()
assert result.index.name == "A"
assert "A" in result2
result = df.groupby([df["A"], df["B"]]).mean()
result2 = df.groupby([df["A"], df["B"]], as_index=False).mean()
assert result.index.names == ("A", "B")
assert "A" in result2
assert "B" in result2
def test_seriesgroupby_name_attr(df):
# GH 6265
result = df.groupby("A")["C"]
assert result.count().name == "C"
assert result.mean().name == "C"
testFunc = lambda x: np.sum(x) * 2
assert result.agg(testFunc).name == "C"
def test_consistency_name():
# GH 12363
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": np.random.randn(8) + 1.0,
"D": np.arange(8),
}
)
expected = df.groupby(["A"]).B.count()
result = df.B.groupby(df.A).count()
tm.assert_series_equal(result, expected)
def test_groupby_name_propagation(df):
# GH 6124
def summarize(df, name=None):
return Series({"count": 1, "mean": 2, "omissions": 3}, name=name)
def summarize_random_name(df):
# Provide a different name for each Series. In this case, groupby
# should not attempt to propagate the Series name since they are
# inconsistent.
return Series({"count": 1, "mean": 2, "omissions": 3}, name=df.iloc[0]["A"])
metrics = df.groupby("A").apply(summarize)
assert metrics.columns.name is None
metrics = df.groupby("A").apply(summarize, "metrics")
assert metrics.columns.name == "metrics"
metrics = df.groupby("A").apply(summarize_random_name)
assert metrics.columns.name is None
def test_groupby_nonstring_columns():
df = DataFrame([np.arange(10) for x in range(10)])
grouped = df.groupby(0)
result = grouped.mean()
expected = df.groupby(df[0]).mean()
tm.assert_frame_equal(result, expected)
def test_groupby_mixed_type_columns():
# GH 13432, unorderable types in py3
df = DataFrame([[0, 1, 2]], columns=["A", "B", 0])
expected = DataFrame([[1, 2]], columns=["B", 0], index=Index([0], name="A"))
result = df.groupby("A").first()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").sum()
tm.assert_frame_equal(result, expected)
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:Mean of:RuntimeWarning")
def test_cython_grouper_series_bug_noncontig():
arr = np.empty((100, 100))
arr.fill(np.nan)
obj = Series(arr[:, 0])
inds = np.tile(range(10), 10)
result = obj.groupby(inds).agg(Series.median)
assert result.isna().all()
def test_series_grouper_noncontig_index():
index = Index(tm.rands_array(10, 100))
values = Series(np.random.randn(50), index=index[::2])
labels = np.random.randint(0, 5, 50)
# it works!
grouped = values.groupby(labels)
# accessing the index elements causes segfault
f = lambda x: len(set(map(id, x.index)))
grouped.agg(f)
def test_convert_objects_leave_decimal_alone():
s = Series(range(5))
labels = np.array(["a", "b", "c", "d", "e"], dtype="O")
def convert_fast(x):
return Decimal(str(x.mean()))
def convert_force_pure(x):
# base will be length 0
assert len(x.values.base) > 0
return Decimal(str(x.mean()))
grouped = s.groupby(labels)
result = grouped.agg(convert_fast)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
result = grouped.agg(convert_force_pure)
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_groupby_dtype_inference_empty():
# GH 6733
df = DataFrame({"x": [], "range": np.arange(0, dtype="int64")})
assert df["x"].dtype == np.float64
result = df.groupby("x").first()
exp_index = Index([], name="x", dtype=np.float64)
expected = DataFrame({"range": Series([], index=exp_index, dtype="int64")})
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_groupby_list_infer_array_like(df):
result = df.groupby(list(df["A"])).mean()
expected = df.groupby(df["A"]).mean()
tm.assert_frame_equal(result, expected, check_names=False)
with pytest.raises(KeyError, match=r"^'foo'$"):
df.groupby(list(df["A"][:-1]))
# pathological case of ambiguity
df = DataFrame({"foo": [0, 1], "bar": [3, 4], "val": np.random.randn(2)})
result = df.groupby(["foo", "bar"]).mean()
expected = df.groupby([df["foo"], df["bar"]]).mean()[["val"]]
def test_groupby_keys_same_size_as_index():
# GH 11185
freq = "s"
index = pd.date_range(
start=pd.Timestamp("2015-09-29T11:34:44-0700"), periods=2, freq=freq
)
df = pd.DataFrame([["A", 10], ["B", 15]], columns=["metric", "values"], index=index)
result = df.groupby([pd.Grouper(level=0, freq=freq), "metric"]).mean()
expected = df.set_index([df.index, "metric"])
tm.assert_frame_equal(result, expected)
def test_groupby_one_row():
# GH 11741
msg = r"^'Z'$"
df1 = pd.DataFrame(np.random.randn(1, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df1.groupby("Z")
df2 = pd.DataFrame(np.random.randn(2, 4), columns=list("ABCD"))
with pytest.raises(KeyError, match=msg):
df2.groupby("Z")
def test_groupby_nat_exclude():
# GH 6992
df = pd.DataFrame(
{
"values": np.random.randn(8),
"dt": [
np.nan,
pd.Timestamp("2013-01-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-02-01"),
np.nan,
pd.Timestamp("2013-01-01"),
],
"str": [np.nan, "a", np.nan, "a", np.nan, "a", np.nan, "b"],
}
)
grouped = df.groupby("dt")
expected = [pd.Index([1, 7]), pd.Index([3, 5])]
keys = sorted(grouped.groups.keys())
assert len(keys) == 2
for k, e in zip(keys, expected):
# grouped.groups keys are np.datetime64 with system tz
# not to be affected by tz, only compare values
tm.assert_index_equal(grouped.groups[k], e)
# confirm obj is not filtered
tm.assert_frame_equal(grouped.grouper.groupings[0].obj, df)
assert grouped.ngroups == 2
expected = {
Timestamp("2013-01-01 00:00:00"): np.array([1, 7], dtype=np.intp),
Timestamp("2013-02-01 00:00:00"): np.array([3, 5], dtype=np.intp),
}
for k in grouped.indices:
|
tm.assert_numpy_array_equal(grouped.indices[k], expected[k])
|
pandas._testing.assert_numpy_array_equal
|
import re
from datetime import date
import pandas as pd
from datautils import extend_data_range, apply_colour_map, colour_data
from test_colourutils import colour_regex
def test_extend_data_range():
s = pd.Series({
date(2021, 2, 27): 'a',
date(2021, 3, 1): 'b'
})
s = extend_data_range(s)
assert s.index.min() == date(2021, 2, 1)
assert s.index.max() == date(2021, 3, 31)
t = pd.Series({
date(2021, 1, 1): 'a',
date(2021, 1, 31): 'b'
})
t = extend_data_range(t)
assert t.index.min() == date(2021, 1, 1)
assert t.index.max() == date(2021, 1, 31)
u = pd.Series({
'2021-01-31': 'a',
'2021-03-01': 'b',
'2021-02-01': 'a',
'2021-01-15': 'b'
})
u.index = pd.to_datetime(u.index)
u = extend_data_range(u)
assert pd.to_datetime(u.index.values[0]) == date(2021, 1, 1)
assert
|
pd.to_datetime(u.index.values[-1])
|
pandas.to_datetime
|
import pickle
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scipy.stats as scp_stats
import pandas as pd
import matplotlib
matplotlib.rcParams.update({'font.size': 15})
def box_plot_data(tot_df, label, units, type_order, type_color, y_lim_top, out_fig_name):
# Drop NaN elements.
tmp_df = tot_df[tot_df[label].notnull()]
# Arrange data into a list of numpy arrays.
type_data = []
for type_key in type_order:
type_data.append(tmp_df[tmp_df['type']==type_key][label].values)
fig, ax = plt.subplots(figsize = (7, 5))
box = ax.boxplot(type_data, patch_artist=True, sym='c.') # notch=True
for patch, color in zip(box['boxes'], [type_color[type_key] for type_key in type_order]):
patch.set_facecolor(color)
for i, type_key in enumerate(type_order):
ax.errorbar([i+1], [type_data[i].mean()], yerr=[type_data[i].std() / np.sqrt(1.0 * type_data[i].size)], marker='o', ms=8, color='k', linewidth=2, capsize=5, markeredgewidth=2, ecolor='k', elinewidth=2)
ind = np.where(type_data[i] > y_lim_top)[0]
ax.annotate(u'$\u2191$'+'\n%d/%d' % (ind.size, type_data[i].size), xy=(i+1.2, 1.0*y_lim_top), fontsize=12)
ax.set_ylim((0.0, y_lim_top))
ax.set_xticks(range(1, len(type_order)+1))
ax.set_xticklabels(type_order)
if (units == ''):
ax.set_ylabel('%s' % (label))
else:
ax.set_ylabel('%s (%s)' % (label, units))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(size=10)
plt.savefig(out_fig_name, format='eps')
plt.show()
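# Illustrative usage sketch (synthetic data; the column values, colours and
# output file name below are assumptions for demonstration only):
# demo_df = pd.DataFrame({'type': ['Scnn1a'] * 20 + ['PV1'] * 20,
#                         'CV_ori': np.random.rand(40)})
# box_plot_data(demo_df, 'CV_ori', '', ['Scnn1a', 'PV1'],
#               {'Scnn1a': 'darkorange', 'PV1': 'blue'}, 1.0, 'demo_CV_ori.eps')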
cell_db_path = '/allen/aibs/mat/antona/network/14-simulations/9-network/analysis/'
# Decide which systems we are doing analysis for.
sys_dict = {}
# sys_dict['ll1'] = { 'cells_file': cell_db_path + '../build/ll1.csv', 'f_out': cell_db_path + 'Ori/ll1_rates.npy', 'f_out_pref': cell_db_path + 'Ori/ll1_pref_stat.csv'}
# sys_dict['ll2'] = { 'cells_file': cell_db_path + '../build/ll2.csv', 'f_out': cell_db_path + 'Ori/ll2_rates.npy', 'f_out_pref': cell_db_path + 'Ori/ll2_pref_stat.csv'}
# sys_dict['ll3'] = { 'cells_file': cell_db_path + '../build/ll3.csv', 'f_out': cell_db_path + 'Ori/ll3_rates.npy', 'f_out_pref': cell_db_path + 'Ori/ll3_pref_stat.csv'}
#sys_dict['rl1'] = { 'cells_file': '../build/rl1.csv', 'f_out': 'Ori/rl1_rates.npy', 'f_out_pref': 'Ori/rl1_pref_stat.csv'}
#sys_dict['rl2'] = { 'cells_file': '../build/rl2.csv', 'f_out': 'Ori/rl2_rates.npy', 'f_out_pref': 'Ori/rl2_pref_stat.csv'}
#sys_dict['rl3'] = { 'cells_file': '../build/rl3.csv', 'f_out': 'Ori/rl3_rates.npy', 'f_out_pref': 'Ori/rl3_pref_stat.csv'}
#sys_dict['lr1'] = { 'cells_file': '../build/lr1.csv', 'f_out': 'Ori/lr1_rates.npy', 'f_out_pref': 'Ori/lr1_pref_stat.csv'}
#sys_dict['lr2'] = { 'cells_file': '../build/lr2.csv', 'f_out': 'Ori/lr2_rates.npy', 'f_out_pref': 'Ori/lr2_pref_stat.csv'}
#sys_dict['lr3'] = { 'cells_file': '../build/lr3.csv', 'f_out': 'Ori/lr3_rates.npy', 'f_out_pref': 'Ori/lr3_pref_stat.csv'}
#sys_dict['rr1'] = { 'cells_file': '../build/rr1.csv', 'f_out': 'Ori/rr1_rates.npy', 'f_out_pref': 'Ori/rr1_pref_stat.csv'}
#sys_dict['rr2'] = { 'cells_file': '../build/rr2.csv', 'f_out': 'Ori/rr2_rates.npy', 'f_out_pref': 'Ori/rr2_pref_stat.csv'}
#sys_dict['rr3'] = { 'cells_file': '../build/rr3.csv', 'f_out': 'Ori/rr3_rates.npy', 'f_out_pref': 'Ori/rr3_pref_stat.csv'}
#sys_dict['ll2_TF4Hz'] = { 'cells_file': '../build/ll2.csv', 'f_out': 'Ori/ll2_rates_4Hz.npy', 'f_out_pref': 'Ori/ll2_pref_stat_4Hz.csv' }
# sys_dict['ll1_LIF'] = { 'cells_file': cell_db_path + '../build/ll1.csv', 'f_out': '../analysis_intFire1/analysis_ll/Ori/ll1_rates.npy', 'f_out_pref': '../analysis_intFire1/analysis_ll/Ori/ll1_pref_stat.csv'}
# sys_dict['ll2_LIF'] = { 'cells_file': cell_db_path + '../build/ll2.csv', 'f_out': '../analysis_intFire1/analysis_ll/Ori/ll2_rates.npy', 'f_out_pref': '../analysis_intFire1/analysis_ll/Ori/ll2_pref_stat.csv'}
# sys_dict['ll3_LIF'] = { 'cells_file': cell_db_path + '../build/ll3.csv', 'f_out': '../analysis_intFire1/analysis_ll/Ori/ll3_rates.npy', 'f_out_pref': '../analysis_intFire1/analysis_ll/Ori/ll3_pref_stat.csv'}
#sys_dict['rl1_LIF'] = { 'cells_file': '../build/rl1.csv', 'f_out': 'Ori_LIF/rl1_rates.npy', 'f_out_pref': 'Ori_LIF/rl1_pref_stat.csv'}
#sys_dict['rl2_LIF'] = { 'cells_file': '../build/rl2.csv', 'f_out': 'Ori_LIF/rl2_rates.npy', 'f_out_pref': 'Ori_LIF/rl2_pref_stat.csv'}
#sys_dict['rl3_LIF'] = { 'cells_file': '../build/rl3.csv', 'f_out': 'Ori_LIF/rl3_rates.npy', 'f_out_pref': 'Ori_LIF/rl3_pref_stat.csv'}
#sys_dict['lr1_LIF'] = { 'cells_file': '../build/lr1.csv', 'f_out': 'Ori_LIF/lr1_rates.npy', 'f_out_pref': 'Ori_LIF/lr1_pref_stat.csv'}
#sys_dict['lr2_LIF'] = { 'cells_file': '../build/lr2.csv', 'f_out': 'Ori_LIF/lr2_rates.npy', 'f_out_pref': 'Ori_LIF/lr2_pref_stat.csv'}
#sys_dict['lr3_LIF'] = { 'cells_file': '../build/lr3.csv', 'f_out': 'Ori_LIF/lr3_rates.npy', 'f_out_pref': 'Ori_LIF/lr3_pref_stat.csv'}
#sys_dict['rr1_LIF'] = { 'cells_file': '../build/rr1.csv', 'f_out': 'Ori_LIF/rr1_rates.npy', 'f_out_pref': 'Ori_LIF/rr1_pref_stat.csv'}
#sys_dict['rr2_LIF'] = { 'cells_file': '../build/rr2.csv', 'f_out': 'Ori_LIF/rr2_rates.npy', 'f_out_pref': 'Ori_LIF/rr2_pref_stat.csv'}
#sys_dict['rr3_LIF'] = { 'cells_file': '../build/rr3.csv', 'f_out': 'Ori_LIF/rr3_rates.npy', 'f_out_pref': 'Ori_LIF/rr3_pref_stat.csv'}
sys_dict['ll1_LIF'] = { 'cells_file': cell_db_path + '../build/ll1.csv', 'f_out': '../analysis_intFire4/analysis_ll/Ori/ll1_rates.npy', 'f_out_pref': '../analysis_intFire4/analysis_ll/Ori/ll1_pref_stat.csv'}
sys_dict['ll2_LIF'] = { 'cells_file': cell_db_path + '../build/ll2.csv', 'f_out': '../analysis_intFire4/analysis_ll/Ori/ll2_rates.npy', 'f_out_pref': '../analysis_intFire4/analysis_ll/Ori/ll2_pref_stat.csv'}
sys_dict['ll3_LIF'] = { 'cells_file': cell_db_path + '../build/ll3.csv', 'f_out': '../analysis_intFire4/analysis_ll/Ori/ll3_rates.npy', 'f_out_pref': '../analysis_intFire4/analysis_ll/Ori/ll3_pref_stat.csv'}
# result_fig_prefix = 'Ori/new_Ori_bio_ll'
# result_fig_prefix = 'Ori/new_Ori_lif1_ll'
result_fig_prefix = 'Ori/new_Ori_lif4_ll'
result_fig_CV_ori = result_fig_prefix + '_CV_ori.eps'
result_fig_DSI = result_fig_prefix + '_DSI.eps'
type_color = {'Scnn1a': 'darkorange', 'Rorb': 'red', 'Nr5a1': 'magenta', 'PV1': 'blue', 'PV2': 'cyan', 'AnL4E': 'gray', 'AwL4E': 'gray', 'AnI': 'gray', 'AwI': 'gray'}
type_order = ['Scnn1a', 'Rorb', 'Nr5a1', 'AnL4E', 'AwL4E', 'PV1', 'PV2', 'AnI', 'AwI']
# Read files with OSI and DSI from simulations.
sim_df = pd.DataFrame()
for sys_name in sys_dict.keys():
tmp_df = pd.read_csv(sys_dict[sys_name]['f_out_pref'], sep=' ')
cells_df = pd.read_csv(sys_dict[sys_name]['cells_file'], sep=' ')
cells_df_1 = pd.DataFrame()
cells_df_1['id'] = cells_df['index'].values
cells_df_1['type'] = cells_df['type'].values
tmp_df = pd.merge(tmp_df, cells_df_1, on='id', how='inner')
# Combine dataframes from all systems into one file.
sim_df = pd.concat([sim_df, tmp_df], axis=0)
sim_df_1 = pd.DataFrame()
sim_df_1['gid'] = sim_df['id'].values
sim_df_1['type'] = sim_df['type'].values
sim_df_1['CV_ori'] = sim_df['CV_ori'].values
sim_df_1['DSI'] = sim_df['DSI'].values
# Read file with OSI and DSI from experiments.
exp_f = { 'AnL4E': '/allen/aibs/mat/antona/experimental_data/ephys_Sev/2016_paper_data/gratings/ANL4Exc.csv',
'AwL4E': '/allen/aibs/mat/antona/experimental_data/ephys_Sev/2016_paper_data/gratings/AWL4Exc.csv',
'AnI': '/allen/aibs/mat/antona/experimental_data/ephys_Sev/2016_paper_data/gratings/ANInh.csv',
'AwI': '/allen/aibs/mat/antona/experimental_data/ephys_Sev/2016_paper_data/gratings/AWInh.csv' }
exp_df = pd.DataFrame()
for exp_key in exp_f:
tmp_df =
|
pd.read_csv(exp_f[exp_key], sep=',')
|
pandas.read_csv
|
from openff.toolkit.typing.engines.smirnoff import ForceField
from openff.toolkit.topology import Molecule, Topology
from biopandas.pdb import PandasPdb
import matplotlib.pyplot as plt
from operator import itemgetter
from mendeleev import element
from simtk.openmm import app
from scipy import optimize
import subprocess as sp
from sys import stdout
import pandas as pd
import numpy as np
import statistics
import itertools
import parmed
import pickle
import shutil
import simtk
import scipy
import time
import math
import sys
import ast
import re
import os
BOHRS_PER_ANGSTROM = 0.529
HARTREE_PER_KCAL_MOL = 627.509391
# kcal/mol to kJ/mol (combined with ANGSTROMS_PER_NM**2, this converts
# kcal/mol*A^2 force constants to kJ/mol*nm^2)
KCAL_MOL_PER_KJ_MOL = 4.184
ANGSTROMS_PER_NM = 10.0
RADIANS_PER_DEGREE = np.pi / 180.0
method_basis_scale_dict = {
"HF STO-3G": 0.817,
"HF 3-21G": 0.906,
"HF 3-21G*": 0.903,
"HF 6-31G": 0.903,
"HF 6-31G*": 0.899,
"HF 6-31G**": 0.903,
"HF 6-31+G**": 0.904,
"HF 6-311G*": 0.904,
"HF 6-311G**": 0.909,
"HF TZVP": 0.909,
"HF cc-pVDZ": 0.908,
"HF cc-pVTZ": 0.91,
"HF cc-pVQZ": 0.908,
"HF aug-cc-pVDZ": 0.911,
"HF aug-cc-pVTZ": 0.91,
"HF aug-cc-pVQZ": 0.909,
"HF daug-cc-pVDZ": 0.912,
"HF daug-cc-pVTZ": 0.905,
"ROHF 3-21G": 0.907,
"ROHF 3-21G*": 0.909,
"ROHF 6-31G": 0.895,
"ROHF 6-31G*": 0.89,
"ROHF 6-31G**": 0.855,
"ROHF 6-31+G**": 0.856,
"ROHF 6-311G*": 0.856,
"ROHF 6-311G**": 0.913,
"ROHF cc-pVDZ": 0.861,
"ROHF cc-pVTZ": 0.901,
"LSDA STO-3G": 0.896,
"LSDA 3-21G": 0.984,
"LSDA 3-21G*": 0.982,
"LSDA 6-31G": 0.98,
"LSDA 6-31G*": 0.981,
"LSDA 6-31G**": 0.981,
"LSDA 6-31+G**": 0.985,
"LSDA 6-311G*": 0.984,
"LSDA 6-311G**": 0.988,
"LSDA TZVP": 0.988,
"LSDA cc-pVDZ": 0.989,
"LSDA cc-pVTZ": 0.989,
"LSDA aug-cc-pVDZ": 0.989,
"LSDA aug-cc-pVTZ": 0.991,
"BLYP STO-3G": 0.925,
"BLYP 3-21G": 0.995,
"BLYP 3-21G*": 0.994,
"BLYP 6-31G": 0.992,
"BLYP 6-31G*": 0.992,
"BLYP 6-31G**": 0.992,
"BLYP 6-31+G**": 0.995,
"BLYP 6-311G*": 0.998,
"BLYP 6-311G**": 0.996,
"BLYP TZVP": 0.998,
"BLYP cc-pVDZ": 1.002,
"BLYP cc-pVTZ": 0.997,
"BLYP aug-cc-pVDZ": 0.998,
"BLYP aug-cc-pVTZ": 0.997,
"B1B95 STO-3G": 0.883,
"B1B95 3-21G": 0.957,
"B1B95 3-21G*": 0.955,
"B1B95 6-31G": 0.954,
"B1B95 6-31G*": 0.949,
"B1B95 6-31G**": 0.955,
"B1B95 6-31+G**": 0.957,
"B1B95 6-311G*": 0.959,
"B1B95 6-311G**": 0.96,
"B1B95 TZVP": 0.957,
"B1B95 cc-pVDZ": 0.961,
"B1B95 cc-pVTZ": 0.957,
"B1B95 aug-cc-pVDZ": 0.958,
"B1B95 aug-cc-pVTZ": 0.959,
"B3LYP STO-3G": 0.892,
"B3LYP 3-21G": 0.965,
"B3LYP 3-21G*": 0.962,
"B3LYP 6-31G": 0.962,
"B3LYP 6-31G*": 0.96,
"B3LYP 6-31G**": 0.961,
"B3LYP 6-31+G**": 0.964,
"B3LYP 6-311G*": 0.966,
"B3LYP 6-311G**": 0.967,
"B3LYP TZVP": 0.965,
"B3LYP cc-pVDZ": 0.97,
"B3LYP cc-pVTZ": 0.967,
"B3LYP cc-pVQZ": 0.969,
"B3LYP aug-cc-pVDZ": 0.97,
"B3LYP aug-cc-pVTZ": 0.968,
"B3LYP aug-cc-pVQZ": 0.969,
"B3PW91 STO-3G": 0.885,
"B3PW91 3-21G": 0.961,
"B3PW91 3-21G*": 0.959,
"B3PW91 6-31G": 0.958,
"B3PW91 6-31G*": 0.957,
"B3PW91 6-31G**": 0.958,
"B3PW91 6-31+G**": 0.96,
"B3PW91 6-311G*": 0.963,
"B3PW91 6-311G**": 0.963,
"B3PW91 TZVP": 0.964,
"B3PW91 cc-pVDZ": 0.965,
"B3PW91 cc-pVTZ": 0.962,
"B3PW91 aug-cc-pVDZ": 0.965,
"B3PW91 aug-cc-pVTZ": 0.965,
"mPW1PW91 STO-3G": 0.879,
"mPW1PW91 3-21G": 0.955,
"mPW1PW91 3-21G*": 0.95,
"mPW1PW91 6-31G": 0.947,
"mPW1PW91 6-31G*": 0.948,
"mPW1PW91 6-31G**": 0.952,
"mPW1PW91 6-31+G**": 0.952,
"mPW1PW91 6-311G*": 0.954,
"mPW1PW91 6-311G**": 0.957,
"mPW1PW91 TZVP": 0.954,
"mPW1PW91 cc-pVDZ": 0.958,
"mPW1PW91 cc-pVTZ": 0.959,
"mPW1PW91 aug-cc-pVDZ": 0.958,
"mPW1PW91 aug-cc-pVTZ": 0.958,
"PBEPBE STO-3G": 0.914,
"PBEPBE 3-21G": 0.991,
"PBEPBE 3-21G*": 0.954,
"PBEPBE 6-31G": 0.986,
"PBEPBE 6-31G*": 0.986,
"PBEPBE 6-31G**": 0.986,
"PBEPBE 6-31+G**": 0.989,
"PBEPBE 6-311G*": 0.99,
"PBEPBE 6-311G**": 0.991,
"PBEPBE TZVP": 0.989,
"PBEPBE cc-pVDZ": 0.994,
"PBEPBE cc-pVTZ": 0.993,
"PBEPBE aug-cc-pVDZ": 0.994,
"PBEPBE aug-cc-pVTZ": 0.994,
"PBE1PBE STO-3G": 0.882,
"PBE1PBE 3-21G": 0.96,
"PBE1PBE 3-21G*": 0.96,
"PBE1PBE 6-31G": 0.956,
"PBE1PBE 6-31G*": 0.95,
"PBE1PBE 6-31G**": 0.953,
"PBE1PBE 6-31+G**": 0.955,
"PBE1PBE 6-311G*": 0.959,
"PBE1PBE 6-311G**": 0.959,
"PBE1PBE TZVP": 0.96,
"PBE1PBE cc-pVDZ": 0.962,
"PBE1PBE cc-pVTZ": 0.961,
"PBE1PBE aug-cc-pVDZ": 0.962,
"PBE1PBE aug-cc-pVTZ": 0.962,
"HSEh1PBE STO-3G": 0.883,
"HSEh1PBE 3-21G": 0.963,
"HSEh1PBE 3-21G*": 0.96,
"HSEh1PBE 6-31G": 0.957,
"HSEh1PBE 6-31G*": 0.951,
"HSEh1PBE 6-31G**": 0.954,
"HSEh1PBE 6-31+G**": 0.955,
"HSEh1PBE 6-311G*": 0.96,
"HSEh1PBE 6-311G**": 0.96,
"HSEh1PBE TZVP": 0.96,
"HSEh1PBE cc-pVDZ": 0.962,
"HSEh1PBE cc-pVTZ": 0.961,
"HSEh1PBE aug-cc-pVDZ": 0.962,
"HSEh1PBE aug-cc-pVTZ": 0.962,
"TPSSh 3-21G": 0.969,
"TPSSh 3-21G*": 0.966,
"TPSSh 6-31G": 0.962,
"TPSSh 6-31G*": 0.959,
"TPSSh 6-31G**": 0.959,
"TPSSh 6-31+G**": 0.963,
"TPSSh 6-311G*": 0.963,
"TPSSh TZVP": 0.964,
"TPSSh cc-pVDZ": 0.972,
"TPSSh cc-pVTZ": 0.968,
"TPSSh aug-cc-pVDZ": 0.967,
"TPSSh aug-cc-pVTZ": 0.965,
"B97D3 3-21G": 0.983,
"B97D3 6-31G*": 0.98,
"B97D3 6-31+G**": 0.983,
"B97D3 6-311G**": 0.986,
"B97D3 TZVP": 0.986,
"B97D3 cc-pVDZ": 0.992,
"B97D3 cc-pVTZ": 0.986,
"B97D3 aug-cc-pVTZ": 0.985,
"MP2 STO-3G": 0.872,
"MP2 3-21G": 0.955,
"MP2 3-21G*": 0.951,
"MP2 6-31G": 0.957,
"MP2 6-31G*": 0.943,
"MP2 6-31G**": 0.937,
"MP2 6-31+G**": 0.941,
"MP2 6-311G*": 0.95,
"MP2 6-311G**": 0.95,
"MP2 TZVP": 0.948,
"MP2 cc-pVDZ": 0.953,
"MP2 cc-pVTZ": 0.95,
"MP2 cc-pVQZ": 0.948,
"MP2 aug-cc-pVDZ": 0.959,
"MP2 aug-cc-pVTZ": 0.953,
"MP2 aug-cc-pVQZ": 0.95,
"MP2=FULL STO-3G": 0.889,
"MP2=FULL 3-21G": 0.955,
"MP2=FULL 3-21G*": 0.948,
"MP2=FULL 6-31G": 0.95,
"MP2=FULL 6-31G*": 0.942,
"MP2=FULL 6-31G**": 0.934,
"MP2=FULL 6-31+G**": 0.939,
"MP2=FULL 6-311G*": 0.947,
"MP2=FULL 6-311G**": 0.949,
"MP2=FULL TZVP": 0.953,
"MP2=FULL cc-pVDZ": 0.95,
"MP2=FULL cc-pVTZ": 0.949,
"MP2=FULL cc-pVQZ": 0.957,
"MP2=FULL aug-cc-pVDZ": 0.969,
"MP2=FULL aug-cc-pVTZ": 0.951,
"MP2=FULL aug-cc-pVQZ": 0.956,
"MP3 STO-3G": 0.894,
"MP3 3-21G": 0.968,
"MP3 3-21G*": 0.965,
"MP3 6-31G": 0.966,
"MP3 6-31G*": 0.939,
"MP3 6-31G**": 0.935,
"MP3 6-31+G**": 0.931,
"MP3 TZVP": 0.935,
"MP3 cc-pVDZ": 0.948,
"MP3 cc-pVTZ": 0.945,
"MP3=FULL 6-31G*": 0.938,
"MP3=FULL 6-31+G**": 0.932,
"MP3=FULL TZVP": 0.934,
"MP3=FULL cc-pVDZ": 0.94,
"MP3=FULL cc-pVTZ": 0.933,
"B2PLYP 6-31G*": 0.949,
"B2PLYP 6-31+G**": 0.952,
"B2PLYP TZVP": 0.954,
"B2PLYP cc-pVDZ": 0.958,
"B2PLYP cc-pVTZ": 0.959,
"B2PLYP cc-pVQZ": 0.957,
"B2PLYP aug-cc-pVTZ": 0.961,
"B2PLYP=FULL 3-21G": 0.952,
"B2PLYP=FULL 6-31G*": 0.948,
"B2PLYP=FULL 6-31+G**": 0.951,
"B2PLYP=FULL TZVP": 0.954,
"B2PLYP=FULL cc-pVDZ": 0.959,
"B2PLYP=FULL cc-pVTZ": 0.956,
"B2PLYP=FULL aug-cc-pVDZ": 0.962,
"B2PLYP=FULL aug-cc-pVTZ": 0.959,
"CID 3-21G": 0.932,
"CID 3-21G*": 0.931,
"CID 6-31G": 0.935,
"CID 6-31G*": 0.924,
"CID 6-31G**": 0.924,
"CID 6-31+G**": 0.924,
"CID 6-311G*": 0.929,
"CID cc-pVDZ": 0.924,
"CID cc-pVTZ": 0.927,
"CISD 3-21G": 0.941,
"CISD 3-21G*": 0.934,
"CISD 6-31G": 0.938,
"CISD 6-31G*": 0.926,
"CISD 6-31G**": 0.918,
"CISD 6-31+G**": 0.922,
"CISD 6-311G*": 0.925,
"CISD cc-pVDZ": 0.922,
"CISD cc-pVTZ": 0.93,
"QCISD 3-21G": 0.969,
"QCISD 3-21G*": 0.961,
"QCISD 6-31G": 0.964,
"QCISD 6-31G*": 0.952,
"QCISD 6-31G**": 0.941,
"QCISD 6-31+G**": 0.945,
"QCISD 6-311G*": 0.957,
"QCISD 6-311G**": 0.954,
"QCISD TZVP": 0.955,
"QCISD cc-pVDZ": 0.959,
"QCISD cc-pVTZ": 0.956,
"QCISD aug-cc-pVDZ": 0.969,
"QCISD aug-cc-pVTZ": 0.962,
"CCD 3-21G": 0.972,
"CCD 3-21G*": 0.957,
"CCD 6-31G": 0.96,
"CCD 6-31G*": 0.947,
"CCD 6-31G**": 0.938,
"CCD 6-31+G**": 0.942,
"CCD 6-311G*": 0.955,
"CCD 6-311G**": 0.955,
"CCD TZVP": 0.948,
"CCD cc-pVDZ": 0.957,
"CCD cc-pVTZ": 0.934,
"CCD aug-cc-pVDZ": 0.965,
"CCD aug-cc-pVTZ": 0.957,
"CCSD 3-21G": 0.943,
"CCSD 3-21G*": 0.943,
"CCSD 6-31G": 0.943,
"CCSD 6-31G*": 0.944,
"CCSD 6-31G**": 0.933,
"CCSD 6-31+G**": 0.934,
"CCSD 6-311G*": 0.954,
"CCSD TZVP": 0.954,
"CCSD cc-pVDZ": 0.947,
"CCSD cc-pVTZ": 0.941,
"CCSD cc-pVQZ": 0.951,
"CCSD aug-cc-pVDZ": 0.963,
"CCSD aug-cc-pVTZ": 0.956,
"CCSD aug-cc-pVQZ": 0.953,
"CCSD=FULL 6-31G*": 0.95,
"CCSD=FULL TZVP": 0.948,
"CCSD=FULL cc-pVTZ": 0.948,
"CCSD=FULL aug-cc-pVTZ": 0.951,
}
element_list = [
["1 ", "H ", "Hydrogen"],
["2 ", "He", "Helium"],
["3 ", "Li", "Lithium"],
["4 ", "Be", "Beryllium"],
["5 ", "B ", "Boron"],
["6 ", "C ", "Carbon"],
["7 ", "N ", "Nitrogen"],
["8 ", "O ", "Oxygen"],
["9 ", "F ", "Fluorine"],
["10", "Ne", "Neon"],
["11", "Na", "Sodium"],
["12", "Mg", "Magnesium"],
["13", "Al", "Aluminum"],
["14", "Si", "Silicon"],
["15", "P ", "Phosphorus"],
["16", "S ", "Sulfur"],
["17", "Cl", "Chlorine"],
["18", "Ar", "Argon"],
["19", "K ", "Potassium"],
["20", "Ca", "Calcium"],
["21", "Sc", "Scandium"],
["22", "Ti", "Titanium"],
["23", "V ", "Vanadium"],
["24", "Cr", "Chromium"],
["25", "Mn", "Manganese"],
["26", "Fe", "Iron"],
["27", "Co", "Cobalt"],
["28", "Ni", "Nickel"],
["29", "Cu", "Copper"],
["30", "Zn", "Zinc"],
["31", "Ga", "Gallium"],
["32", "Ge", "Germanium"],
["33", "As", "Arsenic"],
["34", "Se", "Selenium"],
["35", "Br", "Bromine"],
["36", "Kr", "Krypton"],
["37", "Rb", "Rubidium"],
["38", "Sr", "Strontium"],
["39", "Y ", "Yttrium"],
["40", "Zr", "Zirconium"],
["41", "Nb", "Niobium"],
["42", "Mo", "Molybdenum"],
["43", "Tc", "Technetium"],
["44", "Ru", "Ruthenium"],
["45", "Rh", "Rhodium"],
["46", "Pd", "Palladium"],
["47", "Ag", "Silver"],
["48", "Cd", "Cadmium"],
["49", "In", "Indium"],
["50", "Sn", "Tin"],
["51", "Sb", "Antimony"],
["52", "Te", "Tellurium"],
["53", "I ", "Iodine"],
["54", "Xe", "Xenon"],
["55", "Cs", "Cesium"],
["56", "Ba", "Barium"],
["57", "La", "Lanthanum"],
["58", "Ce", "Cerium"],
["59", "Pr", "Praseodymium"],
["60", "Nd", "Neodymium"],
["61", "Pm", "Promethium"],
["62", "Sm", "Samarium"],
["63", "Eu", "Europium"],
["64", "Gd", "Gadolinium"],
["65", "Tb", "Terbium"],
["66", "Dy", "Dysprosium"],
["67", "Ho", "Holmium"],
["68", "Er", "Erbium"],
["69", "Tm", "Thulium"],
["70", "Yb", "Ytterbium"],
["71", "Lu", "Lutetium"],
["72", "Hf", "Hafnium"],
["73", "Ta", "Tantalum"],
["74", "W ", "Tungsten"],
["75", "Re", "Rhenium"],
["76", "Os", "Osmium"],
["77", "Ir", "Iridium"],
["78", "Pt", "Platinum"],
["79", "Au", "Gold"],
["80", "Hg", "Mercury"],
["81", "Tl", "Thallium"],
["82", "Pb", "Lead"],
["83", "Bi", "Bismuth"],
["84", "Po", "Polonium"],
["85", "At", "Astatine"],
["86", "Rn", "Radon"],
["87", "Fr", "Francium"],
["88", "Ra", "Radium"],
["89", "Ac", "Actinium"],
["90", "Th", "Thorium"],
["91", "Pa", "Protactinium"],
["92", "U ", "Uranium"],
["93", "Np", "Neptunium"],
["94", "Pu", "Plutonium"],
["95", "Am", "Americium"],
["96", "Cm", "Curium"],
["97", "Bk", "Berkelium"],
["98", "Cf", "Californium"],
["99", "Es", "Einsteinium"],
]
def get_vibrational_scaling(functional, basis_set):
"""
Returns vibrational scaling factor given the functional
and the basis set for the QM engine.
Parameters
----------
functional: str
Functional
basis_set: str
Basis set
Returns
-------
vib_scale: float
Vibrational scaling factor corresponding to the given
basis_set and functional.
Examples
--------
>>> get_vibrational_scaling("QCISD", "6-311G*")
0.957
"""
vib_scale = method_basis_scale_dict.get(functional + " " + basis_set)
return vib_scale
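# Note that dict.get returns None for an unknown functional/basis pair; a
# minimal fallback sketch (the 1.0 default is an assumption, not part of the
# original workflow):
# vib_scale = get_vibrational_scaling("B3LYP", "6-31G*")  # 0.96
# if vib_scale is None:
#     vib_scale = 1.0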
def unit_vector_N(u_BC, u_AB):
"""
Calculates unit normal vector perpendicular to plane ABC.
Parameters
----------
u_BC : (.. , 1, 3) array
Unit vector from atom B to atom C.
u_AB : (..., 1, 3) array
Unit vector from atom A to atom B.
Returns
-------
u_N : (..., 1, 3) array
Unit normal vector perpendicular to plane ABC.
Examples
--------
>>> u_BC = [0.34040355, 0.62192853, 0.27011169]
>>> u_AB = [0.28276792, 0.34232697, 0.02370306]
>>> unit_vector_N(u_BC, u_AB)
array([-0.65161629, 0.5726879 , -0.49741811])
"""
cross_product = np.cross(u_BC, u_AB)
norm_u_N = np.linalg.norm(cross_product)
u_N = cross_product / norm_u_N
return u_N
def delete_guest_angle_params(guest_qm_params_file="guest_qm_params.txt"):
"""
Removes the angle-parameter section (including its begin and end markers)
from the guest QM parameters file and rewrites the file in place.
"""
f_params = open(guest_qm_params_file, "r")
lines_params = f_params.readlines()
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
lines_selected = lines_params[:to_begin] + lines_params[to_end + 1 :]
with open(guest_qm_params_file, "w") as f_:
f_.write("".join(lines_selected))
return
def remove_bad_angle_params(
guest_qm_params_file="guest_qm_params.txt", angle=1.00, k_angle=500):
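"""
Reads the angle-parameter section of the guest QM parameters file and
removes lines whose first numeric field is smaller than `angle` or whose
second numeric field is larger than `k_angle`, rewriting the file in place.
"""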
with open(guest_qm_params_file, "r") as f_params:
lines_params = f_params.readlines()
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
angle_params = lines_params[to_begin + 1 : to_end]
lines_to_omit = []
for i in angle_params:
if float(re.findall(r"[-+]?\d+[.]?\d*", i)[0]) < float(angle) or float(
re.findall(r"[-+]?\d+[.]?\d*", i)[1]
) > float(k_angle):
lines_to_omit.append(i)
for b in lines_to_omit:
lines_params.remove(b)
with open(guest_qm_params_file, "w") as file:
for j in lines_params:
file.write(j)
def get_num_host_atoms(host_pdb):
"""
Reads the host PDB file and returns the
total number of atoms.
"""
ppdb = PandasPdb()
ppdb.read_pdb(host_pdb)
no_host_atoms = ppdb.df["ATOM"].shape[0]
return no_host_atoms
def change_names(inpcrd_file, prmtop_file, pdb_file):
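"""
Copies the given inpcrd, prmtop and pdb files to the standardised
system_qmmmrebind.* file names.
"""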
command = "cp -r " + inpcrd_file + " system_qmmmrebind.inpcrd"
os.system(command)
command = "cp -r " + prmtop_file + " system_qmmmrebind.prmtop"
os.system(command)
command = "cp -r " + pdb_file + " system_qmmmrebind.pdb"
os.system(command)
def copy_file(source, destination):
"""
Copies a file from a source to the destination.
"""
shutil.copy(source, destination)
def get_openmm_energies(system_pdb, system_xml):
"""
Returns decomposed OPENMM energies for the
system.
Parameters
----------
system_pdb : str
Input PDB file
system_xml : str
Forcefield file in XML format
"""
pdb = simtk.openmm.app.PDBFile(system_pdb)
ff_xml_file = open(system_xml, "r")
system = simtk.openmm.XmlSerializer.deserialize(ff_xml_file.read())
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(pdb.topology, system, integrator)
simulation.context.setPositions(pdb.positions)
state = simulation.context.getState(
getEnergy=True, getParameters=True, getForces=True
)
force_group = []
for i, force in enumerate(system.getForces()):
force_group.append(force.__class__.__name__)
forcegroups = {}
for i in range(system.getNumForces()):
force = system.getForce(i)
force.setForceGroup(i)
forcegroups[force] = i
energies = {}
for f, i in forcegroups.items():
energies[f] = (
simulation.context.getState(getEnergy=True, groups=2 ** i)
.getPotentialEnergy()
._value
)
decomposed_energy = []
for key, val in energies.items():
decomposed_energy.append(val)
df_energy_openmm = pd.DataFrame(
list(zip(force_group, decomposed_energy)),
columns=["Energy_term", "Energy_openmm_params"],
)
energy_values = [
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "HarmonicBondForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "HarmonicAngleForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "PeriodicTorsionForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "NonbondedForce"
].values[0]
)[1],
]
energy_group = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_open_mm = pd.DataFrame(
list(zip(energy_group, energy_values)),
columns=["Energy_term", "Energy_openmm_params"],
)
df_energy_open_mm = df_energy_open_mm.set_index("Energy_term")
print(df_energy_open_mm)
def u_PA_from_angles(atom_A, atom_B, atom_C, coords):
"""
Returns the vector in the plane A,B,C and perpendicular to AB.
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
coords : (..., N, 3) array
An array which contains the coordinates of all
the N atoms.
"""
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_PA
return u_PA
def force_angle_constant(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
):
"""
Calculates force constant according to Equation 14 of
the Seminario calculation paper; returns the angle force constant
(in kcal/mol/rad^2) and the equilibrium angle (in degrees).
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
bond_lengths : (N, N) array
An N * N array containing the bond lengths for
all the possible pairs of atoms.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing
eigenvalues of the hessian matrix, where N
is the total number of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y and Z
coordinates of all N atoms.
scaling_1 : float
Factor to scale the projections of eigenvalues for AB.
scaling_2 : float
Factor to scale the projections of eigenvalues for BC.
Returns
-------
k_theta : float
Force angle constant calculated using modified
seminario method.
theta_0 : float
Equilibrium angle between AB and BC (in degrees).
"""
# Vectors along bonds calculated
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
# Bond lengths and eigenvalues found
bond_length_AB = bond_lengths[atom_A, atom_B]
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[0:3, 0:3, atom_A, atom_B]
bond_length_BC = bond_lengths[atom_B, atom_C]
eigenvalues_CB = eigenvalues[atom_C, atom_B, :]
eigenvectors_CB = eigenvectors[0:3, 0:3, atom_C, atom_B]
# Normal vector to angle plane found
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_u_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_u_PA
u_PC = np.cross(u_CB, u_N)
norm_u_PC = np.linalg.norm(u_PC)
u_PC = u_PC / norm_u_PC
sum_first = 0
sum_second = 0
# Projections of eigenvalues
for i in range(0, 3):
eig_AB_i = eigenvectors_AB[:, i]
eig_BC_i = eigenvectors_CB[:, i]
sum_first = sum_first + (
eigenvalues_AB[i] * abs(dot_product(u_PA, eig_AB_i))
)
sum_second = sum_second + (
eigenvalues_CB[i] * abs(dot_product(u_PC, eig_BC_i))
)
# Scaling due to additional angles - Modified Seminario Part
sum_first = sum_first / scaling_1
sum_second = sum_second / scaling_2
# Added as two springs in series
k_theta = (1 / ((bond_length_AB ** 2) * sum_first)) + (
1 / ((bond_length_BC ** 2) * sum_second)
)
k_theta = 1 / k_theta
k_theta = -k_theta # Change to OPLS form
k_theta = abs(k_theta * 0.5) # Change to OPLS form
# Equilibrium Angle
theta_0 = math.degrees(math.acos(np.dot(u_AB, u_CB)))
# If the vectors u_CB and u_AB are linearly dependent u_N cannot be defined.
# This case is dealt with here :
if abs(sum((u_CB) - (u_AB))) < 0.01 or (
abs(sum((u_CB) - (u_AB))) > 1.99 and abs(sum((u_CB) - (u_AB))) < 2.01
):
scaling_1 = 1
scaling_2 = 1
[k_theta, theta_0] = force_angle_constant_special_case(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
)
return k_theta, theta_0
def dot_product(u_PA, eig_AB):
"""
Returns the dot product of two vectors.
Parameters
----------
u_PA : (..., 1, 3) array
Unit vector perpendicular to AB and in the
plane of A, B, C.
eig_AB : (..., 3, 3) array
Eigenvectors of the hessian matrix for
the bond AB.
"""
x = 0
for i in range(0, 3):
x = x + u_PA[i] * eig_AB[i].conjugate()
return x
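# For example, dot_product([1.0, 0.0, 0.0], [0.5, 0.5, 0.0]) evaluates to 0.5
# (only the component of eig_AB along u_PA contributes).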
def force_angle_constant_special_case(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
):
"""
Calculates force constant according to Equation 14
of Seminario calculation paper when the vectors
u_CB and u_AB are linearly dependent and u_N cannot
be defined. It instead takes samples of u_N across a
unit sphere for the calculation; returns the angle force constant
(in kcal/mol/rad^2) and the equilibrium angle in degrees.
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
bond_lengths : (N, N) array
An N * N array containing the bond lengths for
all the possible pairs of atoms.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing
eigenvalues of the hessian matrix, where N
is the total number of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y,
and Z coordinates of all N atoms.
scaling_1 : float
Factor to scale the projections of eigenvalues for AB.
scaling_2 : float
Factor to scale the projections of eigenvalues for BC.
Returns
-------
k_theta : float
Force angle constant calculated using modified
seminario method.
theta_0 : float
Equilibrium angle between AB and BC (in degrees).
"""
# Vectors along bonds calculated
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
# Bond lengths and eigenvalues found
bond_length_AB = bond_lengths[atom_A, atom_B]
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[0:3, 0:3, atom_A, atom_B]
bond_length_BC = bond_lengths[atom_B, atom_C]
eigenvalues_CB = eigenvalues[atom_C, atom_B, :]
eigenvectors_CB = eigenvectors[0:3, 0:3, atom_C, atom_B]
k_theta_array = np.zeros((180, 360))
# Find force constant with varying u_N (with vector uniformly
# sampled across a sphere)
for theta in range(0, 180):
for phi in range(0, 360):
r = 1
u_N = [
r
* math.sin(math.radians(theta))
* math.cos(math.radians(phi)),
r
* math.sin(math.radians(theta))
* math.sin(math.radians(phi)),
r * math.cos(math.radians(theta)),
]
u_PA = np.cross(u_N, u_AB)
u_PA = u_PA / np.linalg.norm(u_PA)
u_PC = np.cross(u_CB, u_N)
u_PC = u_PC / np.linalg.norm(u_PC)
sum_first = 0
sum_second = 0
# Projections of eigenvalues
for i in range(0, 3):
eig_AB_i = eigenvectors_AB[:, i]
eig_BC_i = eigenvectors_CB[:, i]
sum_first = sum_first + (
eigenvalues_AB[i] * abs(dot_product(u_PA, eig_AB_i))
)
sum_second = sum_second + (
eigenvalues_CB[i] * abs(dot_product(u_PC, eig_BC_i))
)
# Added as two springs in series
k_theta_ij = (1 / ((bond_length_AB ** 2) * sum_first)) + (
1 / ((bond_length_BC ** 2) * sum_second)
)
k_theta_ij = 1 / k_theta_ij
k_theta_ij = -k_theta_ij # Change to OPLS form
k_theta_ij = abs(k_theta_ij * 0.5) # Change to OPLS form
k_theta_array[theta, phi] = k_theta_ij
# Removes cases where u_N was linearly dependent of u_CB or u_AB.
# Force constant used is taken as the mean.
k_theta = np.mean(np.mean(k_theta_array))
# Equilibrium Angle independent of u_N
theta_0 = math.degrees(math.acos(np.dot(u_AB, u_CB)))
return k_theta, theta_0
def force_constant_bond(atom_A, atom_B, eigenvalues, eigenvectors, coords):
"""
Calculates the bond force constant for the bonds in the
molecule according to equation 10 of seminario paper,
given the bond atoms' indices and the corresponding
eigenvalues, eigenvectors and coordinates matrices.
Parameters
----------
atom_A : int
Index of Atom A.
atom_B : int
Index of Atom B.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing eigenvalues
of the hessian matrix, where N is the total number
of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing the
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y, and
Z coordinates of all N atoms.
Returns
--------
k_AB : float
Bond Force Constant value for the bond with atoms A and B.
"""
# Eigenvalues and eigenvectors calculated
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[:, :, atom_A, atom_B]
# Vector along bond
diff_AB = np.array(coords[atom_B, :]) - np.array(coords[atom_A, :])
norm_diff_AB = np.linalg.norm(diff_AB)
unit_vectors_AB = diff_AB / norm_diff_AB
k_AB = 0
# Projections of eigenvalues
for i in range(0, 3):
dot_product = abs(np.dot(unit_vectors_AB, eigenvectors_AB[:, i]))
k_AB = k_AB + (eigenvalues_AB[i] * dot_product)
k_AB = -k_AB * 0.5 # Convert to OPLS form
return k_AB
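# Illustrative call sketch (the array names below are assumptions; in the
# full workflow they come from post-processing the QM Hessian as described
# in the docstring above):
# k_01 = force_constant_bond(0, 1, eigenvalues, eigenvectors, coords)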
def u_PA_from_angles(atom_A, atom_B, atom_C, coords):
"""
Returns the vector in the plane A,B,C and perpendicular to AB.
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
coords : (..., N, 3) array
An array containing the coordinates of all the N atoms.
Returns
-------
u_PA : (..., 1, 3) array
Unit vector perpendicular to AB and in the plane of A, B, C.
"""
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_PA
return u_PA
def reverse_list(lst):
"""
Returns the reversed form of a given list.
Parameters
----------
lst : list
Input list.
Returns
-------
reversed_list : list
Reversed input list.
Examples
--------
>>> lst = [5, 4, 7, 2]
>>> reverse_list(lst)
[2, 7, 4, 5]
"""
reversed_list = lst[::-1]
return reversed_list
def uniq(input_):
"""
Returns a list with only unique elements from a list
containing duplicate / repeating elements.
Parameters
----------
input_ : list
Input list.
Returns
-------
output : list
List with only unique elements.
Examples
--------
>>> lst = [2, 4, 2, 9, 10, 35, 10]
>>> uniq(lst)
[2, 4, 9, 10, 35]
"""
output = []
for x in input_:
if x not in output:
output.append(x)
return output
def search_in_file(file: str, word: str) -> list:
"""
Search for the given string in file and return lines
containing that string along with line numbers.
Parameters
----------
file : str
Input file.
word : str
Search word.
Returns
-------
list_of_results : list
List of (line number, line contents) tuples for each
line that contains the search word.
"""
line_number = 0
list_of_results = []
with open(file, "r") as f:
for line in f:
line_number += 1
if word in line:
list_of_results.append((line_number, line.rstrip()))
return list_of_results
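# Illustrative usage sketch (the file name is hypothetical):
# for line_number, line in search_in_file("guest_qm_params.txt", "Angle"):
#     print(line_number, line)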
def list_to_dict(lst):
"""
Converts an input list with mapped characters (every
odd entry is the key of the dictionary and every
even entry adjacent to the odd entry is its corresponding
value) to a dictionary.
Parameters
----------
lst : list
Input list.
Returns
-------
res_dct : dict
A dictionary with every element mapped with
its successive element starting from index 0.
Examples
--------
>>> lst = [5, 9, 3, 6, 2, 7]
>>> list_to_dict(lst)
{5: 9, 3: 6, 2: 7}
"""
res_dct = {lst[i]: lst[i + 1] for i in range(0, len(lst), 2)}
return res_dct
def scale_list(list_):
"""
Returns a scaled list with the minimum value
subtracted from each element of the corresponding list.
Parameters
----------
list_ : list
Input list.
Returns
-------
scaled_list : list
Scaled list.
Examples
--------
>>> list_ = [6, 3, 5, 11, 3, 2, 8, 6]
>>> scale_list(list_)
[4, 1, 3, 9, 1, 0, 6, 4]
"""
scaled_list = [i - min(list_) for i in list_]
return scaled_list
def list_kJ_kcal(list_):
"""
Convert the elements in the list from
kiloJoules units to kiloCalories units.
Parameters
----------
list_ : list
List with elements in units of kJ.
Returns
-------
converted_list : list
List with elements in units of kcal.
Examples
--------
>>> list_ = [6, 3, 5]
>>> list_kJ_kcal(list_)
[1.4340344168260037, 0.7170172084130019, 1.1950286806883366]
"""
converted_list = [i / 4.184 for i in list_]
return converted_list
def list_hartree_kcal(list_):
"""
Convert the elements in the list from
hartree units to kiloCalories units.
Parameters
----------
list_ : list
List with elements in units of hartree.
Returns
-------
converted_list : list
List with elements in units of kcal.
Examples
--------
>>> list_ = [6, 3, 5]
>>> list_hartree_kcal(list_)
[3765.0564000000004, 1882.5282000000002, 3137.547]
"""
converted_list = [i * 627.5094 for i in list_]
return converted_list
def torsiondrive_input_to_xyz(psi_input_file, xyz_file):
"""
Writes an XYZ file extracted from a torsiondrive-formatted
psi4 input file.
Parameters
----------
psi_input_file : str
Input file for the psi4 QM engine.
xyz_file : str
XYZ format file to write the coords of the system.
"""
with open(psi_input_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "molecule {" in lines[i]:
to_begin = int(i)
if "set {" in lines[i]:
to_end = int(i)
xyz_lines = lines[to_begin + 2 : to_end - 1]
with open(xyz_file, "w") as f:
f.write(str(len(xyz_lines)) + "\n")
f.write(xyz_file + "\n")
for i in xyz_lines:
f.write(i)
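# Illustrative usage sketch (file names are hypothetical):
# torsiondrive_input_to_xyz("dihedral_scan_input.dat", "dihedral_scan.xyz")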
def xyz_to_pdb(xyz_file, coords_file, template_pdb, system_pdb):
"""
Converts a XYZ file to a PDB file.
Parameters
----------
xyz_file : str
XYZ file containing the coordinates of the system.
coords_file : str
A text file containing the coordinates part of XYZ file.
template_pdb : str
A pdb file to be used as a template for the required PDB.
system_pdb : str
Output PDB file with the coordinates updated in the
template pdb using XYZ file.
"""
with open(xyz_file, "r") as f:
lines = f.readlines()
needed_lines = lines[2:]
with open(coords_file, "w") as f:
for i in needed_lines:
f.write(i)
df = pd.read_csv(coords_file, header=None, delimiter=r"\s+")
df.columns = ["atom", "x", "y", "z"]
ppdb = PandasPdb()
ppdb.read_pdb(template_pdb)
ppdb.df["ATOM"]["x_coord"] = df["x"]
ppdb.df["ATOM"]["y_coord"] = df["y"]
ppdb.df["ATOM"]["z_coord"] = df["z"]
ppdb.to_pdb(system_pdb)
def generate_xml_from_pdb_sdf(system_pdb, system_sdf, system_xml):
"""
Generates an openforcefield xml file from the pdb file.
Parameters
----------
system_pdb : str
Input PDB file.
system_sdf : str
SDF file of the system.
system_xml : str
XML force field file generated using PDB and SDF files.
"""
# command = "babel -ipdb " + system_pdb + " -osdf " + system_sdf
command = "obabel -ipdb " + system_pdb + " -osdf -O " + system_sdf
os.system(command)
# off_molecule = openforcefield.topology.Molecule(system_sdf)
off_molecule = Molecule(system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
def generate_xml_from_charged_pdb_sdf(
system_pdb,
system_init_sdf,
system_sdf,
num_charge_atoms,
index_charge_atom_1,
charge_atom_1,
system_xml,
):
"""
Generates an openforcefield xml file from the pdb
file via SDF file and openforcefield.
Parameters
----------
system_pdb : str
Input PDB file.
system_init_sdf : str
SDF file for the system excluding charge information.
system_sdf : str
SDF file of the system.
num_charge_atoms : int
Total number of charged atoms in the PDB.
index_charge_atom_1 : int
Index of the first charged atom.
charge_atom_1 : float
Charge on first charged atom.
system_xml : str
XML force field file generated using PDB and SDF files.
"""
# command = "babel -ipdb " + system_pdb + " -osdf " + system_init_sdf
command = "obabel -ipdb " + system_pdb + " -osdf -O " + system_init_sdf
os.system(command)
with open(system_init_sdf, "r") as f1:
filedata = f1.readlines()
filedata = filedata[:-2]
with open(system_sdf, "w+") as out:
for i in filedata:
out.write(i)
line_1 = (
"M CHG "
+ str(num_charge_atoms)
+ " "
+ str(index_charge_atom_1)
+ " "
+ str(charge_atom_1)
+ "\n"
)
line_2 = "M END" + "\n"
line_3 = "$$$$"
out.write(line_1)
out.write(line_2)
out.write(line_3)
# off_molecule = openforcefield.topology.Molecule(system_sdf)
off_molecule = Molecule(system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
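# Illustrative note (values are hypothetical, not taken from the original
# source): for num_charge_atoms=1, index_charge_atom_1=21 and charge_atom_1=1,
# the block appended to system_sdf by generate_xml_from_charged_pdb_sdf()
# reads roughly:
#   M  CHG  1   21   1
#   M  END
#   $$$$
# i.e. a standard SDF "M  CHG" property line followed by the terminator lines
# that were stripped from system_init_sdf above.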
def get_dihedrals(qm_scan_file):
"""
Returns dihedrals from the torsiondrive scan file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
Returns
-------
dihedrals : list
List of all the dihedral values from the qm scan file.
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
dihedrals = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
dihedral = float(energy_dihedral[0])
dihedrals.append(dihedral)
return dihedrals
def get_qm_energies(qm_scan_file):
"""
Returns QM optimized energies from the torsiondrive
scan file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
Returns
-------
qm_energies : list
        List of all the QM optimized energies extracted from the torsiondrive
scan file.
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
qm_energies = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
energy = float(energy_dihedral[1])
qm_energies.append(energy)
return qm_energies
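# Parsing note: get_dihedrals() and get_qm_energies() above both assume that
# every line containing the word "Dihedral" carries the dihedral angle as its
# first number and the QM energy as its second. For a hypothetical scan line
# such as "Dihedral -165.0 Energy -154.1234", the regular expression extracts:
# >>> re.findall(r"[-+]?\d+[.]?\d*", "Dihedral -165.0 Energy -154.1234")
# ['-165.0', '-154.1234']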
def generate_mm_pdbs(qm_scan_file, template_pdb):
"""
Generate PDBs from the torsiondrive scan file
based on a template PDB.
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
dihedrals = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
dihedral = float(energy_dihedral[0])
dihedrals.append(dihedral)
lines_markers = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
lines_markers.append(i)
lines_markers.append(len(lines) + 1)
for i in range(len(lines_markers) - 1):
# pdb_file_to_write = str(dihedrals[i]) + ".pdb"
if dihedrals[i] > 0:
pdb_file_to_write = "plus_" + str(abs(dihedrals[i])) + ".pdb"
if dihedrals[i] < 0:
pdb_file_to_write = "minus_" + str(abs(dihedrals[i])) + ".pdb"
to_begin = lines_markers[i]
to_end = lines_markers[i + 1]
lines_to_write = lines[to_begin + 1 : to_end - 1]
x_coords = []
y_coords = []
z_coords = []
for i in lines_to_write:
coordinates = i
coordinates = re.findall(r"[-+]?\d+[.]?\d*", coordinates)
x = float(coordinates[0])
y = float(coordinates[1])
z = float(coordinates[2])
x_coords.append(x)
y_coords.append(y)
z_coords.append(z)
ppdb = PandasPdb()
ppdb.read_pdb(template_pdb)
ppdb.df["ATOM"]["x_coord"] = x_coords
ppdb.df["ATOM"]["y_coord"] = y_coords
ppdb.df["ATOM"]["z_coord"] = z_coords
ppdb.to_pdb(pdb_file_to_write)
def remove_mm_files(qm_scan_file):
"""
Delete all generated PDB files.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
"""
mm_pdb_list = []
for i in get_dihedrals(qm_scan_file):
if i > 0:
pdb_file = "plus_" + str(abs(i)) + ".pdb"
if i < 0:
pdb_file = "minus_" + str(abs(i)) + ".pdb"
mm_pdb_list.append(pdb_file)
for i in mm_pdb_list:
command = "rm -rf " + i
os.system(command)
command = "rm -rf " + i[:-4] + ".inpcrd"
os.system(command)
command = "rm -rf " + i[:-4] + ".prmtop"
os.system(command)
def get_non_torsion_mm_energy(system_pdb, load_topology, system_xml):
"""
    Returns the sum of all the non-torsional energies (i.e.
    HarmonicBondForce, HarmonicAngleForce
    and NonbondedForce) of the system from the PDB
    file, given the topology and the force field file.
Parameters
----------
system_pdb : str
System PDB file to load the openmm system topology
and coordinates.
load_topology : {"openmm", "parmed"}
Argument to specify how to load the topology.
system_xml : str
XML force field file for the openmm system.
Returns
-------
Sum of all the non-torsional energies of the system.
"""
system_prmtop = system_pdb[:-4] + ".prmtop"
system_inpcrd = system_pdb[:-4] + ".inpcrd"
if load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(system_pdb, structure=True).topology,
parmed.load_file(system_xml),
)
if load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(system_pdb).topology,
parmed.load_file(system_xml),
)
openmm_system.save(system_prmtop, overwrite=True)
openmm_system.coordinates = parmed.load_file(
system_pdb, structure=True
).coordinates
openmm_system.save(system_inpcrd, overwrite=True)
parm = parmed.load_file(system_prmtop, system_inpcrd)
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
# print(prmtop_energy_decomposition)
prmtop_energy_decomposition_value_no_torsion = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
return sum(prmtop_energy_decomposition_value_no_torsion)
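# Implementation note (illustrative, not from the original source): ParmEd's
# energy_decomposition_system() yields (force name, energy) pairs, so the
# repeated flatten + list_to_dict calls in get_non_torsion_mm_energy() turn
# something like
#   [('HarmonicBondForce', 1.2), ('HarmonicAngleForce', 3.4), ...]
# into {'HarmonicBondForce': 1.2, 'HarmonicAngleForce': 3.4, ...}, from which
# only the bond, angle and nonbonded contributions are summed.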
def get_mm_potential_energies(qm_scan_file, load_topology, system_xml):
"""
Returns potential energy of the system from the PDB file
given the topology and the forcefield file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
load_topology : {"openmm", "parmed"}
        Argument to specify how to load the topology.
system_xml : str
XML file to load the openmm system.
Returns
-------
mm_potential_energies : list
List of all the non torsion mm energies for the
generated PDB files.
"""
mm_pdb_list = []
for i in get_dihedrals(qm_scan_file):
if i > 0:
pdb_file = "plus_" + str(abs(i)) + ".pdb"
if i < 0:
pdb_file = "minus_" + str(abs(i)) + ".pdb"
mm_pdb_list.append(pdb_file)
    mm_potential_energies = []
    for i in mm_pdb_list:
mm_energy = get_non_torsion_mm_energy(
system_pdb=i, load_topology=load_topology, system_xml=system_xml,
)
mm_potential_energies.append(mm_energy)
return mm_potential_energies
def list_diff(list_1, list_2):
"""
Returns the difference between two lists as a list.
Parameters
----------
list_1 : list
First list
list_2 : list
Second list.
Returns
-------
diff_list : list
        List containing the differences between the elements of
the two lists.
Examples
--------
>>> list_1 = [4, 2, 8, 3, 0, 6, 7]
>>> list_2 = [5, 3, 1, 5, 6, 0, 4]
>>> list_diff(list_1, list_2)
[-1, -1, 7, -2, -6, 6, 3]
"""
diff_list = []
zipped_list = zip(list_1, list_2)
for list1_i, list2_i in zipped_list:
diff_list.append(list1_i - list2_i)
return diff_list
def dihedral_energy(x, k1, k2, k3, k4=0):
"""
Expression for the dihedral energy.
"""
energy_1 = k1 * (1 + np.cos(1 * x * 0.01745))
energy_2 = k2 * (1 - np.cos(2 * x * 0.01745))
energy_3 = k3 * (1 + np.cos(3 * x * 0.01745))
energy_4 = k4 * (1 - np.cos(4 * x * 0.01745))
dihedral_energy = energy_1 + energy_2 + energy_3 + energy_4
return dihedral_energy
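# Worked example (illustrative): at x = 0 degrees the even terms vanish and
# the odd terms double, so
# >>> dihedral_energy(0.0, k1=1.0, k2=0.5, k3=0.25)
# 2.5
# i.e. 1.0*(1 + cos 0) + 0.5*(1 - cos 0) + 0.25*(1 + cos 0) = 2.0 + 0 + 0.5,
# with k4 defaulting to 0.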
def error_function(delta_qm, delta_mm):
"""
Root Mean Squared Error.
"""
squared_error = np.square(np.subtract(delta_qm, delta_mm))
mean_squared_error = squared_error.mean()
root_mean_squared_error = math.sqrt(mean_squared_error)
return root_mean_squared_error
def error_function_boltzmann(delta_qm, delta_mm, T):
"""
    Boltzmann-weighted Root Mean Squared Error.
"""
kb = 3.297623483 * 10 ** (-24) # in cal/K
delta_qm_boltzmann_weighted = [np.exp(-i / (kb * T)) for i in delta_qm]
squared_error = (
np.square(np.subtract(delta_qm, delta_mm))
* delta_qm_boltzmann_weighted
)
mean_squared_error = squared_error.mean()
root_mean_squared_error = math.sqrt(mean_squared_error)
return root_mean_squared_error
def gen_init_guess(qm_scan_file, load_topology, system_xml):
"""
    Initial guess for the torsional parameters.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
load_topology : {"openmm", "parmed"}
        Argument to specify how to load the topology.
system_xml : str
XML force field file for the system.
Returns
-------
k_init_guess : list
Initial guess for the torsional parameters.
"""
x = get_dihedrals(qm_scan_file)
y = scale_list(
list_=get_mm_potential_energies(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
)
init_vals = [0.0, 0.0, 0.0, 0.0]
k_init_guess, covar = scipy.optimize.curve_fit(
dihedral_energy, x, y, p0=init_vals
)
for i in range(len(k_init_guess)):
if k_init_guess[i] < 0:
k_init_guess[i] = 0
return k_init_guess
def objective_function(k_array, x, delta_qm):
"""
Objective function for the torsional parameter fitting.
"""
delta_mm = dihedral_energy(
x, k1=k_array[0], k2=k_array[1], k3=k_array[2], k4=k_array[3]
)
loss_function = error_function(delta_qm, delta_mm)
return loss_function
def fit_params(qm_scan_file, load_topology, system_xml, method):
"""
Optimization of the objective function.
"""
k_guess = gen_init_guess(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
x_data = np.array(get_dihedrals(qm_scan_file))
delta_qm = np.array(
scale_list(list_hartree_kcal(list_=get_qm_energies(qm_scan_file)))
)
optimise = scipy.optimize.minimize(
objective_function,
k_guess,
args=(x_data, delta_qm),
method=method,
bounds=[(0.00, None), (0.00, None), (0.00, None), (0.00, None),],
)
return optimise.x
def get_tor_params(
qm_scan_file, template_pdb, load_topology, system_xml, method
):
"""
Returns the fitted torsional parameters.
"""
qm_e = get_qm_energies(qm_scan_file=qm_scan_file)
qm_e_kcal = list_hartree_kcal(qm_e)
delta_qm = scale_list(qm_e_kcal)
generate_mm_pdbs(qm_scan_file=qm_scan_file, template_pdb=template_pdb)
mm_pe_no_torsion_kcal = get_mm_potential_energies(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
delta_mm = scale_list(mm_pe_no_torsion_kcal)
opt_param = fit_params(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
method=method,
)
return opt_param
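# Minimal usage sketch (hypothetical file names; "L-BFGS-B" is simply one of
# the bounded scipy.optimize.minimize methods, not a value mandated by the
# original code):
# >>> k_fit = get_tor_params(
# ...     qm_scan_file="scan.xyz",
# ...     template_pdb="guest_init_II.pdb",
# ...     load_topology="openmm",
# ...     system_xml="guest_init.xml",
# ...     method="L-BFGS-B",
# ... )
# >>> k_fit  # array([k1, k2, k3, k4]) in kcal/mol, each constrained to >= 0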
def get_torsional_lines(
template_pdb,
system_xml,
qm_scan_file,
load_topology,
method,
dihedral_text_file,
):
"""
Returns the torsional lines for the XML forcefield file.
"""
opt_param = get_tor_params(
qm_scan_file=qm_scan_file,
template_pdb=template_pdb,
load_topology=load_topology,
system_xml=system_xml,
method=method,
)
dihedral_text = open(dihedral_text_file, "r")
dihedral_text_lines = dihedral_text.readlines()
atom_numbers = dihedral_text_lines[-1]
atom_index_from_1 = [
int(re.findall(r"\d+", atom_numbers)[0]),
int(re.findall(r"\d+", atom_numbers)[1]),
int(re.findall(r"\d+", atom_numbers)[2]),
int(re.findall(r"\d+", atom_numbers)[3]),
]
atom_index = [i - 1 for i in atom_index_from_1]
atom_index_lines = (
" "
+ "p1="
+ '"'
+ str(atom_index[0])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(atom_index[1])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(atom_index[2])
+ '"'
+ " "
+ "p4="
+ '"'
+ str(atom_index[3])
+ '"'
+ " "
)
tor_lines = []
for i in range(len(opt_param)):
line_to_append = (
" "
+ "<Torsion "
+ "k="
+ '"'
+ str(round(opt_param[i], 8))
+ '"'
+ atom_index_lines
+ "periodicity="
+ '"'
+ str(i + 1)
+ '"'
+ " "
+ "phase="
+ '"'
+ "0"
+ '"'
+ "/>"
)
# print(line_to_append)
tor_lines.append(line_to_append)
return tor_lines
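# Illustrative output (hypothetical atom indices and force constants, not
# taken from the original source): for opt_param = [1.2, 0.5, 0.25, 0.0]
# acting on atoms 3-5-7-9, the first line returned by get_torsional_lines()
# reads roughly:
#   <Torsion k="1.2" p1="3" p2="5" p3="7" p4="9" periodicity="1" phase="0"/>
# with one line per periodicity (1 to 4) and the phase fixed at 0.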
def singular_resid(pdbfile, qmmmrebind_init_file):
"""
    Writes a PDB file with all chain IDs set to A.
Parameters
----------
pdbfile: str
Input PDB file
qmmmrebind_init_file: str
Output PDB file
"""
ppdb = PandasPdb().read_pdb(pdbfile)
ppdb.df["HETATM"]["chain_id"] = "A"
ppdb.df["ATOM"]["chain_id"] = "A"
ppdb.to_pdb(
path=qmmmrebind_init_file, records=None, gz=False, append_newline=True
)
def relax_init_structure(
pdbfile,
prmtopfile,
qmmmrebindpdb,
sim_output="output.pdb",
sim_steps=100000,
):
"""
    Minimizes the system in the initial PDB file using the given
    topology (prmtop) file and then runs a short MD simulation.
Parameters
----------
pdbfile: str
Input PDB file.
prmtopfile : str
Input prmtop file.
    qmmmrebindpdb: str
Output PDB file.
sim_output: str
Simulation output trajectory file.
sim_steps: int
MD simulation steps.
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
pdb = simtk.openmm.app.PDBFile(pdbfile)
system = prmtop.createSystem(
nonbondedMethod=simtk.openmm.app.PME,
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=10000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.reporters.append(
simtk.openmm.app.PDBReporter(qmmmrebindpdb, sim_steps)
)
simulation.step(sim_steps)
command = "rm -rf " + sim_output
os.system(command)
def truncate(x):
"""
Returns a float or an integer with an exact number
of characters.
Parameters
----------
x: str
input value
"""
if len(str(int(float(x)))) == 1:
x = format(x, ".8f")
if len(str(int(float(x)))) == 2:
x = format(x, ".7f")
if len(str(int(float(x)))) == 3:
x = format(x, ".6f")
if len(str(int(float(x)))) == 4:
x = format(x, ".5f")
if len(str(x)) > 10:
x = round(x, 10)
return x
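# Examples (illustrative): values with up to four integer digits are padded
# to a fixed width of ten characters.
# >>> truncate(4.5)
# '4.50000000'
# >>> truncate(45.5)
# '45.5000000'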
def add_vectors_inpcrd(pdbfile, inpcrdfile):
"""
Adds periodic box dimensions to the inpcrd file
Parameters
----------
pdbfile: str
PDB file containing the periodic box information.
inpcrdfile: str
Input coordinate file.
"""
pdbfilelines = open(pdbfile, "r").readlines()
for i in pdbfilelines:
if "CRYST" in i:
            vector_list = re.findall(r"[-+]?\d*\.\d+|\d+", i)
            vector_list = [float(i) for i in vector_list]
            # The first match is the trailing "1" of the CRYST1 record name
            # itself, so keep only the six box lengths/angles that follow.
            vector_list = vector_list[1 : 1 + 6]
line_to_add = (
" "
+ truncate(vector_list[0])
+ " "
+ truncate(vector_list[1])
+ " "
+ truncate(vector_list[2])
+ " "
+ truncate(vector_list[3])
+ " "
+ truncate(vector_list[4])
+ " "
+ truncate(vector_list[5])
)
print(line_to_add)
with open(inpcrdfile, "a+") as f:
f.write(line_to_add)
def add_dim_prmtop(pdbfile, prmtopfile):
"""
Adds periodic box dimensions flag in the prmtop file.
Parameters
----------
prmtopfile: str
Input prmtop file.
pdbfile: str
PDB file containing the periodic box information.
"""
pdbfilelines = open(pdbfile, "r").readlines()
for i in pdbfilelines:
if "CRYST" in i:
vector_list = re.findall(r"[-+]?\d*\.\d+|\d+", i)
vector_list = [float(i) for i in vector_list]
vector_list = vector_list[1 : 1 + 6]
vector_list = [i / 10 for i in vector_list]
vector_list = [truncate(i) for i in vector_list]
vector_list = [i + "E+01" for i in vector_list]
line3 = (
" "
+ vector_list[3]
+ " "
+ vector_list[0]
+ " "
+ vector_list[1]
+ " "
+ vector_list[2]
)
print(line3)
line1 = "%FLAG BOX_DIMENSIONS"
line2 = "%FORMAT(5E16.8)"
with open(prmtopfile) as f1, open("intermediate.prmtop", "w") as f2:
for line in f1:
if line.startswith("%FLAG RADIUS_SET"):
line = line1 + "\n" + line2 + "\n" + line3 + "\n" + line
f2.write(line)
command = "rm -rf " + prmtopfile
os.system(command)
command = "mv intermediate.prmtop " + prmtopfile
os.system(command)
def add_period_prmtop(parm_file, ifbox):
"""
    Changes the value of the IFBOX flag in the prmtop / parm file if needed:
    set to 1 for a standard periodic box and 2 for a truncated octahedron.
"""
with open(parm_file) as f:
parm_lines = f.readlines()
lines_contain = []
for i in range(len(parm_lines)):
if parm_lines[i].startswith("%FLAG POINTERS"):
lines_contain.append(i + 4)
line = parm_lines[lines_contain[0]]
line_new = "%8s %6s %6s %6s %6s %6s %6s %6s %6s %6s" % (
re.findall(r"\d+", line)[0],
re.findall(r"\d+", line)[1],
re.findall(r"\d+", line)[2],
re.findall(r"\d+", line)[3],
re.findall(r"\d+", line)[4],
re.findall(r"\d+", line)[5],
re.findall(r"\d+", line)[6],
str(ifbox),
re.findall(r"\d+", line)[8],
re.findall(r"\d+", line)[9],
)
parm_lines[lines_contain[0]] = line_new + "\n"
with open(parm_file, "w") as f:
for i in parm_lines:
f.write(i)
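# Layout note: in a standard Amber prmtop the %FLAG POINTERS block stores ten
# integers per line, so the line four lines below the flag holds pointers
# 21-30, and IFBOX is the eighth entry on that line, which is the field
# replaced with str(ifbox) above.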
def add_solvent_pointers_prmtop(non_reparams_file, reparams_file):
"""
Adds the flag solvent pointers to the topology file.
"""
f_non_params = open(non_reparams_file, "r")
lines_non_params = f_non_params.readlines()
for i in range(len(lines_non_params)):
if "FLAG SOLVENT_POINTERS" in lines_non_params[i]:
to_begin = int(i)
solvent_pointers = lines_non_params[to_begin : to_begin + 3]
file = open(reparams_file, "a")
for i in solvent_pointers:
file.write(i)
def prmtop_calibration(
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
):
"""
    Standardizes the topology file by resetting the GB radii (mbondi3)
    and re-determining the molecule/solvent sections with ParmEd.
Parameters
----------
prmtopfile: str
Input prmtop file.
inpcrdfile: str
Input coordinate file.
"""
parm = parmed.load_file(prmtopfile, inpcrdfile)
parm_1 = parmed.tools.actions.changeRadii(parm, "mbondi3")
parm_1.execute()
parm_2 = parmed.tools.actions.setMolecules(parm)
parm_2.execute()
parm.save(prmtopfile, overwrite=True)
def run_openmm_prmtop_inpcrd(
pdbfile="system_qmmmrebind.pdb",
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
sim_output="output.pdb",
sim_steps=10000,
):
"""
Runs OpenMM simulation with inpcrd and prmtop files.
Parameters
----------
pdbfile: str
Input PDB file.
prmtopfile: str
Input prmtop file.
inpcrdfile: str
Input coordinate file.
sim_output: str
Output trajectory file.
sim_steps: int
Simulation steps.
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
inpcrd = simtk.openmm.app.AmberInpcrdFile(inpcrdfile)
system = prmtop.createSystem(
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
if inpcrd.boxVectors is None:
add_vectors_inpcrd(
pdbfile=pdbfile, inpcrdfile=inpcrdfile,
)
if inpcrd.boxVectors is not None:
simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
print(inpcrd.boxVectors)
simulation.context.setPositions(inpcrd.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=1000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(sim_steps)
def run_openmm_prmtop_pdb(
pdbfile="system_qmmmrebind.pdb",
prmtopfile="system_qmmmrebind.prmtop",
sim_output="output.pdb",
sim_steps=10000,
):
"""
Runs OpenMM simulation with pdb and prmtop files.
Parameters
----------
pdbfile: str
Input PDB file.
prmtopfile: str
Input prmtop file.
sim_output: str
Output trajectory file.
sim_steps: int
Simulation steps.
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
pdb = simtk.openmm.app.PDBFile(pdbfile)
system = prmtop.createSystem(
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=1000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(sim_steps)
def move_qmmmmrebind_files(
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
pdbfile="system_qmmmrebind.pdb",
):
"""
Moves QMMMReBind generated topology and parameter files
    to a new directory.
Parameters
----------
prmtopfile: str
QMMMReBind generated prmtop file.
inpcrdfile: str
QMMMReBind generated inpcrd file.
pdbfile: str
QMMMReBind generated PDB file.
"""
current_pwd = os.getcwd()
command = "rm -rf reparameterized_files"
os.system(command)
command = "mkdir reparameterized_files"
os.system(command)
shutil.copy(
current_pwd + "/" + prmtopfile,
current_pwd + "/" + "reparameterized_files" + "/" + prmtopfile,
)
shutil.copy(
current_pwd + "/" + inpcrdfile,
current_pwd + "/" + "reparameterized_files" + "/" + inpcrdfile,
)
shutil.copy(
current_pwd + "/" + pdbfile,
current_pwd + "/" + "reparameterized_files" + "/" + pdbfile,
)
def move_qm_files():
"""
    Moves the QM engine generated files to a new directory.
"""
current_pwd = os.getcwd()
command = "rm -rf qm_data"
os.system(command)
command = "mkdir qm_data"
os.system(command)
command = "cp -r " + "*.com* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.log* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.chk* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.fchk* " + current_pwd + "/" + "qm_data"
os.system(command)
def move_qmmmrebind_files():
"""
Moves all QMMMREBind files to a new directory.
"""
current_pwd = os.getcwd()
command = "rm -rf qmmmrebind_data"
os.system(command)
command = "mkdir qmmmrebind_data"
os.system(command)
command = "mv " + "*.sdf* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.txt* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.pdb* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.xml* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.chk* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.fchk* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.com* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.log* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.inpcrd* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.prmtop* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.parm7* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.out* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*run_command* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.dat* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.xyz* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
class PrepareQMMM:
"""
A class used to segregate the QM and MM regions.
    This class contains methods to remove the solvent, ions and all
    entities other than the receptor and the ligand. It also
    defines the Quantum Mechanical (QM) region and the Molecular
    Mechanical (MM) region based upon the distance of the ligand
    from the receptor and the chosen number of receptor residues. It
    is assumed that the initial PDB file lists the receptor
    followed by the ligand.
...
Attributes
----------
init_pdb : str
Initial PDB file containing the receptor-ligand complex with
solvent, ions, etc.
cleaned_pdb : str
Formatted PDB file containing only the receptor and the ligand.
guest_init_pdb : str
A separate ligand PDB file with atom numbers not beginning from 1.
host_pdb : str
A separate receptor PDB file with atom numbers beginning from 1.
guest_resname : str
Three letter residue ID for the ligand.
guest_pdb : str, optional
Ligand PDB file with atom numbers beginning from 1.
guest_xyz : str, optional
A text file of the XYZ coordinates of the ligand.
distance : float, optional
The distance required to define the QM region of the receptor.
This is the distance between the atoms of the ligand and the
atoms of the receptor.
residue_list : str, optional
A text file of the residue numbers of the receptor within the
proximity (as defined by the distance) from the ligand.
host_qm_atoms : str, optional
A text file of the atom numbers of the receptors in the QM
region.
host_mm_atoms : str, optional
A text file of the atom numbers of the receptors in the MM
region (all atoms except atoms in the QM region)
host_qm_pdb : str, optional
PDB file for the receptor's QM region.
host_mm_pdb : str, optional
PDB file for the receptor's MM region.
qm_pdb : str, optional
PDB file for the QM region (receptor's QM region and the
ligand).
mm_pdb : str, optional
PDB file for the MM region.
host_mm_region_I_atoms : str, optional
A text file of the atom numbers of the receptors in the MM
        region preceding the QM region.
host_mm_region_II_atoms : str, optional
A text file of the atom numbers of the receptors in the MM
region following the QM region.
host_mm_region_I_pdb : str, optional
        PDB file of the receptor in the MM region preceding the
QM region.
host_mm_region_II_pdb : str, optional
PDB file of the receptor in the MM region following the
QM region.
num_residues : int, optional
Number of residues required in the QM region of the receptor.
"""
def __init__(
self,
init_pdb,
distance,
num_residues,
guest_resname,
cleaned_pdb="system.pdb",
guest_init_pdb="guest_init.pdb",
host_pdb="host.pdb",
guest_pdb="guest_init_II.pdb",
guest_xyz="guest_coord.txt",
residue_list="residue_list.txt",
host_qm_atoms="host_qm.txt",
host_mm_atoms="host_mm.txt",
host_qm_pdb="host_qm.pdb",
host_mm_pdb="host_mm.pdb",
qm_pdb="qm.pdb",
mm_pdb="mm.pdb",
host_mm_region_I_atoms="host_mm_region_I.txt",
host_mm_region_II_atoms="host_mm_region_II.txt",
host_mm_region_I_pdb="host_mm_region_I.pdb",
host_mm_region_II_pdb="host_mm_region_II.pdb",
):
self.init_pdb = init_pdb
self.distance = distance
self.num_residues = num_residues
self.guest_resname = guest_resname
self.cleaned_pdb = cleaned_pdb
self.guest_init_pdb = guest_init_pdb
self.host_pdb = host_pdb
self.guest_pdb = guest_pdb
self.guest_xyz = guest_xyz
self.residue_list = residue_list
self.host_qm_atoms = host_qm_atoms
self.host_mm_atoms = host_mm_atoms
self.host_qm_pdb = host_qm_pdb
self.host_mm_pdb = host_mm_pdb
self.qm_pdb = qm_pdb
self.mm_pdb = mm_pdb
self.host_mm_region_I_atoms = host_mm_region_I_atoms
self.host_mm_region_II_atoms = host_mm_region_II_atoms
self.host_mm_region_I_pdb = host_mm_region_I_pdb
self.host_mm_region_II_pdb = host_mm_region_II_pdb
def clean_up(self):
"""
Reads the given PDB file, removes all entities except the
receptor and ligand and saves a new pdb file.
"""
ions = [
"Na+",
"Cs+",
"K+",
"Li+",
"Rb+",
"Cl-",
"Br-",
"F-",
"I-",
"Ca2",
]
intermediate_file_1 = self.cleaned_pdb[:-4] + "_intermediate_1.pdb"
intermediate_file_2 = self.cleaned_pdb[:-4] + "_intermediate_2.pdb"
command = (
"pdb4amber -i "
+ self.init_pdb
+ " -o "
+ intermediate_file_1
+ " --noter --dry"
)
os.system(command)
to_delete = (
intermediate_file_1[:-4] + "_nonprot.pdb",
intermediate_file_1[:-4] + "_renum.txt",
intermediate_file_1[:-4] + "_sslink",
intermediate_file_1[:-4] + "_water.pdb",
)
os.system("rm -rf " + " ".join(to_delete))
with open(intermediate_file_1) as f1, open(
intermediate_file_2, "w") as f2:
for line in f1:
if not any(ion in line for ion in ions):
f2.write(line)
with open(intermediate_file_2, "r") as f1:
filedata = f1.read()
filedata = filedata.replace("HETATM", "ATOM ")
with open(self.cleaned_pdb, "w") as f2:
f2.write(filedata)
command = "rm -rf " + intermediate_file_1 + " " + intermediate_file_2
os.system(command)
def create_host_guest(self):
"""
Saves separate receptor and ligand PDB files.
"""
with open(self.cleaned_pdb) as f1, open(self.host_pdb, "w") as f2:
for line in f1:
if not self.guest_resname in line and not "CRYST1" in line:
f2.write(line)
with open(self.cleaned_pdb) as f1, open(
self.guest_init_pdb, "w"
) as f2:
for line in f1:
if self.guest_resname in line or "END" in line:
f2.write(line)
def realign_guest(self):
"""
Saves a ligand PDB file with atom numbers beginning from 1.
"""
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_init_pdb)
to_subtract = min(ppdb.df["ATOM"]["atom_number"]) - 1
ppdb.df["ATOM"]["atom_number"] = (
ppdb.df["ATOM"]["atom_number"] - to_subtract
)
intermediate_file_1 = self.guest_pdb[:-4] + "_intermediate_1.pdb"
intermediate_file_2 = self.guest_pdb[:-4] + "_intermediate_2.pdb"
ppdb.to_pdb(path=intermediate_file_1)
command = (
"pdb4amber -i "
+ intermediate_file_1
+ " -o "
+ intermediate_file_2
)
os.system(command)
to_delete = (
intermediate_file_2[:-4] + "_nonprot.pdb",
intermediate_file_2[:-4] + "_renum.txt",
intermediate_file_2[:-4] + "_sslink",
)
os.system("rm -rf " + " ".join(to_delete))
with open(intermediate_file_2, "r") as f1:
filedata = f1.read()
filedata = filedata.replace("HETATM", "ATOM ")
with open(self.guest_pdb, "w") as f2:
f2.write(filedata)
command = "rm -rf " + intermediate_file_1 + " " + intermediate_file_2
os.system(command)
def get_guest_coord(self):
"""
Saves a text file of the XYZ coordinates of the ligand.
"""
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
xyz = ppdb.df["ATOM"][["x_coord", "y_coord", "z_coord"]]
xyz_to_list = xyz.values.tolist()
np.savetxt(self.guest_xyz, xyz_to_list)
def get_qm_resids(self):
"""
Saves a text file of the residue numbers of the receptor within the
proximity (as defined by the distance) from the ligand.
"""
guest_coord_list = np.loadtxt(self.guest_xyz)
host_atom_list = []
for i in range(len(guest_coord_list)):
reference_point = guest_coord_list[i]
# TODO: move reads outside of loop
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
distances = ppdb.distance(xyz=reference_point, records=("ATOM"))
all_within_distance = ppdb.df["ATOM"][
distances < float(self.distance)
]
host_df = all_within_distance["atom_number"]
host_list = host_df.values.tolist()
host_atom_list.append(host_list)
host_atom_list = list(itertools.chain(*host_atom_list))
host_atom_list = set(host_atom_list)
host_atom_list = list(host_atom_list)
host_atom_list.sort()
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
index_list = []
for i in host_atom_list:
indices = np.where(df["atom_number"] == i)
indices = list(indices)[0]
indices = list(indices)
index_list.append(indices)
index_list = list(itertools.chain.from_iterable(index_list))
df1 = df.iloc[
index_list,
]
# TODO: make it write list of integers
resid_num = list(df1.residue_number.unique())
np.savetxt(self.residue_list, resid_num, fmt="%i")
def get_host_qm_mm_atoms(self):
"""
Saves a text file of the atom numbers of the receptors in the QM
region and MM region separately.
"""
resid_num = np.loadtxt(self.residue_list)
# approximated_res_list = [int(i) for i in resid_num]
approximated_res_list = []
        # Build a window of num_residues residues centred on the median
        # residue number of the residues found within the cutoff distance.
for i in range(
int(statistics.median(resid_num))
- int(int(self.num_residues) / 2),
int(statistics.median(resid_num))
+ int(int(self.num_residues) / 2),
):
approximated_res_list.append(i)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
host_index_nested_list = []
for i in approximated_res_list:
indices = np.where(df["residue_number"] == i)
#TODO: the program seems to error when this line is removed, which
# makes no sense.
indices = list(indices)[0]
indices = list(indices)
host_index_nested_list.append(indices)
host_index_list = list(
itertools.chain.from_iterable(host_index_nested_list)
)
df_atom = df.iloc[host_index_list]
df_atom_number = df_atom["atom_number"]
host_atom_list = df_atom_number.values.tolist()
selected_atoms = []
selected_atoms.extend(host_atom_list)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
len_atoms = []
for i in range(len(ppdb.df["ATOM"])):
len_atoms.append(i + 1)
non_selected_atoms = list(set(len_atoms).difference(selected_atoms))
assert len(non_selected_atoms) + len(selected_atoms) == len(len_atoms),\
"Sum of the atoms in the selected and non-selected region "\
"does not equal the length of list of total atoms."
np.savetxt(self.host_qm_atoms, selected_atoms, fmt="%i")
np.savetxt(self.host_mm_atoms, non_selected_atoms, fmt="%i")
def save_host_pdbs(self):
"""
Saves a PDB file for the receptor's QM region and MM
region separately.
"""
selected_atoms = np.loadtxt(self.host_qm_atoms)
# TODO: not necessary if savetxt writes in integers
selected_atoms = [int(i) for i in selected_atoms]
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
for i in selected_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_pdb, records=None, gz=False, append_newline=True,
)
non_selected_atoms = np.loadtxt(self.host_mm_atoms)
non_selected_atoms = [int(i) for i in non_selected_atoms]
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
for i in non_selected_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_qm_pdb, records=None, gz=False, append_newline=True,
)
def get_host_mm_region_atoms(self):
"""
Saves a text file for the atoms of the receptor's MM region
preceding the QM region and saves another text file for the
        atoms of the receptor's MM region following the QM region.
"""
resid_num = np.loadtxt(self.residue_list)
approximated_res_list = []
for i in range(
int(statistics.median(resid_num))
- int(int(self.num_residues) / 2),
int(statistics.median(resid_num))
+ int(int(self.num_residues) / 2),
):
approximated_res_list.append(i)
# print(approximated_res_list)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["residue_number"]]
res_list = list(set(df["residue_number"].to_list()))
res_mm_list = list(set(res_list).difference(approximated_res_list))
# print(res_mm_list)
res_mm_region_I_list = []
# TODO: This can probably be made into a single loop by comparing i
# to the maximum value within approximated_res_list
for i in res_mm_list:
for j in approximated_res_list:
if i < j:
res_mm_region_I_list.append(i)
res_mm_region_I_list = list(set(res_mm_region_I_list))
res_mm_region_II_list = list(
set(res_mm_list).difference(res_mm_region_I_list)
)
# print(res_mm_region_II_list)
ppdb.read_pdb(self.host_mm_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
mm_region_I_index_nested_list = []
for i in res_mm_region_I_list:
indices = np.where(df["residue_number"] == i)
# TODO: again, this is strange code
indices = list(indices)[0]
indices = list(indices)
mm_region_I_index_nested_list.append(indices)
mm_region_I_index_list = list(
itertools.chain.from_iterable(mm_region_I_index_nested_list)
)
df_atom = df.iloc[mm_region_I_index_list]
df_atom_number = df_atom["atom_number"]
mm_region_I_atom_list = df_atom_number.values.tolist()
mm_region_I_atoms = []
mm_region_I_atoms.extend(mm_region_I_atom_list)
mm_region_II_index_nested_list = []
for i in res_mm_region_II_list:
indices = np.where(df["residue_number"] == i)
# TODO: again, this is strange code
indices = list(indices)[0]
indices = list(indices)
mm_region_II_index_nested_list.append(indices)
mm_region_II_index_list = list(
itertools.chain.from_iterable(mm_region_II_index_nested_list)
)
df_atom = df.iloc[mm_region_II_index_list]
df_atom_number = df_atom["atom_number"]
mm_region_II_atom_list = df_atom_number.values.tolist()
mm_region_II_atoms = []
mm_region_II_atoms.extend(mm_region_II_atom_list)
ppdb.read_pdb(self.host_mm_pdb)
len_atoms = []
for i in range(len(ppdb.df["ATOM"])):
len_atoms.append(i + 1)
assert len(mm_region_I_atoms) + len(mm_region_II_atoms) == len(len_atoms),\
"Sum of the atoms in the selected and non-selected region "\
"does not equal the length of list of total atoms."
np.savetxt(self.host_mm_region_I_atoms, mm_region_I_atoms, fmt="%i")
np.savetxt(self.host_mm_region_II_atoms, mm_region_II_atoms, fmt="%i")
def save_host_mm_regions_pdbs(self):
"""
Saves a PDB file for the receptor's MM region preceding
the QM region and saves another PDB file for the receptor's
        MM region following the QM region.
"""
mm_region_I_atoms = np.loadtxt(self.host_mm_region_I_atoms)
mm_region_I_atoms = [int(i) for i in mm_region_I_atoms]
mm_region_II_atoms = np.loadtxt(self.host_mm_region_II_atoms)
mm_region_II_atoms = [int(i) for i in mm_region_II_atoms]
# NOTE: this is a slightly confusing way to define the atoms to
# write to a PDB - the members that are *not* in a section, rather
# than the members that are.
ppdb = PandasPdb()
ppdb.read_pdb(self.host_mm_pdb)
for i in mm_region_II_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_region_I_pdb,
records=None,
gz=False,
append_newline=True,
)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_mm_pdb)
for i in mm_region_I_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_region_II_pdb,
records=None,
gz=False,
append_newline=True,
)
def get_qm_mm_regions(self):
"""
        Saves separate PDB files for the QM and MM regions.
        The QM region comprises the receptor's QM region
        and the entire ligand, while the MM region comprises
        the remaining (non-selected) part of the receptor.
"""
with open(self.host_qm_pdb) as f1, open(self.qm_pdb, "w") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
with open(self.guest_pdb) as f1, open(self.qm_pdb, "a") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
f2.write("END")
with open(self.host_mm_pdb) as f1, open(self.mm_pdb, "w") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
f2.write("END")
class PrepareGaussianGuest:
"""
A class used to prepare the QM engine input file (Gaussian)
for the ligand and run QM calculations with appropriate
keywords.
    This class contains methods to write an input file (.com extension)
    for the QM engine. It then runs a QM calculation with the given
    basis set and functional. The checkpoint file is then converted to
    a formatted checkpoint file. The output files (.log, .chk, and .fchk)
    are then used to extract the ligand's force field parameters.
...
Attributes
----------
charge : int, optional
Charge of the ligand.
multiplicity: int, optional
Spin Multiplicity (2S+1) of the ligand where S represents
the total spin of the ligand.
guest_pdb: str, optional
Ligand PDB file with atom numbers beginning from 1.
n_processors : int, optional
Number of processors to be used for Gaussian program to run and
set in %NProcShared command of Gaussian.
memory : int, optional
Memory (in GB) to be used set in %Mem command of Gaussian.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
optimisation: str, optional
set to "OPT" to perform a geometry optimization on the ligand
specified in the system; else set to an empty string.
frequency: str, optional
set to "FREQ" for Gaussian to perform a frequency calculation;
else set to an empty string.
add_keywords_I: str, optional
Specifies the integration grid.
add_keywords_II: str, optional
Specifies the QM engine to select one of the methods for
analyzing the electron density of the system. Methods used
are based on fitting the molecular electrostatic potential.
Methods used are : POP=CHELPG (Charges from Electrostatic
Potentials using a Grid based method) and POP=MK
(Merz-Singh-Kollman scheme)
add_keywords_III: str, optional
Used to include the IOp keyword (to set the internal options to
specific values) in the Gaussian command.
gauss_out_file: str, optional
This file contains the output script obtained after running
the Gaussian QM calculation.
fchk_out_file: str, optional
Formatted checkpoint file obtained from the checkpoint file
using formchk command.
"""
def __init__(
self,
charge=0,
multiplicity=1,
guest_pdb="guest_init_II.pdb",
n_processors=12,
memory=50,
functional="B3LYP",
basis_set="6-31G",
optimisation="OPT",
frequency="FREQ",
add_keywords_I="INTEGRAL=(GRID=ULTRAFINE)",
add_keywords_II="POP(MK,READRADII)",
add_keywords_III="IOP(6/33=2,6/42=6)",
gauss_out_file="guest.out",
fchk_out_file="guest_fchk.out",
):
self.charge = charge
self.multiplicity = multiplicity
self.guest_pdb = guest_pdb
self.n_processors = n_processors
self.memory = memory
self.functional = functional
self.basis_set = basis_set
self.optimisation = optimisation
self.frequency = frequency
self.gauss_out_file = gauss_out_file
self.fchk_out_file = fchk_out_file
self.add_keywords_I = add_keywords_I
self.add_keywords_II = add_keywords_II
self.add_keywords_III = add_keywords_III
def write_input(self):
"""
Writes a Gaussian input file for the ligand.
"""
command_line_1 = "%Chk = " + self.guest_pdb[:-4] + ".chk"
command_line_2 = "%Mem = " + str(self.memory) + "GB"
command_line_3 = "%NProcShared = " + str(self.n_processors)
command_line_4 = (
"# "
+ self.functional
+ " "
+ self.basis_set
+ " "
+ self.optimisation
+ " "
+ self.frequency
+ " "
+ self.add_keywords_I
+ " "
+ self.add_keywords_II
+ " "
+ self.add_keywords_III
)
command_line_5 = " "
command_line_6 = self.guest_pdb[:-4] + " " + "gaussian input file"
command_line_7 = " "
command_line_8 = str(self.charge) + " " + str(self.multiplicity)
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_2 = df[["x_coord", "y_coord", "z_coord"]]
df_merged = pd.concat([df_1, df_2], axis=1)
command_line_9 = df_merged.to_string(header=False, index=False)
command_line_10 = " "
command = [
command_line_1,
command_line_2,
command_line_3,
command_line_4,
command_line_5,
command_line_6,
command_line_7,
command_line_8,
command_line_9,
command_line_10,
]
commands = "\n".join(command)
with open(self.guest_pdb[:-4] + ".com", "w") as f:
f.write(commands)
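    # Illustrative output (with the default attributes and the hypothetical
    # ligand file "guest_init_II.pdb") the generated guest_init_II.com starts
    # roughly as:
    #   %Chk = guest_init_II.chk
    #   %Mem = 50GB
    #   %NProcShared = 12
    #   # B3LYP 6-31G OPT FREQ INTEGRAL=(GRID=ULTRAFINE) POP(MK,READRADII) IOP(6/33=2,6/42=6)
    #
    #   guest_init_II gaussian input file
    #
    #   0 1
    #   <element  x  y  z lines taken from the PDB>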
def run_gaussian(self):
"""
Runs the Gaussian QM calculation for the ligand locally.
"""
execute_command = (
"g16"
+ " < "
+ self.guest_pdb[:-4]
+ ".com"
+ " > "
+ self.guest_pdb[:-4]
+ ".log"
)
with open(self.gauss_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_fchk(self):
"""
Converts the Gaussian checkpoint file (.chk) to a formatted checkpoint
file (.fchk).
"""
execute_command = (
"formchk"
+ " "
+ self.guest_pdb[:-4]
+ ".chk"
+ " "
+ self.guest_pdb[:-4]
+ ".fchk"
)
with open(self.fchk_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
class PrepareGaussianHostGuest:
"""
A class used to prepare the QM engine input file (Gaussian) for
the receptor - ligand complex and run the QM calculations with
the appropriate keywords.
    This class contains methods to write an input file (.com extension)
    for the QM engine for the receptor - ligand complex. It then runs
    a QM calculation with the given basis set and functional. The
    checkpoint file is then converted to a formatted checkpoint file.
    The output files (.log, .chk, and .fchk) are then used to extract
    charges for the ligand and the receptor.
...
Attributes
----------
charge : int, optional
Total charge of the receptor - ligand complex.
multiplicity : int, optional
        Spin Multiplicity (2S+1) of the receptor - ligand complex, where
        S represents its total spin.
guest_pdb : str, optional
Ligand PDB file with atom numbers beginning from 1.
host_qm_pdb : str, optional
PDB file for the receptor's QM region.
n_processors : int, optional
Number of processors to be used for Gaussian program to run and
set in %NProcShared command of Gaussian.
memory : int, optional
Memory (in GB) to be used set in %Mem command of Gaussian.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
optimisation: str, optional
set to "OPT" to perform a geometry optimization on the ligand
specified in the system; else set to an empty string.
frequency: str, optional
set to "FREQ" for Gaussian to perform a frequency calculation;
else set to an empty string.
add_keywords_I: str, optional
Specifies the integration grid.
add_keywords_II: str, optional
Specifies the QM engine to select one of the methods for
analyzing the electron density of the system. Methods used
are based on fitting the molecular electrostatic potential.
Methods used are : POP=CHELPG (Charges from Electrostatic
Potentials using a Grid based method) and POP=MK
(Merz-Singh-Kollman scheme)
add_keywords_III: str, optional
Used to include the IOp keyword (to set the internal options to
specific values) in the Gaussian command.
gauss_system_out_file : str, optional
This file contains the output script obtained after running
the Gaussian QM calculation.
fchk_system_out_file : str, optional
Formatted checkpoint file obtained from the checkpoint file
using formchk command.
host_guest_input : str, optional
Gaussian input file (.com extension) for the receptor - ligand
QM region.
qm_guest_charge_parameter_file : str, optional
        File containing the charges of the ligand atoms and their
        corresponding atom names. The charges obtained are polarized
        by the surrounding receptor region.
qm_host_charge_parameter_file : str, optional
File containing the charges of the QM region of the receptor.
qm_guest_atom_charge_parameter_file : str, optional
        File containing the charges of the ligand atoms. The charges
        obtained are polarized by the surrounding receptor region.
"""
def __init__(
self,
charge=0,
multiplicity=1,
guest_pdb="guest_init_II.pdb",
host_qm_pdb="host_qm.pdb",
n_processors=12,
memory=50,
functional="B3LYP",
basis_set="6-31G",
optimisation="",
frequency="",
add_keywords_I="INTEGRAL=(GRID=ULTRAFINE)",
add_keywords_II="POP(MK,READRADII)",
add_keywords_III="IOP(6/33=2,6/42=6) SCRF=PCM",
gauss_system_out_file="system_qm.out",
fchk_system_out_file="system_qm_fchk.out",
host_guest_input="host_guest.com",
qm_guest_charge_parameter_file="guest_qm_surround_charges.txt",
qm_host_charge_parameter_file="host_qm_surround_charges.txt",
qm_guest_atom_charge_parameter_file="guest_qm_atom_surround_charges.txt",
):
self.charge = charge
self.multiplicity = multiplicity
self.guest_pdb = guest_pdb
self.host_qm_pdb = host_qm_pdb
self.n_processors = n_processors
self.memory = memory
self.functional = functional
self.basis_set = basis_set
self.optimisation = optimisation
self.frequency = frequency
self.add_keywords_I = add_keywords_I
self.add_keywords_II = add_keywords_II
self.add_keywords_III = add_keywords_III
self.gauss_system_out_file = gauss_system_out_file
self.fchk_system_out_file = fchk_system_out_file
self.host_guest_input = host_guest_input
self.qm_guest_charge_parameter_file = qm_guest_charge_parameter_file
self.qm_host_charge_parameter_file = qm_host_charge_parameter_file
self.qm_guest_atom_charge_parameter_file = (
qm_guest_atom_charge_parameter_file
)
def write_input(self):
"""
Writes a Gaussian input file for the receptor - ligand QM region.
"""
command_line_1 = "%Chk = " + self.host_guest_input[:-4] + ".chk"
command_line_2 = "%Mem = " + str(self.memory) + "GB"
command_line_3 = "%NProcShared = " + str(self.n_processors)
command_line_4 = (
"# "
+ self.functional
+ " "
+ self.basis_set
+ " "
+ self.optimisation
+ " "
+ self.frequency
+ " "
+ self.add_keywords_I
+ " "
+ self.add_keywords_II
+ " "
+ self.add_keywords_III
)
command_line_5 = " "
command_line_6 = "Gaussian Input File"
command_line_7 = " "
command_line_8 = str(self.charge) + " " + str(self.multiplicity)
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_3 = df[["x_coord", "y_coord", "z_coord"]]
df_2 = pd.Series(["0"] * len(df), name="decide_freeze")
df_merged_1 = pd.concat([df_1, df_2, df_3], axis=1)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_qm_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_3 = df[["x_coord", "y_coord", "z_coord"]]
df_2 = pd.Series(["0"] * len(df), name="decide_freeze")
df_merged_2 = pd.concat([df_1, df_2, df_3], axis=1)
df_merged = pd.concat([df_merged_1, df_merged_2], axis=0)
command_line_9 = df_merged.to_string(header=False, index=False)
command_line_10 = " "
command = [
command_line_1,
command_line_2,
command_line_3,
command_line_4,
command_line_5,
command_line_6,
command_line_7,
command_line_8,
command_line_9,
command_line_10,
]
commands = "\n".join(command)
with open(self.host_guest_input, "w") as f:
f.write(commands)
def run_gaussian(self):
"""
Runs the Gaussian QM calculation for the ligand - receptor region
locally.
"""
execute_command = (
"g16"
+ " < "
+ self.host_guest_input
+ " > "
+ self.host_guest_input[:-4]
+ ".log"
)
with open(self.gauss_system_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_fchk(self):
"""
Converts the Gaussian checkpoint file (.chk) to a formatted checkpoint
file (.fchk).
"""
execute_command = (
"formchk"
+ " "
+ self.host_guest_input[:-4]
+ ".chk"
+ " "
+ self.host_guest_input[:-4]
+ ".fchk"
)
with open(self.fchk_system_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_qm_host_guest_charges(self):
"""
        Extracts charge information for the receptor - ligand QM region.
"""
log_file = self.host_guest_input[:-4] + ".log"
with open(log_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Fitting point charges to electrostatic potential" in lines[i]:
to_begin = int(i)
if " Sum of ESP charges =" in lines[i]:
to_end = int(i)
        # The slice below assumes that the per-atom ESP rows start four
        # lines after the "Fitting point charges..." header line.
        charges = lines[to_begin + 4 : to_end]
charge_list = []
for i in range(len(charges)):
charge_list.append(charges[i].strip().split())
charge_list_value = []
atom_list = []
for i in range(len(charge_list)):
charge_list_value.append(charge_list[i][2])
atom_list.append(charge_list[i][1])
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
df_guest = ppdb.df["ATOM"]
number_guest_atoms = df_guest.shape[0]
data_tuples = list(zip(atom_list, charge_list_value))
df_charge = pd.DataFrame(data_tuples, columns=["Atom", "Charge"])
number_host_atoms = df_charge.shape[0] - number_guest_atoms
df_charge_guest = df_charge.head(number_guest_atoms)
df_charge_host = df_charge.tail(number_host_atoms)
df_charge_only_guest = df_charge_guest["Charge"]
df_charge_guest.to_csv(
self.qm_guest_charge_parameter_file,
index=False,
header=False,
sep=" ",
)
df_charge_host.to_csv(
self.qm_host_charge_parameter_file,
index=False,
header=False,
sep=" ",
)
df_charge_only_guest.to_csv(
self.qm_guest_atom_charge_parameter_file,
index=False,
header=False,
sep=" ",
)
class ParameterizeGuest:
"""
A class used to obtain force field parameters for the ligand (bond,
angle and charge parameters) from QM calculations.
    This class contains methods to process the Gaussian QM output
    files (.chk, .fchk and .log files). Methods
    in the class extract the unprocessed hessian matrix from the
    Gaussian QM calculations, process it and use the Modified
    Seminario Method to obtain the bond and angle parameters. The
    class also extracts the QM charges from the log file.
...
Attributes
----------
xyz_file: str, optional
XYZ file for ligand coordinates obtained from its corresponding
formatted checkpoint file.
coordinate_file: str, optional
Text file containing the ligand coordinates (extracted
from the formatted checkpoint file).
unprocessed_hessian_file: str, optional
Unprocessed hessian matrix of the ligand obtained from the
formatted checkpoint file.
bond_list_file: str, optional
Text file containing the bond information of the ligand extracted
from the log file.
angle_list_file: str, optional
Text file containing the angle information of the ligand extracted
from the log file.
hessian_file: str, optional
Processed hessian matrix of the ligand.
atom_names_file: str, optional
Text file containing the list of atom names from the fchk file.
bond_parameter_file: str, optional
Text file containing the bond parameters for the ligand obtained
using the Modified Seminario method.
angle_parameter_file: str, optional
Text file containing the angle parameters of the ligand obtained
        using the Modified Seminario method.
charge_parameter_file: str, optional
Text file containing the QM charges of the ligand.
guest_pdb: str, optional
Ligand PDB file with atom numbers beginning from 1.
proper_dihedral_file: str, optional
A text file containing proper dihedral angles of the ligand.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
"""
def __init__(
self,
xyz_file="guest_coords.xyz",
coordinate_file="guest_coordinates.txt",
unprocessed_hessian_file="guest_unprocessed_hessian.txt",
bond_list_file="guest_bond_list.txt",
angle_list_file="guest_angle_list.txt",
hessian_file="guest_hessian.txt",
atom_names_file="guest_atom_names.txt",
bond_parameter_file="guest_bonds.txt",
angle_parameter_file="guest_angles.txt",
charge_parameter_file="guest_qm_surround_charges.txt",
guest_pdb="guest_init_II.pdb",
proper_dihedral_file="proper_dihedrals.txt",
functional="B3LYP",
basis_set="6-31G",
):
self.xyz_file = xyz_file
self.coordinate_file = coordinate_file
self.unprocessed_hessian_file = unprocessed_hessian_file
self.bond_list_file = bond_list_file
self.angle_list_file = angle_list_file
self.hessian_file = hessian_file
self.atom_names_file = atom_names_file
self.bond_parameter_file = bond_parameter_file
self.angle_parameter_file = angle_parameter_file
self.charge_parameter_file = charge_parameter_file
self.guest_pdb = guest_pdb
self.proper_dihedral_file = proper_dihedral_file
self.functional = functional
self.basis_set = basis_set
def get_xyz(self):
"""
Saves XYZ file from the formatted checkpoint file.
"""
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
to_begin = int(i)
cartesian_coords = lines[
to_begin + 1 : to_begin + 1 + int(math.ceil(no_coordinates / 5))
]
cartesian_list = []
for i in range(len(cartesian_coords)):
cartesian_list.append(cartesian_coords[i].strip().split())
coordinates_list = [
item for sublist in cartesian_list for item in sublist
]
# Converted from Atomic units (Bohrs) to Angstroms
list_coords = [float(x) * BOHRS_PER_ANGSTROM for x in coordinates_list]
for i in range(len(lines)):
if "Atomic numbers" in lines[i]:
to_begin = int(i)
if "Nuclear charges" in lines[i]:
to_end = int(i)
atomic_number_strings = lines[to_begin + 1 : to_end]
atom_numbers_nested = []
for i in range(len(atomic_number_strings)):
atom_numbers_nested.append(atomic_number_strings[i].strip().split())
numbers = [item for sublist in atom_numbers_nested for item in sublist]
N = int(no_coordinates / 3)
# Opens the new xyz file
with open(self.xyz_file, "w") as file:
file.write(str(N) + "\n \n")
coords = np.zeros((N, 3))
n = 0
names = []
# Gives name for atomic number
for x in range(0, len(numbers)):
names.append(element_list[int(numbers[x]) - 1][1])
# Print coordinates to new input_coords.xyz file
for i in range(0, N):
for j in range(0, 3):
coords[i][j] = list_coords[n]
n = n + 1
file.write(
                    names[i]
                    + " "
                    + str(round(coords[i][0], 3))
+ " "
+ str(round(coords[i][1], 3))
+ " "
+ str(round(coords[i][2], 3))
+ "\n"
)
np.savetxt(self.coordinate_file, coords, fmt="%s")
def get_unprocessed_hessian(self):
"""
Saves a text file of the unprocessed hessian matrix from the
formatted checkpoint file.
"""
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Cartesian Force Constants" in lines[i]:
no_hessian = re.findall(r"\d+|\d+.\d+", lines[i])
no_hessian = int(no_hessian[0])
to_begin = int(i)
hessian = lines[
to_begin + 1 : to_begin + 1 + int(math.ceil(no_hessian / 5))
]
hessian_list = []
for i in range(len(hessian)):
hessian_list.append(hessian[i].strip().split())
unprocessed_Hessian = [
item for sublist in hessian_list for item in sublist
]
np.savetxt(
self.unprocessed_hessian_file, unprocessed_Hessian, fmt="%s",
)
def get_bond_angles(self):
"""
Saves a text file containing bonds and angles from the Gaussian
log file.
"""
log_file = self.guest_pdb[:-4] + ".log"
with open(log_file, "r") as fid:
tline = fid.readline()
bond_list = []
angle_list = []
tmp = "R" # States if bond or angle
# Finds the bond and angles from the .log file
while tline:
tline = fid.readline()
# Line starts at point when bond and angle list occurs
if (
len(tline) > 80
and tline[0:81].strip()
== "! Name Definition Value Derivative Info. !"
):
tline = fid.readline()
tline = fid.readline()
# Stops when all bond and angles recorded
while (tmp[0] == "R") or (tmp[0] == "A"):
line = tline.split()
tmp = line[1]
# Bond or angles listed as string
list_terms = line[2][2:-1]
# Bond List
if tmp[0] == "R":
x = list_terms.split(",")
# Subtraction due to python array indexing at 0
x = [(int(i) - 1) for i in x]
bond_list.append(x)
# Angle List
if tmp[0] == "A":
x = list_terms.split(",")
# Subtraction due to python array indexing at 0
x = [(int(i) - 1) for i in x]
angle_list.append(x)
tline = fid.readline()
# Leave loop
tline = -1
np.savetxt(self.bond_list_file, bond_list, fmt="%s")
np.savetxt(self.angle_list_file, angle_list, fmt="%s")
def get_hessian(self):
"""
Extracts hessian matrix from the unprocessed hessian matrix
and saves into a new file.
"""
unprocessed_Hessian = np.loadtxt(self.unprocessed_hessian_file)
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
N = int(no_coordinates / 3)
length_hessian = 3 * N
hessian = np.zeros((length_hessian, length_hessian))
m = 0
# Write the hessian in a 2D array format
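# The fchk stores only the lower triangle of the Hessian row by row,
# so the packed array holds 3N*(3N+1)/2 values; each element is
# mirrored across the diagonal below to keep the matrix symmetric.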
for i in range(0, length_hessian):
for j in range(0, (i + 1)):
hessian[i][j] = unprocessed_Hessian[m]
hessian[j][i] = unprocessed_Hessian[m]
m = m + 1
hessian = (hessian * HARTREE_PER_KCAL_MOL) / (
BOHRS_PER_ANGSTROM ** 2
) # Change from Hartree/bohr to kcal/mol/ang
np.savetxt(self.hessian_file, hessian, fmt="%s")
def get_atom_names(self):
"""
Saves a list of atom names from the formatted checkpoint file.
"""
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Atomic numbers" in lines[i]:
to_begin = int(i)
if "Nuclear charges" in lines[i]:
to_end = int(i)
atomic_numbers = lines[to_begin + 1 : to_end]
atom_numbers = []
for i in range(len(atomic_numbers)):
atom_numbers.append(atomic_numbers[i].strip().split())
numbers = [item for sublist in atom_numbers for item in sublist]
names = []
# Gives name for atomic number
for x in range(0, len(numbers)):
names.append(element_list[int(numbers[x]) - 1][1])
atom_names = []
for i in range(0, len(names)):
atom_names.append(names[i].strip() + str(i + 1))
np.savetxt(self.atom_names_file, atom_names, fmt="%s")
def get_bond_angle_params(self):
"""
Saves the bond and angle parameter files obtained from
the formatted checkpoint file.
"""
fchk_file = self.guest_pdb[:-4] + ".fchk"
with open(fchk_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Current cartesian coordinates" in lines[i]:
no_coordinates = re.findall(r"\d+|\d+.\d+", lines[i])
no_coordinates = int(no_coordinates[0])
N = int(no_coordinates / 3)
coords = np.loadtxt(self.coordinate_file)
hessian = np.loadtxt(self.hessian_file)
bond_list = np.loadtxt(self.bond_list_file, dtype=int)
atom_names = np.loadtxt(self.atom_names_file, dtype=str)
# Find bond lengths
bond_lengths = np.zeros((N, N))
for i in range(0, N):
for j in range(0, N):
diff_i_j = np.array(coords[i, :]) - np.array(coords[j, :])
bond_lengths[i][j] = np.linalg.norm(diff_i_j)
eigenvectors = np.empty((3, 3, N, N), dtype=complex)
eigenvalues = np.empty((N, N, 3), dtype=complex)
partial_hessian = np.zeros((3, 3))
for i in range(0, N):
for j in range(0, N):
partial_hessian = hessian[
(i * 3) : ((i + 1) * 3), (j * 3) : ((j + 1) * 3)
]
[a, b] = np.linalg.eig(partial_hessian)
eigenvalues[i, j, :] = a
eigenvectors[:, :, i, j] = b
# Modified Seminario method to find the bond parameters and
# print them to file
file_bond = open(self.bond_parameter_file, "w")
k_b = np.zeros(len(bond_list))
bond_length_list = np.zeros(len(bond_list))
unique_values_bonds = [] # Used to find average values
for i in range(0, len(bond_list)):
AB = force_constant_bond(
bond_list[i][0],
bond_list[i][1],
eigenvalues,
eigenvectors,
coords,
)
BA = force_constant_bond(
bond_list[i][1],
bond_list[i][0],
eigenvalues,
eigenvectors,
coords,
)
# Order of bonds sometimes causes slight differences,
# find the mean
k_b[i] = np.real((AB + BA) / 2)
# Vibrational_scaling takes into account DFT deficiencies /
# anharmonicity
vibrational_scaling = get_vibrational_scaling(
functional=self.functional, basis_set=self.basis_set
)
vibrational_scaling_squared = vibrational_scaling ** 2
k_b[i] = k_b[i] * vibrational_scaling_squared
bond_length_list[i] = bond_lengths[bond_list[i][0]][
bond_list[i][1]
]
file_bond.write(
atom_names[bond_list[i][0]]
+ "-"
+ atom_names[bond_list[i][1]]
+ " "
)
file_bond.write(
str("%#.5g" % k_b[i])
+ " "
+ str("%#.4g" % bond_length_list[i])
+ " "
+ str(bond_list[i][0] + 1)
+ " "
+ str(bond_list[i][1] + 1)
)
file_bond.write("\n")
unique_values_bonds.append(
[
atom_names[bond_list[i][0]],
atom_names[bond_list[i][1]],
k_b[i],
bond_length_list[i],
1,
]
)
file_bond.close()
angle_list = np.loadtxt(self.angle_list_file, dtype=int)
# Modified Seminario method to find the angle parameters
# and print them to file
file_angle = open(self.angle_parameter_file, "w")
k_theta = np.zeros(len(angle_list))
theta_0 = np.zeros(len(angle_list))
unique_values_angles = [] # Used to find average values
# Modified Seminario part goes here ...
# Connectivity information for Modified Seminario Method
central_atoms_angles = []
# A structure is created with the index giving the central
# atom of the angle,
# an array then lists the angles with that central atom.
# i.e. central_atoms_angles{3} contains an array of angles
# with central atom 3
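# e.g. if angle_list rows j1 = (1, 3, 5) and j2 = (2, 3, 7) both have
# central atom 3, then central_atoms_angles[3] holds
# [[1, 5, j1], [5, 1, j1], [2, 7, j2], [7, 2, j2]] before sorting.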
for i in range(0, len(coords)):
central_atoms_angles.append([])
for j in range(0, len(angle_list)):
if i == angle_list[j][1]:
# For angle ABC, atoms A C are written to array
AC_array = [angle_list[j][0], angle_list[j][2], j]
central_atoms_angles[i].append(AC_array)
# For angle ABC, atoms C A are written to array
CA_array = [angle_list[j][2], angle_list[j][0], j]
central_atoms_angles[i].append(CA_array)
# Sort rows by atom number
for i in range(0, len(coords)):
central_atoms_angles[i] = sorted(
central_atoms_angles[i], key=itemgetter(0)
)
# Find normals u_PA for each angle
unit_PA_all_angles = []
for i in range(0, len(central_atoms_angles)):
unit_PA_all_angles.append([])
for j in range(0, len(central_atoms_angles[i])):
# For the angle at central_atoms_angles[i][j,:] the
# corresponding u_PA value
# is found for the plane ABC and bond AB, where ABC
# corresponds to the order
# of the arguments. This is why the reverse order
# was also added
unit_PA_all_angles[i].append(
u_PA_from_angles(
central_atoms_angles[i][j][0],
i,
central_atoms_angles[i][j][1],
coords,
)
)
# Finds the contributing factors from the other angle terms.
# scaling_factor_all_angles will contain, for each angle, the
# scaling factor and its position in the angle list.
scaling_factor_all_angles = []
for i in range(0, len(central_atoms_angles)):
scaling_factor_all_angles.append([])
for j in range(0, len(central_atoms_angles[i])):
n = 1
m = 1
angles_around = 0
additional_contributions = 0
scaling_factor_all_angles[i].append([0, 0])
# Position in angle list
scaling_factor_all_angles[i][j][1] = central_atoms_angles[i][
j
][2]
# Goes through the list of angles with the same central atom
# and computes the
# terms needed for the modified Seminario method
# Forwards directions, finds the same bonds with the central atom i
while (
((j + n) < len(central_atoms_angles[i]))
and central_atoms_angles[i][j][0]
== central_atoms_angles[i][j + n][0]
):
additional_contributions = (
additional_contributions
+ (
abs(
np.dot(
unit_PA_all_angles[i][j][:],
unit_PA_all_angles[i][j + n][:],
)
)
)
** 2
)
n = n + 1
angles_around = angles_around + 1
# Backwards direction, finds the same bonds with the central atom i
while ((j - m) >= 0) and central_atoms_angles[i][j][
0
] == central_atoms_angles[i][j - m][0]:
additional_contributions = (
additional_contributions
+ (
abs(
np.dot(
unit_PA_all_angles[i][j][:],
unit_PA_all_angles[i][j - m][:],
)
)
)
** 2
)
m = m + 1
angles_around = angles_around + 1
if n != 1 or m != 1:
# Finds the mean value of the additional contribution
# (drop the additional term below to recover the original
# Seminario method)
scaling_factor_all_angles[i][j][0] = 1 + (
additional_contributions / (m + n - 2)
)
else:
scaling_factor_all_angles[i][j][0] = 1
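# In effect the scaling factor for angle A-B-C is
# 1 + mean(|u_PA . u_PA'|**2), averaged over the other angles that
# share the A-B bond with the same central atom B.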
scaling_factors_angles_list = []
for i in range(0, len(angle_list)):
scaling_factors_angles_list.append([])
# Orders the scaling factors according to the angle list
for i in range(0, len(central_atoms_angles)):
for j in range(0, len(central_atoms_angles[i])):
scaling_factors_angles_list[
scaling_factor_all_angles[i][j][1]
].append(scaling_factor_all_angles[i][j][0])
# Finds the angle force constants with the scaling factors
# included for each angle
for i in range(0, len(angle_list)):
# Ensures that there is no difference when the
# ordering is changed
[AB_k_theta, AB_theta_0] = force_angle_constant(
angle_list[i][0],
angle_list[i][1],
angle_list[i][2],
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_factors_angles_list[i][0],
scaling_factors_angles_list[i][1],
)
[BA_k_theta, BA_theta_0] = force_angle_constant(
angle_list[i][2],
angle_list[i][1],
angle_list[i][0],
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_factors_angles_list[i][1],
scaling_factors_angles_list[i][0],
)
k_theta[i] = (AB_k_theta + BA_k_theta) / 2
theta_0[i] = (AB_theta_0 + BA_theta_0) / 2
# Vibrational_scaling takes into account DFT
# deficiencies / anharmonicity
k_theta[i] = k_theta[i] * vibrational_scaling_squared
file_angle.write(
atom_names[angle_list[i][0]]
+ "-"
+ atom_names[angle_list[i][1]]
+ "-"
+ atom_names[angle_list[i][2]]
+ " "
)
file_angle.write(
str("%#.4g" % k_theta[i])
+ " "
+ str("%#.4g" % theta_0[i])
+ " "
+ str(angle_list[i][0] + 1)
+ " "
+ str(angle_list[i][1] + 1)
+ " "
+ str(angle_list[i][2] + 1)
)
file_angle.write("\n")
unique_values_angles.append(
[
atom_names[angle_list[i][0]],
atom_names[angle_list[i][1]],
atom_names[angle_list[i][2]],
k_theta[i],
theta_0[i],
1,
]
)
file_angle.close()
def get_charges(self):
"""
Saves the atomic charges in a text file obtained from
the Gaussian log file.
"""
log_file = self.guest_pdb[:-4] + ".log"
with open(log_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Fitting point charges to electrostatic potential" in lines[i]:
to_begin = int(i)
if " Sum of ESP charges =" in lines[i]:
to_end = int(i)
charges = lines[to_begin + 4 : to_end]
charge_list = []
for i in range(len(charges)):
charge_list.append(charges[i].strip().split())
charge_list_value = []
atom_list = []
for i in range(len(charge_list)):
charge_list_value.append(charge_list[i][2])
atom_list.append(charge_list[i][1])
data_tuples = list(zip(atom_list, charge_list_value))
df_charge = pd.DataFrame(data_tuples, columns=["Atom", "Charge"])
df_charge.to_csv(
self.charge_parameter_file, index=False, header=False, sep=" ",
)
def get_proper_dihedrals(self):
"""
Saves proper dihedral angles of the ligand in a text file.
"""
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
no_atoms = len(ppdb.df["ATOM"])
atom_index_list = []
for i in range(no_atoms):
atom_index_list.append(i + 1)
possible_dihedrals = []
for dihed in itertools.permutations(atom_index_list, 4):
possible_dihedrals.append(dihed)
df_bonds = pd.read_csv(
self.bond_parameter_file, header=None, delimiter=r"\s+"
)
df_bonds.columns = [
"bond",
"k_bond",
"bond_length",
"bond_1",
"bond_2",
]
bond1 = df_bonds["bond_1"].values.tolist()
bond2 = df_bonds["bond_2"].values.tolist()
bond_list_list = []
for i in range(len(bond1)):
args = (bond1[i], bond2[i])
bond_list_list.append(list(args))
reverse_bond_list_list = []
for bonds in bond_list_list:
reverse_bond_list_list.append(reverse_list(bonds))
bond_lists = bond_list_list + reverse_bond_list_list
proper_dihed_repeated = []
for i in range(len(possible_dihedrals)):
dihed_frag = (
[possible_dihedrals[i][0], possible_dihedrals[i][1]],
[possible_dihedrals[i][1], possible_dihedrals[i][2]],
[possible_dihedrals[i][2], possible_dihedrals[i][3]],
)
a = [
dihed_frag[0] in bond_lists,
dihed_frag[1] in bond_lists,
dihed_frag[2] in bond_lists,
]
if a == [True, True, True]:
proper_dihed_repeated.append(possible_dihedrals[i])
len_repeated_dihed_list = len(proper_dihed_repeated)
proper_dihedrals = proper_dihed_repeated
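# Every proper dihedral A-B-C-D also appears reversed as D-C-B-A among
# the permutations, so the loop below drops one of each reversed pair.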
for x in proper_dihedrals:
z = x[::-1]
if z in proper_dihedrals:
proper_dihedrals.remove(z)
len_non_repeated_dihed_list = len(proper_dihedrals)
# print(len_repeated_dihed_list == len_non_repeated_dihed_list * 2)
np.savetxt(self.proper_dihedral_file, proper_dihedrals, fmt="%s")
# return(proper_dihedrals)
class PrepareGaussianHost:
"""
A class used to prepare the QM engine input file (Gaussian)
for the receptor and run QM calculations with appropriate keywords.
This class contains methods to write an input file (.com extension)
for the QM engine. It then runs a QM calculation with the given
basis set and functional. The checkpoint file is then converted to
a formatted checkpoint file. Output files (.log, .chk, and .fchk)
will then be used to extract the receptor's force field parameters.
...
Attributes
----------
charge : int, optional
Charge of the receptor.
multiplicity: int, optional
Spin Multiplicity (2S+1) of the receptor where S represents
the total spin of the receptor.
host_qm_pdb: str, optional
PDB file of the receptor's QM region with atom numbers
beginning from 1.
n_processors : int, optional
Number of processors to be used for Gaussian program to run and
set in %NProcShared command of Gaussian.
memory : int, optional
Memory (in GB) to be used set in %Mem command of Gaussian.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
optimisation: str, optional
set to "OPT" to perform a geometry optimization on the receptor
specified in the system; else set to an empty string.
frequency: str, optional
set to "FREQ" for Gaussian to perform a frequency calculation;
else set to an empty string.
add_keywords_I: str, optional
Specifies the integration grid.
add_keywords_II: str, optional
Specifies the QM engine to select one of the methods for
analyzing the electron density of the system. Methods used
are based on fitting the molecular electrostatic potential.
Methods used are : POP=CHELPG (Charges from Electrostatic
Potentials using a Grid based method) and POP=MK
(Merz-Singh-Kollman scheme)
add_keywords_III: str, optional
Used to include the IOp keyword (to set the internal options to
specific values) in the Gaussian command.
gauss_out_file: str, optional
This file contains the output script obtained after running
the Gaussian QM calculation.
fchk_out_file: str, optional
Formatted checkpoint file obtained from the checkpoint file
using formchk command.
"""
def __init__(
self,
charge=0,
multiplicity=1,
host_qm_pdb="host_qm.pdb",
n_processors=12,
memory=50,
functional="B3LYP",
basis_set="6-31G",
optimisation="OPT",
frequency="FREQ",
add_keywords_I="INTEGRAL=(GRID=ULTRAFINE) SCF=(maxcycles=4000) SYMMETRY=NONE",
add_keywords_II="POP(MK,READRADII)",
add_keywords_III="IOP(6/33=2,6/42=6)",
gauss_out_file="host_qm.out",
fchk_out_file="host_qm_fchk.out",
):
self.charge = charge
self.multiplicity = multiplicity
self.host_qm_pdb = host_qm_pdb
self.n_processors = n_processors
self.memory = memory
self.functional = functional
self.basis_set = basis_set
self.optimisation = optimisation
self.frequency = frequency
self.gauss_out_file = gauss_out_file
self.fchk_out_file = fchk_out_file
self.add_keywords_I = add_keywords_I
self.add_keywords_II = add_keywords_II
self.add_keywords_III = add_keywords_III
def write_input(self):
"""
Writes a Gaussian input file for the receptor QM region.
"""
# TODO: create generic function for Gaussian Input file (DRY principle)
command_line_1 = "%Chk = " + self.host_qm_pdb[:-4] + ".chk"
command_line_2 = "%Mem = " + str(self.memory) + "GB"
command_line_3 = "%NProcShared = " + str(self.n_processors)
command_line_4 = (
"# "
+ self.functional
+ " "
+ self.basis_set
+ " "
+ self.optimisation
+ " "
+ self.frequency
+ " "
+ self.add_keywords_I
+ " "
+ self.add_keywords_II
+ " "
+ self.add_keywords_III
)
command_line_5 = " "
command_line_6 = self.host_qm_pdb[:-4] + " " + "gaussian input file"
command_line_7 = " "
command_line_8 = str(self.charge) + " " + str(self.multiplicity)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_qm_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_2 = df[["x_coord", "y_coord", "z_coord"]]
df_merged = pd.concat([df_1, df_2], axis=1)
# API: pandas.concat
#!/usr/bin/env python3
import os
import sys
import re
import pandas as pd, geopandas as gpd
import numpy as np
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
from multiprocessing import Pool
from os.path import isfile, join
import shutil
import warnings
from pathlib import Path
import time
warnings.simplefilter(action='ignore', category=FutureWarning)
import rasterio
from rasterio import features as riofeatures
from rasterio import plot as rioplot
from shapely.geometry import Polygon
"""
Plot Rating Curves and Compare to USGS Gages
Parameters
----------
fim_dir : str
Directory containing FIM output folders.
output_dir : str
Directory containing rating curve plots and tables.
usgs_gages_filename : str
File name of USGS rating curves.
nwm_flow_dir : str
Directory containing NWM recurrence flows files.
number_of_jobs : str
Number of jobs.
stat_groups : str
String of column names used to group the evaluation metrics.
"""
def check_file_age(file):
'''
Checks if file exists, determines the file age, and recommends
updating if older than 1 month.
Returns
-------
str
A message giving the file's age, with a recommendation to update
when the file is more than 30 days old (None if the file is missing).
'''
file = Path(file)
if file.is_file():
modification_time = file.stat().st_mtime
current_time = time.time()
file_age_days = (current_time - modification_time)/86400
if file_age_days > 30:
check = f'{file.name} is {int(file_age_days)} days old, consider updating.\nUpdate with rating_curve_get_usgs_curves.py'
else:
check = f'{file.name} is {int(file_age_days)} days old.'
return check
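# Example (hypothetical filename): check_file_age('usgs_rating_curves.csv')
# returns e.g. "usgs_rating_curves.csv is 12 days old." and adds an update
# recommendation once the file is older than 30 days.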
# recurr_intervals = ['recurr_1_5_cms.csv','recurr_5_0_cms.csv','recurr_10_0_cms.csv']
def generate_rating_curve_metrics(args):
elev_table_filename = args[0]
branches_folder = args[1]
usgs_gages_filename = args[2]
usgs_recurr_stats_filename = args[3]
nwm_recurr_data_filename = args[4]
rc_comparison_plot_filename = args[5]
nwm_flow_dir = args[6]
catfim_flows_filename = args[7]
huc = args[8]
alt_plot = args[9]
elev_table = pd.read_csv(elev_table_filename,dtype={'location_id': object, 'feature_id':object,'HydroID':object, 'levpa_id':object})
elev_table.dropna(subset=['location_id'], inplace=True)
usgs_gages = pd.read_csv(usgs_gages_filename,dtype={'location_id': object, 'feature_id':object})
# Aggregate FIM4 hydroTables
hydrotable = pd.DataFrame()
for branch in elev_table.levpa_id.unique():
branch_elev_table = elev_table.loc[elev_table.levpa_id == branch].copy()
branch_hydrotable = pd.read_csv(join(branches_folder, str(branch), f'hydroTable_{branch}.csv'),dtype={'HydroID':object,'feature_id':object})
# Only pull SRC for hydroids that are in this branch
branch_hydrotable = branch_hydrotable.loc[branch_hydrotable.HydroID.isin(branch_elev_table.HydroID)]
branch_hydrotable.drop(columns=['order_'], inplace=True)
# Join SRC with elevation data
branch_elev_table.rename(columns={'feature_id':'fim_feature_id'}, inplace=True)
branch_hydrotable = branch_hydrotable.merge(branch_elev_table, on="HydroID")
# Append to full rating curve dataframe
if hydrotable.empty:
hydrotable = branch_hydrotable
else:
hydrotable = hydrotable.append(branch_hydrotable)
# Join rating curves with elevation data
#elev_table.rename(columns={'feature_id':'fim_feature_id'}, inplace=True)
#hydrotable = hydrotable.merge(elev_table, on="HydroID")
relevant_gages = list(hydrotable.location_id.unique())
usgs_gages = usgs_gages[usgs_gages['location_id'].isin(relevant_gages)]
usgs_gages = usgs_gages.reset_index(drop=True)
if len(usgs_gages) > 0:
# Adjust rating curve to elevation
hydrotable['elevation_ft'] = (hydrotable.stage + hydrotable.dem_adj_elevation) * 3.28084 # convert from m to ft
# hydrotable['raw_elevation_ft'] = (hydrotable.stage + hydrotable.dem_elevation) * 3.28084 # convert from m to ft
hydrotable['discharge_cfs'] = hydrotable.discharge_cms * 35.3147
usgs_gages = usgs_gages.rename(columns={"flow": "discharge_cfs", "elevation_navd88": "elevation_ft"})
hydrotable['source'] = "FIM"
usgs_gages['source'] = "USGS"
limited_hydrotable = hydrotable.filter(items=['location_id','elevation_ft','discharge_cfs','source', 'HydroID', 'levpa_id', 'dem_adj_elevation'])
select_usgs_gages = usgs_gages.filter(items=['location_id', 'elevation_ft', 'discharge_cfs','source'])
if 'default_discharge_cms' in hydrotable.columns: # check if both "FIM" and "FIM_default" SRCs are available
hydrotable['default_discharge_cfs'] = hydrotable.default_discharge_cms * 35.3147
limited_hydrotable_default = hydrotable.filter(items=['location_id','elevation_ft', 'default_discharge_cfs'])
limited_hydrotable_default['discharge_cfs'] = limited_hydrotable_default.default_discharge_cfs
limited_hydrotable_default['source'] = "FIM_default"
rating_curves = limited_hydrotable.append(select_usgs_gages)
rating_curves = rating_curves.append(limited_hydrotable_default)
else:
rating_curves = limited_hydrotable.append(select_usgs_gages)
# Add stream order
stream_orders = hydrotable.filter(items=['location_id','order_']).drop_duplicates()
rating_curves = rating_curves.merge(stream_orders, on='location_id')
rating_curves['order_'] = rating_curves['order_'].astype('int')
# NWM recurr intervals
recurr_intervals = ("2","5","10","25","50","100")
recurr_dfs = []
for interval in recurr_intervals:
recurr_file = join(nwm_flow_dir, 'nwm21_17C_recurr_{}_0_cms.csv'.format(interval))
df = pd.read_csv(recurr_file, dtype={'feature_id': str})
# Update column names
df = df.rename(columns={"discharge": interval})
recurr_dfs.append(df)
# Merge NWM recurr intervals into a single layer
nwm_recurr_intervals_all = reduce(lambda x,y: pd.merge(x,y, on='feature_id', how='outer'), recurr_dfs)
nwm_recurr_intervals_all = pd.melt(nwm_recurr_intervals_all, id_vars=['feature_id'], value_vars=recurr_intervals, var_name='recurr_interval', value_name='discharge_cms')
# API: pandas.melt
import itertools
import numpy as np
import pytest
import pandas as pd
from pandas.core.internals import ExtensionBlock
from .base import BaseExtensionTests
class BaseReshapingTests(BaseExtensionTests):
"""Tests for reshaping and concatenation."""
@pytest.mark.parametrize('in_frame', [True, False])
def test_concat(self, data, in_frame):
wrapped = pd.Series(data)
if in_frame:
wrapped = pd.DataFrame(wrapped)
result = pd.concat([wrapped, wrapped], ignore_index=True)
assert len(result) == len(data) * 2
if in_frame:
dtype = result.dtypes[0]
else:
dtype = result.dtype
assert dtype == data.dtype
assert isinstance(result._data.blocks[0], ExtensionBlock)
@pytest.mark.parametrize('in_frame', [True, False])
def test_concat_all_na_block(self, data_missing, in_frame):
valid_block = pd.Series(data_missing.take([1, 1]), index=[0, 1])
na_block = pd.Series(data_missing.take([0, 0]), index=[2, 3])
if in_frame:
valid_block = pd.DataFrame({"a": valid_block})
na_block = pd.DataFrame({"a": na_block})
result = pd.concat([valid_block, na_block])
if in_frame:
expected = pd.DataFrame({"a": data_missing.take([1, 1, 0, 0])})
self.assert_frame_equal(result, expected)
else:
expected = pd.Series(data_missing.take([1, 1, 0, 0]))
self.assert_series_equal(result, expected)
def test_concat_mixed_dtypes(self, data):
# https://github.com/pandas-dev/pandas/issues/20762
df1 = pd.DataFrame({'A': data[:3]})
df2 = pd.DataFrame({"A": [1, 2, 3]})
df3 = pd.DataFrame({"A": ['a', 'b', 'c']}).astype('category')
dfs = [df1, df2, df3]
# dataframes
result = pd.concat(dfs)
expected = pd.concat([x.astype(object) for x in dfs])
self.assert_frame_equal(result, expected)
# series
result = pd.concat([x['A'] for x in dfs])
# API: pandas.concat
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# replace can be "mm" or "none": "mm" = mode for categorical features and median for numeric features
# n_r = null ratio, s_r = skewness ratio, c_r = correlation ratio, n_f = number of features, t_s = test size,
# n = drop rows with outliers in more than n features, cat_count = drop categorical columns with cat_count or more distinct values
def main (train,test,target,Id,n_r=0.6,s_r=0.75,c_r=1,n_f="full",t_s=0.25,r_s=42,replace="mm",cat_count=100,n=2):
#-----------------------------------------------------------------------------------------------------------------------------------------------------
dataset = pd.concat(objs=[train, test], axis=0,sort=False).reset_index(drop=True)
#-----------------------------------------------------------------------------------------------------------------------------------------------------
def check_skew(train,target):
if train[target].skew()>=s_r :
train[target]= np.log1p(train[target])
#-----------------------------------------------------------------------------------------------------------------------------------------------------
def drop_na(dataset,target):
dataset_isna=dataset.isna()
dataset_isna_sum=dataset_isna.sum()
dataset_isna_ratio=dataset_isna_sum/len(dataset)
if target in dataset_isna_ratio:
dataset_isna_ratio.drop(target,inplace=True)
remove_columns=dataset_isna_ratio[dataset_isna_ratio>n_r]
columns=pd.DataFrame(remove_columns)
print("2-This Columns will be remove because of null ratio higher than %"+str(n_r*100)+": ")
print(remove_columns)
return columns
drops=drop_na(dataset,target)
dataset=dataset.drop(drops.index,axis=1)
#-----------------------------------------------------------------------------------------------------------------------------------------------------
def replace_null(dataset,replace):
cat=dataset.select_dtypes("object")
fl=dataset.select_dtypes(["float64","int64"]).drop(target,axis=1)
if replace =="mm":
for column in cat:
dataset[column].fillna(dataset[column].mode()[0], inplace=True)
for column in fl:
dataset[column].fillna(dataset[column].median(), inplace=True)
if replace=="none":
for column in cat:
dataset[column].fillna("NA", inplace=True)
for column in fl:
dataset[column].fillna(0, inplace=True)
#-----------------------------------------------------------------------------------------------------------------------------------------------------
def detect_outliers(dataset,n,features):
from collections import Counter
outlier_indices = []
# iterate over features(columns)
for col in features:
# 1st quartile (25%)
Q1 = np.percentile(dataset[col], 25)
# 3rd quartile (75%)
Q3 = np.percentile(dataset[col],75)
# Interquartile range (IQR)
IQR = Q3 - Q1
# outlier step
outlier_step = 1.5 * IQR
# Determine a list of indices of outliers for feature col
outlier_list_col = dataset[(dataset[col] < Q1 - outlier_step) | (dataset[col] > Q3 + outlier_step )].index
# append the found outlier indices for col to the list of outlier indices
outlier_indices.extend(outlier_list_col)
# select observations containing more than 2 outliers
outlier_indices = Counter(outlier_indices)
multiple_outliers = list( k for k, v in outlier_indices.items() if v > n )
return multiple_outliers
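# Sketch with illustrative column names: detect_outliers(dataset, 2, ["Age", "Fare"])
# returns the indices of rows flagged as 1.5*IQR outliers in more than
# 2 of the listed columns.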
#-----------------------------------------------------------------------------------------------------------------------------------------------------
def skew_features(dataset):
from scipy.special import boxcox1p
from scipy.stats import boxcox
from scipy.stats import skew
lam = 0.15
#boxcox transform skewed numeric features:
numeric_feats = dataset.dtypes[dataset.dtypes != "object"].index
skewed_feats = dataset[numeric_feats].apply(lambda x: skew(x.dropna())) #compute skewness
skewed_feats = skewed_feats[skewed_feats > s_r]
skewed_feats = skewed_feats.index
dataset[skewed_feats] = boxcox1p(dataset[skewed_feats],lam)
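# boxcox1p(x, 0.15) applies ((1 + x)**0.15 - 1) / 0.15, a Box-Cox
# transform of 1+x that compresses the long right tail of skewed features.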
#------------------------------------------calling functions--------------------------------------------------------------------------------------
check_skew(dataset,"Survived")
drop_na(dataset,target)
replace_null(dataset,replace)
features=dataset.select_dtypes(["float64","int64"]).drop([target,Id],axis=1)
detect_outliers(dataset,n,features)
Outliers_to_drop = detect_outliers(dataset,n,features)
dataset = dataset.drop(Outliers_to_drop, axis = 0).reset_index(drop=True)
skew_features(dataset)
cat=dataset.select_dtypes("object")
del_col=[]
for c in cat.columns:
if len(cat[c].value_counts())>=cat_count:
del_col.append(c)
cat=cat.drop(del_col,axis=1)
dataset=pd.get_dummies(dataset,columns=cat.columns)
#------------------------------------------train test split--------------------------------------------------------------------------------------
train=dataset[dataset[target].notnull()]
test=dataset[dataset[target].isna()]
if n_f=="full":
k=train.shape[1]
else:
k=n_f
corrmat=abs(dataset.corr())
cols = corrmat.nlargest(k, target)[target].index
train_x=train[cols].drop(target,axis=1)
train_y=train[target]
X_test=test[cols].drop(target,axis=1)
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(train_x, train_y, test_size=t_s, random_state=r_s)
#------------------------------------------all models--------------------------------------------------------------------------------------
from sklearn.metrics import confusion_matrix
from sklearn.metrics import mean_squared_error,mean_absolute_error
from sklearn.ensemble import GradientBoostingClassifier,RandomForestClassifier,AdaBoostClassifier,ExtraTreesClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
my_models= [
GradientBoostingClassifier(random_state=r_s),
RandomForestClassifier(random_state=r_s),
AdaBoostClassifier(random_state=r_s),
ExtraTreesClassifier(random_state=r_s),
LGBMClassifier(random_state=r_s),
CatBoostClassifier(logging_level='Silent',random_state=r_s),
XGBClassifier(random_state=r_s),
MLPClassifier(random_state=r_s),
KNeighborsClassifier(),
SVC(random_state=r_s),
GaussianProcessClassifier(random_state=r_s),
DecisionTreeClassifier(random_state=r_s),
GaussianNB(),
QuadraticDiscriminantAnalysis(),
LogisticRegression(random_state=r_s)
]
scores_val=[]
scores_train=[]
MAE=[]
MSE=[]
RMSE=[]
for model in my_models:
scores_val.append(model.fit(X_train,y_train).score(X_val,y_val))
scores_train.append(model.fit(X_train,y_train).score(X_train,y_train))
y_pred=model.predict(X_val)
MAE.append(mean_absolute_error(y_val,y_pred))
MSE.append(mean_squared_error(y_val,y_pred))
RMSE.append(np.sqrt(mean_squared_error(y_val,y_pred)))
results=zip(scores_val,scores_train,MAE,MSE,RMSE)
results=list(results)
results_score_val=[item[0] for item in results]
results_score_train=[item[1] for item in results]
results_MAE=[item[2] for item in results]
results_MSE=[item[3] for item in results]
results_RMSE=[item[4] for item in results]
df_results=pd.DataFrame({"Algorithm":my_models,"Training Score":results_score_train,"Validation Score":results_score_val,"MAE":results_MAE,"MSE":results_MSE,"RMSE":results_RMSE})
# API: pandas.DataFrame
import os
import pandas as pd
import glob
import numpy as np
def run_ad():
input_path = 'C:\\Users\\Joker\\Desktop\\比赛\\数据\\阿尔茨海默\\test-fake'
fake_result = {
'image_id':[],
'category_id':[]}
for subdir in os.scandir(input_path):
for subsubdir in os.scandir(subdir.path):
image_id = os.listdir(subsubdir.path)
image_id = [os.path.basename(case).split('.')[0] for case in image_id]
fake_result['image_id'].extend(image_id)
fake_result['category_id'].extend([subsubdir.name]*len(image_id))
result_csv = pd.DataFrame(data=fake_result)
# API: pandas.DataFrame
from bokeh.io import curdoc
from bokeh.plotting import figure
from bokeh.layouts import column, row
from bokeh.models import Button, TextInput, Div, Panel, Tabs, ColumnDataSource, RangeTool, DatetimeTickFormatter, LinearColorMapper, ColorBar, BasicTicker, Slider
from bokeh.transform import transform
import pathlib, sys
import pandas as pd
import numpy as np
import datetime as dt
class view_gapfilling:
def __init__(self):
print('Entered view_bokeh_gapfilling.py')
self.div01 = Div(text=r'C:\Users\User\Desktop\testse\df_filter.csv')
self.path = TextInput(value='', title='Insert df Path:')
self.button_view = Button(label='View')
self.button_view.on_click(self._button_view)
self.source_01 = ColumnDataSource(data=dict(date=[], time=[], ET=[]))
self.fig_01 = figure(title='ET without Corrections', plot_height=350, plot_width=1200,
x_axis_type='datetime', y_axis_type='datetime')
self.fig_01.xaxis[0].formatter = DatetimeTickFormatter(days=["%d/%m/%Y"])
self.fig_01.yaxis[0].formatter = DatetimeTickFormatter(days=["%H:%M"], hours=["%H:%M"])
colors = ['#440154', '#404387', '#29788E', '#22A784', '#79D151', '#FDE724']
self.color_mapper = LinearColorMapper(palette=colors)
self.et_01 = self.fig_01.rect(x='date', y='time', fill_color=transform('ET', self.color_mapper),
source=self.source_01, width=1000*60*60*24, height=1000*60*30, line_color=None)
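# On datetime axes bokeh interprets rect width/height in milliseconds:
# 1000*60*60*24 ms = one day wide, 1000*60*30 ms = 30 minutes tall.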
color_bar = ColorBar(color_mapper=self.color_mapper, ticker=BasicTicker(desired_num_ticks=len(colors)),label_standoff=6, border_line_color=None, location=(0,0))
self.fig_01.add_layout(color_bar, 'right')
self.slider_01 = Slider(start=0, end=30, value=1, step=1, title='# Adjecent Days')
self.slider_01.on_change('value_throttled', lambda attr, old, new: self._mean_diurnal_gapFilling())
self.source_02 = ColumnDataSource(data=dict(date=[], time=[], ET_mean=[]))
self.fig_02 = figure(title='ET GapFilled', plot_width=1200, plot_height=350,
x_axis_type='datetime', y_axis_type='datetime')
self.fig_02.xaxis[0].formatter = DatetimeTickFormatter(days=["%d/%m/%Y"])
self.fig_02.yaxis[0].formatter = DatetimeTickFormatter(days=["%H:%M"], hours=["%H:%M"])
self.et_02 = self.fig_02.rect(x='date', y='time', fill_color=transform('ET_mean', self.color_mapper),
source=self.source_02, width=1000*60*60*24, height=1000*60*30, line_color=None)
self.fig_02.add_layout(color_bar, 'right')
curdoc().add_root(column(self.div01,
self.path,
self.button_view,
self.fig_01,
self.slider_01,
self.fig_02))
def _button_view(self):
path = pathlib.Path('{}'.format(self.path.value))
self.df = pd.read_csv(path, parse_dates=['TIMESTAMP','date_ns','time_ns'])
print(self.df.columns.to_list())
self.source_01.data = dict(date=self.df['date_ns'],
time=self.df['time_ns'],
ET=self.df['ET'])
min_datetime = self.df['TIMESTAMP'].min()
max_datetime = self.df['TIMESTAMP'].max()
# self.df['date02'] = pd.to_datetime(self.df['TIMESTAMP'].dt.date)
# self.df['time02'] = pd.to_datetime(self.df['TIMESTAMP'].dt.time, format="%H:%M:%S")
# self.df['time02'] = pd.to_datetime
df_full_timestamp = pd.DataFrame({"TIMESTAMP": pd.date_range(start=min_datetime, end=max_datetime, freq='30min')})
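# Outer-merging onto this complete 30-minute index (below) makes any
# missing half-hours show up as NaN rows, ready for gap filling.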
self.df_merge = pd.merge(left=self.df, right=df_full_timestamp, on='TIMESTAMP', how='outer')
# API: pandas.merge
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 20 10:24:34 2019
@author: labadmin
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 21:05:32 2019
@author: Hassan
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier as GBC
from imblearn.over_sampling import RandomOverSampler
from imblearn.over_sampling import BorderlineSMOTE
from imblearn.over_sampling import SMOTENC
data_ben1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset1.csv",skiprows=4)
data_ben2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset2.csv",skiprows=4)
data_ben3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset3.csv",skiprows=4)
data_ben4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset4.csv",skiprows=4)
data_ben5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset5.csv",skiprows=4)
data_ben6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset6.csv",skiprows=4)
data_ben7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending1\\dataset7.csv",skiprows=4)
frames_ben1 = [data_ben1,data_ben2,data_ben3,data_ben4,data_ben5,data_ben6,data_ben7]
result_ben1 = pd.concat(frames_ben1)
result_ben1.index=range(3360)
df_ben1 = pd.DataFrame({'label': [1]},index=range(0,3360))
dat_ben1=pd.concat([result_ben1,df_ben1],axis=1)
#-------------------------------------------------------------------------------------------------
data__ben1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset1.csv",skiprows=4)
data__ben2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset2.csv",skiprows=4)
data__ben3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset3.csv",skiprows=4)
data__ben4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset4.csv",skiprows=4)
data__ben4=data__ben4['# Columns: time'].str.split(expand=True)
data__ben4.columns=['# Columns: time','avg_rss12','var_rss12','avg_rss13','var_rss13','avg_rss23','var_rss23']
data__ben5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset5.csv",skiprows=4)
data__ben6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\bending2\\dataset6.csv",skiprows=4)
frames_ben2 = [data__ben1,data__ben2,data__ben3,data__ben4,data__ben5,data__ben6]
result_ben2 = pd.concat(frames_ben2)
result_ben2.index=range(2880)
df_ben2 = pd.DataFrame({'label': [2]},index=range(0,2880))
dat__ben2=pd.concat([result_ben2,df_ben2],axis=1)
#-----------------------------------------------------------------------------------------------------
data_cyc1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset1.csv",skiprows=4)
data_cyc2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset2.csv",skiprows=4)
data_cyc3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset3.csv",skiprows=4)
data_cyc4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset4.csv",skiprows=4)
data_cyc5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset5.csv",skiprows=4)
data_cyc6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset6.csv",skiprows=4)
data_cyc7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset7.csv",skiprows=4)
data_cyc8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset8.csv",skiprows=4)
data_cyc9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset99.csv",skiprows=4)
data_cyc10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset10.csv",skiprows=4)
data_cyc11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset11.csv",skiprows=4)
data_cyc12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset12.csv",skiprows=4)
data_cyc13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset13.csv",skiprows=4)
data_cyc14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset144.csv",skiprows=4)
data_cyc15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\cycling\\dataset15.csv",skiprows=4)
frames_cyc = [data_cyc1,data_cyc2,data_cyc3,data_cyc4,data_cyc5,data_cyc6,data_cyc7,data_cyc8,data_cyc9,data_cyc10,data_cyc11,data_cyc12,data_cyc13,data_cyc14,data_cyc15]
result_cyc = pd.concat(frames_cyc)
result_cyc.index=range(7200)
df_cyc = pd.DataFrame({'label': [3]},index=range(0,7200))
data_cyc=pd.concat([result_cyc,df_cyc],axis=1)
#----------------------------------------------------------------------------------------------
data_ly1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset1.csv",skiprows=4)
data_ly2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset2.csv",skiprows=4)
data_ly3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset3.csv",skiprows=4)
data_ly4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset4.csv",skiprows=4)
data_ly5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset5.csv",skiprows=4)
data_ly6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset6.csv",skiprows=4)
data_ly7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset7.csv",skiprows=4)
data_ly8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset8.csv",skiprows=4)
data_ly9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset9.csv",skiprows=4)
data_ly10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset10.csv",skiprows=4)
data_ly11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset11.csv",skiprows=4)
data_ly12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset12.csv",skiprows=4)
data_ly13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset13.csv",skiprows=4)
data_ly14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset14.csv",skiprows=4)
data_ly15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\lying\\dataset15.csv",skiprows=4)
frames_ly = [data_ly1,data_ly2,data_ly3,data_ly4,data_ly5,data_ly6,data_ly7,data_ly8,data_ly9,data_ly10,data_ly11,data_ly12,data_ly13,data_ly14,data_ly15]
result_ly = pd.concat(frames_ly)
result_ly.index=range(7200)
df_ly = pd.DataFrame({'label': [4]},index=range(0,7200))
data_ly=pd.concat([result_ly,df_ly],axis=1)
#-------------------------------------------------------------------------------------------------
data_sit1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset1.csv",skiprows=4)
data_sit2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset2.csv",skiprows=4)
data_sit3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset3.csv",skiprows=4)
data_sit4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset4.csv",skiprows=4)
data_sit5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset5.csv",skiprows=4)
data_sit6=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset6.csv",skiprows=4)
data_sit7=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset7.csv",skiprows=4)
data_sit8=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset8.csv",skiprows=4)
data_sit9=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset9.csv",skiprows=4)
data_sit10=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset10.csv",skiprows=4)
data_sit11=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset11.csv",skiprows=4)
data_sit12=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset12.csv",skiprows=4)
data_sit13=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset13.csv",skiprows=4)
data_sit14=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset14.csv",skiprows=4)
data_sit15=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\sitting\\dataset15.csv",skiprows=4)
frames_sit= [data_sit1,data_sit2,data_sit3,data_sit4,data_sit5,data_sit6,data_sit7,data_sit8,data_sit9,data_sit10,data_sit11,data_sit12,data_sit13,data_sit14,data_sit15]
result_sit = pd.concat(frames_sit)
result_sit.index=range(7199)
df_sit= pd.DataFrame({'label': [5]},index=range(0,7199))
data_sit=pd.concat([result_sit,df_sit],axis=1)
#----------------------------------------------------------------------------------------------------
data_sta1=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset1.csv",skiprows=4)
data_sta2=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset2.csv",skiprows=4)
data_sta3=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset3.csv",skiprows=4)
data_sta4=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset4.csv",skiprows=4)
data_sta5=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\standing\\dataset5.csv",skiprows=4)
# API: pandas.read_csv
# -*- coding: utf-8 -*-
"""Locational price analysis plots.
Price analysis plots, price duration curves and timeseries plots.
Prices plotted in $/MWh
@author: adyreson and <NAME>
"""
import os
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import marmot.config.mconfig as mconfig
import marmot.plottingmodules.plotutils.plot_library as plotlib
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, DataSavedInModule,
InputSheetError)
class MPlot(PlotDataHelper):
"""price MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The price.py module contains methods that are
related to grid prices at regions, zones, nodes etc.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
def pdc_all_regions(self, y_axis_max: float = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a price duration curve for all regions/zones and plots them on a single facet plot.
Price is in $/MWh.
The code automatically creates a facet plot based on the number of regions/zones in the input.
All scenarios are plotted on a single facet for each region/zone
Args:
y_axis_max (float, optional): Max y-axis value.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, f"{agg}_Price", self.Scenarios)]
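# e.g. when agg == 'region' this resolves to
# [(True, "region_Price", self.Scenarios)]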
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
#Location to save to
save_figures = os.path.join(self.figure_folder, self.AGG_BY + '_prices')
region_number = len(self.Zones)
# determine x,y length for plot
xdimension, ydimension = self.set_x_y_dimension(region_number)
grid_size = xdimension*ydimension
# Used to calculate any excess axis to delete
excess_axs = grid_size - region_number
#setup plot
fig2, axs = plotlib.setup_plot(xdimension,ydimension)
plt.subplots_adjust(wspace=0.1, hspace=0.50)
data_table = []
for n, zone_input in enumerate(self.Zones):
all_prices=[]
for scenario in self.Scenarios:
price = self._process_data(self[f"{agg}_Price"],scenario,zone_input)
price = price.groupby(["timestamp"]).sum()
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
price = price[start_date_range:end_date_range]
price.sort_values(by=scenario,ascending=False,inplace=True)
price.reset_index(drop=True,inplace=True)
all_prices.append(price)
duration_curve = pd.concat(all_prices, axis=1)
duration_curve.columns = duration_curve.columns.str.replace('_',' ')
data_out = duration_curve.copy()
data_out.columns = [zone_input + "_" + str(col) for col in data_out.columns]
data_table.append(data_out)
color_dict = dict(zip(duration_curve.columns,self.color_list))
for column in duration_curve:
plotlib.create_line_plot(axs,duration_curve, column, color_dict,
n=n, label=column)
if pd.notna(y_axis_max):
axs[n].set_ylim(bottom=0, top=float(y_axis_max))
axs[n].set_xlim(0,len(duration_curve))
axs[n].set_title(zone_input.replace('_',' '))
handles, labels = axs[n].get_legend_handles_labels()
#Legend
axs[grid_size-1].legend((handles), (labels), loc='lower left',
bbox_to_anchor=(1,0), facecolor='inherit',
frameon=True)
# Remove extra axes
if excess_axs != 0:
PlotDataHelper.remove_excess_axs(axs,excess_axs,grid_size)
fig2.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False,
right=False)
plt.ylabel(self.AGG_BY + ' Price ($/MWh)', color='black', rotation='vertical',
labelpad=30)
plt.xlabel('Intervals', color='black', rotation='horizontal', labelpad=20)
Data_Table_Out = pd.concat(data_table, axis=1)
Data_Table_Out = Data_Table_Out.add_suffix(" ($/MWh)")
fig2.savefig(os.path.join(save_figures, "Price_Duration_Curve_All_Regions.svg"),
dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(save_figures, "Price_Duration_Curve_All_Regions.csv"))
outputs = DataSavedInModule()
return outputs
def region_pdc(self, figure_name: str = None, y_axis_max: float = None,
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a price duration curve for each region. Price in $/MWh
The code will create either a facet plot or a single plot depending on
if the Facet argument is active.
If a facet plot is created, each scenario is plotted on a separate facet,
otherwise all scenarios are plotted on a single plot.
To make a facet plot, ensure the word 'Facet' is found in the figure_name.
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
y_axis_max (float, optional): Max y-axis value.
Defaults to None.
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
outputs = {}
facet=False
if 'Facet' in figure_name:
facet = True
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, f"{agg}_Price", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
all_prices=[]
for scenario in self.Scenarios:
price = self._process_data(self[f"{agg}_Price"],scenario,zone_input)
price = price.groupby(["timestamp"]).sum()
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
price = price[start_date_range:end_date_range]
price.sort_values(by=scenario,ascending=False,inplace=True)
price.reset_index(drop=True,inplace=True)
all_prices.append(price)
duration_curve = pd.concat(all_prices, axis=1)
duration_curve.columns = duration_curve.columns.str.replace('_',' ')
Data_Out = duration_curve.add_suffix(" ($/MWh)")
xdimension=len(self.xlabels)
if xdimension == 0:
xdimension = 1
ydimension=len(self.ylabels)
if ydimension == 0:
ydimension = 1
# If the plot is not a facet plot, grid size should be 1x1
if not facet:
xdimension = 1
ydimension = 1
color_dict = dict(zip(duration_curve.columns,self.color_list))
#setup plot
fig1, axs = plotlib.setup_plot(xdimension,ydimension)
plt.subplots_adjust(wspace=0.05, hspace=0.2)
n=0
for column in duration_curve:
plotlib.create_line_plot(axs, duration_curve, column, color_dict,
n=n, label=column)
if pd.notna(y_axis_max):
axs[n].set_ylim(bottom=0,top=float(y_axis_max))
axs[n].set_xlim(0,len(duration_curve))
axs[n].legend(loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
if facet:
n+=1
fig1.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False,
right=False)
plt.ylabel(f"{self.AGG_BY} Price ($/MWh)", color='black', rotation='vertical',
labelpad=20)
plt.xlabel('Intervals', color='black', rotation='horizontal', labelpad=20)
if mconfig.parser("plot_title_as_region"):
plt.title(zone_input)
outputs[zone_input] = {'fig': fig1, 'data_table':Data_Out}
return outputs
def region_timeseries_price(self, figure_name: str = None, y_axis_max: float = None,
timezone: str = "", start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates price timeseries line plot for each region. Price is $/MWh.
The code will create either a facet plot or a single plot depending on
if the Facet argument is active.
If a facet plot is created, each scenario is plotted on a separate facet,
otherwise all scenarios are plotted on a single plot.
To make a facet plot, ensure the word 'Facet' is found in the figure_name.
Args:
figure_name (str, optional): User defined figure output name.
Defaults to None.
y_axis_max (float, optional): Max y-axis value.
Defaults to None.
timezone (str, optional): The timezone to display on the x-axes.
Defaults to "".
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines an end date at which to represent data to.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table
"""
outputs = {}
facet=False
if 'Facet' in figure_name:
facet = True
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, f"{agg}_Price", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
for zone_input in self.Zones:
self.logger.info(f"{self.AGG_BY} = {zone_input}")
all_prices=[]
for scenario in self.Scenarios:
price = self._process_data(self[f"{agg}_Price"],scenario,zone_input)
price = price.groupby(["timestamp"]).sum()
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
price = price[start_date_range:end_date_range]
all_prices.append(price)
timeseries = pd.concat(all_prices, axis=1)
timeseries.columns = timeseries.columns.str.replace('_',' ')
Data_Out = timeseries.add_suffix(" ($/MWh)")
xdimension=len(self.xlabels)
if xdimension == 0:
xdimension = 1
ydimension=len(self.ylabels)
if ydimension == 0:
ydimension = 1
# If the plot is not a facet plot, grid size should be 1x1
if not facet:
xdimension = 1
ydimension = 1
color_dict = dict(zip(timeseries.columns,self.color_list))
#setup plot
fig3, axs = plotlib.setup_plot(xdimension,ydimension)
plt.subplots_adjust(wspace=0.05, hspace=0.2)
n=0 #Counter for scenario subplots
for column in timeseries:
plotlib.create_line_plot(axs, timeseries, column,
color_dict, n=n, label=column)
if pd.notna(y_axis_max):
axs[n].set_ylim(bottom=0,top=float(y_axis_max))
axs[n].legend(loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
PlotDataHelper.set_plot_timeseries_format(axs,n)
if facet:
n+=1
fig3.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False,
left=False, right=False)
if mconfig.parser("plot_title_as_region"):
plt.title(zone_input)
plt.ylabel(f"{self.AGG_BY} Price ($/MWh)", color='black',
rotation='vertical', labelpad=20)
plt.xlabel(timezone, color='black', rotation='horizontal', labelpad=20)
outputs[zone_input] = {'fig': fig3, 'data_table':Data_Out}
return outputs
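# Illustrative note (not part of Marmot): the facet/single-plot switch in the two methods
# above keys only off the figure name passed in from the plot selection file, e.g.
#   'Facet' in "Region_Price_Facet"  -> True  (one facet per scenario)
#   'Facet' in "Region_Price"        -> False (all scenarios on a single 1x1 plot)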
def timeseries_price_all_regions(self, y_axis_max: float = None,
timezone: str = "", start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a price timeseries plot for all regions/zones and plots them on a single facet plot.
Price in $/MWh.
The code automatically creates a facet plot based on the number of regions/zones in the input.
All scenarios are plotted on a single facet for each region/zone.
Args:
y_axis_max (float, optional): Max y-axis value.
Defaults to None.
timezone (str, optional): The timezone to display on the x-axes.
Defaults to "".
start_date_range (str, optional): Defines a start date from which to represent data.
Defaults to None.
end_date_range (str, optional): Defines an end date up to which to represent data.
Defaults to None.
Returns:
dict: dictionary containing the created plot and its data table.
"""
outputs = {}
if self.AGG_BY == 'zone':
agg = 'zone'
else:
agg = 'region'
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, f"{agg}_Price", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
#Location to save to
save_figures = os.path.join(self.figure_folder, self.AGG_BY + '_prices')
outputs = {}
region_number = len(self.Zones)
xdimension, ydimension = self.set_x_y_dimension(region_number)
grid_size = xdimension*ydimension
# Used to calculate any excess axis to delete
excess_axs = grid_size - region_number
#setup plot
fig4, axs = plotlib.setup_plot(xdimension,ydimension)
plt.subplots_adjust(wspace=0.1, hspace=0.70)
data_table = []
for n, zone_input in enumerate(self.Zones):
self.logger.info(f"{self.AGG_BY} = {zone_input}")
all_prices=[]
for scenario in self.Scenarios:
price = self._process_data(self[f"{agg}_Price"],scenario,zone_input)
price = price.groupby(["timestamp"]).sum()
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
price = price[start_date_range:end_date_range]
all_prices.append(price)
timeseries = pd.concat(all_prices, axis=1)
timeseries.columns = timeseries.columns.str.replace('_',' ')
data_out = timeseries.copy()
data_out.columns = [zone_input + "_" + str(col) for col in data_out.columns]
data_table.append(data_out)
color_dict = dict(zip(timeseries.columns,self.color_list))
for column in timeseries:
plotlib.create_line_plot(axs,timeseries,column,color_dict,n=n,label=column)
axs[n].set_title(zone_input.replace('_',' '))
if pd.notna(y_axis_max):
axs[n].set_ylim(bottom=0,top=float(y_axis_max))
PlotDataHelper.set_plot_timeseries_format(axs,n)
handles, labels = axs[n].get_legend_handles_labels()
#Legend
axs[grid_size-1].legend((handles), (labels), loc='lower left',bbox_to_anchor=(1,0),
facecolor='inherit', frameon=True)
# Remove extra axes
if excess_axs != 0:
PlotDataHelper.remove_excess_axs(axs,excess_axs,grid_size)
fig4.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False,
right=False)
plt.ylabel(f"{self.AGG_BY} Price ($/MWh)", color='black', rotation='vertical',
labelpad=30)
plt.xlabel(timezone, color='black', rotation='horizontal', labelpad=20)
Data_Table_Out = pd.concat(data_table, axis=1)
Data_Table_Out = Data_Table_Out.add_suffix(" ($/MWh)")
fig4.savefig(os.path.join(save_figures, "Price_Timeseries_All_Regions.svg"),
dpi=600, bbox_inches='tight')
Data_Table_Out.to_csv(os.path.join(save_figures, "Price_Timeseries_All_Regions.csv"))
outputs = DataSavedInModule()
return outputs
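# Illustrative note (not part of Marmot): the facet grid above is padded to
# xdimension * ydimension axes and any leftover axes are removed, i.e.
#   grid_size  = xdimension * ydimension
#   excess_axs = grid_size - region_number
# For example, 7 regions on a 3x3 grid gives grid_size = 9 and excess_axs = 2
# axes for PlotDataHelper.remove_excess_axs to delete.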
def node_pdc(self, **kwargs):
"""Creates a price duration curve for a set of specifc nodes.
Price in $/MWh.
The code will create either a facet plot or a single plot depending on
the number of nodes included in plot_select.csv property entry.
Returns:
DataSavedInModule: DataSavedInModule exception.
"""
outputs = self._node_price(PDC=True, **kwargs)
return outputs
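# Illustrative sketch (assumption, not Marmot code): a price duration curve is simply the
# price series sorted from highest to lowest and re-indexed by interval, e.g.
#   pdc = price.sort_values(ascending=False).reset_index(drop=True)
# which is what the PDC branch of _node_price below does for each selected node.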
def node_timeseries_price(self, **kwargs):
"""Creates a price timeseries plot for a set of specifc nodes.
Price in $/MWh.
The code will create either a facet plot or a single plot depending on
the number of nodes included in plot_select.csv property entry.
Returns:
DataSavedInModule: DataSavedInModule exception.
"""
outputs = self._node_price(**kwargs)
return outputs
def _node_price(self, PDC: bool = False, figure_name: str = None,
prop: str = None, y_axis_max: float = None,
timezone: str = "",
start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a price duration curve or timeseries plot for a set of specifc nodes.
This method is called from either node_pdc() or node_timeseries_price()
If PDC == True, a price duration curve plot will be created
The code will create either a facet plot or a single plot depending on
the number of nodes included in plot_select.csv property entry.
Plots and Data are saved within the module
Args:
PDC (bool, optional): If True creates a price duration curve.
Defaults to False.
figure_name (str, optional): User defined figure output name.
Defaults to None.
prop (str, optional): Comma-separated string of nodes to display.
Defaults to None.
y_axis_max (float, optional): Max y-axis value.
Defaults to None.
timezone (str, optional): The timezone to display on the x-axes.
Defaults to "".
start_date_range (str, optional): Defines a start date from which to represent data.
Defaults to None.
end_date_range (str, optional): Defines an end date up to which to represent data.
Defaults to None.
Returns:
DataSavedInModule: DataSavedInModule exception.
"""
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "node_Price", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
node_figure_folder = os.path.join(self.figure_folder, 'node_prices')
try:
os.makedirs(node_figure_folder)
except FileExistsError:
# directory already exists
pass
#Select only node specified in Marmot_plot_select.csv.
select_nodes = prop.split(",")
if select_nodes == None:
return InputSheetError()
self.logger.info(f'Plotting Prices for {select_nodes}')
all_prices=[]
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
price = self["node_Price"][scenario]
price = price.loc[(slice(None), select_nodes),:]
price = price.groupby(["timestamp","node"]).sum()
price.rename(columns={0:scenario}, inplace=True)
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
price = price[pd.to_datetime(start_date_range):pd.to_datetime(end_date_range)]
if PDC:
price.sort_values(by=['node',scenario], ascending=False,
inplace=True)
price.reset_index('timestamp', drop=True, inplace=True)
all_prices.append(price)
pdc = pd.concat(all_prices,axis=1)
pdc.columns = pdc.columns.str.replace('_',' ')
Data_Out = pdc.add_suffix(" ($/MWh)")
xdimension, ydimension = self.set_x_y_dimension(len(select_nodes))
#setup plot
fig, axs = plotlib.setup_plot(xdimension,ydimension)
plt.subplots_adjust(wspace=0.1, hspace=0.70)
color_dict = dict(zip(pdc.columns, self.color_list))
for n, node in enumerate(select_nodes):
if PDC:
try:
node_pdc = pdc.xs(node)
node_pdc.reset_index(drop=True, inplace=True)
except KeyError:
self.logger.info(f"{node} not found")
continue
else:
try:
node_pdc = pdc.xs(node, level='node')
except KeyError:
self.logger.info(f"{node} not found")
continue
for column in node_pdc:
plotlib.create_line_plot(axs,node_pdc, column, color_dict,
n=n, label=column)
if pd.notna(y_axis_max):
axs[n].set_ylim(bottom=0, top=float(y_axis_max))
if not PDC:
PlotDataHelper.set_plot_timeseries_format(axs,n)
# axs[n].set_xlim(0,len(node_pdc))
handles, labels = axs[n].get_legend_handles_labels()
#Legend
axs[len(select_nodes)-1].legend((handles), (labels),
loc='lower left',
bbox_to_anchor=(1,0),
facecolor='inherit',
frameon=True)
axs[n].set_title(node)
fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False,
left=False, right=False)
plt.ylabel('Node Price ($/MWh)', color='black', rotation='vertical',
labelpad=30)
if PDC:
plt.xlabel('Intervals', color='black', rotation='horizontal',
labelpad=20)
else:
plt.xlabel(timezone, color='black', rotation='horizontal',
labelpad=20)
fig.savefig(os.path.join(node_figure_folder, figure_name + ".svg"),
dpi=600, bbox_inches='tight')
Data_Out.to_csv(os.path.join(node_figure_folder, figure_name + ".csv"))
outputs = DataSavedInModule()
return outputs
def node_price_hist(self, **kwargs):
"""Creates a price histogram for a specifc nodes. Price in $/MWh.
A facet plot will be created if more than one scenario are included on the
user input sheet
Each scenario will be plotted on a separate subplot.
If a set of nodes are passed at input, each will be saved to a separate
figure with node name as a suffix.
Plots and Data are saved within the module
Returns:
DataSavedInModule: DataSavedInModule exception.
"""
outputs = self._node_hist(**kwargs)
return outputs
def node_price_hist_diff(self, **kwargs):
"""Creates a difference price histogram for a specifc nodes. Price in $/MWh.
This plot requires more than one scenario to display correctly.
A facet plot will be created
Each scenario will be plotted on a separate subplot, with values displaying
the relative difference to the first scenario in the list.
If a set of nodes are passed at input, each will be saved to a separate
figure with node name as a suffix.
Plots and Data are saved within the module
Returns:
DataSavedInModule: DataSavedInModule exception.
"""
outputs = self._node_hist(diff_plot=True, **kwargs)
return outputs
def _node_hist(self, diff_plot: bool = False, figure_name: str = None,
prop: str = None, start_date_range: str = None,
end_date_range: str = None, **_):
"""Internal code for hist plots.
Called from node_price_hist() or node_price_hist_diff().
The histogram range and bin size are currently hardcoded from -100 to +100
with a bin width of 2.5 $/MWh.
Args:
diff_plot (bool, optional): If True creates a diff plot.
Defaults to False.
figure_name (str, optional): User defined figure output name.
Defaults to None.
prop (str, optional): Comma-separated string of nodes to display.
Defaults to None.
start_date_range (str, optional): Defines a start date from which to represent data.
Defaults to None.
end_date_range (str, optional): Defines an end date up to which to represent data.
Defaults to None.
Returns:
DataSavedInModule: DataSavedInModule exception.
"""
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "node_Price", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
return MissingInputData()
node_figure_folder = os.path.join(self.figure_folder, 'node_prices')
try:
os.makedirs(node_figure_folder)
except FileExistsError:
# directory already exists
pass
#Select only node specified in Marmot_plot_select.csv.
select_nodes = prop.split(",")
if select_nodes == None:
return InputSheetError()
for node in select_nodes:
self.logger.info(f'Plotting Prices for Node: {node}')
all_prices=[]
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
price = self["node_Price"][scenario]
try:
price = price.xs(node, level='node')
except KeyError:
self.logger.info(f"{node} not found")
continue
# price = price.loc[(slice(None), select_nodes),:]
price = price.groupby(["timestamp"]).sum()
price.rename(columns={0:scenario}, inplace=True)
if pd.notna(start_date_range):
self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
price = price[pd.to_datetime(start_date_range):pd.to_datetime(end_date_range)]
from datetime import datetime, timedelta
from scipy import stats
import pandas as pd
import math
import numpy as np
def create_sharpe_ratio(returns, periods=252, rf=0):
'''
Create the Sharpe ratio for the strategy, based by default on a benchmark of zero (i.e. no risk-free rate information).
:param returns: A pandas Series representing period percentage returns.
:param periods: Daily (252), Hourly (252 * 6.5), Minutely (252 * 6.5 * 60), etc.
:param rf: Annualised risk-free rate, defaults to 0.
'''
return np.sqrt(periods) * (np.mean(returns - rf/periods)) / np.std(returns - rf/periods)
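# Example usage (illustrative only; the return series below is synthetic, not from any strategy):
#   import numpy as np
#   import pandas as pd
#   rets = pd.Series(np.random.default_rng(0).normal(0.0005, 0.01, 252))  # ~1 year of daily returns
#   sharpe = create_sharpe_ratio(rets, periods=252)  # annualised Sharpe, rf defaults to 0
# With rf > 0, the per-period excess return (returns - rf/periods) is used in both the
# numerator and the denominator, as in the formula above.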
def create_drawdowns(pnl):
'''
Calculate the largest peak-to-trough drawdown of the PnL curve as well as the duration of the drawdown. Requires that pnl is a pandas Series.
:param pnl: A pandas Series representing period percentage returns.
'''
# Calculate cumulative returns curve and set up High Water Mark
hwm = [0]
# Create drawdown and duration series
idx = pnl.index
drawdown = pd.Series(index=idx)
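# Hedged sketch of how this calculation is commonly completed (consistent with the
# docstring above, but not necessarily the original author's exact code):
duration = pd.Series(index=idx)
# Walk the curve, tracking the running high water mark and the distance below it
for t in range(1, len(idx)):
    hwm.append(max(hwm[t - 1], pnl.iloc[t]))
    drawdown.iloc[t] = hwm[t] - pnl.iloc[t]
    duration.iloc[t] = 0 if drawdown.iloc[t] == 0 else duration.iloc[t - 1] + 1
return drawdown, drawdown.max(), duration.max()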
import logging
from temporal_granularity.src.metrics.metrics import Metrics
from pandas.util.testing import assert_frame_equal
import pandas as pd
import sys
from pathlib import Path
project_dir = Path("__file__").resolve().parents[1]
sys.path.insert(0, '{}/temporal_granularity/'.format(project_dir))
logging.basicConfig(level=logging.DEBUG)
class Test_Metrics:
def test_all_nrmse(self):
original_solar = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "datetime": [1, 2, 3, 4, 5, 6, 7]})
representative_solar = pd.DataFrame({"capacity_factor": [1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8], "datetime": [1, 2, 3, 4, 5, 6, 7]})
original_wind = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "datetime": [1, 2, 3, 4, 5, 6, 7]})
representative_wind = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "datetime": [1, 2, 3, 4, 5, 6, 7]})
original_load = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "datetime": [1, 2, 3, 4, 5, 6, 7]})
representative_load = pd.DataFrame({"capacity_factor": [1.05, 1.15, 1.25, 1.35, 1.45, 1.55, 1.65], "datetime": [1, 2, 3, 4, 5, 6, 7]})
all_nrmse = Metrics(original_solar, representative_solar, original_wind, representative_wind, original_load, representative_load, "dc")._get_nrmse()
expected_nrmse = [{'metric': 'nrmse dc', 'series_type': 'solar', 'value': 16.666666666666668}, {'metric': 'nrmse dc',
'series_type': 'wind', 'value': 0.0}, {'metric': 'nrmse dc', 'series_type': 'load', 'value': 8.33333333333334}]
assert all_nrmse == expected_nrmse
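# Worked check of the expected solar value above (assumption: _get_nrmse normalises the
# RMSE by the range of the original series and reports a percentage): the error is a
# constant 0.1 and the original range is 1.7 - 1.1 = 0.6, so 100 * 0.1 / 0.6 = 16.67.
# The wind series are identical, giving 0.0, and the constant load offset of 0.05 gives
# 100 * 0.05 / 0.6 = 8.33.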
def test_all_rae(self):
original_solar = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "index_for_year": [1, 2, 3, 4, 5, 6, 7]})
representative_solar = pd.DataFrame({"capacity_factor": [1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8], "index_for_year": [1, 2, 3, 4, 5, 6, 7]})
original_wind = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "index_for_year": [1, 2, 3, 4, 5, 6, 7]})
representative_wind = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "index_for_year": [1, 2, 3, 4, 5, 6, 7]})
original_load = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "index_for_year": [1, 2, 3, 4, 5, 6, 7]})
representative_load = pd.DataFrame({"capacity_factor": [1.05, 1.15, 1.25, 1.35, 1.45, 1.55, 1.65], "index_for_year": [1, 2, 3, 4, 5, 6, 7]})
all_nrmse = Metrics(original_solar, representative_solar, original_wind, representative_wind, original_load, representative_load, "dc")._get_rae()
expected_nrmse = [{'metric': 'rae dc', 'series_type': 'solar', 'value': 7.142857142857138}, {'metric': 'rae dc',
'series_type': 'wind', 'value': 0.0}, {'metric': 'rae dc', 'series_type': 'load', 'value': 3.5714285714285796}]
assert all_nrmse == expected_nrmse
def test_all_correlations(self):
original_solar = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "datetime": [1, 2, 3, 4, 5, 6, 7]})
representative_solar = pd.DataFrame({"capacity_factor": [1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8], "datetime": [1, 2, 3, 4, 5, 6, 7]})
original_wind = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "datetime": [1, 2, 3, 4, 5, 6, 7]})
representative_wind = pd.DataFrame({"capacity_factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7], "datetime": [1, 2, 3, 4, 5, 6, 7]})
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 16 09:04:46 2017
@author: <NAME>
pygemfxns_output_postprocessing.py is a mix of post-processing for things like plots, relationships between variables,
and any other comparisons between output or input data.
"""
# Built-in Libraries
import os
import collections
# External Libraries
import numpy as np
import pandas as pd
import netCDF4 as nc
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import scipy
import cartopy
import xarray as xr
# Local Libraries
import pygem_input as input
import pygemfxns_modelsetup as modelsetup
import pygemfxns_massbalance as massbalance
import class_mbdata
import run_simulation
# Script options
option_plot_futuresim = 0
option_mb_shean_analysis = 0
option_mb_shean_regional = 0
option_geodeticMB_loadcompare = 0
option_check_biasadj = 0
option_parameter_relationships = 0
option_MCMC_ensembles = 0
option_calcompare_w_geomb = 0
option_add_metadata2netcdf = 0
option_var_mon2annual = 0
option_savefigs = 0  # referenced below when saving figures; defined here so the flag exists (assumed default)
#%% SUBSET RESULTS INTO EACH VARIABLE NAME SO EASIER TO TRANSFER
if option_var_mon2annual == 1:
netcdf_fp_prefix = input.output_filepath + 'simulations/spc/20181108_vars/'
vns = ['acc_glac_monthly', 'melt_glac_monthly', 'refreeze_glac_monthly', 'frontalablation_glac_monthly',
'massbaltotal_glac_monthly', 'temp_glac_monthly', 'prec_glac_monthly', 'runoff_glac_monthly']
# vns = ['runoff_glac_monthly']
def coords_attrs_dict(ds, vn):
"""
Retrieve dictionaries containing coordinates, attributes, and encoding for the dataset and variable name
Parameters
----------
ds : xr.Dataset
dataset of a variable of interest
vn : str
variable name
Returns
-------
output_coords_dict : dictionary
coordinates for the modified variable
output_attrs_dict: dictionary
attributes to add to the modified variable
encoding : dictionary
encoding used with exporting xarray dataset to netcdf
"""
# Variable coordinates dictionary
output_coords_dict = {
'temp_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'prec_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'runoff_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'acc_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'acc_glac_summer': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'acc_glac_winter': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'melt_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'melt_glac_summer': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'melt_glac_winter': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'refreeze_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'refreeze_glac_summer': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'refreeze_glac_winter': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'frontalablation_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'frontalablation_glac_summer': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'frontalablation_glac_winter': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'massbaltotal_glac_annual': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'massbaltotal_glac_summer': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)]),
'massbaltotal_glac_winter': collections.OrderedDict(
[('glac', ds.glac.values), ('year', ds.year.values), ('stats', ds.stats.values)])
}
# Attributes dictionary
output_attrs_dict = {
'temp_glac_annual': {
'long_name': 'glacier-wide mean air temperature',
'units': 'degC',
'temporal_resolution': 'annual',
'comment': (
'the annual mean weights each month equally, each elevation bin is weighted equally'
' to compute the mean temperature, and bins where the glacier no longer exists due to '
'retreat have been removed')},
'prec_glac_annual': {
'long_name': 'glacier-wide precipitation (liquid)',
'units': 'm',
'temporal_resolution': 'annual',
'comment': 'only the liquid precipitation, solid precipitation excluded'},
'acc_glac_annual': {
'long_name': 'glacier-wide accumulation',
'units': 'm w.e.',
'temporal_resolution': 'annual',
'comment': 'only the solid precipitation'},
'acc_glac_summer': {
'long_name': 'glacier-wide accumulation',
'units': 'm w.e.',
'temporal_resolution': 'annual summer',
'comment': 'only the solid precipitation'},
'acc_glac_winter': {
'long_name': 'glacier-wide accumulation',
'units': 'm w.e.',
'temporal_resolution': 'annual winter',
'comment': 'only the solid precipitation'},
'melt_glac_annual': {
'long_name': 'glacier-wide melt',
'units': 'm w.e.',
'temporal_resolution': 'annual'},
'melt_glac_summer': {
'long_name': 'glacier-wide melt',
'units': 'm w.e.',
'temporal_resolution': 'annual summer'},
'melt_glac_winter': {
'long_name': 'glacier-wide melt',
'units': 'm w.e.',
'temporal_resolution': 'annual winter'},
'refreeze_glac_annual': {
'long_name': 'glacier-wide refreeze',
'units': 'm w.e.',
'temporal_resolution': 'annual'},
'refreeze_glac_summer': {
'long_name': 'glacier-wide refreeze',
'units': 'm w.e.',
'temporal_resolution': 'annual summer'},
'refreeze_glac_winter': {
'long_name': 'glacier-wide refreeze',
'units': 'm w.e.',
'temporal_resolution': 'annual winter'},
'frontalablation_glac_annual': {
'long_name': 'glacier-wide frontal ablation',
'units': 'm w.e.',
'temporal_resolution': 'annual',
'comment': (
'mass losses from calving, subaerial frontal melting, sublimation above the '
'waterline and subaqueous frontal melting below the waterline')},
'frontalablation_glac_summer': {
'long_name': 'glacier-wide frontal ablation',
'units': 'm w.e.',
'temporal_resolution': 'annual summer',
'comment': (
'mass losses from calving, subaerial frontal melting, sublimation above the '
'waterline and subaqueous frontal melting below the waterline')},
'frontalablation_glac_winter': {
'long_name': 'glacier-wide frontal ablation',
'units': 'm w.e.',
'temporal_resolution': 'annual winter',
'comment': (
'mass losses from calving, subaerial frontal melting, sublimation above the '
'waterline and subaqueous frontal melting below the waterline')},
'massbaltotal_glac_annual': {
'long_name': 'glacier-wide total mass balance',
'units': 'm w.e.',
'temporal_resolution': 'annual',
'comment': 'total mass balance is the sum of the climatic mass balance and frontal ablation'},
'massbaltotal_glac_summer': {
'long_name': 'glacier-wide total mass balance',
'units': 'm w.e.',
'temporal_resolution': 'annual summer',
'comment': 'total mass balance is the sum of the climatic mass balance and frontal ablation'},
'massbaltotal_glac_winter': {
'long_name': 'glacier-wide total mass balance',
'units': 'm w.e.',
'temporal_resolution': 'annual winter',
'comment': 'total mass balance is the sum of the climatic mass balance and frontal ablation'},
'runoff_glac_annual': {
'long_name': 'glacier-wide runoff',
'units': 'm**3',
'temporal_resolution': 'annual',
'comment': 'runoff from the glacier terminus, which moves over time'},
}
encoding = {}
noencoding_vn = ['stats', 'glac_attrs']
# Encoding (specify _FillValue, offsets, etc.)
if vn not in noencoding_vn:
encoding[vn] = {'_FillValue': False}
return output_coords_dict, output_attrs_dict, encoding
for vn in vns:
netcdf_fp = netcdf_fp_prefix + vn + '/'
for i in os.listdir(netcdf_fp):
if i.endswith('.nc'):
print(i)
# Open dataset and extract annual values
ds = xr.open_dataset(netcdf_fp + i)
ds_mean = ds[vn].values[:,:,0]
ds_std = ds[vn].values[:,:,1]
ds_var = ds_std**2
# Compute annual/seasonal mean/sum and standard deviation for the variable of interest
if vn == 'temp_glac_monthly':
output_list = ['annual']
vn_annual = 'temp_glac_annual'
# Mean annual temperature, standard deviation, and variance
ds_mean_annual = ds_mean.reshape(-1,12).mean(axis=1).reshape(-1,int(ds_mean.shape[1]/12))
ds_var_annual = ds_var.reshape(-1,12).mean(axis=1).reshape(-1,int(ds_std.shape[1]/12))
ds_std_annual = ds_var_annual**0.5
ds_values_annual = np.concatenate((ds_mean_annual[:,:,np.newaxis], ds_std_annual[:,:,np.newaxis]),
axis=2)
elif vn in ['prec_glac_monthly', 'runoff_glac_monthly']:
output_list = ['annual']
vn_annual = 'prec_glac_annual'
# Total annual precipitation, standard deviation, and variance
ds_sum_annual = ds_mean.reshape(-1,12).sum(axis=1).reshape(-1,int(ds_mean.shape[1]/12))
ds_var_annual = ds_var.reshape(-1,12).sum(axis=1).reshape(-1,int(ds_std.shape[1]/12))
ds_std_annual = ds_var_annual**0.5
ds_values_annual = np.concatenate((ds_sum_annual[:,:,np.newaxis], ds_std_annual[:,:,np.newaxis]),
axis=2)
elif vn in ['acc_glac_monthly', 'melt_glac_monthly', 'refreeze_glac_monthly',
'frontalablation_glac_monthly', 'massbaltotal_glac_monthly']:
output_list = ['annual', 'summer', 'winter']
# Annual total, standard deviation, and variance
ds_sum_annual = ds_mean.reshape(-1,12).sum(axis=1).reshape(-1,int(ds_mean.shape[1]/12))
ds_var_annual = ds_var.reshape(-1,12).sum(axis=1).reshape(-1,int(ds_std.shape[1]/12))
ds_std_annual = ds_var_annual**0.5
ds_values_annual = np.concatenate((ds_sum_annual[:,:,np.newaxis], ds_std_annual[:,:,np.newaxis]),
axis=2)
# Seasonal total, standard deviation, and variance
if ds.time.year_type == 'water year':
option_wateryear = 1
elif ds.time.year_type == 'calendar year':
option_wateryear = 2
else:
option_wateryear = 3
dates_table = modelsetup.datesmodelrun(startyear=ds.year.values[0], endyear=ds.year.values[-1],
spinupyears=0, option_wateryear=option_wateryear)
# For seasonal calculations copy monthly values and remove the other season's values
ds_mean_summer = ds_mean.copy()
ds_var_summer = ds_var.copy()
ds_mean_summer[:,dates_table.season.values == 'winter'] = 0
ds_sum_summer = ds_mean_summer.reshape(-1,12).sum(axis=1).reshape(-1, int(ds_mean.shape[1]/12))
ds_var_summer = ds_var_summer.reshape(-1,12).sum(axis=1).reshape(-1,int(ds_std.shape[1]/12))
ds_std_summer = ds_var_summer**0.5
ds_values_summer = np.concatenate((ds_sum_summer[:,:,np.newaxis], ds_std_summer[:,:,np.newaxis]),
axis=2)
ds_mean_winter = ds_mean.copy()
ds_var_winter = ds_var.copy()
ds_mean_winter[:,dates_table.season.values == 'summer'] = 0
ds_sum_winter = ds_mean_winter.reshape(-1,12).sum(axis=1).reshape(-1, int(ds_mean.shape[1]/12))
ds_var_winter = ds_var_winter.reshape(-1,12).sum(axis=1).reshape(-1,int(ds_std.shape[1]/12))
ds_std_winter = ds_var_winter**0.5
ds_values_winter = np.concatenate((ds_sum_winter[:,:,np.newaxis], ds_std_winter[:,:,np.newaxis]),
axis=2)
# Create modified dataset
for temporal_res in output_list:
vn_new = vn.split('_')[0] + '_glac_' + temporal_res
output_fp = netcdf_fp_prefix + vn_new + '/'
output_fn = i.split('.nc')[0][:-7] + temporal_res + '.nc'
output_coords_dict, output_attrs_dict, encoding = coords_attrs_dict(ds, vn_new)
if temporal_res == 'annual':
ds_new = xr.Dataset({vn_new: (list(output_coords_dict[vn_new].keys()), ds_values_annual)},
coords=output_coords_dict[vn_new])
elif temporal_res == 'summer':
ds_new = xr.Dataset({vn_new: (list(output_coords_dict[vn_new].keys()), ds_values_summer)},
coords=output_coords_dict[vn_new])
elif temporal_res == 'winter':
ds_new = xr.Dataset({vn_new: (list(output_coords_dict[vn_new].keys()), ds_values_winter)},
coords=output_coords_dict[vn_new])
ds_new[vn_new].attrs = output_attrs_dict[vn_new]
# Merge new dataset into the old to retain glacier table and other attributes
output_ds = xr.merge((ds, ds_new))
output_ds = output_ds.drop(vn)
# Export netcdf
if not os.path.exists(output_fp):
os.makedirs(output_fp)
output_ds.to_netcdf(output_fp + output_fn, encoding=encoding)
# Remove file
os.remove(netcdf_fp + i)
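# Quick illustrative check of the monthly-to-annual uncertainty propagation used above
# (not part of PyGEM): the annual variance is taken as the sum (or, for temperature, the
# mean) of the monthly variances, and the annual standard deviation is its square root.
# For example, summing 12 months that each have a standard deviation of 0.5:
#   annual_std = (12 * 0.5**2) ** 0.5 = 0.5 * 12**0.5 ≈ 1.73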
#%%===== PLOT FUNCTIONS =============================================================================================
def plot_latlonvar(lons, lats, variable, rangelow, rangehigh, title, xlabel, ylabel, colormap, east, west, south, north,
xtick=1,
ytick=1,
marker_size=2,
option_savefig=0,
fig_fn='Samplefig_fn.png',
output_filepath = input.main_directory + '/../Output/'):
"""
Plot a variable according to its latitude and longitude
"""
# Create the projection
ax = plt.axes(projection=cartopy.crs.PlateCarree())
# Add country borders for reference
ax.add_feature(cartopy.feature.BORDERS)
# Set the extent
ax.set_extent([east, west, south, north], cartopy.crs.PlateCarree())
# Label title, x, and y axes
plt.title(title)
ax.set_xticks(np.arange(east,west+1,xtick), cartopy.crs.PlateCarree())
ax.set_yticks(np.arange(south,north+1,ytick), cartopy.crs.PlateCarree())
plt.xlabel(xlabel)
plt.ylabel(ylabel)
# Plot the data
plt.scatter(lons, lats, s=marker_size, c=variable, cmap=colormap, marker='o', edgecolor='black', linewidths=0.25)
# plotting x, y, size [s=__], color bar [c=__]
plt.clim(rangelow,rangehigh)
# set the range of the color bar
plt.colorbar(fraction=0.02, pad=0.04)
# fraction resizes the colorbar, pad is the space between the plot and colorbar
if option_savefig == 1:
plt.savefig(output_filepath + fig_fn)
plt.show()
def plot_caloutput(data):
"""
Plot maps and histograms of the calibration parameters to visualize results
"""
# Set extent
east = int(round(data['CenLon'].min())) - 1
west = int(round(data['CenLon'].max())) + 1
south = int(round(data['CenLat'].min())) - 1
north = int(round(data['CenLat'].max())) + 1
xtick = 1
ytick = 1
# Select relevant data
lats = data['CenLat'][:]
lons = data['CenLon'][:]
precfactor = data['precfactor'][:]
tempchange = data['tempchange'][:]
ddfsnow = data['ddfsnow'][:]
calround = data['calround'][:]
massbal = data['MB_geodetic_mwea']
# Plot regional maps
plot_latlonvar(lons, lats, massbal, 'Geodetic mass balance [mwea]', 'longitude [deg]', 'latitude [deg]', east, west,
south, north, xtick, ytick)
plot_latlonvar(lons, lats, precfactor, 'precipitation factor', 'longitude [deg]', 'latitude [deg]', east, west,
south, north, xtick, ytick)
plot_latlonvar(lons, lats, tempchange, 'Temperature bias [degC]', 'longitude [deg]', 'latitude [deg]', east, west,
south, north, xtick, ytick)
plot_latlonvar(lons, lats, ddfsnow, 'DDF_snow [m w.e. d-1 degC-1]', 'longitude [deg]', 'latitude [deg]', east, west,
south, north, xtick, ytick)
plot_latlonvar(lons, lats, calround, 'Calibration round', 'longitude [deg]', 'latitude [deg]', east, west,
south, north, xtick, ytick)
# Plot histograms
data.hist(column='MB_difference_mwea', bins=50)
plt.title('Mass Balance Difference [mwea]')
data.hist(column='precfactor', bins=50)
plt.title('Precipitation factor [-]')
data.hist(column='tempchange', bins=50)
plt.title('Temperature bias [degC]')
data.hist(column='ddfsnow', bins=50)
plt.title('DDFsnow [mwe d-1 degC-1]')
plt.xticks(rotation=60)
data.hist(column='calround', bins = [0.5, 1.5, 2.5, 3.5])
plt.title('Calibration round')
plt.xticks([1, 2, 3])
#%% ===== PARAMETER RELATIONSHIPS ======
if option_parameter_relationships == 1:
# Load csv
ds = pd.read_csv(input.main_directory + '/../Output/20180710_cal_modelparams_opt1_R15_ERA-Interim_1995_2015.csv',
index_col=0)
property_cn = 'Zmed'
# Relationship between model parameters and glacier properties
plt.figure(figsize=(6,10))
plt.subplots_adjust(wspace=0.05, hspace=0.05)
plt.suptitle('Model parameters vs. ' + property_cn, y=0.94)
# Temperature change
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(ds[property_cn], ds['tempchange'])
xplot = np.arange(4000,6500)
line = slope*xplot+intercept
plt.subplot(4,1,1)
plt.plot(ds[property_cn], ds['tempchange'], 'o', mfc='none', mec='black')
plt.plot(xplot, line)
plt.xlabel(property_cn + ' [masl]', size=10)
plt.ylabel('tempchange \n[degC]', size=12)
equation = 'tempchange = ' + str(round(slope,7)) + ' * ' + property_cn + ' + ' + str(round(intercept,5))
plt.text(0.15, 0.85, equation, fontsize=12, transform=plt.gcf().transFigure,
bbox=dict(facecolor='white', edgecolor='none', alpha=0.85))
print(equation, ' , R2 =', round(r_value**2,2))
# Precipitation factor
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(ds[property_cn], ds['precfactor'])
xplot = np.arange(4000,6500)
line = slope*xplot+intercept
plt.subplot(4,1,2)
plt.plot(ds[property_cn], ds['precfactor'], 'o', mfc='none', mec='black')
plt.plot(xplot, line)
plt.xlabel(property_cn + ' [masl]', size=12)
plt.ylabel('precfactor \n[-]', size=12)
equation = 'precfactor = ' + str(round(slope,7)) + ' * ' + property_cn + ' + ' + str(round(intercept,5))
plt.text(0.15, 0.65, equation, fontsize=12, transform=plt.gcf().transFigure,
bbox=dict(facecolor='white', edgecolor='none', alpha=0.85))
print(equation, ' , R2 =', round(r_value**2,2))
# Degree day factor of snow
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(ds[property_cn], ds['ddfsnow'])
xplot = np.arange(4000,6500)
line = slope*xplot+intercept
plt.subplot(4,1,3)
plt.plot(ds[property_cn], ds['ddfsnow'], 'o', mfc='none', mec='black')
plt.plot(xplot, line)
plt.xlabel(property_cn + ' [masl]', size=12)
plt.ylabel('ddfsnow \n[mwe d-1 degC-1]', size=12)
# plt.legend()
equation = 'ddfsnow = ' + str(round(slope,12)) + ' * ' + property_cn + ' + ' + str(round(intercept,5))
plt.text(0.15, 0.45, equation, fontsize=12, transform=plt.gcf().transFigure,
bbox=dict(facecolor='white', edgecolor='none', alpha=0.85))
print(equation, ' , R2 =', round(r_value**2,2))
# Precipitation gradient
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(ds[property_cn], ds['precgrad'])
xplot = np.arange(4000,6500)
line = slope*xplot+intercept
plt.subplot(4,1,4)
plt.plot(ds[property_cn], ds['precgrad'], 'o', mfc='none', mec='black')
plt.plot(xplot, line)
plt.xlabel(property_cn + ' [masl]', size=12)
plt.ylabel('precgrad \n[% m-1]', size=12)
# plt.legend()
equation = 'precgrad = ' + str(round(slope,12)) + ' * ' + property_cn + ' + ' + str(round(intercept,5))
plt.text(0.15, 0.25, equation, fontsize=12, transform=plt.gcf().transFigure,
bbox=dict(facecolor='white', edgecolor='none', alpha=0.85))
print(equation, ' , R2 =', round(r_value**2,2))
# Plot and save figure
if option_savefigs == 1:
plt.savefig(input.output_filepath + 'figures/' + 'modelparameters_vs_' + property_cn + '.png',
bbox_inches='tight')
plt.show()
#%% ===== PLOTTING: Future simulations =====
if option_plot_futuresim == 1:
output_fp = input.output_filepath + 'R15_sims_20180530/'
gcm_list = ['MPI-ESM-LR', 'GFDL-CM3', 'CanESM2', 'GISS-E2-R']
# gcm_list = ['NorESM1-M']
# gcm_list = ['MPI-ESM-LR']
rcp_scenario = 'rcp26'
rgi_regionO1 = [15]
output_all = []
gcm = gcm_list[0]
for gcm in gcm_list:
# for rcp_scenario in ['rcp26', 'rcp85']:
print(gcm)
output_fn = 'PyGEM_R' + str(rgi_regionO1[0]) + '_' + gcm + '_' + rcp_scenario + '_biasadj_opt1_1995_2100.nc'
output = nc.Dataset(output_fp + output_fn)
# Select relevant data
main_glac_rgi = pd.DataFrame(output['glacier_table'][:], columns=output['glacier_table_header'][:])
"""
Functions for comparing and visualizing model performance. Most of these functions rely on ATOM's model tracker and
datastore services, which are not part of the standard AMPL installation, but a few functions will work on collections of
models saved as local files.
"""
import os
import sys
import pdb
import pandas as pd
import numpy as np
import matplotlib
import logging
import json
import shutil
import tarfile
import tempfile
from collections import OrderedDict
from atomsci.ddm.utils import datastore_functions as dsf
from atomsci.ddm.pipeline import model_tracker as trkr
import atomsci.ddm.pipeline.model_pipeline as mp
import atomsci.ddm.pipeline.parameter_parser as parse
import atomsci.ddm.pipeline.model_wrapper as mw
import atomsci.ddm.pipeline.featurization as feat
from tensorflow.python.keras.utils.layer_utils import count_params
logger = logging.getLogger('ATOM')
mlmt_supported = True
try:
from atomsci.clients import MLMTClient
except (ModuleNotFoundError, ImportError):
logger.debug("Model tracker client not supported in your environment; can look at models in filesystem only.")
mlmt_supported = False
matplotlib.rc('xtick', labelsize=12)
matplotlib.rc('ytick', labelsize=12)
matplotlib.rc('axes', labelsize=12)
logging.basicConfig(format='%(asctime)-15s %(message)s')
nan = np.float32('nan')
#------------------------------------------------------------------------------------------------------------------
def del_ignored_params(dictionary, ignored_params):
"""
Deletes ignored parameters from the dictionary if they exist
Args:
dictionary (dict): A dictionary with parameters
ignored_params (list(str)): A list of keys potentially in the dictionary
Returns:
None
"""
for ip in ignored_params:
if ip in dictionary:
del dictionary[ip]
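# Example usage (illustrative):
#   params = {'learning_rate': 1e-3, 'batch_size': 32}
#   del_ignored_params(params, ['batch_size', 'not_present'])
#   # params is now {'learning_rate': 0.001}; keys absent from the dict are silently skipped.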
#------------------------------------------------------------------------------------------------------------------
def get_collection_datasets(collection_name):
"""
Returns a list of unique training datasets used for all models in a given collection.
Args:
collection_name (str): Name of model tracker collection to search for models.
Returns:
list: List of model training (dataset_key, bucket) tuples.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
dataset_set = set()
mlmt_client = dsf.initialize_model_tracker()
dset_dicts = mlmt_client.model.query_datasets(collection_name=collection_name, metrics_type='training').result()
# Convert to a list of (dataset_key, bucket) tuples
for dset_dict in dset_dicts:
dataset_set.add((dset_dict['dataset_key'], dset_dict['bucket']))
return sorted(dataset_set)
#------------------------------------------------------------------------------------------------------------------
def extract_collection_perf_metrics(collection_name, output_dir, pred_type='regression'):
"""
Obtain list of training datasets with models in the given collection. Get performance metrics for
models on each dataset and save them as CSV files in the given output directory.
Args:
collection_name (str): Name of model tracker collection to search for models.
output_dir (str): Directory where tables of performance metrics will be written.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
None
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return
datasets = get_collection_datasets(collection_name)
os.makedirs(output_dir, exist_ok=True)
for dset_key, bucket in datasets:
dset_perf_df = get_training_perf_table(dset_key, bucket, collection_name, pred_type=pred_type)
dset_perf_file = '%s/%s_%s_model_perf_metrics.csv' % (output_dir, os.path.basename(dset_key).replace('.csv', ''), collection_name)
dset_perf_df.to_csv(dset_perf_file, index=False)
print('Wrote file %s' % dset_perf_file)
#------------------------------------------------------------------------------------------------------------------
def get_training_perf_table(dataset_key, bucket, collection_name, pred_type='regression', other_filters = {}):
"""
Load performance metrics from model tracker for all models saved in the model tracker DB under
a given collection that were trained against a particular dataset. Identify training parameters
that vary between models, and generate plots of performance vs particular combinations of
parameters.
Args:
dataset_key (str): Training dataset key.
bucket (str): Training dataset bucket.
collection_name (str): Name of model tracker collection to search for models.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
other_filters (dict): Other filter criteria to use in querying models.
Returns:
pd.DataFrame: Table of models and performance metrics.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
print("Finding models trained on %s dataset %s" % (bucket, dataset_key))
mlmt_client = dsf.initialize_model_tracker()
query_params = {
"match_metadata": {
"training_dataset.bucket": bucket,
"training_dataset.dataset_key": dataset_key,
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
query_params['match_metadata'].update(other_filters)
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
).result()
if metadata_list == []:
print("No matching models returned")
return
else:
print("Found %d matching models" % len(metadata_list))
model_uuid_list = []
model_type_list = []
max_epochs_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
featurizer_list = []
splitter_list = []
rf_estimators_list = []
rf_max_features_list = []
rf_max_depth_list = []
xgb_learning_rate_list = []
xgb_gamma_list = []
best_epoch_list = []
max_epochs_list = []
subsets = ['train', 'valid', 'test']
score_dict = {}
for subset in subsets:
score_dict[subset] = []
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
for metadata_dict in metadata_list:
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get model metrics for this model
metrics_dicts = metadata_dict['training_metrics']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
subset_metrics = {}
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
model_type = model_params['model_type']
model_type_list.append(model_type)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
split_params = metadata_dict['splitting_parameters']
splitter_list.append(split_params['splitter'])
dataset_key = metadata_dict['training_dataset']['dataset_key']
if model_type == 'NN':
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
xgb_learning_rate_list.append(nan)
xgb_gamma_list.append(nan)
if model_type == 'RF':
rf_params = metadata_dict['rf_specific']
rf_estimators_list.append(rf_params['rf_estimators'])
rf_max_features_list.append(rf_params['rf_max_features'])
rf_max_depth_list.append(rf_params['rf_max_depth'])
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
xgb_learning_rate_list.append(nan)
xgb_gamma_list.append(nan)
if model_type == 'xgboost':
xgb_params = metadata_dict['xgb_specific']
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
xgb_learning_rate_list.append(xgb_params["xgb_learning_rate"])
xgb_gamma_list.append(xgb_params["xgb_gamma"])
for subset in subsets:
score_dict[subset].append(subset_metrics[subset][metric_type])
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
model_type=model_type_list,
dataset_key=dataset_key,
featurizer=featurizer_list,
splitter=splitter_list,
max_epochs=max_epochs_list,
best_epoch=best_epoch_list,
learning_rate=learning_rate_list,
layer_sizes=layer_sizes_list,
dropouts=dropouts_list,
rf_estimators=rf_estimators_list,
rf_max_features=rf_max_features_list,
rf_max_depth=rf_max_depth_list,
xgb_learning_rate = xgb_learning_rate_list,
xgb_gamma = xgb_gamma_list))
for subset in subsets:
metric_col = '%s_%s' % (metric_type, subset)
perf_df[metric_col] = score_dict[subset]
sort_metric = '%s_valid' % metric_type
perf_df = perf_df.sort_values(sort_metric, ascending=False)
return perf_df
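# Illustrative note: the performance columns added above follow the pattern
# '<metric_type>_<subset>', e.g. 'r2_score_train', 'r2_score_valid', 'r2_score_test' for
# regression models, and the returned table is sorted by '<metric_type>_valid' in
# descending order.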
# -----------------------------------------------------------------------------------------------------------------
def extract_model_and_feature_parameters(metadata_dict):
"""
Given a model metadata dictionary, extract model and feature parameters. Looks for parameter names
that end in *_specific, e.g. nn_specific, auto_featurizer_specific.
Args:
metadata_dict (dict): Dictionary containing NON-FLATTENED metadata for an AMPL model
Returns:
dictionary containing featurizer and model parameters. Most contain the following
keys. ['max_epochs', 'best_epoch', 'learning_rate', 'layer_sizes', 'drop_outs',
'rf_estimators', 'rf_max_features', 'rf_max_depth', 'xgb_gamma', 'xgb_learning_rate',
'featurizer_parameters_dict', 'model_parameters_dict']
"""
model_params = metadata_dict['model_parameters']
model_type = model_params['model_type']
required = ['max_epochs', 'best_epoch', 'learning_rate', 'layer_sizes', 'dropouts',
'rf_estimators', 'rf_max_features', 'rf_max_depth', 'xgb_gamma', 'xgb_learning_rate']
model_info = {}
model_info['model_uuid'] = metadata_dict['model_uuid']
if model_type == 'NN':
nn_params = metadata_dict['nn_specific']
model_info['max_epochs'] = nn_params['max_epochs']
model_info['best_epoch'] = nn_params['best_epoch']
model_info['learning_rate'] = nn_params['learning_rate']
model_info['layer_sizes'] = ','.join(['%d' % s for s in nn_params['layer_sizes']])
model_info['dropouts'] = ','.join(['%.2f' % d for d in nn_params['dropouts']])
elif model_type == 'RF':
rf_params = metadata_dict['rf_specific']
model_info['rf_estimators'] = rf_params['rf_estimators']
model_info['rf_max_features'] = rf_params['rf_max_features']
model_info['rf_max_depth'] = rf_params['rf_max_depth']
elif model_type == 'xgboost':
xgb_params = metadata_dict['xgb_specific']
model_info['xgb_gamma'] = xgb_params['xgb_gamma']
model_info['xgb_learning_rate'] = xgb_params['xgb_learning_rate']
for r in required:
if r not in model_info:
# all fields must be filled in
model_info[r] = nan
# the new way of extracting model parameters is to simply save them in json
if 'nn_specific' in metadata_dict:
model_metadata = metadata_dict['nn_specific']
# include learning rate, max_epochs, and best_epoch for convenience
model_info['max_epochs'] = model_metadata['max_epochs']
model_info['best_epoch'] = model_metadata['best_epoch']
learning_rate_col = [c for c in model_metadata.keys() if c.endswith('learning_rate')]
if len(learning_rate_col) == 1:
model_info['learning_rate'] = model_metadata[learning_rate_col[0]]
# delete several parameters that aren't normally saved
ignored_params = ['batch_size','bias_init_consts','optimizer_type',
'weight_decay_penalty','weight_decay_penalty_type','weight_init_stddevs']
del_ignored_params(model_metadata, ignored_params)
elif 'rf_specific' in metadata_dict:
model_metadata = metadata_dict['rf_specific']
elif 'xgb_specific' in metadata_dict:
model_metadata = metadata_dict['xgb_specific']
# delete several parameters that aren't normally saved
ignored_params = ['xgb_colsample_bytree','xgb_max_depth',
'xgb_min_child_weight','xgb_n_estimators','xgb_subsample']
del_ignored_params(model_metadata, ignored_params)
else:
# no model parameters found
model_metadata = {}
model_info['model_parameters_dict'] = json.dumps(model_metadata)
if 'ecfp_specific' in metadata_dict:
feat_metadata = metadata_dict['ecfp_specific']
elif 'auto_featurizer_specific' in metadata_dict:
feat_metadata = metadata_dict['auto_featurizer_specific']
elif 'autoencoder_specific' in metadata_dict:
feat_metadata = metadata_dict['autoencoder_specific']
else:
# no model parameters found
feat_metadata = {}
model_info['feat_parameters_dict'] = json.dumps(feat_metadata)
return model_info
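# Illustrative example (hypothetical metadata record, not from a real model tracker entry):
#   meta = {'model_uuid': 'abc123',
#           'model_parameters': {'model_type': 'RF'},
#           'rf_specific': {'rf_estimators': 500, 'rf_max_features': 32, 'rf_max_depth': 20}}
#   info = extract_model_and_feature_parameters(meta)
# info carries the RF hyperparameters, nan for the NN/xgboost fields, and JSON strings in
# 'model_parameters_dict' and 'feat_parameters_dict' ('{}' here, since no featurizer block
# is present in the metadata).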
# ------------------------------------------------------------------------------------------------------------------
def get_best_perf_table(metric_type, col_name=None, result_dir=None, model_uuid=None, metadata_dict=None, PK_pipe=False):
"""
Extract parameters and training run performance metrics for a single model. The model may be
specified either by a metadata dictionary, a model_uuid or a result directory; in the model_uuid case, the function
queries the model tracker DB for the model metadata. For models saved in the filesystem, can query the performance
data from the original result directory, but not from a saved tarball.
Args:
metric_type (str): Performance metric to include in result dictionary.
col_name (str): Collection name containing model, if model is specified by model_uuid.
result_dir (str): result directory of the model, if Model tracker is not supported and metadata_dict not provided.
model_uuid (str): UUID of model to query, if metadata_dict is not provided.
metadata_dict (dict): Full metadata dictionary for a model, including training metrics and
dataset metadata.
PK_pipe (bool): If True, include some additional parameters in the result dictionary specific to PK models.
Returns:
model_info (dict): Dictionary of parameter or metric name - value pairs.
Todo:
Add support for models saved as local tarball files.
"""
if not mlmt_supported and not result_dir:
print("Model tracker not supported in your environment; can examine models saved in filesystem only, 'result_dir' needs to be provided.")
return None
elif mlmt_supported and col_name:
mlmt_client = dsf.initialize_model_tracker()
if metadata_dict is None:
if model_uuid is None:
print("Have to specify either metadata_dict or model_uuid")
return
query_params = {
"match_metadata": {
"model_uuid": model_uuid,
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
metadata_list = list(mlmt_client.model.query_model_metadata(
collection_name=col_name,
query_params=query_params
).result())
if len(metadata_list) == 0:
print("No matching models returned")
return None
metadata_dict = metadata_list[0]
elif result_dir:
model_dir = ""
for dirpath, dirnames, filenames in os.walk(result_dir):
if model_uuid in dirnames:
model_dir = os.path.join(dirpath, model_uuid)
break
if model_dir:
with open(os.path.join(model_dir, 'model_metadata.json')) as f:
metadata_dict = json.load(f)
else:
print(f"model_uuid ({model_uuid}) not exist in {result_dir}.")
return None
model_info = {}
model_info['model_uuid'] = metadata_dict['model_uuid']
model_info['collection_name'] = col_name
# Get model metrics for this model
metrics_dicts = [d for d in metadata_dict['training_metrics'] if d['label'] == 'best']
if len(metrics_dicts) != 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
return None
model_params = metadata_dict['model_parameters']
model_info['model_type'] = model_params['model_type']
model_info['featurizer'] = model_params['featurizer']
split_params = metadata_dict['splitting_parameters']
model_info['splitter'] = split_params['splitter']
if 'split_uuid' in split_params:
model_info['split_uuid'] = split_params['split_uuid']
model_info['dataset_key'] = metadata_dict['training_dataset']['dataset_key']
model_info['bucket'] = metadata_dict['training_dataset']['bucket']
dset_meta = metadata_dict['training_dataset']['dataset_metadata']
if PK_pipe:
model_info['assay_name'] = dset_meta.get('assay_category', 'NA')
model_info['response_col'] = dset_meta.get('response_cols', dset_meta.get('response_col', 'NA'))
try:
model_info['descriptor_type'] = metadata_dict['descriptor_specific']['descriptor_type']
except KeyError:
model_info['descriptor_type'] = 'NA'
try:
model_info['num_samples'] = dset_meta['num_row']
except:
# KSM: Commented out because original dataset may no longer be accessible.
#tmp_df = dsf.retrieve_dataset_by_datasetkey(model_info['dataset_key'], model_info['bucket'])
#model_info['num_samples'] = tmp_df.shape[0]
model_info['num_samples'] = nan
# add model and feature params
# model_uuid appears in model_feature_params and will overwrite the one in model_info
# it's the same uuid, so it should be ok
model_feature_params = extract_model_and_feature_parameters(metadata_dict)
model_info.update(model_feature_params)
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
metric_col = '%s_%s' % (metric_type, subset)
model_info[metric_col] = metrics_dict['prediction_results'][metric_type]
if (model_params['prediction_type'] == 'regression') and (metric_type != 'rms_score'):
metric_col = 'rms_score_%s' % subset
model_info[metric_col] = metrics_dict['prediction_results']['rms_score']
return model_info
# ---------------------------------------------------------------------------------------------------------
def get_best_models_info(col_names=None, bucket='public', pred_type="regression", result_dir=None, PK_pipeline=False,
output_dir='/usr/local/data',
shortlist_key=None, input_dset_keys=None, save_results=False, subset='valid',
metric_type=None, selection_type='max', other_filters={}):
"""
Tabulate parameters and performance metrics for the best models, according to a given metric, trained against
each specified dataset.
Args:
col_names (list of str): List of model tracker collections to search.
bucket (str): Datastore bucket for training datasets.
pred_type (str): Type of models (regression or classification).
result_dir (list of str): Result directories of the models, if model tracker is not supported.
PK_pipeline (bool): Are we being called from PK pipeline?
output_dir (str): Directory to write output table to.
shortlist_key (str): Datastore key for table of datasets to query models for.
input_dset_keys (str or list of str): List of datastore keys for datasets to query models for. Either shortlist_key
or input_dset_keys must be specified, but not both.
save_results (bool): If True, write the table of results to a CSV file.
subset (str): Input dataset subset ('train', 'valid', or 'test') for which metrics are used to select best models.
metric_type (str): Type of performance metric (r2_score, roc_auc_score, etc.) to use to select best models.
selection_type (str): Score criterion ('max' or 'min') to use to select best models.
other_filters (dict): Additional selection criteria to include in model query.
Returns:
top_models_df (DataFrame): Table of parameters and metrics for best models for each dataset.
"""
if not mlmt_supported and not result_dir:
print("Model tracker not supported in your environment; can examine models saved in filesystem only, 'result_dir' needs to be provided.")
return None
top_models_info = []
sort_order = {'max': -1, 'min': 1}
sort_ascending = {'max': False, 'min': True}
if metric_type is None:
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
if other_filters is None:
other_filters = {}
# define dset_keys
if input_dset_keys is not None and shortlist_key is not None:
raise ValueError("You can specify either shortlist_key or input_dset_keys but not both.")
elif input_dset_keys is not None and shortlist_key is None:
if type(input_dset_keys) == str:
dset_keys = [input_dset_keys]
else:
dset_keys = input_dset_keys
elif input_dset_keys is None and shortlist_key is None:
raise ValueError('Must specify either input_dset_keys or shortlist_key')
else:
dset_keys = dsf.retrieve_dataset_by_datasetkey(shortlist_key, bucket)
if dset_keys is None:
# define dset_keys, col_names and buckets from shortlist file
shortlist = pd.read_csv(shortlist_key)
if 'dataset_key' in shortlist.columns:
dset_keys = shortlist['dataset_key'].unique()
elif 'task_name' in shortlist.columns:
dset_keys = shortlist['task_name'].unique()
else:
dset_keys = shortlist.values
if 'collection' in shortlist.columns:
col_names = shortlist['collection'].unique()
if 'bucket' in shortlist.columns:
bucket = shortlist['bucket'].unique()
if mlmt_supported and col_names is not None:
mlmt_client = dsf.initialize_model_tracker()
if type(col_names) == str:
col_names = [col_names]
if type(bucket) == str:
bucket=[bucket]
# Get the best model over all collections for each dataset
for dset_key in dset_keys:
dset_key = dset_key.strip()
dset_model_info = []
for col_name in col_names:
for buck in bucket:
try:
query_params = {
"match_metadata": {
"training_dataset.dataset_key": dset_key,
"training_dataset.bucket": buck,
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
"subset": subset,
"$sort": [{"prediction_results.%s" % metric_type : sort_order[selection_type]}]
},
}
query_params['match_metadata'].update(other_filters)
try:
print('Querying collection %s for models trained on bucket %s, dataset %s' % (col_name, buck, dset_key))
metadata_list = list(mlmt_client.model.query_model_metadata(
collection_name=col_name,
query_params=query_params,
limit=1
).result())
except Exception as e:
print("Error returned when querying the best model for dataset %s in collection %s" % (dset_key, col_name))
print(e)
continue
if len(metadata_list) == 0:
print("No models returned for dataset %s in collection %s" % (dset_key, col_name))
continue
print('Query returned %d models' % len(metadata_list))
model = metadata_list[0]
model_info = get_best_perf_table(metric_type, col_name, metadata_dict=model, PK_pipe=PK_pipeline)
if model_info is not None:
res_df = pd.DataFrame.from_records([model_info])
dset_model_info.append(res_df)
except Exception as e:
print(e)
continue
metric_col = '%s_%s' % (metric_type, subset)
if len(dset_model_info) > 0:
dset_model_df = pd.concat(dset_model_info, ignore_index=True).sort_values(
by=metric_col, ascending=sort_ascending[selection_type])
top_models_info.append(dset_model_df.head(1))
print('Adding data for bucket %s, dset_key %s' % (dset_model_df.bucket.values[0], dset_model_df.dataset_key.values[0]))
elif result_dir:
metric_col = '%s_%s' % (subset, metric_type)
for rd in result_dir:
temp_perf_df = get_filesystem_perf_results(result_dir = rd, pred_type = pred_type).sort_values(
by=metric_col, ascending=sort_ascending[selection_type])
top_models_info.append(temp_perf_df.head(1))
print(f"Adding data from '{rd}' ")
if len(top_models_info) == 0:
print("No metadata found")
return None
top_models_df = pd.concat(top_models_info, ignore_index=True)
if save_results:
os.makedirs(output_dir, exist_ok=True)
if shortlist_key is not None:
# Not including shortlist key right now because some are weirdly formed and have .csv in the middle
top_models_df.to_csv(os.path.join(output_dir, 'best_models_metadata.csv'), index=False)
else:
for dset_key in input_dset_keys:
# TODO: This doesn't make sense; why output multiple copies of the same table?
shortened_key = dset_key[:-4] if dset_key.endswith('.csv') else dset_key
top_models_df.to_csv(os.path.join(output_dir, 'best_models_metadata_%s.csv' % shortened_key), index=False)
return top_models_df
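#---------------------------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module). It shows one way
# get_best_models_info() might be called in filesystem mode; the result directory and dataset key
# below are placeholder assumptions, not real paths.
def _example_best_models_from_filesystem(result_dir='/path/to/hyperparam_results',
                                         dset_key='/path/to/dataset.csv'):
    """Illustrative only: pick the best regression model per result directory by validation r2_score."""
    best_df = get_best_models_info(result_dir=[result_dir], input_dset_keys=[dset_key],
                                   pred_type='regression', metric_type='r2_score',
                                   selection_type='max', subset='valid', save_results=False)
    return best_df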
# TODO: This function looks like work in progress, should we delete it?
'''
#---------------------------------------------------------------------------------------------------------
def _get_best_grouped_models_info(collection='pilot_fixed', pred_type='regression', top_n=1, subset='test'):
"""
Get results for models in the given collection.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return
res_dir = '/usr/local/data/%s_perf' % collection
plt_dir = '%s/Plots' % res_dir
os.makedirs(plt_dir, exist_ok=True)
res_files = os.listdir(res_dir)
suffix = '_%s_model_perf_metrics.csv' % collection
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
for res_file in res_files:
try:
if not res_file.endswith(suffix):
continue
res_path = os.path.join(res_dir, res_file)
res_df = pd.read_csv(res_path, index_col=False)
res_df['combo'] = ['%s/%s' % (m,f) for m, f in zip(res_df.model_type.values, res_df.featurizer.values)]
dset_name = res_file.replace(suffix, '')
datasets.append(dset_name)
res_df['dataset'] = dset_name
print(dset_name)
res_df = res_df.sort_values('{0}_{1}'.format(metric_type, subset), ascending=False)
res_df['model_type/feat'] = ['%s/%s' % (m,f) for m, f in zip(res_df.model_type.values, res_df.featurizer.values)]
res_df = res_df.sort_values('{0}_{1}'.format(metric_type, subset), ascending=False)
grouped_df = res_df.groupby('model_type/feat').apply(
lambda t: t.head(top_n)
).reset_index(drop=True)
top_grouped_models.append(grouped_df)
top_combo = res_df['model_type/feat'].values[0]
top_combo_dsets.append(top_combo + dset_name.lstrip('ATOM_GSK_dskey'))
top_score = res_df['{0}_{1}'.format(metric_type, subset)].values[0]
top_model_feat.append(top_combo)
top_scores.append(top_score)
num_samples.append(res_df['Dataset Size'][0])
'''
#------------------------------------------------------------------------------------------------------------------
def get_umap_nn_model_perf_table(dataset_key, bucket, collection_name, pred_type='regression', other_filters={}):
"""
Load performance metrics from model tracker for all NN models with the given prediction_type saved in
the model tracker DB under a given collection that were trained against a particular dataset. Show
parameter settings for UMAP transformer for models where they are available.
Args:
dataset_key (str): Dataset key for training dataset.
bucket (str): Dataset bucket for training dataset.
collection_name (str): Name of model tracker collection to search for models.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
other_filters (dict): Additional metadata filter criteria to include in the model query.
Returns:
pd.DataFrame: Table of model performance metrics.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
query_params = {
"match_metadata": {
"training_dataset.bucket": bucket,
"training_dataset.dataset_key": dataset_key,
"model_parameters.model_type" : "NN",
"model_parameters.prediction_type" : pred_type
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
query_params['match_metadata'].update(other_filters)
print("Finding models trained on %s dataset %s" % (bucket, dataset_key))
mlmt_client = dsf.initialize_model_tracker()
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
).result()
if metadata_list == []:
print("No matching models returned")
return
else:
print("Found %d matching models" % len(metadata_list))
model_uuid_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
featurizer_list = []
best_epoch_list = []
max_epochs_list = []
feature_transform_type_list = []
umap_dim_list = []
umap_targ_wt_list = []
umap_neighbors_list = []
umap_min_dist_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
sort_metric = 'r2_score'
metrics = ['r2_score', 'rms_score', 'mae_score']
else:
sort_metric = 'roc_auc_score'
metrics = ['roc_auc_score', 'prc_auc_score', 'matthews_cc', 'kappa', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
for metadata_dict in metadata_list:
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get model metrics for this model
metrics_dicts = metadata_dict['training_metrics']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
if len(metrics_dicts) > 3:
raise Exception('Got more than one set of best epoch metrics for model %s' % model_uuid)
subset_metrics = {}
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
model_type = model_params['model_type']
if model_type != 'NN':
continue
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
feature_transform_type = metadata_dict['training_dataset']['feature_transform_type']
feature_transform_type_list.append(feature_transform_type)
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
if 'umap_specific' in metadata_dict:
umap_params = metadata_dict['umap_specific']
umap_dim_list.append(umap_params['umap_dim'])
umap_targ_wt_list.append(umap_params['umap_targ_wt'])
umap_neighbors_list.append(umap_params['umap_neighbors'])
umap_min_dist_list.append(umap_params['umap_min_dist'])
else:
umap_dim_list.append(nan)
umap_targ_wt_list.append(nan)
umap_neighbors_list.append(nan)
umap_min_dist_list.append(nan)
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
learning_rate=learning_rate_list,
dropouts=dropouts_list,
layer_sizes=layer_sizes_list,
featurizer=featurizer_list,
best_epoch=best_epoch_list,
max_epochs=max_epochs_list,
feature_transform_type=feature_transform_type_list,
umap_dim=umap_dim_list,
umap_targ_wt=umap_targ_wt_list,
umap_neighbors=umap_neighbors_list,
umap_min_dist=umap_min_dist_list ))
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (metric, subset)
perf_df[metric_col] = score_dict[subset][metric]
sort_by = '%s_valid' % sort_metric
perf_df = perf_df.sort_values(sort_by, ascending=False)
return perf_df
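#------------------------------------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module). Assuming the model tracker
# is available, this shows how get_umap_nn_model_perf_table() might be used to compare NN models trained on
# one dataset; the dataset key, bucket and collection name below are placeholder assumptions.
def _example_umap_nn_perf(dataset_key='/path/to/dataset.csv', bucket='public', collection='my_collection'):
    """Illustrative only: list NN models for one dataset, best validation score first."""
    umap_df = get_umap_nn_model_perf_table(dataset_key, bucket, collection, pred_type='regression')
    return umap_df[['model_uuid', 'featurizer', 'umap_dim', 'learning_rate', 'r2_score_valid']]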
#------------------------------------------------------------------------------------------------------------------
def get_tarball_perf_table(model_tarball, pred_type='classification'):
"""
Retrieve model metadata and performance metrics for a model saved as a tarball (.tar.gz) file.
Args:
model_tarball (str): Path of model tarball file, named as model.tar.gz.
pred_type (str): Prediction type ('classification' or 'regression') of model.
Returns:
tuple (pd.DataFrame, dict): Table of performance metrics and a dictionary of model metadata.
"""
tarf_content = tarfile.open(model_tarball, "r")
metadata_file = tarf_content.getmember("./model_metadata.json")
ext_metadata = tarf_content.extractfile(metadata_file)
meta_json = json.load(ext_metadata)
ext_metadata.close()
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['r2_score', 'rms_score', 'mae_score']
else:
metrics = ['roc_auc_score', 'prc_auc_score', 'precision', 'recall_score',
'accuracy_score', 'npv', 'matthews_cc', 'kappa', 'cross_entropy', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = [0,0]
for emet in meta_json["training_metrics"]:
label = emet["label"]
score_ix = 0 if label == "best" else 1
subset = emet["subset"]
for metric in metrics:
score_dict[subset][metric][score_ix] = emet["prediction_results"][metric]
perf_df = pd.DataFrame()
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (subset, metric)
perf_df[metric_col] = score_dict[subset][metric]
return perf_df, meta_json
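#------------------------------------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module). It shows how
# get_tarball_perf_table() might be used to inspect an exported model; the tarball path is a placeholder.
def _example_tarball_metrics(model_tarball='/path/to/model.tar.gz'):
    """Illustrative only: row 0 of the returned table holds the 'best'-epoch metrics, row 1 any other label."""
    perf_df, meta_json = get_tarball_perf_table(model_tarball, pred_type='classification')
    return perf_df.iloc[0], meta_json['model_uuid']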
#------------------------------------------------------------------------------------------------------------------
def get_filesystem_perf_results(result_dir, pred_type='classification'):
"""
Retrieve metadata and performance metrics for models stored in the filesystem from a hyperparameter search run.
Args:
result_dir (str): Root directory for results from a hyperparameter search training run.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
pd.DataFrame: Table of metadata fields and performance metrics.
"""
ampl_version_list = []
model_uuid_list = []
model_type_list = []
featurizer_list = []
dataset_key_list = []
splitter_list = []
model_score_type_list = []
feature_transform_type_list = []
# model type specific lists
param_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['r2_score', 'rms_score', 'mae_score', 'num_compounds']
else:
metrics = ['roc_auc_score', 'prc_auc_score', 'precision', 'recall_score', 'num_compounds',
'accuracy_score', 'bal_accuracy', 'npv', 'matthews_cc', 'kappa', 'cross_entropy', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
score_dict['valid']['model_choice_score'] = []
# Navigate the results directory tree
model_list = []
metrics_list = []
tar_list = []
for dirpath, dirnames, filenames in os.walk(result_dir):
# collect all tars for later
tar_list = tar_list + [os.path.join(dirpath, f) for f in filenames if f.endswith('.tar.gz')]
if ('model_metadata.json' in filenames) and ('model_metrics.json' in filenames):
meta_path = os.path.join(dirpath, 'model_metadata.json')
with open(meta_path, 'r') as meta_fp:
meta_dict = json.load(meta_fp)
if meta_dict['model_parameters']['prediction_type']==pred_type:
model_list.append(meta_dict)
metrics_path = os.path.join(dirpath, 'model_metrics.json')
with open(metrics_path, 'r') as metrics_fp:
metrics_dicts = json.load(metrics_fp)
metrics_list.append(metrics_dicts)
print("Found data for %d models under %s" % (len(model_list), result_dir))
# build dictionary of tarball names
tar_dict = {os.path.basename(tf):tf for tf in tar_list}
path_list = []
for metadata_dict, metrics_dicts in zip(model_list, metrics_list):
model_uuid = metadata_dict['model_uuid']
dataset_key = metadata_dict['training_dataset']['dataset_key']
dataset_name = mp.build_tarball_name(mp.build_dataset_name(dataset_key), model_uuid)
if dataset_name in tar_dict:
path_list.append(tar_dict[dataset_name])
else:
# unable to find saved tar file
path_list.append('')
# Get list of training run metrics for this model
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
ampl_version = model_params['ampl_version']
ampl_version_list.append(ampl_version)
model_type = model_params['model_type']
model_type_list.append(model_type)
model_score_type = model_params['model_choice_score_type']
model_score_type_list.append(model_score_type)
featurizer = model_params['featurizer']
#mix ecfp, graphconv, moe, mordred, rdkit for concise representation
if featurizer in ["computed_descriptors", "descriptors"]:
featurizer = metadata_dict["descriptor_specific"]["descriptor_type"]
featurizer_list.append(featurizer)
split_params = metadata_dict['splitting_parameters']
splitter_list.append(split_params['splitter'])
dataset_key_list.append(metadata_dict['training_dataset']['dataset_key'])
feature_transform_type = metadata_dict['training_dataset']['feature_transform_type']
feature_transform_type_list.append(feature_transform_type)
param_list.append(extract_model_and_feature_parameters(metadata_dict))
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
score_dict['valid']['model_choice_score'].append(subset_metrics['valid']['model_choice_score'])
param_df = pd.DataFrame(param_list)
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
model_path = path_list,
ampl_version=ampl_version_list,
model_type=model_type_list,
dataset_key=dataset_key_list,
featurizer=featurizer_list,
splitter=splitter_list,
model_score_type=model_score_type_list,
feature_transform_type=feature_transform_type_list))
perf_df = perf_df.merge(param_df, on='model_uuid', how='inner')
perf_df['model_choice_score'] = score_dict['valid']['model_choice_score']
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (subset, metric)
perf_df[metric_col] = score_dict[subset][metric]
sort_by = 'model_choice_score'
perf_df = perf_df.sort_values(sort_by, ascending=False)
return perf_df
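#------------------------------------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module). It shows how
# get_filesystem_perf_results() might be used to summarize a hyperparameter search stored on disk;
# the result directory below is a placeholder assumption.
def _example_filesystem_perf(result_dir='/path/to/hyperparam_results'):
    """Illustrative only: models come back sorted by model_choice_score, best first."""
    perf_df = get_filesystem_perf_results(result_dir, pred_type='regression')
    return perf_df[['model_uuid', 'model_type', 'featurizer', 'model_choice_score']].head()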
def get_filesystem_models(result_dir, pred_type):
"""
Identify all models in result_dir and create a performance table with a 'tarball_paths' column containing the path
to each model's tarball, where one was found.
"""
perf_df = get_filesystem_perf_results(result_dir, pred_type)
if pred_type == 'regression':
metric = 'valid_r2_score'
else:
metric = 'valid_roc_auc_score'
#best_df = perf_df.sort_values(by=metric, ascending=False).drop_duplicates(subset='dataset_key').copy()
perf_df['dataset_names'] = perf_df['dataset_key'].apply(lambda f: os.path.splitext(os.path.basename(f))[0])
perf_df['tarball_names'] = perf_df.apply(lambda x: '%s_model_%s.tar.gz' % (x['dataset_names'], x['model_uuid']), axis=1)
tarball_names = set(perf_df['tarball_names'].values)
all_filenames = []
for dirpath, dirnames, filenames in os.walk(result_dir):
for fn in filenames:
if fn in tarball_names:
all_filenames.append((fn, os.path.join(dirpath, fn)))
found_files_df = pd.DataFrame({'tarball_names':[f[0] for f in all_filenames],
'tarball_paths':[f[1] for f in all_filenames]})
perf_df = perf_df.merge(found_files_df, on='tarball_names', how='outer')
return perf_df
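#------------------------------------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module). It shows how
# get_filesystem_models() might be used to map each model found on disk to its saved tarball;
# the result directory below is a placeholder assumption.
def _example_filesystem_models(result_dir='/path/to/hyperparam_results'):
    """Illustrative only: rows with a missing 'tarball_paths' entry had no matching tarball on disk."""
    models_df = get_filesystem_models(result_dir, pred_type='regression')
    return models_df[['model_uuid', 'dataset_key', 'tarball_paths']]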
#------------------------------------------------------------------------------------------------------------------
def copy_best_filesystem_models(result_dir, dest_dir, pred_type, force_update=False):
"""
Identify the best models for each dataset within a result directory tree (e.g. from a hyperparameter search).
Copy the associated model tarballs to a destination directory.
Args:
result_dir (str): Path to model training result directory.
dest_dir (str): Path of directory where model tarballs will be copied to.
pred_type (str): Prediction type ('classification' or 'regression') of models to copy.
force_update (bool): If true, overwrite tarball files that already exist in dest_dir.
Returns:
pd.DataFrame: Table of performance metrics for best models.
"""
perf_df = get_filesystem_perf_results(result_dir, pred_type)
if pred_type == 'regression':
metric = 'valid_r2_score'
else:
metric = 'valid_roc_auc_score'
best_df = perf_df.sort_values(by=metric, ascending=False).drop_duplicates(subset='dataset_key').copy()
dataset_names = [os.path.splitext(os.path.basename(f))[0] for f in best_df.dataset_key.values]
model_uuids = best_df.model_uuid.values
tarball_names = ['%s_model_%s.tar.gz' % (dset_name, model_uuid) for dset_name, model_uuid in zip(dataset_names, model_uuids)]
for dirpath, dirnames, filenames in os.walk(result_dir):
for fn in filenames:
if (fn in tarball_names) and (force_update or not os.path.exists(os.path.join(dest_dir, fn))):
shutil.copy2(os.path.join(dirpath, fn), dest_dir)
print('Copied %s' % fn)
return best_df
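#------------------------------------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module). It shows how
# copy_best_filesystem_models() might be used to collect the best tarball per dataset into one place;
# both directories below are placeholder assumptions.
def _example_copy_best_models(result_dir='/path/to/hyperparam_results', dest_dir='/path/to/best_models'):
    """Illustrative only: copies the best-per-dataset tarballs and returns their metrics table."""
    os.makedirs(dest_dir, exist_ok=True)
    return copy_best_filesystem_models(result_dir, dest_dir, pred_type='classification', force_update=False)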
#------------------------------------------------------------------------------------------------------------------
def get_summary_perf_tables(collection_names=None, filter_dict={}, result_dir=None, prediction_type='regression', verbose=False):
"""
Load model parameters and performance metrics from model tracker for all models saved in the model tracker DB under
the given collection names (or result directory if Model tracker is not available) with the given prediction type.
Tabulate the parameters and metrics including:
dataset (assay name, target, parameter, key, bucket)
dataset size (train/valid/test/total)
number of training folds
model type (NN or RF)
featurizer
transformation type
metrics: r2_score, mae_score and rms_score for regression, or ROC AUC for classification
Args:
collection_names (list): Names of model tracker collections to search for models.
filter_dict (dict): Additional filter criteria to use in model query.
result_dir (str or list): Directories to search for models; must be provided if the model tracker DB is not available.
prediction_type (str): Type of models (classification or regression) to query.
verbose (bool): If true, print status messages as collections are processed.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
if not mlmt_supported and not result_dir:
print("Model tracker not supported in your environment; can examine models saved in filesystem only, 'result_dir' is needed.")
return None
collection_list = []
ampl_version_list=[]
model_uuid_list = []
time_built_list = []
model_type_list = []
dataset_key_list = []
bucket_list = []
param_list = []
featurizer_list = []
desc_type_list = []
transform_list = []
dset_size_list = []
splitter_list = []
split_strategy_list = []
split_uuid_list = []
umap_dim_list = []
umap_targ_wt_list = []
umap_neighbors_list = []
umap_min_dist_list = []
split_uuid_list=[]
model_feat_param_list = []
if prediction_type == 'regression':
score_types = ['r2_score', 'mae_score', 'rms_score']
else:
# TODO: add more classification metrics later
score_types = ['roc_auc_score', 'prc_auc_score', 'accuracy_score', 'bal_accuracy', 'precision', 'recall_score', 'npv', 'matthews_cc', 'kappa']
subsets = ['train', 'valid', 'test']
score_dict = {}
ncmpd_dict = {}
for subset in subsets:
score_dict[subset] = {}
for score_type in score_types:
score_dict[subset][score_type] = []
ncmpd_dict[subset] = []
metadata_list_dict = {}
if mlmt_supported and collection_names:
mlmt_client = dsf.initialize_model_tracker()
filter_dict['model_parameters.prediction_type'] = prediction_type
for collection_name in collection_names:
print("Finding models in collection %s" % collection_name)
query_params = {
"match_metadata": filter_dict,
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
).result()
metadata_list_dict[collection_name] = metadata_list
elif result_dir:
if isinstance(result_dir, str):
result_dir = [result_dir]
for rd in result_dir:
if rd not in metadata_list_dict:
metadata_list_dict[rd] = []
for dirpath, dirnames, filenames in os.walk(rd):
if "model_metadata.json" in filenames:
with open(os.path.join(dirpath, 'model_metadata.json')) as f:
metadata_dict = json.load(f)
metadata_list_dict[rd].append(metadata_dict)
for ss in metadata_list_dict:
for i, metadata_dict in enumerate(metadata_list_dict[ss]):
if (i % 10 == 0) and verbose:
print('Processing collection %s model %d' % (ss, i))
# Check that model has metrics before we go on
if not 'training_metrics' in metadata_dict:
continue
collection_list.append(ss)
model_uuid = metadata_dict['model_uuid']
model_uuid_list.append(model_uuid)
time_built = metadata_dict['time_built']
time_built_list.append(time_built)
model_params = metadata_dict['model_parameters']
ampl_version = model_params.get('ampl_version', 'probably 1.0.0')
ampl_version_list.append(ampl_version)
model_type = model_params['model_type']
model_type_list.append(model_type)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
if 'descriptor_specific' in metadata_dict:
desc_type = metadata_dict['descriptor_specific']['descriptor_type']
elif featurizer in ['graphconv', 'ecfp']:
desc_type = featurizer
else:
desc_type = ''
desc_type_list.append(desc_type)
dataset_key = metadata_dict['training_dataset']['dataset_key']
bucket = metadata_dict['training_dataset']['bucket']
dataset_key_list.append(dataset_key)
bucket_list.append(bucket)
dset_metadata = metadata_dict['training_dataset']['dataset_metadata']
param = metadata_dict['training_dataset']['response_cols'][0]
param_list.append(param)
transform_type = metadata_dict['training_dataset']['feature_transform_type']
transform_list.append(transform_type)
split_params = metadata_dict['splitting_parameters']
splitter_list.append(split_params['splitter'])
split_uuid_list.append(split_params.get('split_uuid', ''))
split_strategy = split_params['split_strategy']
split_strategy_list.append(split_strategy)
if 'umap_specific' in metadata_dict:
umap_params = metadata_dict['umap_specific']
umap_dim_list.append(umap_params['umap_dim'])
umap_targ_wt_list.append(umap_params['umap_targ_wt'])
umap_neighbors_list.append(umap_params['umap_neighbors'])
umap_min_dist_list.append(umap_params['umap_min_dist'])
else:
umap_dim_list.append(nan)
umap_targ_wt_list.append(nan)
umap_neighbors_list.append(nan)
umap_min_dist_list.append(nan)
model_feat_param_list.append(extract_model_and_feature_parameters(metadata_dict))
# Get model metrics for this model
metrics_dicts = metadata_dict['training_metrics']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
if split_strategy == 'k_fold_cv':
dset_size = subset_metrics['train']['num_compounds'] + subset_metrics['test']['num_compounds']
else:
dset_size = subset_metrics['train']['num_compounds'] + subset_metrics['valid']['num_compounds'] + subset_metrics['test']['num_compounds']
for subset in subsets:
subset_size = subset_metrics[subset]['num_compounds']
for score_type in score_types:
try:
score = subset_metrics[subset][score_type]
except KeyError:
score = float('nan')
score_dict[subset][score_type].append(score)
ncmpd_dict[subset].append(subset_size)
dset_size_list.append(dset_size)
col_dict = dict(
collection=collection_list,
ampl_version=ampl_version_list,
model_uuid=model_uuid_list,
time_built=time_built_list,
model_type=model_type_list,
featurizer=featurizer_list,
features=desc_type_list,
transformer=transform_list,
splitter=splitter_list,
split_strategy=split_strategy_list,
split_uuid=split_uuid_list,
umap_dim=umap_dim_list,
umap_targ_wt=umap_targ_wt_list,
umap_neighbors=umap_neighbors_list,
umap_min_dist=umap_min_dist_list,
dataset_bucket=bucket_list,
dataset_key=dataset_key_list,
dataset_size=dset_size_list,
parameter=param_list
)
perf_df = pd.DataFrame(col_dict)
param_df = pd.DataFrame(model_feat_param_list)
perf_df = perf_df.merge(param_df, on='model_uuid', how='inner')
for subset in subsets:
ncmpds_col = '%s_size' % subset
perf_df[ncmpds_col] = ncmpd_dict[subset]
for score_type in score_types:
metric_col = '%s_%s' % (subset, score_type)
perf_df[metric_col] = score_dict[subset][score_type]
return perf_df
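#------------------------------------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module). Assuming the model tracker
# is available, this shows how get_summary_perf_tables() might be used across several collections; the
# collection names below are placeholder assumptions.
def _example_summary_perf(collections=('pilot_models', 'production_models')):
    """Illustrative only: one row per model, with per-subset sizes and scores as columns."""
    return get_summary_perf_tables(collection_names=list(collections), prediction_type='regression')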
#------------------------------------------------------------------------------------------------------------------
def get_summary_metadata_table(uuids, collections=None):
"""
Tabulate metadata fields and performance metrics for a set of models identified by specific model_uuids.
Args:
uuids (list): List of model UUIDs to query.
collections (list or str): Names of collections in model tracker DB to get models from. If collections is
a string, it must identify one collection to search for all models. If a list, it must be of the same
length as `uuids`. If not provided, all collections will be searched.
Returns:
pd.DataFrame: Table of metadata fields and performance metrics for models.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
if isinstance(uuids,str):
uuids = [uuids]
if isinstance(collections,str):
collections = [collections] * len(uuids)
mlist = []
mlmt_client = dsf.initialize_model_tracker()
for idx,uuid in enumerate(uuids):
if collections is not None:
collection_name = collections[idx]
else:
collection_name = trkr.get_model_collection_by_uuid(uuid)
model_meta = trkr.get_full_metadata_by_uuid(uuid, collection_name=collection_name)
mdl_params = model_meta['model_parameters']
data_params = model_meta['training_dataset']
# Get model metrics for this model
metrics = pd.DataFrame(model_meta['training_metrics'])
metrics = metrics[metrics['label']=='best']
train_metrics = metrics[metrics['subset']=='train']['prediction_results'].values[0]
valid_metrics = metrics[metrics['subset']=='valid']['prediction_results'].values[0]
test_metrics = metrics[metrics['subset']=='test']['prediction_results'].values[0]
# Try to name the model something intelligible in the table
name = 'NA'
if 'target' in data_params['dataset_metadata']:
name = data_params['dataset_metadata']['target']
if (name == 'NA') & ('assay_endpoint' in data_params['dataset_metadata']):
name = data_params['dataset_metadata']['assay_endpoint']
if (name == 'NA') & ('response_col' in data_params['dataset_metadata']):
name = data_params['dataset_metadata']['response_col']
if name != 'NA':
if 'param' in data_params['dataset_metadata'].keys():
name = name + ' ' + data_params['dataset_metadata']['param']
else:
name = 'unknown'
transform = 'None'
if 'transformation' in data_params['dataset_metadata'].keys():
transform = data_params['dataset_metadata']['transformation']
if mdl_params['featurizer'] == 'computed_descriptors':
featurizer = model_meta['descriptor_specific']['descriptor_type']
else:
featurizer = mdl_params['featurizer']
try:
split_uuid = model_meta['splitting_parameters']['split_uuid']
except:
split_uuid = 'Not Available'
if mdl_params['prediction_type'] == 'regression':
if mdl_params['model_type'] == 'NN':
nn_params = model_meta['nn_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Layer Sizes': nn_params['layer_sizes'],
'Optimizer': nn_params['optimizer_type'],
'Learning Rate': nn_params['learning_rate'],
'Dropouts': nn_params['dropouts'],
'Best Epoch (Max)': '%i (%i)' % (nn_params['best_epoch'],nn_params['max_epochs']),
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'RF':
rf_params = model_meta['rf_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Max Depth': rf_params['rf_max_depth'],
'Max Features': rf_params['rf_max_features'],
'RF Estimators': rf_params['rf_estimators'],
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'xgboost':
xgb_params = model_meta['xgb_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Gamma': xgb_params['xgb_gamma'],
'Learning rate': xgb_params['xgb_learning_rate'],
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
else:
print('Unsupported model type %s for model %s; skipping' % (mdl_params['model_type'], model_meta['model_uuid']))
continue
elif mdl_params['prediction_type'] == 'classification':
if mdl_params['model_type'] == 'NN':
nn_params = model_meta['nn_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'ROC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['roc_auc_score'], valid_metrics['roc_auc_score'], test_metrics['roc_auc_score']),
'PRC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['prc_auc_score'], valid_metrics['prc_auc_score'], test_metrics['prc_auc_score']),
'Balanced accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics.get('bal_accuracy', np.nan), valid_metrics.get('bal_accuracy',np.nan), test_metrics.get('bal_accuracy', np.nan)),
'Accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['accuracy_score'], valid_metrics['accuracy_score'], test_metrics['accuracy_score']),
'Precision (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['precision'], valid_metrics['precision'], test_metrics['precision']),
'Recall (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['recall_score'], valid_metrics['recall_score'], test_metrics['recall_score']),
'NPV (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['npv'], valid_metrics['npv'], test_metrics['npv']),
'Kappa (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['kappa'], valid_metrics['kappa'], test_metrics['kappa']),
'Matthews CC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['matthews_cc'], valid_metrics['matthews_cc'], test_metrics['matthews_cc']),
'Cross entropy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['cross_entropy'], valid_metrics['cross_entropy'], test_metrics['cross_entropy']),
'Confusion matrices (Train/Valid/Test)': f"{str(train_metrics['confusion_matrix'])}/{str(valid_metrics['confusion_matrix'])}/{str(test_metrics['confusion_matrix'])}",
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Layer Sizes': nn_params['layer_sizes'],
'Optimizer': nn_params['optimizer_type'],
'Learning Rate': nn_params['learning_rate'],
'Dropouts': nn_params['dropouts'],
'Best Epoch (Max)': '%i (%i)' % (nn_params['best_epoch'],nn_params['max_epochs']),
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'RF':
rf_params = model_meta['rf_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Max Depth': rf_params['rf_max_depth'],
'Max Features': rf_params['rf_max_features'],
'RF Estimators': rf_params['rf_estimators'],
'ROC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['roc_auc_score'], valid_metrics['roc_auc_score'], test_metrics['roc_auc_score']),
'PRC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['prc_auc_score'], valid_metrics['prc_auc_score'], test_metrics['prc_auc_score']),
'Balanced accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics.get('bal_accuracy', np.nan), valid_metrics.get('bal_accuracy',np.nan), test_metrics.get('bal_accuracy', np.nan)),
'Accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['accuracy_score'], valid_metrics['accuracy_score'], test_metrics['accuracy_score']),
'Precision (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['precision'], valid_metrics['precision'], test_metrics['precision']),
'Recall (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['recall_score'], valid_metrics['recall_score'], test_metrics['recall_score']),
'NPV (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['npv'], valid_metrics['npv'], test_metrics['npv']),
'Kappa (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['kappa'], valid_metrics['kappa'], test_metrics['kappa']),
'Matthews CC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['matthews_cc'], valid_metrics['matthews_cc'], test_metrics['matthews_cc']),
'Cross entropy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['cross_entropy'], valid_metrics['cross_entropy'], test_metrics['cross_entropy']),
'Confusion matrices (Train/Valid/Test)': f"{train_metrics['confusion_matrix']}/{valid_metrics['confusion_matrix']}/{test_metrics['confusion_matrix']}",
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'xgboost':
xgb_params = model_meta['xgb_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Gamma': xgb_params['xgb_gamma'],
'XGB Learning rate': xgb_params['xgb_learning_rate'],
'ROC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['roc_auc_score'], valid_metrics['roc_auc_score'], test_metrics['roc_auc_score']),
'PRC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['prc_auc_score'], valid_metrics['prc_auc_score'], test_metrics['prc_auc_score']),
'Balanced accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics.get('bal_accuracy', np.nan), valid_metrics.get('bal_accuracy',np.nan), test_metrics.get('bal_accuracy', np.nan)),
'Accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['accuracy_score'], valid_metrics['accuracy_score'], test_metrics['accuracy_score']),
'Precision (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['precision'], valid_metrics['precision'], test_metrics['precision']),
'Recall (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['recall_score'], valid_metrics['recall_score'], test_metrics['recall_score']),
'NPV (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['npv'], valid_metrics['npv'], test_metrics['npv']),
'Kappa (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['kappa'], valid_metrics['kappa'], test_metrics['kappa']),
'Matthews CC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['matthews_cc'], valid_metrics['matthews_cc'], test_metrics['matthews_cc']),
'Cross entropy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['cross_entropy'], valid_metrics['cross_entropy'], test_metrics['cross_entropy']),
'Confusion matrices (Train/Valid/Test)': f"{train_metrics['confusion_matrix']}/{valid_metrics['confusion_matrix']}/{test_metrics['confusion_matrix']}",
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
else:
print('Unsupported model type %s for model %s; skipping' % (mdl_params['model_type'], model_meta['model_uuid']))
continue
mlist.append(OrderedDict(minfo))
return pd.DataFrame(mlist).set_index('Name').transpose()
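#------------------------------------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module). Assuming the model tracker
# is available, this shows how get_summary_metadata_table() might be used to compare a few models side by
# side; the UUIDs and collection name below are placeholder assumptions.
def _example_summary_metadata(uuids=('model-uuid-1', 'model-uuid-2'), collection='my_collection'):
    """Illustrative only: returns one column per model, indexed by metadata field name."""
    return get_summary_metadata_table(list(uuids), collections=collection)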
#------------------------------------------------------------------------------------------------------------------
def get_training_datasets(collection_names):
"""
Query the model tracker DB for all the unique dataset keys and buckets used to train models in the given
collections.
Args:
collection_names (list): List of names of model tracker collections to search for models.
Returns:
dict: Dictionary mapping collection names to lists of (dataset_key, bucket) tuples for training sets.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
result_dict = {}
mlmt_client = dsf.initialize_model_tracker()
for collection_name in collection_names:
dset_list = mlmt_client.model.get_training_datasets(collection_name=collection_name).result()
result_dict[collection_name] = dset_list
return result_dict
#------------------------------------------------------------------------------------------------------------------
def get_dataset_models(collection_names, filter_dict={}):
"""
Query the model tracker for all models saved in the model tracker DB under the given collection names. Returns a dictionary
mapping (dataset_key,bucket) pairs to the list of (collection,model_uuid) pairs trained on the corresponding datasets.
Args:
collection_names (list): List of names of model tracker collections to search for models.
filter_dict (dict): Additional filter criteria to use in model query.
Returns:
dict: Dictionary mapping training set (dataset_key, bucket) tuples to (collection, model_uuid) pairs.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
result_dict = {}
coll_dset_dict = get_training_datasets(collection_names)
mlmt_client = dsf.initialize_model_tracker()
for collection_name in collection_names:
dset_list = coll_dset_dict[collection_name]
for dset_dict in dset_list:
dset_key = dset_dict['dataset_key']
bucket = dset_dict['bucket']
query_filter = {
'training_dataset.bucket': bucket,
'training_dataset.dataset_key': dset_key
}
query_filter.update(filter_dict)
query_params = {
"match_metadata": query_filter
}
print('Querying models in collection %s for dataset %s, %s' % (collection_name, bucket, dset_key))
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
include_fields=['model_uuid']
).result()
for i, metadata_dict in enumerate(metadata_list):
if i % 50 == 0:
print('Processing collection %s model %d' % (collection_name, i))
model_uuid = metadata_dict['model_uuid']
result_dict.setdefault((dset_key,bucket), []).append((collection_name, model_uuid))
return result_dict
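#------------------------------------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module). Assuming the model tracker
# is available, this shows how get_dataset_models() might be used to list every model trained on each
# dataset; the collection name below is a placeholder assumption.
def _example_models_per_dataset(collections=('pilot_models',)):
    """Illustrative only: keys are (dataset_key, bucket) tuples, values are (collection, model_uuid) pairs."""
    return get_dataset_models(list(collections))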
#-------------------------------------------------------------------------------------------------------------------
def get_multitask_perf_from_files(result_dir, pred_type='regression'):
"""
Retrieve model metadata and performance metrics stored in the filesystem from a multitask hyperparameter search.
Format the per-task performance metrics in a table with a row for each task and columns for each model/subset
combination.
Args:
result_dir (str): Path to root result directory containing output from a hyperparameter search run.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
model_uuid_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
best_epoch_list = []
max_epochs_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['num_compounds', 'r2_score', 'task_r2_scores']
else:
metrics = ['num_compounds', 'roc_auc_score', 'task_roc_auc_scores']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
# Navigate the results directory tree
model_list = []
metrics_list = []
for dirpath, dirnames, filenames in os.walk(result_dir):
if ('model_metadata.json' in filenames) and ('model_metrics.json' in filenames):
meta_path = os.path.join(dirpath, 'model_metadata.json')
with open(meta_path, 'r') as meta_fp:
meta_dict = json.load(meta_fp)
model_list.append(meta_dict)
metrics_path = os.path.join(dirpath, 'model_metrics.json')
with open(metrics_path, 'r') as metrics_fp:
metrics_dicts = json.load(metrics_fp)
metrics_list.append(metrics_dicts)
print("Found data for %d models under %s" % (len(model_list), result_dir))
for metadata_dict, metrics_dicts in zip(model_list, metrics_list):
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get list of training run metrics for this model
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
raise Exception("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
#print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
#continue
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
dset_params = metadata_dict['training_dataset']
response_cols = dset_params['response_cols']
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
# Format the data as a table with groups of 3 columns for each model
num_models = len(model_uuid_list)
if pred_type == 'regression':
model_params = ['model_uuid', 'learning_rate', 'layer_sizes', 'dropouts', 'max_epochs', 'best_epoch',
'subset', 'num_compounds', 'mean_r2_score']
else:
model_params = ['model_uuid', 'learning_rate', 'layer_sizes', 'dropouts', 'max_epochs', 'best_epoch',
'subset', 'num_compounds', 'mean_roc_auc_score']
param_list = model_params + response_cols
perf_df = pd.DataFrame(dict(col_0=param_list))
colnum = 0
for i in range(num_models):
for subset in subsets:
vals = []
if subset == 'train':
vals.append(model_uuid_list[i])
vals.append(learning_rate_list[i])
vals.append(layer_sizes_list[i])
vals.append(dropouts_list[i])
vals.append('%d' % max_epochs_list[i])
vals.append('%d' % best_epoch_list[i])
else:
vals = vals + ['']*6
vals.append(subset)
vals.append('%d' % score_dict[subset]['num_compounds'][i])
if pred_type == 'regression':
vals.append('%.3f' % score_dict[subset]['r2_score'][i])
vals = vals + ['%.3f' % v for v in score_dict[subset]['task_r2_scores'][i]]
else:
vals.append('%.3f' % score_dict[subset]['roc_auc_score'][i])
vals = vals + ['%.3f' % v for v in score_dict[subset]['task_roc_auc_scores'][i]]
colnum += 1
colname = 'col_%d' % colnum
perf_df[colname] = vals
return perf_df
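#-------------------------------------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module). It shows how
# get_multitask_perf_from_files() might be used on a multitask search stored on disk; the result
# directory below is a placeholder assumption.
def _example_multitask_perf(result_dir='/path/to/multitask_results'):
    """Illustrative only: 'col_0' names the rows; each model contributes one column per subset."""
    return get_multitask_perf_from_files(result_dir, pred_type='regression')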
#-------------------------------------------------------------------------------------------------------------------
def get_multitask_perf_from_files_new(result_dir, pred_type='regression'):
"""
Retrieve model metadata and performance metrics stored in the filesystem from a multitask hyperparameter search.
Format the per-task performance metrics in a table with a row for each task and columns for each model/subset
combination.
Args:
result_dir (str): Path to root result directory containing output from a hyperparameter search run.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
model_uuid_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
best_epoch_list = []
max_epochs_list = []
featurizer_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['num_compounds', 'r2_score', 'task_r2_scores',
'task_rms_scores']
else:
metrics = ['num_compounds', 'roc_auc_score', 'task_roc_auc_scores']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
# Navigate the results directory tree
model_list = []
metrics_list = []
for dirpath, dirnames, filenames in os.walk(result_dir):
if ('model_metadata.json' in filenames) and ('model_metrics.json' in filenames):
meta_path = os.path.join(dirpath, 'model_metadata.json')
with open(meta_path, 'r') as meta_fp:
meta_dict = json.load(meta_fp)
model_list.append(meta_dict)
metrics_path = os.path.join(dirpath, 'model_metrics.json')
with open(metrics_path, 'r') as metrics_fp:
metrics_dicts = json.load(metrics_fp)
metrics_list.append(metrics_dicts)
print("Found data for %d models under %s" % (len(model_list), result_dir))
for metadata_dict, metrics_dicts in zip(model_list, metrics_list):
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get list of training run metrics for this model
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
raise Exception("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
#print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
#continue
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
dset_params = metadata_dict['training_dataset']
response_cols = dset_params['response_cols']
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
featurizer_list.append(model_params["featurizer"])
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
# Format the data as a table with one row per model and a column per task/subset metric
num_models = len(model_uuid_list)
data = {
"model_uuid": model_uuid_list,
"learning_rate": learning_rate_list,
"layer_sizes": layer_sizes_list,
"dropouts": dropouts_list,
"featurizer": featurizer_list
}
for i in range(num_models):
for subset in subsets:
for ix, task in enumerate(response_cols):
if pred_type == "regression":
colr2 = f"{subset}_{task}_r2"
colrms = f"{subset}_{task}_rms"
if colr2 not in data:
data[colr2] = []
data[colrms] = []
data[colr2].append(score_dict[subset]["task_r2_scores"][i][ix])
data[colrms].append(score_dict[subset]["task_rms_scores"][i][ix])
else:
colauc = f"{subset}_{task}_roc_auc"
if colauc not in data:
data[colauc] = []
data[colauc].append(score_dict[subset]["task_roc_auc_scores"][i][ix])
perf_df = pd.DataFrame(data)
return perf_df
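#-------------------------------------------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module). The newer layout returned by
# get_multitask_perf_from_files_new() has one row per model, with per-task scores in '<subset>_<task>_r2'
# and '<subset>_<task>_rms' columns; the result directory below is a placeholder assumption.
def _example_multitask_perf_new(result_dir='/path/to/multitask_results'):
    """Illustrative only: the flat layout is easier to filter and sort than the column-per-model table above."""
    return get_multitask_perf_from_files_new(result_dir, pred_type='regression')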
#-------------------------------------------------------------------------------------------------------------------
def get_multitask_perf_from_tracker(collection_name, response_cols=None, expand_responses=None, expand_subsets='test',
exhaustive=False):
"""
Retrieve full metadata and metrics from model tracker for all models in a collection and format them
into a table, including per-task performance metrics for multitask models.
Meant for multitask NN models, but works for single task models as well.
By AKP. Works for model tracker as of 10/2020
Args:
collection_name (str): Name of model tracker collection to search for models.
response_cols (list, str or None): Names of tasks (response columns) to query performance results for.
If None, checks whether the entire collection shares the same response cols and, if not,
asks for clarification. Otherwise, should be a list of strings or a comma-separated string.
Note: make sure response cols are listed in the same order as in the metadata.
Recommended: run with None first, then narrow down if needed.
expand_responses (list, str or None): Names of tasks / response columns you want to include results for in
the final dataframe. Useful if you have a lot of tasks and only want to look at the performance of a
few of them. Must also be a list or comma separated string, and must be a subset of response_cols.
If None, will expand all responses.
expand_subsets (list, str or None): Dataset subsets ('train', 'valid' and/or 'test') to show metrics for.
Again, must be list or comma separated string, or None to expand all.
exhaustive (bool): If True, return large dataframe with all model tracker metadata minus any columns not
in expand_responses. If False, return trimmed dataframe with most relevant columns.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
# check inputs are correct
if collection_name.startswith('old_'):
raise Exception("This function is not implemented for the old format of metadata.")
if isinstance(response_cols, list):
pass
elif response_cols is None:
pass
elif isinstance(response_cols, str):
response_cols=[x.strip() for x in response_cols.split(',')]
else:
raise Exception("Please input response cols as None, list or comma separated string.")
if isinstance(expand_responses, list):
pass
elif expand_responses is None:
pass
elif isinstance(expand_responses, str):
expand_responses=[x.strip() for x in expand_responses.split(',')]
else:
raise Exception("Please input expand response col(s) as list or comma separated string.")
if isinstance(expand_subsets, list):
pass
elif expand_subsets is None:
pass
elif isinstance(expand_subsets, str):
expand_subsets=[x.strip() for x in expand_subsets.split(',')]
else:
raise Exception("Please input subset(s) as list or comma separated string.")
# get metadata
if response_cols is not None:
filter_dict={'training_dataset.response_cols': response_cols}
else:
filter_dict={}
models = trkr.get_full_metadata(filter_dict, collection_name)
if len(models)==0:
raise Exception("No models found with these response cols in this collection. To get a list of possible response cols, pass response_cols=None.")
models = pd.DataFrame.from_records(models)
# expand model metadata - deal with NA descriptors / NA other fields
alldat=models[['model_uuid', 'time_built']]
models=models.drop(['model_uuid', 'time_built'], axis = 1)
for column in models.columns:
if column == 'training_metrics':
continue
nai=models[models[column].isna()].index
nonas=models[~models[column].isna()]
tempdf=pd.DataFrame.from_records(nonas[column].tolist(), index=nonas.index)
tempdf=pd.concat([tempdf, pd.DataFrame(np.nan, index=nai, columns=tempdf.columns)])
alldat=alldat.join(tempdf)
# assign response cols
if len(alldat.response_cols.astype(str).unique())==1:
response_cols=alldat.response_cols[0]
print("Response cols:", response_cols)
else:
raise Exception(f"There is more than one set of response cols in this collection. Please choose from these lists: {alldat.response_cols.unique()}")
# expand training metrics - deal with NA's in columns
metrics=pd.DataFrame.from_dict(models['training_metrics'].tolist())
allmet=alldat[['model_uuid']]
for column in metrics.columns:
nai=metrics[metrics[column].isna()].index
nonas=metrics[~metrics[column].isna()]
tempdf=pd.DataFrame.from_records(nonas[column].tolist(), index=nonas.index)
        tempdf=pd.concat([tempdf, pd.DataFrame(np.nan, index=nai, columns=tempdf.columns)])
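# --- Illustrative aside (not part of the tracker code above) -----------------------------------
# The metadata-expansion loop above flattens a column of nested dicts into separate columns
# while re-inserting all-NaN rows for missing entries. A minimal, self-contained sketch of that
# pattern with made-up data, independent of the model tracker schema:
import numpy as np
import pandas as pd

meta = pd.DataFrame({
    "model_uuid": ["a", "b", "c"],
    "params": [{"lr": 0.01, "layers": 2}, np.nan, {"lr": 0.10, "layers": 3}],
})
na_idx = meta[meta["params"].isna()].index
non_na = meta[~meta["params"].isna()]
expanded = pd.DataFrame.from_records(non_na["params"].tolist(), index=non_na.index)
expanded = pd.concat([expanded, pd.DataFrame(np.nan, index=na_idx, columns=expanded.columns)])
flat = meta[["model_uuid"]].join(expanded)  # one column per dict key; row "b" is all-NaN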
import geopandas as gpd
import pandas as pd
import matplotlib.pyplot as plt
class importar_arxivs:
def __init__(self, ruta_de_archivo):
valido = False
for extension in self.extensiones:
if ruta_de_archivo.endswith(extension):
valido = True
if not valido:
            raise Exception('Incorrect format')
self.ruta = ruta_de_archivo
class importar_csv(importar_arxivs):
extensiones = ['csv']
def importar(self):
return pd.read_csv(self.ruta)
class importar_excel(importar_arxivs):
extensiones = ['xls', 'xlsx', 'xlsm', 'xlsb', 'odf', 'odt']
def importar(self, hoja=0):
        return pd.read_excel(self.ruta, sheet_name=hoja)
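# Brief usage sketch for the importer classes above. The file paths are hypothetical; only the
# extension check runs without real files on disk.
csv_loader = importar_csv('datos/ventas.csv')       # accepted: path ends with 'csv'
xlsx_loader = importar_excel('datos/ventas.xlsx')   # accepted: path ends with 'xlsx'
try:
    importar_excel('datos/ventas.txt')              # rejected by the extension check
except Exception as exc:
    print(exc)
# csv_loader.importar() / xlsx_loader.importar(hoja=1) would then read the files.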
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
            idx = DatetimeIndex(org, freq=f)
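# Standalone illustration of the two behaviours the tests above exercise most heavily:
# indexing with duplicate dates and partial-string slicing (written against the modern pandas
# API rather than the legacy TimeSeries/ix API used in this test module).
import numpy as np
import pandas as pd

_idx = pd.DatetimeIndex(["2000-01-02", "2000-01-02", "2000-01-03", "2000-01-04"])
_s = pd.Series(np.arange(4.0), index=_idx)
print(_s["2000-01-02"])               # duplicate label -> returns a 2-element Series
print(_s["2000-01-03":])              # partial-string slice from that date onwards
print(_idx.unique(), _idx.nunique())  # duplicates removed; nunique() == 3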
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pyarrow as pa
import pytest
from pyarrow.parquet import ParquetFile
from kartothek.serialization import (
CsvSerializer,
DataFrameSerializer,
ParquetSerializer,
default_serializer,
)
from kartothek.serialization._util import ensure_unicode_string_type
TYPE_STABLE_SERIALISERS = [ParquetSerializer()]
SERLIALISERS = TYPE_STABLE_SERIALISERS + [
CsvSerializer(),
CsvSerializer(compress=False),
default_serializer(),
]
type_stable_serialisers = pytest.mark.parametrize("serialiser", TYPE_STABLE_SERIALISERS)
predicate_serialisers = pytest.mark.parametrize(
"serialiser",
[
ParquetSerializer(chunk_size=1),
ParquetSerializer(chunk_size=2),
ParquetSerializer(chunk_size=4),
]
+ SERLIALISERS,
)
def test_load_df_from_store_unsupported_format(store):
with pytest.raises(ValueError):
DataFrameSerializer.restore_dataframe(store, "test.unknown")
def test_store_df_to_store(store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"]})
dataframe_format = default_serializer()
assert isinstance(dataframe_format, ParquetSerializer)
key = dataframe_format.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_store_table_to_store(serialiser, store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"]})
table = pa.Table.from_pandas(df)
key = serialiser.store(store, "prefix", table)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip(serialiser, store):
if serialiser in TYPE_STABLE_SERIALISERS:
df = pd.DataFrame(
{"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"], b"d": ["#", ";"]}
)
key = serialiser.store(store, "prefix", df)
df.columns = [ensure_unicode_string_type(col) for col in df.columns]
else:
df = pd.DataFrame(
{"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"], "d": ["#", ";"]}
)
key = serialiser.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
# Test partial restore
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(store, key, columns=["a", "c"]),
df[["a", "c"]],
)
# Test that all serialisers can ingest predicate_pushdown_to_io
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(
store, key, columns=["a", "c"], predicate_pushdown_to_io=False
),
df[["a", "c"]],
)
# Test that all serialisers can deal with categories
expected = df[["c", "d"]].copy()
expected["c"] = expected["c"].astype("category")
# Check that the dtypes match but don't care about the order of the categoricals.
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(
store, key, columns=["c", "d"], categories=["c"]
),
expected,
check_categorical=False,
)
# Test restore w/ empty col list
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(store, key, columns=[]), df[[]]
)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_missing_column(serialiser, store):
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["∆", "€"], "d": ["#", ";"]})
key = serialiser.store(store, "prefix", df)
with pytest.raises(ValueError):
DataFrameSerializer.restore_dataframe(store, key, columns=["a", "x"])
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip_empty(serialiser, store):
df = pd.DataFrame({})
key = serialiser.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
# Test partial restore
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
@pytest.mark.parametrize("serialiser", SERLIALISERS)
def test_dataframe_roundtrip_no_rows(serialiser, store):
df = pd.DataFrame({"a": [], "b": [], "c": []}).astype(object)
key = serialiser.store(store, "prefix", df)
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
# Test partial restore
pdt.assert_frame_equal(
DataFrameSerializer.restore_dataframe(store, key, columns=["a", "c"]),
df[["a", "c"]],
)
def test_filter_query_predicate_exclusion(store):
with pytest.raises(ValueError):
DataFrameSerializer.restore_dataframe(
store, "test.parquet", predicates=[[("a", "==", 1)]], filter_query="True"
)
def assert_frame_almost_equal(df_left, df_right):
"""
Be more friendly to some dtypes that are not preserved during the roundtrips.
"""
# FIXME: This needs a better documentation
for col in df_left.columns:
if pd.api.types.is_datetime64_dtype(
df_left[col].dtype
) and pd.api.types.is_object_dtype(df_right[col].dtype):
df_right[col] = pd.to_datetime(df_right[col])
elif pd.api.types.is_object_dtype(
df_left[col].dtype
) and pd.api.types.is_datetime64_dtype(df_right[col].dtype):
df_left[col] = pd.to_datetime(df_left[col])
elif (
len(df_left) > 0
and pd.api.types.is_object_dtype(df_left[col].dtype)
and pd.api.types.is_object_dtype(df_right[col].dtype)
):
if isinstance(df_left[col].iloc[0], datetime.date) or isinstance(
df_right[col].iloc[0], datetime.date
):
df_left[col] = pd.to_datetime(df_left[col])
df_right[col] = pd.to_datetime(df_right[col])
elif pd.api.types.is_object_dtype(
df_left[col].dtype
) and pd.api.types.is_categorical_dtype(df_right[col].dtype):
df_left[col] = df_left[col].astype(df_right[col].dtype)
pdt.assert_frame_equal(
df_left.reset_index(drop=True), df_right.reset_index(drop=True)
)
@pytest.mark.parametrize(
"df, read_kwargs",
[
(pd.DataFrame({"string_ü": ["abc", "affe", "banane", "buchstabe_ü"]}), {}),
(pd.DataFrame({"integer_ü": np.arange(4)}), {}),
(pd.DataFrame({"float_ü": [-3.141591, 0.0, 3.141593, 3.141595]}), {}),
(
pd.DataFrame(
{
"date_ü": [
datetime.date(2011, 1, 31),
datetime.date(2011, 2, 3),
datetime.date(2011, 2, 4),
datetime.date(2011, 3, 10),
]
}
),
{"date_as_object": False},
),
(
pd.DataFrame(
{
"date_ü": [
datetime.date(2011, 1, 31),
datetime.date(2011, 2, 3),
datetime.date(2011, 2, 4),
datetime.date(2011, 3, 10),
]
}
),
{"date_as_object": True},
),
(
pd.DataFrame(
{"categorical_ü": list("abcd")},
dtype=pd.api.types.CategoricalDtype(list("abcd"), ordered=True),
),
{},
),
],
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_pushdown(
store, df, read_kwargs, predicate_pushdown_to_io, serialiser
):
"""
Test predicate pushdown for several types and operations.
    The DataFrame parameters all need to be of the same length for this test to
    work universally, and the values in each DataFrame need to be sorted in
    ascending order.
"""
# All test dataframes need to have the same length
assert len(df) == 4
assert df[df.columns[0]].is_monotonic and df.iloc[0, 0] < df.iloc[-1, 0]
# This is due to the limitation that dates cannot be expressed in
# Pandas' query() method.
if isinstance(serialiser, CsvSerializer) and isinstance(
df.iloc[0, 0], datetime.date
):
pytest.skip("CsvSerialiser cannot filter on dates")
key = serialiser.store(store, "prefix", df)
# Test `<` and `>` operators
expected = df.iloc[[1, 2], :].copy()
predicates = [
[(df.columns[0], "<", df.iloc[3, 0]), (df.columns[0], ">", df.iloc[0, 0])]
]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test `=<` and `>=` operators
expected = df.iloc[[1, 2, 3], :].copy()
predicates = [
[(df.columns[0], "<=", df.iloc[3, 0]), (df.columns[0], ">=", df.iloc[1, 0])]
]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test `==` operator
expected = df.iloc[[1], :].copy()
predicates = [[(df.columns[0], "==", df.iloc[1, 0])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test `in` operator
expected = df.iloc[[1], :].copy()
predicates = [[(df.columns[0], "in", [df.iloc[1, 0]])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test `!=` operator
expected = df.iloc[[0, 2, 3], :].copy()
predicates = [[(df.columns[0], "!=", df.iloc[1, 0])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test empty DataFrame
expected = df.head(0)
predicates = [[(df.columns[0], "<", df.iloc[0, 0])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test in empty list
expected = df.head(0)
predicates = [[(df.columns[0], "in", [])]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test in numpy array
expected = df.iloc[[1], :].copy()
predicates = [[(df.columns[0], "in", np.asarray([df.iloc[1, 0], df.iloc[1, 0]]))]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert_frame_almost_equal(result, expected)
# Test malformed predicates 1
predicates = []
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert str(exc.value) == "Empty predicates"
# Test malformed predicates 2
predicates = [[]]
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert str(exc.value) == "Invalid predicates: Conjunction 0 is empty"
# Test malformed predicates 3
predicates = [[(df.columns[0], "<", df.iloc[0, 0])], []]
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert str(exc.value) == "Invalid predicates: Conjunction 1 is empty"
# Test malformed predicates 4
predicates = [[(df.columns[0], "<", df.iloc[0, 0])], ["foo"]]
with pytest.raises(ValueError) as exc:
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
**read_kwargs,
)
assert (
str(exc.value)
== "Invalid predicates: Clause 0 in conjunction 1 should be a 3-tuple, got object of type <class 'str'> instead"
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_float_equal_big(predicate_pushdown_to_io, store, serialiser):
df = pd.DataFrame({"float": [3141590.0, 3141592.0, 3141594.0]})
key = serialiser.store(store, "prefix", df)
predicates = [[("float", "==", 3141592.0)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[1], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_float_equal_small(predicate_pushdown_to_io, store, serialiser):
df = pd.DataFrame({"float": [0.3141590, 0.3141592, 0.3141594]})
key = serialiser.store(store, "prefix", df)
predicates = [[("float", "==", 0.3141592)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[1], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
@type_stable_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_eval_string_types(serialiser, store, predicate_pushdown_to_io):
df = pd.DataFrame({b"a": [1, 2], "b": [3.0, 4.0]})
key = serialiser.store(store, "prefix", df)
df.columns = [ensure_unicode_string_type(col) for col in df.columns]
pdt.assert_frame_equal(DataFrameSerializer.restore_dataframe(store, key), df)
for col in ["a", b"a", "a"]:
predicates = [[(col, "==", 1)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[0], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
for col in ["b", b"b", "b"]:
predicates = [[(col, "==", 3.0)]]
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
expected_df = df.iloc[[0], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
for preds in (
[[("a", "==", 1), ("b", "==", 3.0)]],
[[("a", "==", 1), (b"b", "==", 3.0)]],
[[(b"a", "==", 1), ("b", "==", 3.0)]],
):
result_df = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=preds,
)
expected_df = df.iloc[[0], :].copy()
pdt.assert_frame_equal(
result_df.reset_index(drop=True), expected_df.reset_index(drop=True)
)
@pytest.mark.parametrize(
"df,value",
[
(pd.DataFrame({"u": pd.Series([None], dtype=object)}), "foo"),
(pd.DataFrame({"b": pd.Series([None], dtype=object)}), b"foo"),
(pd.DataFrame({"f": pd.Series([np.nan], dtype=float)}), 1.2),
(
pd.DataFrame({"t": pd.Series([pd.NaT], dtype="datetime64[ns]")}),
pd.Timestamp("2017"),
),
],
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_pushdown_null_col(
store, df, value, predicate_pushdown_to_io, serialiser
):
key = serialiser.store(store, "prefix", df)
expected = df.iloc[[]].copy()
predicates = [[(df.columns[0], "==", value)]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
check_datetimelike_compat = (
isinstance(value, pd.Timestamp) and not serialiser.type_stable
)
pdt.assert_frame_equal(
result.reset_index(drop=True),
expected.reset_index(drop=True),
check_dtype=serialiser.type_stable,
check_datetimelike_compat=check_datetimelike_compat,
)
@pytest.mark.parametrize(
"df, op, value, expected_index",
[
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"==",
None,
[0, 2],
),
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"in",
[None],
[0, 2],
),
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"!=",
None,
[1],
),
(
pd.DataFrame({"u": pd.Series([None, "x", np.nan], dtype=object)}),
"in",
[None, "x"],
[0, 1, 2],
),
(
pd.DataFrame({"f": pd.Series([np.nan, 1.0, np.nan], dtype=float)}),
"==",
np.nan,
[0, 2],
),
(
pd.DataFrame({"f": pd.Series([np.nan, 1.0, np.nan], dtype=float)}),
"in",
[np.nan],
[0, 2],
),
(
pd.DataFrame({"f": pd.Series([np.nan, 1.0, np.nan], dtype=float)}),
"!=",
np.nan,
[1],
),
(
pd.DataFrame({"f": pd.Series([np.nan, 1.0, np.nan], dtype=float)}),
"in",
[np.nan, 1.0],
[0, 1, 2],
),
],
)
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_parsing_null_values(
store, df, op, value, expected_index, predicate_pushdown_to_io, serialiser
):
key = serialiser.store(store, "prefix", df)
expected = df.iloc[expected_index].copy()
predicates = [[(df.columns[0], op, value)]]
result = serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
pdt.assert_frame_equal(
result.reset_index(drop=True),
expected.reset_index(drop=True),
check_dtype=serialiser.type_stable,
)
@pytest.mark.parametrize("op", ["<", "<=", ">", ">="])
@predicate_serialisers
@pytest.mark.parametrize("predicate_pushdown_to_io", [True, False])
def test_predicate_parsing_null_values_failing(
store, op, predicate_pushdown_to_io, serialiser
):
df = pd.DataFrame({"u": pd.Series([1.0, np.nan])})
key = serialiser.store(store, "prefix", df)
predicates = [[(df.columns[0], op, np.nan)]]
with pytest.raises(ValueError, match="Only operators supporting null values"):
serialiser.restore_dataframe(
store,
key,
predicate_pushdown_to_io=predicate_pushdown_to_io,
predicates=predicates,
)
@pytest.mark.parametrize(
"column, expected_null_count",
[
("no_nulls_int", (0, 0, 0)),
("partial_nulls_int", (0, 1, 2)),
("no_nulls_float", (0, 0, 0)),
("partial_nulls_float", (0, 1, 2)),
("partial_nulls_obj", (0, 1, 2)),
("no_nulls_obj", (0, 0, 0)),
("partial_nulls_obj_mixed", (0, 2, 1)),
("nulls_reverse_rg", (1, 0, 1)),
],
)
def test_null_count(store, column, expected_null_count):
serialiser = ParquetSerializer(chunk_size=2)
df = pd.DataFrame(
{
"no_nulls_int": [1, 2, 3, 4, 5, 6],
"partial_nulls_int": [1, 2, 3, None, None, None],
"no_nulls_float": [1.1, 2.2, 3.3, 4.4, 5.5, 6.6],
"partial_nulls_float": [1.0, 2.2, 3.3, np.nan, np.nan, np.nan],
"partial_nulls_obj": [1.0, 2.2, 3.3, np.nan, np.nan, np.nan],
"no_nulls_obj": ["1.1", "2", "3", "vier", "fuenfeinhalb", "6.6"],
"partial_nulls_obj_mixed": [1.0, 2.2, None, np.nan, np.nan, 6.6],
"nulls_reverse_rg": [3.3, np.nan, 1.0, 2.0, np.nan, -1.1],
}
)
key = serialiser.store(store, "prefix", df)
reader = pa.BufferReader(store.get(key))
parquet_file = ParquetFile(reader)
col_idx = parquet_file.reader.column_name_idx(column)
assert parquet_file.num_row_groups == 3
for idx in range(0, 3):
rg = parquet_file.metadata.row_group(idx)
assert rg.column(col_idx).statistics.null_count == expected_null_count[idx]
@pytest.mark.parametrize(
"df,value",
[
(pd.DataFrame({"nan": pd.Series([np.nan, -1.0, 1.0], dtype=float)}), 0.0),
(pd.DataFrame({"inf": pd.Series([np.inf, -1.0, 1.0], dtype=float)}), 0.0),
(pd.DataFrame({"ninf": pd.Series([-np.inf, -1.0, 1.0], dtype=float)}), 0.0),
(
pd.DataFrame(
{"inf2": pd.Series([-np.inf, np.inf, -1.0, 1.0], dtype=float)}
),
0.0,
),
(
pd.DataFrame(
{"inf2":
|
pd.Series([-np.inf, np.inf, -1.0, 1.0], dtype=float)
|
pandas.Series
|
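# The predicates used throughout these tests are a disjunction of conjunctions:
# [[clause, clause, ...], ...], where clauses inside an inner list are AND-ed and the inner
# lists themselves are OR-ed. A rough, library-free sketch of those semantics on an in-memory
# frame (reduced operator set; this is not kartothek's implementation and it ignores
# kartothek's null-value handling):
import operator
import numpy as np
import pandas as pd

_OPS = {"==": operator.eq, "!=": operator.ne, "<": operator.lt, "<=": operator.le,
        ">": operator.gt, ">=": operator.ge, "in": lambda col, vals: col.isin(vals)}

def _apply_predicates(df, predicates):
    mask = np.zeros(len(df), dtype=bool)
    for conjunction in predicates:          # OR across conjunctions
        conj_mask = np.ones(len(df), dtype=bool)
        for col, op, value in conjunction:  # AND within a conjunction
            conj_mask &= _OPS[op](df[col], value).to_numpy()
        mask |= conj_mask
    return df[mask]

_df_demo = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
print(_apply_predicates(_df_demo, [[("a", ">", 1), ("b", "!=", "z")], [("a", "==", 1)]]))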
import tensorflow as tf
import numpy as np
import scipy.io as sio
import pandas as pd
import os
import csv
import re  # used by readFasta / get_sORF / get_protein below
from feature_encoding import *
from keras.models import load_model
from keras.utils import to_categorical
import Efficient_CapsNet_sORF150
import Efficient_CapsNet_sORF250
import lightgbm as lgb
from sklearn.metrics import roc_auc_score
import sys
from optparse import OptionParser
##read Fasta sequence
def readFasta(file):
if os.path.exists(file) == False:
print('Error: "' + file + '" does not exist.')
sys.exit(1)
with open(file) as f:
records = f.read()
if re.search('>', records) == None:
print('The input file seems not in fasta format.')
sys.exit(1)
records = records.split('>')[1:]
myFasta = []
for fasta in records:
array = fasta.split('\n')
name, sequence = array[0].split()[0], re.sub('[^ARNDCQEGHILKMFPSTWYV-]', '-', ''.join(array[1:]).upper())
myFasta.append([name, sequence])
return myFasta
##extract sORF sequence
def get_sORF(fastas):
sORF_seq = []
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
g = 0
if len(seq) > 303:
for j in range(len(seq)-2):
seg_start = seq[j:j+3]
if seg_start == 'ATG':
for k in range(j+3, len(seq)-2, 3):
seg_end = seq[k:k+3]
if seg_end == 'TAA':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
if seg_end == 'TAG':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
if seg_end == 'TGA':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
elif len(seq) <= 303 and np.mod(len(seq), 3) != 0:
for j in range(len(seq)-2):
seg_start = seq[j:j+3]
if seg_start == 'ATG':
for k in range(j+3, len(seq)-2, 3):
seg_end = seq[k:k+3]
if seg_end == 'TAA':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
if seg_end == 'TAG':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
if seg_end == 'TGA':
sequence = seq[j:k+3]
if np.mod(len(sequence), 3) == 0 and 12 <= len(sequence) <= 303:
g+=1
sequence_name = '>' + name + '_sORF' + str(g)
sORF_seq.append([sequence_name, sequence])
break
elif seq[0:3] == 'ATG' and seq[len(seq)-3:len(seq)] == 'TAA' and np.mod(len(seq), 3) == 0 and 12 <= len(seq) <= 303:
sORF_seq.append([name, seq])
elif seq[0:3] == 'ATG' and seq[len(seq)-3:len(seq)] == 'TAG' and np.mod(len(seq), 3) == 0 and 12 <= len(seq) <= 303:
sORF_seq.append([name, seq])
elif seq[0:3] == 'ATG' and seq[len(seq)-3:len(seq)] == 'TGA' and np.mod(len(seq), 3) == 0 and 12 <= len(seq) <= 303:
sORF_seq.append([name, seq])
return sORF_seq
##get protein sequence
def get_protein(fastas):
protein_seq=[]
start_codon = 'ATG'
codon_table = {
'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',
'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',
'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',
'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',
'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',
'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',
'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',
'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',
'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',
'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',
'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',
'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',
'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',
'TAC': 'Y', 'TAT': 'Y', 'TAA': '', 'TAG': '',
'TGC': 'C', 'TGT': 'C', 'TGA': '', 'TGG': 'W'}
for i in fastas:
name, seq = i[0], re.sub('-', '', i[1])
start_site = re.search(start_codon, seq)
protein = ''
for site in range(start_site.start(), len(seq), 3):
protein = protein + codon_table[seq[site:site+3]]
protein_name = '>Micropeptide_' + name
protein_seq.append([protein_name, protein])
return protein_seq
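# Small worked example of the translation performed by get_protein above. The sequence is made
# up; stop codons map to the empty string in the codon table, so the trailing TAA contributes
# nothing to the peptide.
_toy_orf = 'ATGGCTTGGTAA'                # ATG GCT TGG TAA -> M A W (stop)
print(get_protein([['toy', _toy_orf]]))  # [['>Micropeptide_toy', 'MAW']]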
##extract features
def feature_encode(datapath, dna_seq, protein_seq, s_type, d_type):
if s_type == 'H.sapiens':
if d_type == 'CDS':
c_m = pd.read_csv(datapath + 'human_cds_trainp_6mermean.csv', header=None, delimiter=',')
nc_m = pd.read_csv(datapath + 'human_cds_trainn_6mermean.csv', header=None, delimiter=',')
            Tc_pos1 = pd.read_csv(datapath + 'human_cds_trainp_framed_3mer_1.csv', header=None, delimiter=',')
from flask import Flask,render_template,url_for,session,request,make_response,send_file
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from pandas import DataFrame,read_csv
import random
from flask import Response
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import io
from fbprophet import Prophet
festivals=pd.DataFrame({
'holiday':(['New Year Day', 'Guru Govind Singh Birthday',
'<NAME>',
'Pongal',
'Republic Day',
'<NAME>',
'Guru Ravidas Birthday',
'<NAME>',
'<NAME>',
'<NAME>/Shivaratri',
'<NAME> Birthday',
'<NAME>',
'Holi',
'<NAME>/Gudi Padava/Ugadi/Cheti Chand',
'<NAME>',
'<NAME>',
'Good Friday',
'Easter Day',
'Mesadi',
'Vaisakhi/Vishu',
'Vaisakhadi(Bengal)/Bahag Bihu (Assam)',
'Guru Rabindranath birthday',
'<NAME>',
'Jamat Ul-Vida',
'<NAME>',
'<NAME>',
'Id-ul-Zuha(Bakrid)',
'Independence Day',
'Rak<NAME>han (Rakhi)',
'Parsi New Year day',
'Janmashtarni',
'Onam',
'Muharram',
'<NAME>',
'<NAME>',
'Dussehra',
'Maharishi Valmiki Birthday',
'<NAME> (Karva Chouth)',
'<NAME>',
'Diwali (Deepavali)',
'Govardhan Puja',
'<NAME>',
'Chhat Puja',
'Milad-un-Nabi or Id-e- Milad',
'Guru Nanaks Birthday',
'Guru Teg Bahadur Martyrdom Day',
'Christmas Eve',
'Christmas Day',
'New Year Day',
'Guru Gobind Singh Jayanti',
'Lohri',
'Pongal, Uttarayan, Makar Sankranti',
'Republic Day',
'<NAME>',
'Guru Ravidas Jayanti',
'<NAME> <NAME>',
'Mahashivratri',
'<NAME>',
'Holi',
'Ugadi, Gudi Padwa',
'Bank Holiday',
'<NAME>',
'<NAME>',
'Good Friday',
'Easter',
'Baisakhi',
'<NAME>, <NAME>',
'Eid-al-Fitr / Ramadan',
'<NAME>',
'Guru Purnima',
'<NAME>',
'Bakrid',
'<NAME>',
'Janmashtami',
'Independence Day',
'<NAME>',
'Muharram',
'Onam',
'<NAME>',
'Dussehra',
'Id-e-Milad',
'<NAME>',
'<NAME>',
'Dhanteras',
"Diwali, Narak Chaturdashi, Children's day",
'Govardhan Puja',
'<NAME>',
'<NAME>',
'Guru Nanak Birthday',
'Christmas'
]),
'ds':([
'Jan-1-2019',
'Jan-13-2019',
'Jan-14-2019',
'Jan-15-2019',
'Jan-26-2019',
'Feb-10-2019',
'Feb-19-2019',
'Feb-19-2019',
'Mar-1-2019',
'Mar-4-2019',
'Mar-19-2019',
'Mar-20-2019',
'Mar-21-2019',
'Apr-6-2019',
'Apr-13-2019',
'Apr-17-2019',
'Apr-19-2019',
'Apr-21-2019',
'Apr-13-2019',
'Apr-14-2019',
'Apr-14-2019',
'May-9-2019',
'May-18-2019',
'May-31-2019',
'Jun-5-2019',
'Jul-4-2019',
'Aug-12-2019',
'Aug-15-2019',
'Aug-15-2019',
'Aug-17-2019',
'Aug-24-2019',
'Sep-11-2019',
'Sep-10-2019',
'Sep-2-2019',
'Oct-2-2019',
'Oct-7-2019',
'Oct-13-2019',
'Oct-17-2019',
'Oct-27-2019',
'Oct-27-2019',
'Oct-28-2019',
'Oct-29-2019',
'Nov-2-2019',
'Nov-10-2019',
'Nov-12-2019',
'Nov-24-2019',
'Dec-24-2019',
'Dec-25-2019',
'Jan-1-2020',
'Jan-2-2020',
'Jan-14-2020',
'Jan-15-2020',
'Jan-26-2020',
'Jan-29-2020',
'Feb-9-2020',
'Feb-18-2020',
'Feb-21-2020',
'Mar-9-2020',
'Mar-10-2020',
'Mar-25-2020',
'Apr-1-2020',
'Apr-2-2020',
'Apr-6-2020',
'Apr-10-2020',
'Apr-12-2020',
'Apr-13-2020',
'May-7-2020',
'May-24-2020',
'Jun-23-2020',
'Jul-5-2020',
'Jul-23-2020',
'Jul-31-2020',
'Aug-3-2020',
'Aug-11-2020',
'Aug-15-2020',
'Aug-22-2020',
'Aug-23-2020',
'Aug-29-2020',
'Aug-31-2020',
'Oct-2-2020',
'Oct-25-2020',
'Oct-29-2020',
'Oct-31-2020',
'Nov-4-2020',
'Nov-12-2020',
'Nov-14-2020',
'Nov-15-2020',
'Nov-16-2020',
'Nov-30-2020',
'Dec-25-2020'])
})
holidays=festivals.filter(['holiday','ds'])
holidays['ds']=pd.to_datetime(festivals['ds'])
file=r'./forecasting-prj-sample-data-v0.1.csv'
df=pd.read_csv(file)
df.to_html(header="true",table_id="table")
app=Flask(__name__)
app.config['SECRET_KEY']='f2091f20a36545a4ffe6ef38439845f5'
frame1=df.filter(['Date','Occupancy Adults'])
frame1['Date']=pd.to_datetime(df['Date'])
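# The `holidays` frame built above matches Prophet's expected schema ('holiday', 'ds').
# A minimal sketch (synthetic data) of how it plugs into a fit; real use would rename frame1's
# columns to Prophet's required 'ds'/'y' instead of the random series used here.
def _prophet_holidays_demo():
    history = pd.DataFrame({
        'ds': pd.date_range('2019-01-01', periods=120, freq='D'),
        'y': np.random.rand(120) * 100,
    })
    m = Prophet(holidays=holidays)
    m.fit(history)
    forecast = m.predict(m.make_future_dataframe(periods=30))
    return forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()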
import os
import sys
from copy import copy
from functools import wraps
from time import time
import skimage.filters
import funcs
import numpy as np
import pandas as pd
import seaborn as sns
import uncertainties as un
from funcs.post_processing.images.soot_foil import deltas as pp_deltas
from matplotlib import patches
from matplotlib import pyplot as plt
from matplotlib_scalebar.scalebar import ScaleBar
from scipy.stats import ks_2samp, t, ttest_ind, ttest_ind_from_stats
from skimage import io, transform
from uncertainties import unumpy as unp
d_drive = funcs.dir.d_drive
DF_SF_SPATIAL = pd.read_csv(
os.path.join(
d_drive,
"Data",
"Processed",
"Soot Foil",
"spatial_calibrations.csv",
)
)
SF_DATE = "2020-12-27"
SF_SHOT = 3
SF_IMG_DIR = os.path.join(
d_drive,
"Data",
"Processed",
"Soot Foil",
"foil images",
SF_DATE,
f"Shot {SF_SHOT:02d}",
)
SF_SPATIAL_SHOT_MASK = (DF_SF_SPATIAL["date"] == SF_DATE) & (
DF_SF_SPATIAL["shot"] == SF_SHOT
)
SF_DELTA_MM = DF_SF_SPATIAL[SF_SPATIAL_SHOT_MASK]["delta_mm"]
SF_DELTA_PX = DF_SF_SPATIAL[SF_SPATIAL_SHOT_MASK]["delta_px"]
PLOT_FILETYPE = "png"
# ibm color blind safe palette
# https://lospec.com/palette-list/ibm-color-blind-safe
# https://davidmathlogic.com/colorblind/#%23648FFF-%23785EF0-%23DC267F-%23FE6100-%23FFB000
COLOR_SC = "#fe6100"
COLOR_SF = "#648fff"
SAVE_LOC = os.path.join(d_drive, "Measurement-Paper", "images")
DPI = 200
def hex2rgb(hex_color):
hex_color = hex_color.replace("#", "")
r = int(hex_color[:2], 16)
g = int(hex_color[2:4], 16)
b = int(hex_color[4:], 16)
return r, g, b
def rgb2hex(r, g, b):
out = f"#{hex(r)[2:]}{hex(g)[2:]}{hex(b)[2:]}"
return out
def hex_add(c0, c1):
"""
    Component-wise sum of two hex colors (c0 + c1), with each channel clipped to 255.
    Parameters
    ----------
    c0 : str
        First hex color, e.g. "#648fff"
    c1 : str
        Second hex color
    Returns
    -------
    str
        Resulting hex color
"""
r0, g0, b0 = hex2rgb(c0)
r1, g1, b1 = hex2rgb(c1)
r_out = min(255, r0 + r1)
g_out = min(255, g0 + g1)
b_out = min(255, b0 + b1)
out = rgb2hex(r_out, g_out, b_out)
return out
def hex_sub(c0, c1):
"""
    Component-wise difference of two hex colors (c0 - c1), with each channel clipped to 0.
    Parameters
    ----------
    c0 : str
        Hex color to subtract from
    c1 : str
        Hex color to subtract
    Returns
    -------
    str
        Resulting hex color
"""
r0, g0, b0 = hex2rgb(c0)
r1, g1, b1 = hex2rgb(c1)
r_out = max(0, r0 - r1)
g_out = max(0, g0 - g1)
b_out = max(0, b0 - b1)
out = rgb2hex(r_out, g_out, b_out)
return out
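# Quick sanity check of the hex-color helpers above; values are chosen to exercise the clipping
# behaviour, and the expected strings assume the zero-padded rgb2hex above.
print(hex_add("#648fff", "#222222"))  # -> #86b1ff (component-wise sum)
print(hex_sub("#648fff", "#700000"))  # -> #008fff (red channel clips at 0)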
def timed(func):
@wraps(func)
def _timed(*args, **kwargs):
name = func.__name__
if name != "main":
sys.stderr.write(f"{func.__name__} ")
t0 = time()
out = func(*args, **kwargs)
t1 = time()
if name != "main":
sys.stderr.write(f"took {t1 - t0:6f} sec\n")
else:
sys.stderr.write(f"Done! {t1 - t0:6f} sec\n")
return out
return _timed
@timed
def set_plot_format():
common_size = 7.5
sns.set_color_codes("deep")
sns.set_context(
"paper",
rc={
"font.size": common_size,
"axes.titlesize": common_size + 1.5,
"axes.titleweight": "bold",
"axes.labelsize": common_size,
"xtick.labelsize": common_size,
"ytick.labelsize": common_size,
},
)
sns.set_style(
{
"font.family": "serif",
"font.serif": "Computer Modern",
}
)
# plt.rcParams["axes.titleweight"] = "bold"
plt.rcParams["figure.dpi"] = DPI
def sf_imread(
img_path,
plot=True,
):
"""
Thin wrapper around `skimage.io.imread` that rotates the image if it is
to be used for plotting, but does not if it is to be used for measurements.
Parameters
----------
img_path : str
Path to image
plot : bool
Determines whether or not image will be rotated 90 degrees
Returns
-------
np.array
"""
img_in = io.imread(img_path)
if plot:
img_in = transform.rotate(img_in, -90) # show images going left-right
return img_in
# noinspection PyTypeChecker
def get_scale_bar(
delta_px,
delta_mm,
cell_size,
text_color="#000",
box_color="#fff",
box_alpha=1,
rotation="vertical",
):
"""
Thin wrapper around ScaleBar that does a bit of standard formatting for
my needs.
Parameters
----------
delta_px : float
Calibration delta (px)
delta_mm : float
Calibration delta (mm)
cell_size : float
Fixed value to display in scale bar
text_color : str
Text color (hex)
box_color: str
Background box color (hex)
box_alpha : float
Box alpha -- NOTE: does not apply to border >:(
rotation : str
Which direction to place the scale bar: "vertical" or "horizontal"
Returns
-------
ScaleBar
"""
return ScaleBar(
delta_mm / delta_px,
"mm",
location=3,
fixed_value=cell_size,
scale_formatter=(lambda x, u: f"{x:.1f} {u}"),
border_pad=0.2,
color=text_color,
box_color=box_color,
box_alpha=box_alpha,
rotation=rotation,
)
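# Hedged usage sketch for get_scale_bar: attach the configured ScaleBar to an axes.
# The calibration deltas below are placeholders, not real foil calibrations.
_fig_demo, _ax_demo = plt.subplots()
_ax_demo.imshow(np.zeros((10, 10)))
_ax_demo.add_artist(get_scale_bar(delta_px=100.0, delta_mm=12.7, cell_size=25.4))
plt.close(_fig_demo)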
@timed
def get_schlieren_data(estimator):
"""
Read in schlieren data from assorted .h5 stores and calculate cell sizes
for individual shots.
Returns
-------
Tuple[pd.DataFrame, pd.DataFrame]
"""
# read in data
df_schlieren_tube = pd.DataFrame()
df_schlieren_all_frames = pd.DataFrame()
df_schlieren_frames = pd.DataFrame()
for group in ("fffff", "hhhhh", "ggggg"):
with pd.HDFStore(
f"/d/Data/Processed/Data/data_{group}.h5",
"r",
) as store:
df_schlieren_tube = pd.concat((df_schlieren_tube, store.data))
with pd.HDFStore(
f"/d/Data/Processed/Data/schlieren_{group}.h5",
"r",
) as store:
df_schlieren_all_frames = pd.concat(
(df_schlieren_all_frames, store.data)
)
# fix jacked up measurement
with pd.HDFStore(
"/d/Data/Processed/Data/tube_data_2020-08-07.h5",
"r",
) as store:
df_schlieren_tube[
(
(df_schlieren_tube["date"] == "2020-08-07")
& (df_schlieren_tube["shot"] == 3)
)
] = store.data.iloc[0].values
# After some analysis it looks like I was dumb and used u_delta_px as
# u_loc_px. This gives an overly large estimate of uncertainty for
# schlieren. Fix that before continuing with the analysis.
df_schlieren_all_frames.loc[:, "u_delta_px"] = (
df_schlieren_all_frames["u_loc_px"].copy()
)
df_schlieren_all_frames.loc[:, "u_loc_px"] = df_schlieren_all_frames[
"u_loc_px"
].div(np.sqrt(2))
# calculate cell size measurements
df_schlieren_tube = df_schlieren_tube[
np.isclose(df_schlieren_tube["phi_nom"], 1)
& np.isclose(df_schlieren_tube["dil_mf_nom"], 0.2)
& (df_schlieren_tube["fuel"] == "CH4")
& (df_schlieren_tube["oxidizer"] == "N2O")
& (df_schlieren_tube["diluent"] == "N2")
]
df_schlieren_tube["cell_size"] = np.NaN
df_schlieren_tube["u_cell_size"] = np.NaN
u_delta_bias = np.sqrt(2) / 2
deltas = unp.uarray(
df_schlieren_all_frames["delta_px"],
df_schlieren_all_frames["u_delta_px"], # precision only
) + un.ufloat(0, u_delta_bias)
spatials = unp.uarray(
df_schlieren_all_frames["spatial_centerline"],
df_schlieren_all_frames["u_spatial_centerline"],
)
cell_sizes = deltas * spatials
df_schlieren_all_frames["cell_size"] = unp.nominal_values(cell_sizes)
df_schlieren_all_frames["u_cell_size"] = unp.std_devs(cell_sizes)
for (date, shot), _ in df_schlieren_tube.groupby(["date", "shot"]):
_df_this_shot = df_schlieren_all_frames[
(
(df_schlieren_all_frames["date"] == date)
& (df_schlieren_all_frames["shot"] == shot)
)
].dropna()
df_schlieren_frames = pd.concat((df_schlieren_frames, _df_this_shot))
if len(_df_this_shot):
_deltas = unp.uarray(
_df_this_shot["delta_px"],
_df_this_shot["u_delta_px"],
)
_mm_per_px = unp.uarray(
_df_this_shot["spatial_centerline"],
_df_this_shot["u_spatial_centerline"],
)
_meas = estimator(_deltas * _mm_per_px) * 2
# noinspection PyUnresolvedReferences
df_schlieren_tube.loc[
(
(df_schlieren_tube["date"] == date)
& (df_schlieren_tube["shot"] == shot)
),
["cell_size", "u_cell_size"],
] = (_meas.nominal_value, _meas.std_dev)
df_schlieren_tube = df_schlieren_tube[
~pd.isna(df_schlieren_tube["cell_size"])
]
return df_schlieren_frames, df_schlieren_tube
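# The cell-size arithmetic above leans on the `uncertainties` package to propagate measurement
# error through delta_px * mm_per_px. A tiny standalone illustration of that propagation
# (numbers are made up, mirroring the bias term used above):
_demo_deltas = unp.uarray([10.0, 12.0], [0.5, 0.5]) + un.ufloat(0, np.sqrt(2) / 2)
_demo_scale = unp.uarray([0.08, 0.08], [0.001, 0.001])  # mm per px
_demo_cells = _demo_deltas * _demo_scale
print(unp.nominal_values(_demo_cells), unp.std_devs(_demo_cells))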
@timed
def build_schlieren_images(
cmap,
df_meas,
image_width=None,
image_height=None,
save=False,
limits_x=(10, 110),
limits_y=(10, 210),
):
"""
Generates images of:
* Raw schlieren image
* Schlieren image with triple point locations identified
    Images will be rendered with an aspect ratio of 0.5; only one of
`image_width`, `image_height` should be given.
Parameters
----------
cmap : str
Colormap to use for schlieren frame
df_meas : pd.DataFrame
DataFrame of schlieren measurements
image_width : float or None
Image width (in)
image_height : float or None
Image height (in)
save : bool
Whether or not to save images
limits_x : tuple
X limits of trimmed image
limits_y : tuple
Y limits of trimmed image
Returns
-------
"""
aspect_ratio = 0.5 # w/h
if image_width is None and image_height is None:
raise ValueError("image_width or image_height must be given")
if image_width is None:
image_width = image_height * aspect_ratio
elif image_height is None:
image_height = image_width / aspect_ratio
date = "2020-08-07"
shot = 3
frame = 0
tube_data_h5_suffix = "fffff"
with pd.HDFStore(
f"/d/Data/Processed/Data/data_{tube_data_h5_suffix}.h5", "r"
) as store:
schlieren_key_date = date.replace("-", "_")
key = (
f"/schlieren/d{schlieren_key_date}/" + f"shot{shot:02d}/"
f"frame_{frame:02d}"
)
schlieren_raw = np.fliplr(store[key])
# jankily shoehorn spatial calibration into existing function
mm_per_px = df_meas[
(df_meas["date"] == date)
& (df_meas["shot"] == shot)
]["spatial_centerline"].iloc[0]
schlieren_scalebar = get_scale_bar(
1,
mm_per_px,
cell_size=25.4,
)
# trim image to ROI
limits_x = sorted(limits_x)
limits_y = sorted(limits_y)
schlieren_raw = (
schlieren_raw[np.arange(*limits_y), :][:, np.arange(*limits_x)]
)
schlieren_raw /= schlieren_raw.max()
schlieren_raw = skimage.filters.unsharp_mask(
schlieren_raw,
radius=1.5,
amount=3,
)
df_meas = df_meas[
(df_meas["loc_px"] >= limits_y[0])
& (df_meas["loc_px"] <= limits_y[1])
]
df_meas["loc_px"] -= limits_y[0]
# raw frame
name = "schlieren_frame_raw"
fig, ax = plt.subplots(figsize=(image_width, image_height))
fig.canvas.set_window_title(name)
ax.imshow(schlieren_raw, cmap=cmap)
ax.axis("off")
# ax.set_title("Raw")
ax.grid(False)
ax.add_artist(
copy(schlieren_scalebar),
)
plt.tight_layout()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
# frame with triple point measurements
name = "schlieren_frame_measurements"
fig, ax = plt.subplots(figsize=(image_width, image_height))
fig.canvas.set_window_title(name)
ax.imshow(schlieren_raw, cmap=cmap)
ax.axis("off")
# ax.set_title("Measurements")
ax.grid(False)
for loc_px in df_meas[
(df_meas["date"] == date)
& (df_meas["shot"] == shot)
& (df_meas["frame"] == frame)
]["loc_px"]:
plt.axhline(
loc_px,
c=COLOR_SC,
lw=0.5,
)
ax.add_artist(
copy(schlieren_scalebar),
)
plt.tight_layout()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
@timed
def calculate_schlieren_cell_size(
df_schlieren_frames,
iqr_fencing=False,
estimator=np.mean,
):
"""
Parameters
----------
df_schlieren_frames : pd.DataFrame
DataFrame containing schlieren data
iqr_fencing : bool
Remove outliers using IQR fencing?
estimator : func
Estimator function to use
Returns
-------
Tuple[float, float, unp.uarray, int]
* Measured nominal cell size (mm)
* Measured cell size uncertainty (mm)
* Individual deltas with uncertainties (mm)
* Number of measurements
"""
df_schlieren_frames = df_schlieren_frames.copy().dropna()
if iqr_fencing:
# remove outliers
meas_mean = df_schlieren_frames["cell_size"].mean()
meas_std = df_schlieren_frames["cell_size"].std()
mask = (
meas_mean - 1.5 * meas_std <= df_schlieren_frames["cell_size"]
) & (df_schlieren_frames["cell_size"] <= meas_mean + 1.5 * meas_std)
del meas_std, meas_mean # make sure we use reduced dataset!
else:
# leave em
mask = np.ones_like(df_schlieren_frames["cell_size"], dtype=bool)
meas = unp.uarray(
df_schlieren_frames["cell_size"][mask],
df_schlieren_frames["u_cell_size"][mask].values,
)
n_meas = len(meas)
nominal_values = unp.nominal_values(meas)
# cell_size_meas = np.sum(meas) / n_meas
cell_size_meas = 2 * estimator(meas)
cell_size_uncert_population = (
nominal_values.std() / np.sqrt(n_meas) * t.ppf(0.975, n_meas - 1)
)
# noinspection PyUnresolvedReferences
cell_size_uncert_schlieren = np.sqrt(
np.sum(
np.square(
[
cell_size_uncert_population,
cell_size_meas.std_dev,
]
)
)
)
uncertainty = {
"instrument": cell_size_meas.std_dev,
"population": cell_size_uncert_population,
"total": cell_size_uncert_schlieren,
}
# noinspection PyUnresolvedReferences
return (
cell_size_meas.nominal_value,
uncertainty,
meas,
n_meas,
)
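# The population term above is a 95% confidence half-width of the mean:
# std / sqrt(n) * t_(0.975, n-1). A small numeric check of that formula with made-up samples
# (same ddof=0 std as the numpy call above):
_demo_samples = np.array([19.0, 21.5, 20.2, 18.8, 22.1])
_demo_n = len(_demo_samples)
_demo_half_width = _demo_samples.std() / np.sqrt(_demo_n) * t.ppf(0.975, _demo_n - 1)
print(_demo_samples.mean(), "+/-", _demo_half_width)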
@timed
def plot_schlieren_measurement_distribution(
schlieren_meas,
cell_size_meas,
cell_size_uncert,
plot_width,
plot_height,
save=False,
):
"""
Plot the distribution of schlieren measurements
Parameters
----------
schlieren_meas : np.array
Array of individual schlieren nominal measurements (mm)
cell_size_meas : float
Nominal mean cell size measurement (mm)
cell_size_uncert : float
Uncertainty in cell size (mm)
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save the plot
Returns
-------
"""
name = "schlieren_measurement_distribution"
fig, ax = plt.subplots(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
sns.distplot(
schlieren_meas,
hist=False,
# rug=True,
ax=ax,
color=COLOR_SC,
)
ax_ylim = ax.get_ylim()
plt.fill_between(
[cell_size_meas + cell_size_uncert, cell_size_meas - cell_size_uncert],
ax_ylim[0],
ax_ylim[1],
alpha=0.25,
color=COLOR_SC,
ec=None,
zorder=-1,
)
ax.axvline(
cell_size_meas,
c=COLOR_SC,
ls="--",
alpha=0.7,
zorder=-1,
)
ax.set_ylim(ax_ylim)
ax.set_xlabel("Measured Cell Size (mm)")
ax.set_ylabel("Probability Density\n(1/mm)")
# ax.set_title("Schlieren Cell Size Measurement Distribution")
ax.grid(False)
plt.tight_layout()
sns.despine()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
@timed
def plot_all_schlieren_deltas_distribution(
df_schlieren_frames,
plot_width,
plot_height,
save=False,
):
"""
Plot the distribution of all schlieren deltas in the dataset
Parameters
----------
df_schlieren_frames : pd.DataFrame
Dataframe containing all schlieren frame deltas
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save the plot
Returns
-------
"""
name = "schlieren_all_deltas_distribution"
fig, ax = plt.subplots(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
deltas = (
df_schlieren_frames["spatial_centerline"]
* df_schlieren_frames["delta_px"]
).dropna()
sns.kdeplot(
deltas,
ax=ax,
color=COLOR_SC,
clip=[0, 100],
)
ax.set_xlabel("Triple Point Delta (mm)")
ax.set_ylabel("Probability Density\n(1/mm)")
# ax.set_title("Schlieren Triple Point Delta Distribution")
ax.grid(False)
plt.tight_layout()
sns.despine()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
return deltas
@timed
def plot_all_soot_foil_deltas_distribution(
soot_foil_meas,
plot_width,
plot_height,
save=False,
):
"""
Plot the distribution of all schlieren deltas in the dataset
Parameters
----------
soot_foil_meas : list
List of all soot foil deltas
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save the plot
Returns
-------
"""
name = "soot_foil_all_deltas_distribution"
fig, ax = plt.subplots(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
sns.kdeplot(
soot_foil_meas,
ax=ax,
color=COLOR_SF,
clip=[0, 100],
)
ax.set_xlabel("Triple Point Delta (mm)")
ax.set_ylabel("Probability Density\n(1/mm)")
# ax.set_title("Soot Foil Triple Point Delta Distribution")
ax.grid(False)
plt.xlim([0, 100])
plt.tight_layout()
sns.despine()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
@timed
def plot_both_delta_distributions(
df_schlieren_frames,
soot_foil_meas,
plot_width,
plot_height,
save=False,
):
"""
Plot the distribution of all schlieren and soot foil deltas in the dataset
Parameters
----------
df_schlieren_frames : pd.DataFrame
Dataframe containing all schlieren frame deltas
soot_foil_meas : list
List of all soot foil deltas
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save the plot
Returns
-------
"""
name = "all_deltas_distributions"
fig, ax = plt.subplots(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
deltas = (
df_schlieren_frames["spatial_centerline"]
* df_schlieren_frames["delta_px"]
).dropna()
sns.kdeplot(
deltas,
ax=ax,
color=COLOR_SC,
label="Schlieren",
clip=[0, 100],
)
sns.kdeplot(
soot_foil_meas,
ax=ax,
color=COLOR_SF,
label="Soot Foil",
clip=[0, 100],
)
ax.set_xlabel("Triple Point Delta (mm)")
ax.set_ylabel("Probability Density\n(1/mm)")
plt.xlim([0, 100])
# ax.set_title("Triple Point Delta Distributions")
ax.grid(False)
plt.legend(frameon=False)
plt.tight_layout()
sns.despine()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
# noinspection DuplicatedCode
@timed
def plot_schlieren_measurement_convergence(
schlieren_meas,
schlieren_uncert,
schlieren_meas_all,
n_schlieren_meas,
plot_width,
plot_height,
save=False,
):
"""
Plots convergence of schlieren measurements vs. number of measurements.
Parameters
----------
schlieren_meas : float
Actual measured schlieren value
schlieren_uncert : float
Uncertainty in measured value
schlieren_meas_all : np.array
Array of individual schlieren nominal measurements (mm)
n_schlieren_meas : int
Number of schlieren measurements
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save the plot
Returns
-------
"""
name = "schlieren_measurement_convergence"
fig, ax = plt.subplots(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
n_meas = np.arange(1, n_schlieren_meas + 1)
schlieren_meas_all = pd.Series(schlieren_meas_all)
running_mean = schlieren_meas_all.rolling(
n_schlieren_meas,
min_periods=0,
).median()
ax.plot(
n_meas,
np.abs(running_mean - schlieren_meas) * 100 / schlieren_meas,
c=COLOR_SC,
)
ax.axhline(
schlieren_uncert / schlieren_meas * 100,
c="k",
alpha=0.5,
zorder=-1,
lw=0.5,
ls=(0, (5, 1, 1, 1)),
)
ax.set_xlim([2, len(running_mean)])
ax.set_xlabel("Number of Triple Point Deltas")
ax.set_ylabel("Absolute Difference\nFrom Final (%)")
# ax.set_title("Schlieren Cell Size Measurement Convergence")
ax.grid(False)
plt.tight_layout()
sns.despine()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
# noinspection DuplicatedCode
@timed
def plot_soot_foil_measurement_convergence(
nominal_measurement,
uncert,
all_measurements,
plot_width,
plot_height,
save=False,
):
"""
Plots convergence of soot foil measurements vs. number of measurements.
Parameters
----------
nominal_measurement : float
Actual measured value
uncert : float
Uncertainty in measured value
all_measurements : np.array
Array of individual nominal measurements (mm)
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save the plot
Returns
-------
"""
name = "soot_foil_measurement_convergence"
fig, ax = plt.subplots(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
n_meas = len(all_measurements)
meas_range = np.arange(1, n_meas + 1)
all_measurements = pd.Series(all_measurements)
running_mean = all_measurements.rolling(
n_meas,
min_periods=0,
).median()
uncert_pct = uncert / nominal_measurement * 100
ax.plot(
meas_range,
np.abs(running_mean - nominal_measurement) * 100 / nominal_measurement,
c=COLOR_SF,
)
ax.axhline(
uncert_pct,
c="k",
alpha=0.5,
zorder=-1,
lw=0.5,
ls=(0, (5, 1, 1, 1)),
)
ax.set_xlim([2, len(running_mean)])
ax.set_xlabel("Number of Triple Point Deltas")
ax.set_ylabel("Absolute Difference\nFrom Final (%)")
plt.ticklabel_format(
style="sci",
axis="x",
scilimits=(0, 0),
useMathText=True,
)
# ax.set_title("Soot Foil Cell Size Measurement Convergence")
ax.grid(False)
plt.tight_layout()
sns.despine()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
return uncert_pct
@timed
def build_soot_foil_images(
cmap,
image_height,
save=False,
):
"""
Generates images of:
* Raw soot foil image next to traced soot foil
* Zoomed in trace with arrows to demonstrate measurements
Parameters
----------
cmap : str
Colormap to use for schlieren frame
image_height : float or None
Image height (in)
save : bool
Whether or not to save images
Returns
-------
"""
# settings
aspect_ratio = 2 # w/h
image_width = aspect_ratio * image_height
sf_scalebar = get_scale_bar(
SF_DELTA_PX,
SF_DELTA_MM,
cell_size=25.4,
)
# read in foil images
sf_img = sf_imread(os.path.join(SF_IMG_DIR, "square.png"))
sf_img_lines_thk = sf_imread(os.path.join(SF_IMG_DIR, "lines_thk.png"))
# display foil images
name = "soot_foil_images_main"
fig, ax = plt.subplots(1, 2, figsize=(image_width, image_height))
fig.canvas.set_window_title(name)
ax[0].imshow(sf_img, cmap=cmap)
ax[0].axis("off")
# ax[0].set_title("Soot Foil")
ax[1].imshow(sf_img_lines_thk, cmap=cmap)
ax[1].axis("off")
# ax[1].set_title("Traced Cells")
for a in ax:
a.add_artist(
copy(sf_scalebar),
)
plt.tight_layout()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
# read in zoomed lines
sf_img_lines_z = sf_imread(os.path.join(SF_IMG_DIR, "lines_zoomed.png"))
sf_img_lines_z = np.rot90(
np.rot90(sf_img_lines_z)
) # don't want to redo this
# plot zoomed lines
name = "soot_foil_lines_zoomed"
fig, ax = plt.subplots(figsize=(image_height, image_height))
fig.canvas.set_window_title(name)
ax.imshow(sf_img_lines_z, cmap=cmap)
plt.axis("off")
# plt.title("Traced Cells\n(Close-up)")
lines_scale = 900 / 330 # scaled up for quality
arrow_x = 160 * lines_scale
arrow_length = np.array([36, 32, 86, 52, 88, 35, 50]) * lines_scale
arrow_y_top = np.array([-10, 20, 46, 126, 172, 254, 282]) * lines_scale
n_arrows = len(arrow_length)
for i in range(n_arrows):
if i == 0:
arrowstyle = "-|>"
elif i == n_arrows - 1:
arrowstyle = "<|-"
else:
arrowstyle = "<|-|>"
arrow = patches.FancyArrowPatch(
(arrow_x, arrow_y_top[i]),
(arrow_x, arrow_y_top[i] + arrow_length[i]),
arrowstyle=arrowstyle,
mutation_scale=5,
linewidth=0.75,
color=COLOR_SF,
)
plt.gca().add_artist(arrow)
plt.tight_layout()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
# regular vs irregular
irreg_scalebar = get_scale_bar(
2268,
300,
cell_size=25.4,
)
irregular_image_path = os.path.join(
d_drive,
"Data",
"Processed",
"Soot Foil",
"foil images",
"2020-10-26",
"Shot 01",
"square.png",
)
img_irregular = sf_imread(irregular_image_path, plot=True)
img_regular = sf_imread(os.path.join(SF_IMG_DIR, "square_regular.png"))
# display foil images
name = "soot_foil_irregular_cells"
fig, ax = plt.subplots(1, 2, figsize=(image_width, image_height))
fig.canvas.set_window_title(name)
ax[0].imshow(img_regular, cmap=cmap)
ax[0].axis("off")
# ax[0].set_title("Regular")
ax[0].add_artist(copy(sf_scalebar))
ax[1].imshow(img_irregular, cmap=cmap)
ax[1].axis("off")
# ax[1].set_title("Irregular")
ax[1].add_artist(copy(irreg_scalebar))
plt.tight_layout()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
@timed
def soot_foil_px_cal_uncertainty(
plot_width,
plot_height,
save=False,
):
"""
Calculate soot foil pixel location uncertainty and plot measurement
distribution from repeatability test.
NOTE: this function modifies DF_SF_SPATIAL, therefore this should be run
before calculations referencing soot foil uncertainty!
Parameters
----------
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save images
Returns
-------
"""
# add pixel delta calibration precision uncertainty
# estimate using IMG_1983 (2020-12-27 Shot 03)
px_cal_deltas = np.array(
[
2344, # this is what is saved in the .xcf
2347,
2345,
2345,
2345,
2344,
2344,
2345,
2344,
2345,
]
)
u_px_cal_deltas = (
px_cal_deltas.std()
/ np.sqrt(len(px_cal_deltas))
* t.ppf(0.975, len(px_cal_deltas) - 1)
)
# calculate and apply new calibration pixel uncertainty
# existing measurement accounts for sqrt2 from delta
# this applies directly without that because it is a direct delta
# measurement
DF_SF_SPATIAL["u_delta_px"] = np.sqrt(
np.sum(
np.square(
np.array(
[
DF_SF_SPATIAL["u_delta_px"], # bias (preexisting)
u_px_cal_deltas, # precision (new)
]
)
)
)
)
    # no need to do this for the calibration mm uncertainty because it is a
    # direct ruler reading, not a measurement of an existing quantity with a
    # ruler (i.e. bias only)
name = "soot_foil_px_cal_uncertainty_distribution"
fig = plt.figure(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
sns.distplot(
px_cal_deltas,
hist=False,
color=COLOR_SF,
)
ax_ylim = plt.ylim()
plt.fill_between(
[
px_cal_deltas.mean() + u_px_cal_deltas,
px_cal_deltas.mean() - u_px_cal_deltas,
],
ax_ylim[0],
ax_ylim[1],
alpha=0.25,
color=COLOR_SF,
ec=None,
zorder=-1,
)
plt.axvline(
px_cal_deltas.mean(),
c=COLOR_SF,
ls="--",
alpha=0.7,
zorder=-1,
)
plt.ylim(ax_ylim)
# plt.title(
# "Soot Foil Pixel Calibration Distance\nRepeatability Distribution"
# )
plt.grid(False)
plt.xlabel("Ruler Distance (px)")
plt.ylabel("Probability\nDensity (1/px)")
sns.despine()
plt.tight_layout()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
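# A minimal sketch of the confidence-interval calculation used for u_px_cal_deltas above:
# the half-width is the standard error of the mean (numpy std, ddof=0, matching the code
# above) scaled by the two-sided 95% Student-t critical value t.ppf(0.975, n - 1).
def _example_mean_confidence_interval():
    import numpy as np
    from scipy.stats import t

    # the same repeatability readings listed in soot_foil_px_cal_uncertainty (px)
    samples = np.array([2344, 2347, 2345, 2345, 2345, 2344, 2344, 2345, 2344, 2345], dtype=float)
    n = len(samples)
    standard_error = samples.std() / np.sqrt(n)
    half_width = standard_error * t.ppf(0.975, n - 1)  # 95% two-sided half-width
    return samples.mean(), half_width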
def find_row_px_loc(row):
row_locs = np.where(row == 255)[0]
double_check = row_locs[
np.abs(np.diff([row_locs, np.roll(row_locs, -1)], axis=0)).flatten() > 1
]
if len(double_check):
meas = double_check[0]
else:
meas = row_locs[0]
return meas
def get_all_image_px_locs(img):
return np.apply_along_axis(find_row_px_loc, 1, img)
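# A small illustration (synthetic row, not from a real foil image) of how find_row_px_loc
# picks a single edge location per row: typically the last pixel of the first contiguous
# run of white (255) pixels, falling back to the first white pixel when no run boundary
# is detected (e.g. an isolated bright pixel).
def _example_find_row_px_loc():
    import numpy as np

    row = np.zeros(400, dtype=np.uint8)
    row[100:105] = 255  # main edge: contiguous run at columns 100-104
    row[300] = 255      # stray bright pixel further along the row
    assert find_row_px_loc(row) == 104

    single = np.zeros(400, dtype=np.uint8)
    single[42] = 255    # isolated bright pixel
    assert find_row_px_loc(single) == 42
    return True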
def soot_foil_px_delta_uncertainty():
# add measurement pixel location precision uncertainty
# estimate using IMG_1983 (2020-12-27 Shot 03)
images = funcs.post_processing.images.schlieren.find_images_in_dir(
os.path.join(
d_drive,
"Data",
"Processed",
"Soot Foil",
"foil images",
"2020-12-27",
"Shot 03",
"uncertainty",
),
".png",
)
img_size = io.imread(images[0]).shape[0] # get image size
n_repeatability_images = len(images)
repeatability_px_locs = (
np.ones(
(
img_size,
n_repeatability_images,
)
)
* np.NaN
)
for i, img_loc in enumerate(images):
img = io.imread(img_loc)
repeatability_px_locs[:, i] = get_all_image_px_locs(img)
# use max std of all rows as uncertainty estimate
u_px_delta_precision = (
np.std(
repeatability_px_locs,
axis=1,
).max()
/ np.sqrt(n_repeatability_images)
* t.ppf(
0.975,
n_repeatability_images - 1,
)
) * np.sqrt(2) # accounts for propagation in delta
u_px_delta_bias = 0.5 * np.sqrt(2) # accounts for propagation in delta
# calculate and apply new measurement pixel location precision uncertainty
uncert_total = np.sqrt(
np.sum(np.square(np.array([u_px_delta_bias, u_px_delta_precision])))
)
uncert = {
"bias": u_px_delta_bias,
"precision": u_px_delta_precision,
"total": uncert_total
}
return uncert
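# A minimal numeric sketch (arbitrary numbers) of the uncertainty bookkeeping above:
# a single-point location uncertainty u propagates through a two-point delta as
# u(a - b) = sqrt(u_a**2 + u_b**2) = u*sqrt(2) for equal u, and the bias and precision
# components are then combined in quadrature.
def _example_delta_uncertainty_combination():
    import numpy as np

    u_single_point = 0.5                   # px, uncertainty of one edge location
    u_delta = u_single_point * np.sqrt(2)  # px, uncertainty of a two-point delta
    assert np.isclose(u_delta, np.sqrt(u_single_point**2 + u_single_point**2))

    u_bias, u_precision = u_delta, 0.3     # px, arbitrary precision component
    return np.sqrt(u_bias**2 + u_precision**2)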
@timed
# noinspection PyUnresolvedReferences
def calculate_soot_foil_cell_size(
# n_schlieren_meas,
iqr_fencing,
estimator=np.mean,
use_cache=True,
save_cache=False,
):
"""
Calculates the mean cell size from soot foil images
Parameters
----------
# n_schlieren_meas : int
Number of schlieren measurements, which is used to trim down the data
set after outliers have been removed -- NOTE: this is being left in
in case it needs to be used again later, however the randomly selected
batch of measurements from the first time this was run has been
preserved and will be used for the sake of continuity.
iqr_fencing : bool
Remove outliers using IQR fencing?
estimator : func
Estimator function to use
use_cache : bool
Use cached data?
save_cache : bool
Overwrite cached data?
Returns
-------
    Tuple[np.array, float, dict, pd.DataFrame, np.array]
        * Per-delta measurements with uncertainties (mm)
        * Mean cell size (mm)
        * Cell size uncertainty breakdown: instrument, population, total (mm)
        * Tube data dataframe
        * All raw nominal measurements (mm)
"""
cache_file = os.path.join(
d_drive,
"Data",
"Processed",
"Data",
"soot_foil_measurement_study.h5",
)
uncert_delta_px = soot_foil_px_delta_uncertainty()
if use_cache:
with pd.HDFStore(cache_file, "r") as store:
all_meas = store.data["measurements"].values
all_total_uncerts = store.data["total_uncertainties"].values
all_cal_px_uncerts = store.data["u_cal_px"].values
all_cal_mm_uncerts = store.data["u_cal_mm"].values
else:
date_shot = (
# remove 4 at random
# np.random.choice(range(19), 4, False)
# Out[3]: array([16, 4, 5, 6])
# date, shot
("2020-11-12", 0),
("2020-11-13", 8),
("2020-11-23", 3),
# ("2020-11-23", 4),
# ("2020-11-23", 6),
# ("2020-11-23", 7),
("2020-11-24", 0),
("2020-11-24", 3),
("2020-11-24", 7),
("2020-11-25", 0),
("2020-12-20", 8),
("2020-12-21", 9),
("2020-12-27", 0),
("2020-12-27", 1),
("2020-12-27", 2),
# ("2020-12-27", 3),
("2020-12-27", 6),
("2020-12-27", 7),
("2020-12-27", 8),
)
u_d_px = uncert_delta_px["total"]
all_meas = []
all_total_uncerts = []
all_dates = []
all_shots = []
all_cal_mm_uncerts = []
all_cal_px_uncerts = []
all_n_deltas = np.ones(len(date_shot)) * np.NaN
for idx, (date, shot) in enumerate(date_shot):
cal_mm, cal_px, u_cal_mm, u_cal_px = DF_SF_SPATIAL[
(DF_SF_SPATIAL["date"] == date) &
(DF_SF_SPATIAL["shot"] == shot)
][["delta_mm", "delta_px", "u_delta_mm", "u_delta_px"]].values[0]
d_px = pp_deltas.get_px_deltas_from_lines(
os.path.join(
d_drive,
"Data",
"Processed",
"Soot Foil",
"foil images",
f"{date}",
f"Shot {shot:02d}",
"composite.png",
),
apply_uncertainty=False,
)
all_n_deltas[idx] = len(d_px)
all_cal_mm_uncerts.append(u_cal_mm)
all_cal_px_uncerts.append(u_cal_px)
# apply uncertainties
d_px = unp.uarray(d_px, u_d_px)
cal_mm = un.ufloat(cal_mm, u_cal_mm)
cal_px = un.ufloat(cal_px, u_cal_px)
# calculate!
d_mm = d_px * cal_mm / cal_px
all_meas.extend(list(unp.nominal_values(d_mm)))
all_total_uncerts.extend(list(unp.std_devs(d_mm)))
n_current_meas = len(d_mm)
all_dates.extend(list([date]*n_current_meas))
all_shots.extend(list([shot]*n_current_meas))
if save_cache:
df_meas = pd.DataFrame([
pd.Series(all_dates, name="date"),
pd.Series(all_shots, name="shot"),
pd.Series(all_meas, name="measurements"),
pd.Series(all_total_uncerts, name="total_uncertainties"),
pd.Series(all_cal_px_uncerts, name="u_cal_px"),
pd.Series(all_cal_mm_uncerts, name="u_cal_mm"),
]).T
with pd.HDFStore(cache_file, "w") as store:
store.put("data", df_meas)
measurements = unp.uarray(
all_meas,
all_total_uncerts,
)
meas_nominal = unp.nominal_values(measurements)
if iqr_fencing:
# remove outliers
mean = meas_nominal.mean()
std = meas_nominal.std()
meas_mask = (meas_nominal <= mean + std * 1.5) & (
meas_nominal >= mean - std * 1.5
)
measurements = measurements[meas_mask]
meas_nominal = meas_nominal[meas_mask]
del mean, std # don't accidentally reuse these!
# scale to match number of samples with schlieren
# reduced_indices = sorted(np.random.choice(
# np.arange(len(measurements)),
# n_schlieren_meas,
# replace=False,
# ))
# reduced_indices = [0, 1, 3, 5, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]
#
# measurements = measurements[reduced_indices]
# meas_nominal = meas_nominal[reduced_indices]
# date_shot_index = pd.MultiIndex.from_tuples(date_shot)[reduced_indices]
# read in data
with pd.HDFStore("/d/Data/Processed/Data/data_soot_foil.h5", "r") as store:
df_tube = store.data.set_index(["date", "shot"], drop=True)
# # trim down to only dates/shots currently in use
# df_tube = df_tube.loc[date_shot_index]
# collect population uncertainty
n_measurements = len(measurements)
    if n_measurements % 2 == 0:
# an even numbered dataset means that median is the mean of the two
# middle numbers. This drops the uncertainty, which is rude. Since
# this is a huge dataset with a small uncertainty, this is done to be
# conservative.
n_measurements -= 1
measurements = measurements[1:]
cell_size_meas = estimator(measurements)
cell_size_uncert_population = (
meas_nominal.std()
/ np.sqrt(n_measurements)
* t.ppf(0.975, n_measurements - 1)
)
# combine uncertainties
cell_size_uncert = np.sqrt(
np.sum(np.square([cell_size_uncert_population, cell_size_meas.std_dev]))
)
uncertainty = {
"instrument":
{
"triple_point_delta_px": uncert_delta_px,
"cal_px": np.mean(all_cal_px_uncerts),
"cal_mm": np.mean(all_cal_mm_uncerts),
"total_mm": cell_size_meas.std_dev,
},
"population": cell_size_uncert_population,
"total": cell_size_uncert,
}
return (
measurements,
cell_size_meas.nominal_value,
uncertainty,
df_tube,
all_meas,
)
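# A toy sketch (all numbers invented) of the pixel-to-mm conversion used above,
# d_mm = d_px * cal_mm / cal_px, showing how the `uncertainties` package propagates the
# pixel-delta and calibration uncertainties through the conversion.
def _example_uncertainty_propagation():
    import uncertainties as un
    from uncertainties import unumpy as unp

    d_px = unp.uarray([150.0, 160.0, 155.0], 1.2)  # triple-point deltas (px) +/- u
    cal_mm = un.ufloat(300.0, 0.5)                 # ruler distance (mm)
    cal_px = un.ufloat(2345.0, 1.0)                # same distance in pixels
    d_mm = d_px * cal_mm / cal_px                  # propagated cell-size deltas (mm)
    return unp.nominal_values(d_mm), unp.std_devs(d_mm)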
@timed
def plot_soot_foil_measurement_distribution(
measurements,
cell_size_meas,
cell_size_uncert,
plot_width,
plot_height,
save=False,
):
"""
Parameters
----------
measurements : np.array
Nominal cell size measurements (mm)
cell_size_meas : float
Nominal mean cell size measurement (mm)
cell_size_uncert : float
Cell size uncertainty (mm)
plot_width : float
Width of plot (in)
plot_height : float
Height of plot (in)
save : bool
Whether or not to save images
Returns
-------
"""
name = "soot_foil_measurement_distribution"
fig, ax = plt.subplots(figsize=(plot_width, plot_height))
fig.canvas.set_window_title(name)
sns.distplot(
measurements,
hist=False,
# rug=True,
ax=ax,
color=COLOR_SF,
)
ax.axvline(
cell_size_meas,
color=COLOR_SF,
ls="--",
alpha=0.7,
zorder=-1,
)
ax_ylim = ax.get_ylim()
ax.fill_between(
[cell_size_meas + cell_size_uncert, cell_size_meas - cell_size_uncert],
ax_ylim[0],
ax_ylim[1],
alpha=0.25,
color=COLOR_SF,
ec=None,
zorder=-1,
)
ax.set_ylim(ax_ylim)
ax.set_xlabel("Cell Size (mm)")
ax.set_ylabel("Probability Density\n(1/mm)")
# ax.set_title("Soot Foil Measurement Distribution")
ax.grid(False)
plt.xlim([0, 100])
sns.despine()
plt.tight_layout()
if save:
plt.savefig(
os.path.join(SAVE_LOC, f"{name}.{PLOT_FILETYPE}"),
dpi=DPI,
)
@timed
def perform_soot_foil_measurement_study(
plot_width,
plot_height,
save,
uncert_pct,
):
cache_file = os.path.join(
d_drive,
"Data",
"Processed",
"Data",
"soot_foil_measurement_study.h5",
)
with
|
pd.HDFStore(cache_file, "r")
|
pandas.HDFStore
|
import argparse
import os
import pathlib
import random
import timeit
import numpy as np
import pandas as pd
def stdin(n_repeat, n_number, i) -> list:
tmp = pathlib.Path('tmp')
for _ in range(n_repeat * n_number):
os.system('python stdin.py {} < data/N100000.txt >> {}'.format(i, tmp))
with tmp.open() as f:
data = [
sum([float(f.readline()) for _ in range(n_number)]) / n_number
for _ in range(n_repeat)
]
os.remove('tmp')
return data
def sort1(A):
A.sort()
def sort2(A):
sorted(A)
def sort3(A):
A.sort(key=lambda x: x[1])
def sort4(A):
from operator import itemgetter
A.sort(key=itemgetter(1))
def sort5(A):
sorted(A, key=lambda x: x[1])
def sort6(A):
from operator import itemgetter
sorted(A, key=itemgetter(1))
def loop1(N):
for _ in range(N):
pass
def loop2(N):
for i in range(N):
i
def loop3(N):
i = 0
while i < N:
i += 1
def loop4(A):
for i in range(len(A)):
A[i]
def loop5(A):
for a in A:
a
def list1(N):
[None] * N
def list2(N):
[None for _ in range(N)]
def list6(N):
[[None] * N for _ in range(N)]
def list7(N):
[[None for _ in range(N)] for _ in range(N)]
def list3(N):
A = []
for i in range(N):
A.append(i)
def list4(N):
A = [None] * N
for i in range(N):
A[i] = i
def list5(N):
[i for i in range(N)]
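# The original main() (truncated below) records timings into a DataFrame; as a minimal,
# hedged sketch of the timing itself, any of the candidates above can be timed directly
# with timeit.repeat, e.g. list5:
def _example_time_list5(n=10_000, n_repeat=5, n_number=100):
    import timeit

    times = timeit.repeat(lambda: list5(n), repeat=n_repeat, number=n_number)
    return min(times) / n_number  # best time per single call, in seconds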
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--n_repeat', type=int, default=10)
parser.add_argument('--n_number', type=int, default=10)
parser.add_argument('--out', default='record.csv')
args = parser.parse_args()
record =
|
pd.DataFrame(columns=['time', 'exp', 'func', 'param1', 'param2'])
|
pandas.DataFrame
|
"""
The train and predict script.
This script uses datasets, omegaconf and transformers libraries.
Please install them in order to run this script.
Usage:
$python train.py -config_dir ./configs/bert-base
"""
import argparse
import json
import os
import pickle as pkl
from collections import Counter
import numpy as np
import pandas as pd
import torch
from omegaconf import OmegaConf
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
Trainer,
TrainingArguments,
)
from datasets import load_metric
from src.datasets import *
from src.models import *
from src.utils.mapper import configmapper
from src.utils.misc import seed, tokenize
f1_metric = load_metric("f1")
acc_metric = load_metric("accuracy")
def compute_metrics(eval_pred):
predictions, labels = eval_pred
predictions = np.argmax(predictions, axis=1)
# return {
# "f1": f1_metric.compute(
# predictions=predictions, references=labels, average="weighted"
# ),
# "acc": acc_metric.compute(predictions=predictions, references=labels),
# }
return f1_metric.compute(
predictions=predictions, references=labels, average="macro"
)
class MyEncoder(json.JSONEncoder):
"""Class to convert NumPy stuff to JSON-writeable."""
def default(self, obj):
"""Convert NumPy stuff to regular Python stuff.
Args:
obj (object): Object to be converted.
Returns:
object: Converted object.
"""
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
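# Small usage sketch (arbitrary values): plain json.dumps cannot serialize NumPy scalars
# or arrays, but passing cls=MyEncoder converts them to regular Python types first.
def _example_my_encoder_usage():
    payload = {"f1": np.float64(0.87), "preds": np.array([0, 2, 1])}
    return json.dumps(payload, cls=MyEncoder)  # '{"f1": 0.87, "preds": [0, 2, 1]}'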
dirname = os.path.dirname(__file__)
## Config
parser = argparse.ArgumentParser(
prog="train.py", description="Train a model and predict."
)
parser.add_argument(
"-config_dir",
type=str,
action="store",
help="The configuration for training",
default=os.path.join(dirname, "./configs/bert-base"),
)
parser.add_argument(
"--only_predict",
action="store_true",
help="Whether to just predict, or also train",
default=False,
)
parser.add_argument(
"--load_predictions",
action="store_true",
help="Whether to load_predictions from raw_predictions_file or predict from scratch",
default=False,
)
args = parser.parse_args()
train_config = OmegaConf.load(os.path.join(args.config_dir, "train.yaml"))
dataset_config = OmegaConf.load(os.path.join(args.config_dir, "dataset.yaml"))
seed(train_config.args.seed)
# Load datasets
print("### Loading Datasets ###")
if "bert" in train_config.model_name:
train_dataset = configmapper.get("datasets", dataset_config.dataset_name)(
**dataset_config.train
)
test_dataset = configmapper.get("datasets", dataset_config.dataset_name)(
**dataset_config.test
)
else:
train_df =
|
pd.read_csv(dataset_config.train.file_path)
|
pandas.read_csv
|
from binance import Client, ThreadedWebsocketManager, ThreadedDepthCacheManager
import pandas as pd
import datetime
api_key = "<KEY>"
api_secret = "<KEY>"
client = Client(api_key, api_secret)
def get_pd_daily_histo(pair, since):
##### get historical data
historical = client.get_historical_klines(pair, Client.KLINE_INTERVAL_1DAY, since)
hist_df = pd.DataFrame(historical)
hist_df.columns = ['Open_Time', 'Open', 'High', 'Low', 'Close', 'Volume', 'Close_Time', 'Quote_Asset_Volume',
'Number_of_Trades', 'TB_Base_Volume', 'TB_Quote_Volume', 'Ignore']
hist_df = hist_df.drop(['Quote_Asset_Volume', 'TB_Base_Volume', 'TB_Quote_Volume','Ignore'], axis=1)
hist_df['Open_Time'] = pd.to_datetime(hist_df['Open_Time']/1000, unit='s')
hist_df['Close_Time'] = pd.to_datetime(hist_df['Close_Time']/1000, unit='s')
numeric_columns = ['Open', 'High', 'Low', 'Close', 'Volume']
hist_df[numeric_columns] = hist_df[numeric_columns].apply(pd.to_numeric, axis=1)
return(hist_df)
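# Quick sketch (arbitrary timestamp) of the kline time conversion used above: Binance
# returns open/close times as millisecond Unix epochs, so dividing by 1000 and passing
# unit='s' yields proper datetimes.
def _example_kline_time_conversion():
    ms_epoch = pd.Series([1609459200000])             # 2021-01-01 00:00:00 UTC, in ms
    return pd.to_datetime(ms_epoch / 1000, unit='s')  # -> 2021-01-01 00:00:00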
def get_pd_hourly_histo(pair, since):
##### get historical data
historical = client.get_historical_klines(pair, Client.KLINE_INTERVAL_1HOUR, since)
hist_df =
|
pd.DataFrame(historical)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 15 15:38:35 2021
@author: fionnlagh
"""
import sys
import matplotlib
#matplotlib.use('Agg')
matplotlib.use('TkAgg') # revert above
import matplotlib.pyplot as plt
import os
import numpy as np
import glob
from pathlib import Path
import matplotlib.pyplot as plt
import pickle
import pandas as pd
import math
from scipy.interpolate import griddata
from scipy.optimize import curve_fit
sys.path.append("/home/fionnlagh/forked_amrvac/amrvac/tools/python")
import amrvac_pytools as apt
def dis_2_grid(slice_height, physical_length, resoltion):
# translate ypts height into index value
convert = resoltion/physical_length
return int(round(slice_height*convert))
def grid_2_dis(physical_length, resoltion, clip_domain, positon):
    # convert index position to physical length
    # physical_length of domain - [x,y]
    # resolution of domain - [x,y]
    # clip_domain - resolution of the smaller clipped region, used to return the correct x value
    # position of interest to convert into physical units - [x,y]
physical_length = np.asarray(physical_length)
resoltion = np.asarray(resoltion)
positon = np.asarray(positon)
mid_pt_x = round(clip_domain[0]/2)
    # redefine zero point to jet centre
positon[0] -= mid_pt_x
convert = physical_length/resoltion
return positon*convert
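# A small round-trip sketch of the two helpers above; the numbers mirror the DOMIAN,
# xres and yres values defined later in this script but are otherwise arbitrary.
def _example_grid_conversions():
    physical_length = [5e9, 3e9]  # cm, full domain extent in x and y
    resolution = [4096, 2944]     # grid points in x and y
    clip_domain = [800, 2944]     # shape of a clipped sub-region
    y_index = dis_2_grid(1.5e9, physical_length[1], resolution[1])  # height -> index
    xy_physical = grid_2_dis(physical_length, resolution, clip_domain, [400, y_index])
    return y_index, xy_physical   # x comes back as 0 because 400 is the clipped mid-point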
def side_pts_of_jet_dt(clipped_data, slice_height, DOMIAN, shape):
    # gets the side points of the jet at a specified height for td plot
    # clipped_data - data set to scan
    # slice_height - physical value of where to take the slice in the x dir
    # DOMIAN - physical length of whole domain (before clipping)
    # shape - shape of whole domain before clipping
clip_domain = np.shape(clipped_data)
# pick up the sides of jets
xslice_idex = dis_2_grid(slice_height, DOMIAN[1], shape[1])
x_slice = clipped_data[:, xslice_idex]
if np.sum(x_slice) == 0:
jet_sides_index1 = None
jet_sides_index2 = None
side_values1 = None
side_values2 = None
else:
indexs_x = np.nonzero(x_slice)
jet_sides_index1 = [min(indexs_x[0]), xslice_idex]
jet_sides_index2 = [max(indexs_x[0]), xslice_idex]
side_values1 = grid_2_dis(DOMIAN, shape, clip_domain, jet_sides_index1)
side_values2 = grid_2_dis(DOMIAN, shape, clip_domain, jet_sides_index2)
return jet_sides_index1, jet_sides_index2, side_values1, side_values2
def side_pts(clipped_data, slice_height, DOMIAN, shape):
    # gets the side points of the jet at a specified height for td plot
    # clipped_data - data set to scan
    # slice_height - index value of where to take the slice in the x dir
    # DOMIAN - physical length of whole domain (before clipping)
    # shape - shape of whole domain before clipping
clip_domain = np.shape(clipped_data)
x_slice = clipped_data[:, slice_height]
if np.sum(x_slice) == 0:
jet_sides_index1 = None
jet_sides_index2 = None
side_values1 = None
side_values2 = None
else:
indexs_x = np.nonzero(x_slice)
jet_sides_index1 = [min(indexs_x[0]), slice_height]
jet_sides_index2 = [max(indexs_x[0]), slice_height]
side_values1 = grid_2_dis(DOMIAN, shape, clip_domain, jet_sides_index1)
side_values2 = grid_2_dis(DOMIAN, shape, clip_domain, jet_sides_index2)
return jet_sides_index1, jet_sides_index2, side_values1, side_values2
def side_pts_of_jet(clipped_data, jet_height, nb_pts, DOMIAN, shape):
    # gets multiple pts on the jet sides at one instance of time
    # clipped_data - data set to scan
    # jet_height - index value of the jet height
    # DOMIAN - physical length of whole domain (before clipping)
    # shape - shape of whole domain before clipping
indexs_of_slices = np.linspace(0, jet_height[1], nb_pts, dtype=int)
js_idx_x = []
js_val_x = []
js_idx_y = []
js_val_y = []
for idx in indexs_of_slices:
dumvar1,dumvar2,dumvar3,dumvar4 = side_pts(clipped_data, idx, DOMIAN, shape)
        # restructuring data
# all x values
js_idx_x.append(dumvar1[0])
js_idx_x.append(dumvar2[0])
js_val_x.append(dumvar3[0])
js_val_x.append(dumvar4[0])
# all y data
js_idx_y.append(dumvar1[1])
js_idx_y.append(dumvar2[1])
js_val_y.append(dumvar3[1])
js_val_y.append(dumvar4[1])
    return js_idx_x, js_idx_y, js_val_x, js_val_y
def data_slice(data, xranges, yranges, yh_index):
# will produce slices for dt plotting
clipped_data = data[xranges[0]:xranges[-1],
yranges[0]:yranges[-1]]
slice_data = clipped_data[:,yh_index]
return slice_data
def angle_cal(A,B):
# A & B are (x_n,y_n)
Y1 = max(A[1],B[1])
Y2 = min(A[1],B[1])
x1, x2 = A[0], B[0]
m = A-B
if m[1]/m[0] < 0:
theta = -np.arccos((Y1-Y2)/(np.sqrt((x1-x2)**2+(Y1-Y2)**2)))+2*np.pi
else:
theta = np.arccos((Y1-Y2)/(np.sqrt((x1-x2)**2+(Y1-Y2)**2)))
return theta
def func(x, m, c):
    # linear model for curve_fit: scipy passes the x data first, then the fit parameters
    return m*x + c
def LoBf(xy_data):
    # returns a linear line of best fit
x = xy_data[:,0]
y = xy_data[:,1]
    # a degenerate x range gives a single value, which would crash the angle function
if min(x)-max(x) == 0:
angle=0
else:
xline = np.linspace(min(x),max(x),100)
popt, pcov = curve_fit(func, x, y)
yline = func(xline, *popt)
if max(yline)-min(yline) < 1e-6:
angle = 0
else:
start_pt, end_pt = np.asarray((xline[0], yline[0])), \
np.asarray((xline[-1], yline[-1]))
angle = angle_cal(start_pt, end_pt)
# print(x, y, yline, start_pt, end_pt)
# print('angle', angle)
return angle
def vec_angle(A,B,C):
a = B-A
b = C-B
unit_vec_A = a/np.linalg.norm(a)
unit_vec_C = b/np.linalg.norm(b)
# print(A,B,C, a, b, np.linalg.norm(a),np.linalg.norm(b))
dot_product = np.dot(unit_vec_A, unit_vec_C)
theta = np.arccos(dot_product)
# print(dot_product, np.arccos(dot_product), math.degrees(np.arccos(dot_product)))
return theta
def distance_cal(A, B):
# A = (x1, y1)
# B = (x2, y2)
# print('NUMBERS!!')
# print(A[0]-B[0], B[1]-B[1],(A[0]-B[0])**2+(B[1]-B[1])**2)
return np.sqrt((A[0]-B[0])**2+(A[1]-B[1])**2)
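# Quick sketch of the geometry helpers above (arbitrary points): distance_cal is a plain
# Euclidean distance and vec_angle returns the turning angle, in radians, at B for the
# path A -> B -> C.
def _example_geometry_helpers():
    A = np.array([0.0, 0.0])
    B = np.array([3.0, 4.0])
    C = np.array([3.0, 10.0])
    d = distance_cal(A, B)      # 5.0 for the 3-4-5 triangle
    theta = vec_angle(A, B, C)  # angle between (B - A) and (C - B)
    return d, math.degrees(theta)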
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 16
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
path_2_shared_drive = '/run/user/1001/gvfs/smb-share:server=uosfstore.shef.ac.uk,share=shared/mhd_jet1/User/smp16fm/j'
dir_paths = glob.glob('../T/P300/B60/A60/T30*')
#dir_paths = glob.glob('../T/P300/B80/A80/T5*')
#dir_paths = glob.glob('../T/P300/B60/A60/T15*')
#dir_paths = dir_paths[-8:]
#dir_paths = [dir_paths[-7]]
#dir_paths = dir_paths[-8:-4]
#dir_paths = glob.glob('../T/P*/B50/A*/T*')
#dir_paths = glob.glob('../2D/P*/T*')
#dir_paths = glob.glob('../hight_dt/P*/B*/A*')
#dir_paths = [dir_paths[-3]]
# constants
unit_length = 1e9 # cm
DOMIAN = [5*unit_length, 3*unit_length]
unit_temperature = 1e6 # K
unit_numberdensity = 1e9 # cm^-3
g_cm3_to_kg_m3 = 1e3
dyne_cm2_to_Pa = 1e-1
cm_to_km = 1e-5
cm_to_Mm = 1e-8
s_to_min = 1/60
unit_density = 2.3416704877999998E-015
unit_velocity = 11645084.295622544
unit_pressure = 0.31754922400000002
unit_magenticfield = 1.9976088799077159
unit_time = unit_length/unit_velocity
dt = unit_time/20
unit_mass = unit_density*unit_length**3
unit_specific_energy = (unit_length/unit_time)**2
# options
testing = True
plotting_on = False
data_save = False
# NOTE: if name already there it will append to file
max_h_data_fname = 'max_h_data_sj_p2.dat'
big_data_fname = 'big_data_set_sj_p2.dat'
td_plotting = False
td_plot_root_folder = 'td_plots_data_sj/'
td_file_name = 'data.csv'
stop_height_condition = 1e8 #5e6 # 5 cells high
stop_indx_condition = 120 # 24 # ~50s which is min driver time
thresh_hold = 20 #0.4
central_axis_tracking = True
c_data_root = 'c_data/'
central_axis_step_size = 0.1 # Mm
# abandoned methods
method_1 = False
method_2 = False
# chosen method
method_3 = True
method_4 = False
x_pad = 1/2 # Mm
y_pad = 0.75/2 # Mm
dummy_dis = 0
#t0 = 1
#t1 = 80
#nb_steps = 80
t0 = 10
t1 = 20
nb_steps = 2
xres = 4096
yres = 2944
physical_grid_size_xy = DOMIAN/np.array([xres, yres])*cm_to_Mm
peak_hi = 0
big_data = []
data = []
big_data_indexs = []
for path in dir_paths:
FIRST = True
data_c_first = True
JL_data_first = True
h_check = 0
path_parts = path.split('/')
path_parts = path_parts[2:]
path_numerics = np.zeros(len(path_parts))
for j, item in enumerate(path_parts):
# print(item)
path_numerics[j] = float(item[1:])
#old style
# full_paths = glob.glob(path+'/jet_'+path_parts[1]+'_'+path_parts[-1]+'_*.vtu')
full_paths = glob.glob(path + '/jet_'+path_parts[0] + '_'+path_parts[1] +
'_' + path_parts[2] + '_' + path_parts[3] +
'_*.vtu')
# skip first step as no value
# full_paths = full_paths[1:]
# testing breaks in code
full_paths = full_paths[68:69]
# full_paths = full_paths[36:40]
sub_data_1 = []
sub_data_2 = []
physical_time = []
for ind, path_element in enumerate(full_paths):
Full_path = path_2_shared_drive + path_element[2:-8]
ti = int(path_element[-8:-4])
# Reading vtu file, allows to set custom grid poitns
ds0 = apt.load_vtkfile(ti, file=Full_path, type='vtu')
data0 = apt.vtkfiles.rgplot(ds0.trp1, data=ds0, cmap='hot')
plt.close()
var_tr_data, x_grid0, y_grid0 = data0.get_data(xres=xres, yres=yres)
bin_data = np.where(var_tr_data < 15, 0, 1)
# dims in [y,x]
shape = np.shape(bin_data)
        # This mid point doesn't correspond to the jet centre
if FIRST == True:
# These don't work for first time step
indexs_x = np.nonzero(bin_data[:, 0])[0]
mid_pt_x = int(round((min(indexs_x) + (max(indexs_x) -
min(indexs_x))/2)))
clip_range_x = round(0.1*shape[0])
scan_range_x = [mid_pt_x-clip_range_x, mid_pt_x+clip_range_x]
mid_pt_y = round(shape[1]/2)
clip_range_y = round(0.2*shape[1])
scan_range_y = [0, mid_pt_y+clip_range_y]
x_extent = np.asarray(scan_range_x)*physical_grid_size_xy[0] - \
2.547205e+09*cm_to_Mm
cf = sum(x_extent)/2
x_extent -= cf
y_extent = np.asarray(scan_range_y)*physical_grid_size_xy[1]
x_pad_dex_size = int(np.ceil(x_pad/physical_grid_size_xy[0]))
            y_pad_dex_size = int(np.ceil(y_pad / physical_grid_size_xy[0]))
FIRST = False
# clips data around jet
sorted_data = bin_data[scan_range_x[0]:scan_range_x[-1],
scan_range_y[0]:scan_range_y[-1]]
        # all indices that belong to the jet
indexs = np.nonzero(sorted_data)
# index for top of jet
jet_top_index = np.argmax(indexs[1])
jet_top_pixel_pos = [indexs[0][jet_top_index],
indexs[1][jet_top_index]]
        # need to fix x position as its zero point is not at jet centre
values = grid_2_dis(DOMIAN, shape, np.shape(sorted_data),
jet_top_pixel_pos)
# top
height_x = values[0]*cm_to_Mm
height_y = values[1]*cm_to_Mm
physical_time = ti*dt
sub_data_1.append((physical_time, height_y))
# stops the loop for creating data
if (height_y < stop_height_condition*cm_to_Mm and
ind > stop_indx_condition):
break
# plot side values every 1 Mm interval
for hi in range(1, int(np.floor(height_y))+1):
# print(hi, h_check, h_check-hi, h_check-hi<0)
if h_check-hi < 0:
td_first = True
h_check = hi
if hi > peak_hi:
peak_hi = hi
slice_height = hi/cm_to_Mm
jet_sides_index1, jet_sides_index2, val1, val2 = side_pts_of_jet_dt(sorted_data, slice_height, DOMIAN, shape)
if jet_sides_index1 == None:
pass
else:
dis_x = (val2[0]-val1[0])*cm_to_km
dis_y = slice_height*cm_to_Mm
side_xL = val1[0]*cm_to_km
side_xR = val2[0]*cm_to_km
# side_y.append(slice_height*cm_to_Mm)
# side_y.append(slice_height*cm_to_Mm)
side_time = ti*dt
sub_data_2.append((side_time, dis_x, dis_y,
side_xL, side_xR))
                # This won't work for tilted jets or any asymmetries; it's a quick fix
if testing == True:
plt.scatter((jet_sides_index1[0]+scan_range_x[0])*physical_grid_size_xy[0]-2.547205e+09*cm_to_Mm-cf,
jet_sides_index1[1]*physical_grid_size_xy[1],
s=40, color='blue')
plt.scatter((jet_sides_index2[0]+scan_range_x[0])*physical_grid_size_xy[0]-2.547205e+09*cm_to_Mm-cf,
jet_sides_index2[1]*physical_grid_size_xy[1],
s=40, color='blue')
if td_plotting == True:
td_save_path = td_plot_root_folder + \
full_paths[ind].split('/')[-1][:-9] + \
'/'+str(hi)+'Mm'
Path(td_save_path).mkdir(parents=True, exist_ok=True)
height_km = dis_y
height_index = jet_sides_index2[1]
datarho = apt.vtkfiles.rgplot(ds0.rho, data=ds0,
cmap='hot')
plt.close()
datarho, dummy_x, dummy_y = datarho.get_data(xres=xres,
yres=yres)
rho_slice = data_slice(datarho, scan_range_x, scan_range_y,
height_index)*g_cm3_to_kg_m3
datarho = []
dataTe = apt.vtkfiles.rgplot(ds0.T, data=ds0, cmap='hot')
plt.close()
dataTe, dummy_x, dummy_y = dataTe.get_data(xres=xres,
yres=yres)
Te_slice = data_slice(dataTe, scan_range_x, scan_range_y,
height_index)
dataTe = []
dataVx = apt.vtkfiles.rgplot(ds0.v1, data=ds0, cmap='hot')
plt.close()
dataVx, dummy_x, dummy_y = dataVx.get_data(xres=xres,
yres=yres)
Vx_slice = data_slice(dataVx, scan_range_x, scan_range_y,
height_index)*cm_to_km
dataVx = []
dataVy = apt.vtkfiles.rgplot(ds0.v2, data=ds0, cmap='hot')
dataVy, dummy_x, dummy_y = dataVy.get_data(xres=xres,
yres=yres)
plt.close()
Vy_slice = data_slice(dataVy, scan_range_x, scan_range_y,
height_index)*cm_to_km
dataVy = []
td_xvales = [scan_range_x[0]*physical_grid_size_xy[0] -
2.547205e+09*cm_to_Mm - cf,
scan_range_x[1]*physical_grid_size_xy[0] -
2.547205e+09*cm_to_Mm-cf]
td_xranges = np.linspace(td_xvales[0], td_xvales[1],
len(Vy_slice))
df_td = pd.DataFrame(np.transpose(np.array([np.ones(len(Vy_slice))*ti*dt, td_xranges, rho_slice, Te_slice, Vx_slice, Vy_slice])),
columns=['time [s]', 'x [Mm]', 'density [kg m-3]', 'Te [k]', 'vx [km s-1]', 'vy [km s-1]'])
if td_first == True:
# print('writting')
df_td.to_csv(td_save_path+'/'+td_file_name,
index=False,
columns=['time [s]', 'x [Mm]',
'density [kg m-3]',
'Te [k]', 'vx [km s-1]',
'vy [km s-1]'])
td_first = False
else:
df_td.to_csv(td_save_path+'/'+td_file_name, mode='a',
index=False, header=None)
# test putting side tracking here
# need to add more points to using above
if central_axis_tracking == True:
if height_y < 1:
pass
else:
# +1 ensures endpts remain
nb_step = int((height_y/central_axis_step_size)+1)
hi_locs = np.linspace(0, height_y, nb_step,
endpoint=True)/cm_to_Mm
central_pts = []
central_sides = []
for c_pts in hi_locs:
cjet_sides_index1, cjet_sides_index2, cval1, cval2 = side_pts_of_jet_dt(sorted_data, c_pts, DOMIAN, shape)
if cjet_sides_index1 is not None:
central_sides.append((cjet_sides_index1,
cjet_sides_index2))
central_pts.append(np.add(cjet_sides_index1,
cjet_sides_index2)//2)
else:
print('Cenrtal axis pt missed')
continue
central_pts = np.reshape(central_pts, np.shape(central_pts))
if testing == True:
# plt.plot(central_pts[:,0], central_pts[:,1])
plt.scatter((central_pts[:, 0]+scan_range_x[0])*physical_grid_size_xy[0]
-2.547205e+09*cm_to_Mm-cf,
central_pts[:, 1]*physical_grid_size_xy[1],
s=40, color='yellow', marker="*", zorder=3)
# need to calc length of jet
central_pts_phy = np.zeros(np.shape(central_pts))
central_pts_phy[:, :1] = (central_pts[:, :1]+scan_range_x[0])*physical_grid_size_xy[0]-2.547205e+09*cm_to_Mm-cf
central_pts_phy[:, -1:] = central_pts[:, -1:]*physical_grid_size_xy[1]
                # vectorization of the distance_cal func
p2p_dis = np.sqrt(np.sum((central_pts_phy[:-1]-central_pts_phy[1:])**2,
axis=1))
p2p_dis_array = np.zeros([np.size(p2p_dis), 3])
p2p_dis_array[:, 1:] = central_pts_phy[1:]
for i in range(1, len(p2p_dis)):
p2p_dis_array[i, 0] = sum(p2p_dis[:i+1])
# jet_length = sum(p2p_dis)
jet_length = p2p_dis_array[-1][0]
# print(jet_length)
if data_save == True:
df_JL_data = pd.DataFrame([[jet_length, physical_time]],
columns=['Jet length [Mm]',
'Time [s]'])
if JL_data_first:
data_c_save_path = c_data_root+full_paths[ind].split('/')[-1][:-9]
Path(data_c_save_path).mkdir(parents=True,
exist_ok=True)
df_JL_data.to_csv(data_c_save_path+'/'+full_paths[ind].split('/')[-1][:-9]+'_'+'df_jl.csv',
index = False,
columns=['Jet length [Mm]',
'Time [s]'])
JL_data_first = False
else:
df_JL_data.to_csv(data_c_save_path+'/'+full_paths[ind].split('/')[-1][:-9]+'_'+'df_jl.csv',
mode='a', index=False, header=None)
#-------------------------------------------
if method_1 == True:
# trying method of avg angles
for hi_indx in range(1,len(central_pts)-1):
p1,p2 = angle_cal(central_pts[hi_indx-1],
central_pts[hi_indx]), \
angle_cal(central_pts[hi_indx],
central_pts[hi_indx+1])
perp_avg_tilt = np.mean([p1,p2])-np.pi/2
m_grad = 1/np.tan(perp_avg_tilt)
const = central_pts[hi_indx][1] - \
m_grad*central_pts[hi_indx][0]
x_slit = np.linspace(0, clip_range_x*2, 50)
line = m_grad*x_slit+const
if testing == True:
plt.plot((x_slit+scan_range_x[0])*physical_grid_size_xy[0]-2.547205e+09*cm_to_Mm-cf,
line*physical_grid_size_xy[1], 'r-')
# ------------------------------------------------------
# method 2: middle angles
if method_2 == True:
for hi_indx in range(1, len(central_pts)-1):
vec_A = central_pts[hi_indx-1]
vec_B = central_pts[hi_indx]
vec_C = central_pts[hi_indx+1]
vec_ang = vec_angle(vec_A, vec_B, vec_C)
width_angle = vec_ang/2
m_grad = np.tan(width_angle)
const = vec_B[1]-m_grad*vec_B[0]
x_slit = np.linspace(0, clip_range_x*2, 50)
line = m_grad*x_slit+const
if testing == True:
plt.plot((x_slit+scan_range_x[0])*physical_grid_size_xy[0]-2.547205e+09*cm_to_Mm-cf,
line*physical_grid_size_xy[1], 'b--')
# ------------------------------------------------------
# method 3: top angles
if method_3 == True:
for hi_indx in range(1, int(np.floor(height_y))+1):
current_x_pad_dex_size = x_pad_dex_size
current_y_pad_dex_size = y_pad_dex_size
# +1 matches it with central_pts as 1 element is lost with dis calc
c_index = np.argmin(abs(p2p_dis_array[:, 0]-hi_indx))+1
                    # if the value falls at the top of the array the angle can't be calculated
if c_index+1 >= len(p2p_dis_array):
pass
else:
p1 = angle_cal(central_pts[c_index], central_pts[c_index+1])
perp_avg_tilt = p1-np.pi/2
m_grad = 1/np.tan(perp_avg_tilt)
# current method
const = central_pts[c_index][1] - \
m_grad*central_pts[c_index][0]
z_line_switches = [0]
# makes sure that more than 1 edge is detected
while_count = 0
while sum(np.abs(z_line_switches)) < 2:
print(while_count)
while_count += 1
# defines search region
x_search = (central_sides[c_index][0][0] -
current_x_pad_dex_size,
central_sides[c_index][1][0] +
current_x_pad_dex_size)
y_search = (central_sides[c_index][0][1] -
current_y_pad_dex_size,
central_sides[c_index][0][1] +
current_y_pad_dex_size)
# grid in phy units
points = np.array((y_grid0[scan_range_x[0]+x_search[0]:scan_range_x[0]+x_search[1],
y_search[0]:y_search[1]].flatten(),
x_grid0[scan_range_x[0]+x_search[0]:scan_range_x[0]+x_search[1],
y_search[0]:y_search[1]].flatten())).T*cm_to_Mm
values = (bin_data[scan_range_x[0]+x_search[0]:scan_range_x[0]+x_search[1],
y_search[0]:y_search[1]]).flatten()
line_dis_phy = np.sqrt(((x_search[0]-x_search[-1])*physical_grid_size_xy[0]-2.547205e+09*cm_to_Mm-cf)**2+((y_search[0]-y_search[-1])*physical_grid_size_xy[1])**2)
nb_pts_for_line = int(line_dis_phy//0.05)
x_slit = np.linspace(x_search[0], x_search[1],
nb_pts_for_line)
x_slit_phy = (x_slit+scan_range_x[0])*physical_grid_size_xy[0] - \
2.547205e+09*cm_to_Mm-cf
line = m_grad*x_slit+const
line_phy = line*physical_grid_size_xy[1]
xi = np.array(list(zip(line_phy, x_slit_phy)))
z_line_vale = griddata(points, values, xi)
# z_line_vale = z_line_vale[~np.isnan(z_line_vale)]
z_line_vale[np.where(np.isnan(z_line_vale))] = 0
z_line_vale = np.where(z_line_vale < 1, 0, 1)
z_line_switches = np.diff(z_line_vale)
# expand search area
if sum(np.abs(z_line_switches)) < 2:
# print('while not broken', sum(np.abs(z_line_switches)))
current_x_pad_dex_size += 5
current_y_pad_dex_size += 5
continue
# print('while will be broken', sum(np.abs(z_line_switches)))
                        # make sure only 2 pts are selected
LR_edge_fix = np.argwhere(abs(z_line_switches) > 0)
LR_edge_fix_index = [np.min(LR_edge_fix), np.max(LR_edge_fix)]
spatial_locs_widths = xi[LR_edge_fix_index]
                        # old method resulted in > 2 pts
                        # z_line_side_indexs = np.argwhere(abs(z_line_switches)>0)
                        # spatial_locs_widths = xi[z_line_side_indexs][:,0]
                        # xi = [[y1,x1],[y2,x2]]
                        # result will be in Mm
tilt_widths = distance_cal(spatial_locs_widths[0],
spatial_locs_widths[1])
data_c = np.asarray((float(path_parts[0][1:]),
float(path_parts[1][1:]),
float(path_parts[2][1:]),
float(path_parts[3][1:]),
p2p_dis_array[c_index][0],
p2p_dis_array[c_index][-1],
tilt_widths,
physical_time,
p2p_dis_array[-1][0],
p2p_dis_array[-1][-1]))
if data_save == True:
df_dc = pd.DataFrame([data_c],
columns=['Driver time [s]',
'Magnetic field strength [B]',
'Amplitude [km/s]',
'Tilt angle [degree]',
'Jet length [Mm]',
'Jet height [Mm]',
'Jet width [Mm]',
'Time [s]',
'Max len [Mm]',
'Max height [Mm]'])
if data_c_first == True:
# print('writting')
data_c_save_path = c_data_root+full_paths[ind].split('/')[-1][:-9]
Path(data_c_save_path).mkdir(parents=True, exist_ok=True)
df_dc.to_csv(data_c_save_path+'/'+full_paths[ind].split('/')[-1][:-9]+'_'+td_file_name,
index=False, columns=['Driver time [s]',
'Magnetic field strength [B]',
'Amplitude [km/s]',
'Tilt angle [degree]',
'Jet length [Mm]',
'Jet height [Mm]',
'Jet width [Mm]',
'Time [s]',
'Max len [Mm]',
'Max height [Mm]'])
data_c_first = False
else:
df_dc.to_csv(data_c_save_path+'/'+full_paths[ind].split('/')[-1][:-9]+'_'+td_file_name,
mode='a', index=False, header=None)
if testing == True:
                        # Physical grid checking
                        # Issue with grid alignment due to how yt wrote the data, most likely caused by the stretched grids.
                        # Widths are correctly measured but are shifted leftward due to the difference in physical value between index pts of the grid and the line
extra_cf = (x_grid0[:,0][scan_range_x[0]+x_search[0]])*cm_to_Mm-min(x_slit_phy)
plt.scatter(spatial_locs_widths[:,1:]-extra_cf,spatial_locs_widths[:,:-1], color='red', marker='s', zorder=2)
# plt.plot((x_slit+scan_range_x[0])*physical_grid_size_xy[0]-2.547205e+09*cm_to_Mm-cf,line*physical_grid_size_xy[1], 'g-o')
cmap = 'gray'
# plt.imshow(np.rot90(var_tr_data[scan_range_x[0]:scan_range_x[-1], scan_range_y[0]:scan_range_y[-1]]), cmap=cmap, extent = [x_extent[0], x_extent[1], y_extent[0],y_extent[1]])
# plt.plot(x_slit_phy,line_phy, 'g-', zorder=1)
# test to purely size slice area
plt.plot(x_slit_phy-extra_cf,line_phy, 'g-', zorder=1)
plt.imshow(np.rot90(bin_data[scan_range_x[0]:scan_range_x[-1], scan_range_y[0]:scan_range_y[-1]]),
cmap=cmap, extent=[x_extent[0], x_extent[1], y_extent[0],y_extent[1]])
plt.imshow(np.rot90(bin_data[scan_range_x[0]+x_search[0]:scan_range_x[0]+x_search[1], y_search[0]:y_search[1]]),
cmap='Spectral', extent=[(scan_range_x[0]+x_search[0])*physical_grid_size_xy[0]-2.547205e+09*cm_to_Mm-cf,
(scan_range_x[0]+x_search[1])*physical_grid_size_xy[0]-2.547205e+09*cm_to_Mm-cf,
y_search[0]*physical_grid_size_xy[1],y_search[1]*physical_grid_size_xy[1]])
#
plt.xlim(-1,1)
plt.ylim(0,2)
plt.xlabel('Y [Mm]')
plt.ylabel('X [Mm]')
plt.savefig('sharc_run/fig_for_paper/example_of_tilt_jet_code.png', dpi=200, bbox_inches='tight')
plt.show()
if method_4 == True:
for hi_indx in range(1,int(np.floor(height_y))+1):
current_x_pad_dex_size = x_pad_dex_size
current_y_pad_dex_size = y_pad_dex_size
# +1 matches it with central_pts as 1 element is lost with dis calc
c_index = np.argmin(abs(p2p_dis_array[:,0]-hi_indx))+1
pts_of_influence = 3 # need to be moved but here for convenice
if (c_index + pts_of_influence >= len(p2p_dis_array)) and (c_index-pts_of_influence<0):
pass
else:
p1 = LoBf(central_pts[c_index-pts_of_influence:c_index+pts_of_influence])
perp_avg_tilt = p1-np.pi/2
m_grad = 1/np.tan(perp_avg_tilt)
# current method
const = central_pts[c_index][1]-m_grad*central_pts[c_index][0]
z_line_switches = [0]
# makes sure that more than 1 edge is detected
while_count = 0
while sum(np.abs(z_line_switches)) < 2:
print(while_count)
while_count += 1
# defines search region
x_search = (central_sides[c_index][0][0]-current_x_pad_dex_size,
central_sides[c_index][1][0]+current_x_pad_dex_size)
y_search = (central_sides[c_index][0][1]-current_y_pad_dex_size,
central_sides[c_index][0][1]+current_y_pad_dex_size)
# grid in phy units
points = np.array((y_grid0[scan_range_x[0]+x_search[0]:scan_range_x[0]+x_search[1],
y_search[0]:y_search[1]].flatten(),
x_grid0[scan_range_x[0]+x_search[0]:scan_range_x[0]+x_search[1],
y_search[0]:y_search[1]].flatten())).T*cm_to_Mm
values = (bin_data[scan_range_x[0]+x_search[0]:scan_range_x[0]+x_search[1],
y_search[0]:y_search[1]]).flatten()
line_dis_phy = np.sqrt(((x_search[0]-x_search[-1])*physical_grid_size_xy[0]-2.547205e+09*cm_to_Mm-cf)**2+((y_search[0]-y_search[-1])*physical_grid_size_xy[1])**2)
nb_pts_for_line = int(line_dis_phy//0.05)
x_slit = np.linspace(x_search[0], x_search[1],
nb_pts_for_line)
x_slit_phy = (x_slit+scan_range_x[0])*physical_grid_size_xy[0]-2.547205e+09*cm_to_Mm-cf
line = m_grad*x_slit + const
line_phy = line*physical_grid_size_xy[1]
xi = np.array(list(zip(line_phy, x_slit_phy)))
z_line_vale = griddata(points, values, xi)
z_line_vale[np.where(np.isnan(z_line_vale))] = 0
z_line_vale = np.where(z_line_vale<1, 0, 1)
z_line_switches = np.diff(z_line_vale)
# expand search area
if sum(np.abs(z_line_switches)) < 2:
# print('while not broken', sum(np.abs(z_line_switches)))
current_x_pad_dex_size += 5
current_y_pad_dex_size += 5
continue
# print('while will be broken', sum(np.abs(z_line_switches)))
                        # make sure only 2 pts are selected
LR_edge_fix = np.argwhere(abs(z_line_switches) > 0)
LR_edge_fix_index = [np.min(LR_edge_fix), np.max(LR_edge_fix)]
spatial_locs_widths = xi[LR_edge_fix_index]
                        # result will be in Mm
tilt_widths = distance_cal(spatial_locs_widths[0],
spatial_locs_widths[1])
if testing == True:
# Physical grid checking
                            # Issue with grid alignment due to how yt wrote the data, most likely caused by the stretched grids.
                            # Widths are correctly measured but are shifted leftward due to the difference in physical value between index pts of the grid and the line
extra_cf = (x_grid0[:,0][scan_range_x[0]+x_search[0]])*cm_to_Mm-min(x_slit_phy)
plt.scatter(spatial_locs_widths[:,1:]-extra_cf,spatial_locs_widths[:,:-1], color='pink', marker='P', zorder=2)
# test to purely size slice area
plt.plot(x_slit_phy-extra_cf, line_phy, 'c:', zorder=2)
if testing == True:
# testing
cmap = 'gray'
plt.scatter((jet_top_pixel_pos[0]+scan_range_x[0])*physical_grid_size_xy[0]-2.547205e+09*cm_to_Mm-cf,
jet_top_pixel_pos[1]*physical_grid_size_xy[1], s=40, color='red')
# image
# plt.imshow(sorted_data, cmap=cmap)
plt.imshow(np.rot90(var_tr_data[scan_range_x[0]:scan_range_x[-1], scan_range_y[0]:scan_range_y[-1]]),
cmap=cmap, extent=[x_extent[0], x_extent[1],
y_extent[0], y_extent[1]])
plt.xlim(-1, 1)
# plt.xlim(-1,1)
plt.ylim(0, 8)
plt.gca().set_aspect(0.5, adjustable='box')
plt.xlabel('x (Mm)')
plt.ylabel('y (Mm)')
plt.colorbar()
plt.show()
#saves testing image
# Path('image_check/'+full_paths[ind].split('/')[-1][:-9]).mkdir(parents=True, exist_ok=True)
# plt.savefig('image_check/'+full_paths[ind].split('/')[-1][:-9]+'/jet_P'+str(int(path_numerics[0]))+'_B' +
# str(int(path_numerics[1])) +
# 'A_' + str(int(path_numerics[2])) +
# 'T_'+str(ti).zfill(4) + '.png',
# format='png', dpi=500)
# plt.clf()
# data frame to nest data in
df_sub1 = (pd.DataFrame(sub_data_1, columns=['time [s]', 'Height [Mm]'],
index = [i for i in range(len(sub_data_1))]))
df_sub2 = (pd.DataFrame(sub_data_2,
columns=['side time [s]', 'jet Width [km]',
'height [Mm]', 'jet side left [km]',
'jet side right [km]'],
index = [i for i in range(len(sub_data_2))]))
# big_data.append(pd.concat([df_sub1, df_sub2], axis=1))
# big_data_indexs.append(path_numerics.astype(int)) # first data set
# data.append(np.hstack([path_numerics,max(sub_data_1, key=lambda x: float(x[1]))[1]]))
# df = pd.DataFrame(data, columns=['driver time [s]',
# 'magnetic field strength [G]',
# 'amplitude [km s-1]',
# 'Tilt [deg]',
# 'max height [Mm]'],
# index = [i for i in range(np.shape(data)[0])])
big_data = pd.concat([df_sub1, df_sub2], axis=1)
big_data_indexs = path_numerics.astype(int) # first data set
df_collect = pd.DataFrame([{'idx': big_data_indexs, 'dfs': big_data}])
data = np.hstack([path_numerics, max(sub_data_1, key=lambda x: float(x[1]))[1]])
df = pd.DataFrame([data], columns=['driver time [s]',
'magnetic field strength [G]',
'amplitude [km s-1]',
'Tilt [deg]',
'max height [Mm]'])
if data_save == True:
if os.path.exists(max_h_data_fname):
data_max_h_t0 = pd.read_pickle(max_h_data_fname)
# print('I add to file ' + max_h_data_fname + '!!!!!!!!!!' )
dummy_max_h = data_max_h_t0.append(df, ignore_index=True)
dummy_max_h.to_pickle(max_h_data_fname)
else:
# print('I made file ' + max_h_data_fname + '!!!!!!!!!!')
df.to_pickle(max_h_data_fname)
if os.path.exists(big_data_fname):
big_data_t0 =
|
pd.read_pickle(big_data_fname)
|
pandas.read_pickle
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 00:13:06 2020
@author: sahand
"""
from rake_nltk import Rake
import pandas as pd
import re
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
st = set(stopwords.words('english'))
path = '/home/sahand/GoogleDrive/Data/'
# data_address = path+"Corpus/AI 4k/copyr_deflem_stopword_removed_thesaurus May 28/1990-2019/1990-2019 abstract_title"
data_address = path+"Corpus/AI 4k/copyr_deflem_stopword_removed_thesaurus May 28/1990-2019/1990-2019 abstract_title"
df1 = pd.read_csv(data_address,names=['abstract'])
labels = pd.read_csv(path+'Corpus/AI 4k/embeddings/clustering/k10/Doc2Vec patent_wos_ai corpus DEC 200,500,10 k10 labels')
df1['label'] = labels['label']
corpus = []
for cluster in df1.groupby('label').groups:
corpus.append( ' '.join(df1[df1['label']==cluster]['abstract'].values.tolist()))
# =============================================================================
# TFIDF
# =============================================================================
all_keys = []
all_keyscores = []
for cor in corpus:
text = cor
text = text.replace('.',' ')
text = re.sub(r'\s+',' ',re.sub(r'[^\w \s]','',text) ).lower()
corpus_n = re.split('chapter \d+',text)
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform(corpus_n)
names = vectorizer.get_feature_names()
data = vectors.todense().tolist()
# Create a dataframe with the results
df = pd.DataFrame(data, columns=names)
df = df[filter(lambda x: x not in list(st) , df.columns)]
N = 10;
keys = []
keyscores = []
for i in df.iterrows():
keyscores.append(i[1].sort_values(ascending=False)[:N].values.tolist())
keys.append(list(i[1].sort_values(ascending=False)[:N].index))
all_keys.append(keys)
all_keyscores.append(keyscores)
all_keys_df = pd.DataFrame(np.array(all_keys).squeeze())
all_keys_df.to_csv()  # NOTE: a target path should be passed here; without one to_csv() only returns the CSV text
# =============================================================================
# Rake -- won't work with long text
# =============================================================================
r = Rake()
r.extract_keywords_from_text(corpus[0])
r.get_ranked_phrases()
#%%# ==========================================================================
# From taxonomy
#
# The issue is that too many possible words are in the kw list, like "mind" or "eye". These are correct, but out of context.
# We have to be too specific if we want to rely on author keywords, like the kw from the 4 AI journals.
# =============================================================================
import numpy as np
import pandas as pd
import re
from tqdm import tqdm
from sciosci.assets import text_assets as kw
from sciosci.assets import keyword_dictionaries as kd
from gensim.parsing.preprocessing import strip_multiple_whitespaces
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from multiprocessing import Pool
range_s,range_e = 400000,480000
stops = ['a','an','we','result','however','yet','since','previously','although','propose','proposed','this','...']
stop_words = list(set(stopwords.words("english")))+stops
path = '/home/sahand/GoogleDrive/Data/'
keywords = list(set(pd.read_csv(path+'Corpus/Taxonomy/TAI Taxonomy.csv',sep='===',names=['keyword'])['keyword'].values.tolist()))
keywords = keywords+list(set(pd.read_csv(path+'Corpus/Taxonomy/AI ALL Scopus n>2')['keywords'].values.tolist()))
keywords = keywords+list(set(pd.read_csv(path+'Corpus/Taxonomy/CSO.3.3-taxonomy.csv')['keywords'].values.tolist()))
# lemmatize
keywords = [kw.string_pre_processing(x,stemming_method='None',lemmatization='DEF',stop_word_removal=True,stop_words_extra=stops,verbose=False,download_nltk=False) for x in tqdm(keywords)]
keywords = [x for x in tqdm(keywords) if len(x)>2]
keywords = [kw.replace_british_american(strip_multiple_whitespaces(kw.replace_british_american(strip_multiple_whitespaces(keyword),kd.gb2us)),kd.gb2us) for keyword in tqdm(keywords)]
keywords = [k.strip().lower() for k in tqdm(keywords)]
keywords = np.array(keywords)
# pub_idx = pd.read_csv(path+'Corpus/Dimensions AI unlimited citations/clean/publication idx')[:]
abstracts = pd.read_csv(path+'Corpus/Dimensions All/clean/abstract_title method_b')[range_s:range_e]
idx = abstracts.index
pub_idx = abstracts[['id']]
abstracts = abstracts['abstract'].values.tolist()
tmp = []
for ab in tqdm(abstracts):
try:
tmp.append(kw.replace_british_american(strip_multiple_whitespaces(kw.replace_british_american(strip_multiple_whitespaces(ab),kd.gb2us)),kd.gb2us))
except:
tmp.append('')
abstracts = tmp
# abstracts = [kw.replace_british_american(strip_multiple_whitespaces(kw.replace_british_american(strip_multiple_whitespaces(ab),kd.gb2us)),kd.gb2us) for ab in tqdm(abstracts)]
# pd.DataFrame(keywords).to_csv(path+'Corpus/Taxonomy/AI kw merged US',index=False,header=False)
# =============================================================================
# abstract = word_tokenize(abstract)
# abstract = [word for word in abstract if not word in stop_words]
#
# extraction = [word for word in abstract if word in keywords]
# matches = [keyword in abstract for keyword in keywords]
# selection = keywords[matches]
# =============================================================================
abstracts_s = [strip_multiple_whitespaces(' '+(' '.join(re.split('( |,|\.|\!|\?|\(|\))',abstract)))+' ') for abstract in tqdm(abstracts)]
keywords_s = [' '+keyword+' ' for keyword in tqdm(keywords)]
def extract(abstracts,keywords):
pubkeywords = []
errors = []
for i,abstract in tqdm(enumerate(abstracts),total=len(abstracts)):
try:
pubkeywords.append([x for x in keywords if x in abstract])
except:
pubkeywords.append([])
errors.append(i)
print('errors:'+str(errors))
return pubkeywords,errors
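# Toy sketch of extract(): it is a plain padded-substring match of every keyword against
# every abstract, so both inputs must be wrapped in spaces the same way abstracts_s and
# keywords_s are built above. Strings below are invented.
def _example_extract():
    toy_abstracts = [' this paper studies deep learning for robot control ',
                     ' a survey of fuzzy logic methods ']
    toy_keywords = [' deep learning ', ' fuzzy logic ', ' genetic algorithm ']
    found, errs = extract(toy_abstracts, toy_keywords)
    # found == [[' deep learning '], [' fuzzy logic ']], errs == []
    return found, errs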
extracted,errors = extract(abstracts_s,keywords_s)
extracted = list(extracted)
extracted_df = [str(list(row))[1:-1] for row in extracted]
extracted_df = pd.DataFrame(extracted_df)
extracted_df.index = idx
extracted_df['id'] = pub_idx
extracted_df.columns = ['kw','id']
extracted_df.to_csv(path+'Corpus/Dimensions All/clean/kw from taxonomy/keyword US p-'+str(int(range_s/80000)),header=True)
#%% ===========================================================================
# # concat multiple parts
# =============================================================================
import pandas as pd
import numpy as np
from tqdm import tqdm
tqdm.pandas()
path = '/home/sahand/GoogleDrive/Data/'
def retract_check(string):
try:
if string.startswith('retracted'):
return True
except:
pass
return False
abstracts =
|
pd.read_csv(path+'Corpus/Dimensions All/clean/abstract_title method_b')
|
pandas.read_csv
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ...pvtpy.black_oil import Pvt,Oil,Water,Gas
from scipy.optimize import root_scalar
from .inflow import OilInflow, GasInflow
from ...utils import intercept_curves
from typing import Union
## Incompressible pressure drop
def potential_energy_change(
z1:Union[int,float]=None,
z2=None,
delta_z=None,
length=None,
ge=1,
angle=None,
inc=None,
p1=0):
"""potential_energy_change [ Δp PE accounts for the pressure change due to the weight of the column of fluid (the hydrostatic head); it
will be zero for flow in a horizontal pipe.
In this equation, Δz is the difference in elevation between positions 1 and 2, with z increasing upward. θ
is defined as the angle between horizontal and the direction of flow. Thus, θ is +90° for upward, vertical
flow, 0° for horizontal flow, and –90° for downward flow in a vertical well (Figure 7-4). For flow in a
straight pipe of length L with flow direction θ,]
Parameters
----------
z1 : [type], optional
[description], by default None
z2 : [type], optional
[description], by default None
delta_z : [type], optional
[description], by default None
length : [type], optional
[description], by default None
ge : int, optional
[description], by default 1
angle : [type], optional
[description], by default None
inc : [type], optional
[description], by default None
p1 : int, optional
[description], by default 0
Returns
-------
[type]
[description]
"""
# Assert height difference types
if delta_z is None:
if length is None:
assert isinstance(z1,(float,int,np.ndarray,np.int64,np.float64)) and isinstance(z2,(float,int,np.ndarray,np.int64,np.float64)), f"{type(z1)} {type(z2)}"
z1 = np.atleast_1d(z1)
z2 = np.atleast_1d(z2)
#assert z1.shape == (1,) and z2.shape == (1,)
delta_z = z1-z2
else:
assert isinstance(length,(float,int,np.ndarray,np.int64,np.float64))
length = np.atleast_1d(length)
#assert length.shape == (1,)
if angle is None:
assert isinstance(inc,(float,int,np.ndarray,np.int64,np.float64))
inc = np.atleast_1d(inc)
assert inc <= 90 and inc >= -90
sign = np.sign(inc)
angle = (90 - np.abs(inc)) * sign
else:
# Assert angle between -90 and 90
assert isinstance(angle,(float,int,np.ndarray,np.int64,np.float64))
angle = np.atleast_1d(angle)
assert angle <= 90 and angle >= -90
delta_z = length * np.sin(np.radians(angle))
else:
assert isinstance(delta_z,(float,int,np.ndarray,np.int64,np.float64))
delta_z = np.atleast_1d(delta_z)
#assert delta_z.shape == (1,)
#Assert ge be positive
assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0, f"{ge} {type(ge)} not allowed"
#Calculate Delta P
delta_p = 0.433 * ge * delta_z
#Calculate P2
p2 = p1 + delta_p
return delta_p, p2
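# Hedged usage sketch (made-up numbers): the 0.433 factor above is the fresh-water
# hydrostatic gradient in psi/ft, so delta_z is taken here to be in ft, ge is the
# specific gravity relative to water, and the result is in psi (unit assumption inferred
# from the constant, not stated explicitly in the docstring).
def _example_potential_energy_change():
    # 1,000 ft of elevation gain for a fluid with specific gravity 0.85, from 150 psi
    dp, p2 = potential_energy_change(delta_z=1000, ge=0.85, p1=150)
    return dp, p2  # dp = 0.433 * 0.85 * 1000 = 368.05 psi (returned as length-1 arrays)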
def kinetic_energy_change(d1=None,d2=None, ge=1,rate=None,p1=0):
"""
Δp KE is the pressure drop resulting from a change in the velocity of the fluid between positions 1 and 2.
It will be zero for an incompressible fluid unless the cross-sectional area of the pipe is different at the
two positions of interest.
Petroleum Production Systems, Economides. Chapter 7 7.2.3.2. Δp KE, the Pressure Drop Due to Kinetic Energy Change. Page 172
"""
assert isinstance(d1,(float,int,np.ndarray,np.int64,np.float64)) and isinstance(d2,(float,int,np.ndarray,np.int64,np.float64))
d1 = np.atleast_1d(d1)
d2 = np.atleast_1d(d2)
#Assert Specifi Gravity be positive
assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0
ge = np.atleast_1d(ge)
# Rate in bbl/d
assert isinstance(rate,(float,int,np.ndarray,np.int64,np.float64)) and rate>=0
rate = np.atleast_1d(rate)
#Estimate Density in lb/ft3
rho = 62.4 * ge
#Estimate delta Pressure in psi
delta_p = 1.53e-8 * np.power(rate,2) * rho * ((1/np.power(d1,4))-(1/np.power(d2,4)))
p2 = p1 + delta_p
return delta_p, p2
def reynolds_number(rate,rho,d,mu):
"""
    Reynolds number where q is in bbl/d, ρ in lbm/ft3, D in in., and μ in cp.
"""
nre = (1.48 * rate * rho) / (d * mu)
return nre
def frictional_pressure_drop(
rate=None,
epsilon=0.001,
ge=1,
d=None,
mu=1,
length=None):
# Rate in bbl/d
assert isinstance(rate,(float,int,np.ndarray,np.int64,np.float64)) and rate>=0
rate = np.atleast_1d(rate)
# pipe relative roughness
assert isinstance(epsilon,(float,int,np.ndarray,np.int64,np.float64))
epsilon = np.atleast_1d(epsilon)
    # Assert specific gravity is positive
assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0
ge = np.atleast_1d(ge)
assert isinstance(d,(float,int,np.ndarray,np.int64,np.float64))
d = np.atleast_1d(d)
assert isinstance(mu,(float,int,np.ndarray,np.int64,np.float64))
mu = np.atleast_1d(mu)
assert isinstance(length,(float,int,np.ndarray,np.int64,np.float64))
length = np.atleast_1d(length)
#Estimate Density in lb/ft3
rho = 62.4 * ge
#Reynolds Number
nre = reynolds_number(rate,rho,d,mu)
#Friction Factor
if nre == 0:
ff = 0
else:
ff = np.power((1/(-4*np.log10((epsilon/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)
#Velocity ft/s
u = (4*rate*5.615)/(np.pi*np.power(d/12,2)*86400)
delta_p = (2 * ff * rho * np.power(u,2) * length)/(32.17 * (d/12) * 144)
delta_p *= -1
return delta_p
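# Illustrative usage sketch (added for clarity; not part of the original module): the
# frictional loss for a water-like fluid (ge = 1, mu = 1 cp) moving 2000 bbl/d through
# 5000 ft of 2.441-in pipe with the default relative roughness. All values are assumptions.
def _example_frictional_pressure_drop():
    dp = frictional_pressure_drop(rate=2000, ge=1.0, d=2.441, mu=1.0, length=5000)
    return dp  # negative by convention: pressure is lost to friction along the pipe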
def one_phase_pressure_profile(
p1=0,
ge=1,
epsilon=0.001,
md=None,
tvd=None,
d = None,
rate = None,
mu=None,
backwards=1
):
assert isinstance(md,(int,float,list,np.ndarray))
md = np.atleast_1d(md)
if tvd is None:
tvd = md
else:
assert isinstance(tvd,(int,float,list,np.ndarray))
tvd = np.atleast_1d(tvd)
assert isinstance(d,(int,float,list,np.ndarray))
if isinstance(d,(int,float)):
d = np.full(md.shape,d)
else:
d = np.atleast_1d(d)
assert isinstance(rate,(int,float, np.ndarray))
rate = np.atleast_1d(rate)
assert isinstance(mu,(int,float, np.ndarray))
mu = np.atleast_1d(mu)
assert isinstance(p1,(int,float, np.ndarray))
p1 = np.atleast_1d(p1)
assert isinstance(ge,(int,float, np.ndarray))
ge = np.atleast_1d(ge)
assert isinstance(epsilon,(int,float, np.ndarray))
epsilon = np.atleast_1d(epsilon)
assert md.shape[0] == tvd.shape[0] == d.shape[0]
n = md.shape[0]
#Create arrays
pressure = np.zeros(n)
ppe = np.zeros(n)
pke = np.zeros(n)
pf = np.zeros(n)
delta_p = np.zeros(n)
gradient = np.zeros(n)
pressure[0] = p1
for i in range(1,n):
#Potential Energy Change
ppe[i], _ = potential_energy_change(
z1=tvd[i-1],
z2=tvd[i],
ge= ge,
)
#Kinetic Energy Change
pke[i], _ = kinetic_energy_change(
d1=d[i-1],
d2=d[i],
rate=rate,
ge=ge,
)
#Frictional Pressure drop
pf[i] = frictional_pressure_drop(
rate=rate,
epsilon=epsilon,
ge=ge,
d=d[i],
mu=mu,
length=np.abs(md[i-1]-md[i])
) * backwards
delta_p[i] = ppe[i] + pke[i] + pf[i]
pressure[i] = pressure[i-1] + delta_p[i]
gradient[i] = (pressure[i] - pressure[i-1])/np.abs(tvd[i] - tvd[i-1])
# Create dataframe
pressure_profile = pd.DataFrame({
'md':md,
'tvd':tvd,
'diameter':d,
'pressure':pressure,
'ppe': ppe,
'pke': pke,
'pf' : pf,
'delta_p': delta_p,
'gradient': gradient
}).set_index('md')
p2 = pressure[-1]
return pressure_profile, p2
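# Illustrative usage sketch (added for clarity; not part of the original module): a
# single-phase traverse over a 5000 ft measured-depth string of 2.441-in pipe carrying a
# water-like fluid. Every input below is an assumed example value, not project data.
def _example_one_phase_pressure_profile():
    md = [0, 1000, 2000, 3000, 4000, 5000]
    profile, p_end = one_phase_pressure_profile(
        p1=200, ge=1.0, epsilon=0.001, md=md, d=2.441, rate=1000, mu=1.0)
    # profile is a DataFrame indexed by md with ppe/pke/pf breakdowns; p_end is the
    # pressure computed at the last node.
    return profile, p_end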
## Gas Outflow functions
def gas_pressure_profile_correlation(thp,sg,depth):
assert isinstance(thp,(float,int,np.ndarray,np.int64,np.float64))
thp = np.atleast_1d(thp)
assert thp.ndim == 1
assert isinstance(sg,(float,int,np.ndarray,np.int64,np.float64))
sg = np.atleast_1d(sg)
assert sg.shape == (1,)
assert isinstance(depth,(list,float,int,np.ndarray))
depth = np.atleast_1d(depth)
    assert depth.ndim == 1
pwf = thp*np.exp(3.47e-5*depth)
return pwf
def gas_pressure_profile(
md = None,
inc = None,
thp = None,
rate = None,
gas_obj = None,
di=2.99,
surf_temp=80,
temp_grad=1,
epsilon = 0.0006,
tol = 0.05,
max_iter=20):
"""
To calculate the pressure drop in a gas well, the compressibility of the fluid must be considered. When
the fluid is compressible, the fluid density and fluid velocity vary along the pipe, and these variations
must be included when integrating the mechanical energy balance equation.
Petroleum Production Systems, Economides. Chapter 7 7.3. Single-Phase Flow of a Compressible, Newtonian Fluid. Page 175
"""
# Assert the right types and shapes for input
assert isinstance(md, (np.ndarray,pd.Series))
md = np.atleast_1d(md)
assert md.ndim ==1
assert isinstance(inc, (int,float,np.ndarray,pd.Series))
if isinstance(inc,np.ndarray):
assert inc.shape == md.shape
else:
inc = np.full(md.shape,inc)
angle = np.radians(90 - inc)
assert isinstance(thp, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'
thp = np.atleast_1d(thp)
assert thp.shape == (1,)
assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None
assert isinstance(di, (int,float,np.ndarray))
if isinstance(di,np.ndarray):
assert di.shape == md.shape
else:
di = np.full(md.shape,di)
assert isinstance(rate, (int,float,np.ndarray))
rate = np.atleast_1d(rate)
assert rate.shape == (1,)
assert gas_obj.sg is not None
#Create the variables
pressure_profile = np.zeros(md.shape)
temperature_profile = np.zeros(md.shape)
pressure_gradient = np.zeros(md.shape)
pressure_profile[0] = thp
temperature_profile[0] = surf_temp
    iterations = np.zeros(md.shape)
if gas_obj.chromatography is not None:
df_rho = gas_obj.chromatography.get_rhog(p=thp,t=surf_temp, rhog_method='real_gas')
else:
df_rho = gas_obj.pvt.interpolate(thp,property='rhog')
grad_guess = df_rho['rhog'].values*(0.433/62.4)
#Loop over depth
for i in range(1,md.shape[0]):
err = tol + 0.01
dz = np.sin(angle[i])*(md[i]-md[i-1])
gas_sg = gas_obj.sg
it = 0
while err>= tol and it <= max_iter:
p_guess = grad_guess*(md[i]-md[i-1])*np.sin(angle[i]) + pressure_profile[i-1]
#Interpolate pvt
df_pvt = gas_obj.pvt.interpolate(p_guess)
#Reynolds Number
#nre = (4*28.97*gas_obj.sg*rate*14.7)/(np.pi*di[i]*df_pvt['mug'].values*10.73*520)
nre = 20.09*(gas_sg*rate)/(di[i]*df_pvt['mug'].values)
#Friction Factor
friction = np.power((1/(-4*np.log10((epsilon/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)
#Temperature
temperature_profile[i] = dz * (temp_grad/100) + temperature_profile[i-1]
#S
s = (-0.0375*gas_obj.sg*dz)/(df_pvt['z'].values*(temperature_profile[i]+460))
            # Calculate the next pressure in parts for readability
a = np.exp(-s) * np.power(pressure_profile[i-1],2)
b = (friction*np.power(df_pvt['z'].values*(temperature_profile[i]+460)*rate,2))/(np.sin(angle[i])*np.power(di[i],5))
c = 1 - np.exp(-s)
p_new = np.sqrt(a - (2.685e-3*b*c))
grad_new = (p_new - pressure_profile[i-1])/dz
err = np.abs(grad_guess-grad_new)/grad_new
grad_guess = grad_new
it +=1
pressure_gradient[i] = grad_new
pressure_profile[i] = p_new
        iterations[i] = it
df_dict = {
'pressure':pressure_profile,
'pressure_gradient': pressure_gradient,
'temperature': temperature_profile,
        'iterations': iterations
}
df = pd.DataFrame(df_dict, index = md)
pwf = pressure_profile[-1]
return df, pwf
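# Hedged usage note (added for clarity; not part of the original source). gas_pressure_profile
# needs a pvtpy Gas object whose ``pvt`` table and ``sg`` are already populated; constructing
# that object is outside the scope of this sketch, so the call is shown in comments only, with
# every number an assumed placeholder (``my_gas`` is a hypothetical, pre-built Gas instance):
#
#   profile_df, pwf = gas_pressure_profile(
#       md=pd.Series([0.0, 2500.0, 5000.0]),   # measured depth points
#       inc=0,                                 # constant deviation applied to every md point
#       thp=500,                               # tubing-head pressure used as the starting node
#       rate=1500,                             # gas rate; units follow the Reynolds-number constant above
#       gas_obj=my_gas)
#
# The returned DataFrame is indexed by md and carries pressure, pressure_gradient, temperature,
# and iteration-count columns; pwf is the pressure at the deepest node.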
def gas_upward_pressure(
md = None,
inc = None,
pwf = None,
rate = None,
gas_obj = None,
di=2.99,
surf_temp=80,
temp_grad=1,
epsilon = 0.0006,
tol = 0.05,
max_iter=20,
guess=None,
grad_guess = [0.02,0.05]
):
if guess is None:
grad = np.atleast_1d(grad_guess)
delta_h = np.abs(md[-1] - md[0])
guess = pwf - grad * delta_h
else:
assert isinstance(guess,(list,np.ndarray))
guess = np.atleast_1d(guess)
def solve(x):
_,_pwf = gas_pressure_profile(
md = md,
inc = inc,
thp = x,
rate = rate,
gas_obj = gas_obj,
di=di,
surf_temp=surf_temp,
temp_grad=temp_grad,
epsilon = epsilon,
tol = tol,
max_iter=max_iter,
)
return pwf - _pwf
sol = root_scalar(solve, x0=guess[0],x1=guess[1])
return sol.root
def gas_outflow_curve(
md = None,
inc = None,
thp = None,
gas_obj = None,
rate=None,
min_rate=100,
max_rate=8000,
n_rate=20,
di=2.99,
surf_temp=80,
temp_grad=1,
epsilon = 0.0006,
tol = 0.05,
max_iter=20,
operating_point = None,
op_n = 30
):
# Assert the right types and shapes for input
assert isinstance(md, (np.ndarray,pd.Series)) and md.ndim ==1
md = np.atleast_1d(md)
assert isinstance(inc, (int,float,np.ndarray,pd.Series))
if isinstance(inc,np.ndarray):
assert inc.shape == md.shape
else:
inc = np.full(md.shape,inc)
angle = np.radians(90 - inc)
assert isinstance(thp, (int,float,list,np.ndarray))
thp = np.atleast_1d(thp)
assert thp.ndim == 1
assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None
assert isinstance(di, list)
assert isinstance(rate, (int,float,list,np.ndarray,type(None)))
if rate is None:
rate = np.linspace(min_rate,max_rate,n_rate)
else:
rate = np.atleast_1d(rate)
assert rate.ndim == 1
assert gas_obj.sg is not None
pwf = np.zeros(rate.shape[0]*thp.shape[0]*len(di))
thp_arr = np.zeros(pwf.shape)
di_arr = np.zeros(pwf.shape)
gas_arr = np.zeros(pwf.shape)
name_list = []
i = 0
for p in thp:
for d in di:
for q in rate:
_,pwf[i] = gas_pressure_profile(
md = md,
inc = inc,
thp = p,
rate = q,
gas_obj = gas_obj,
surf_temp=surf_temp,
temp_grad=temp_grad,
di=d
)
gas_arr[i] = q
thp_arr[i] = p
di_arr[i] = d
case_name = f'thp-{p}_di-{d}'
name_list.append(case_name)
i += 1
#df = pd.DataFrame(pwf,columns=name_list,index=rate)
arr=np.column_stack((pwf,thp_arr,di_arr))
df = pd.DataFrame(arr,columns=['pwf','thp','di'],index=gas_arr)
df['case'] = name_list
df.index.name = 'gas'
op =
|
pd.DataFrame()
|
pandas.DataFrame
|
import os
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import sys
import seaborn as sns
sns.set()
sns.set_context("paper")
from sklearn import metrics
# get colors from https://medialab.github.io/iwanthue/ or alternatively from http://phrogz.net/css/distinct-colors.html
colors_cycle = ["#a257d4",
"#e090bf",
"#64c9a3",
"#4b68ae",
"#dc8c2f",
"#cd41a7",
"#d9344f",
"#bc599a",
"#afa1e8",
"#48c1d8",
"#b54545",
"#919233",
"#9a78be",
"#59602a",
"#4e8e2c",
"#9db935",
"#9b563c",
"#e482df",
"#5995d3",
"#6a5198",
"#b05f84",
"#b563c3",
"#5f6b18",
"#a55c21",
"#5754c2",
"#277257",
"#4f9b5e",
"#8b6b29",
"#b8381c",
"#ad2f62",
"#97ba6d",
"#45c37c",
"#5fc250",
"#8c4c7b",
"#e06e87",
"#e2672a",
"#db7756",
"#974858",
"#35743b",
"#bbaf6c",
"#8c4099",
"#e44586",
"#ed5c4c",
"#389c84",
"#cfae3d",
"#eda377",
"#778749",
"#c5935a",
"#de8784",
"#757eec"]
def plot_cluster_composition(fraction_sites, directory, level, normalise=False, label='primary_site', shuffled=False,
algorithm='topsbm'):
sns.set(font_scale=0.8)
df_clusters = pd.read_csv("%s/%s/%s_level_%d_clusters.csv" % (directory, algorithm, algorithm, level), header=[0])
x = np.arange(1, 1 + len(df_clusters.columns))
fig = plt.figure(figsize=(15, 8))
ax = fig.subplots()
fraction_bar_plot(x, fraction_sites, ax)
ax.set_xlabel("cluster", fontsize=20)
if normalise:
ax.set_ylabel("fraction of nodes", fontsize=22)
else:
ax.set_ylabel("number of nodes", fontsize=20)
ax.set_title("%s%s distribution across clusters" % ("Shuffled " if shuffled else '', label), fontsize=20)
ax.legend(ncol=3, loc='upper right')
ax.tick_params(axis='both', labelsize=20)
plt.show()
fig.savefig("%s/%s/%s%sclustercomposition_l%d_%s.pdf" % (
directory, algorithm, "shuffled" if shuffled else '', "fraction_" if normalise else '', int(level), label))
def fraction_bar_plot(x, fraction_sites, ax=None):
global current_color
current_color = -1
if ax is None:
fig = plt.figure(figsize=(15, 8))
ax = fig.subplots()
bottom = np.zeros(len(x))
ymax = 0
for site, data in fraction_sites.items():
if np.max(data) == 0:
continue
ax.bar(x, data, label=site, bottom=bottom, color=get_color_cycle())
bottom = bottom + data
def get_Palette(site):
palette_map = dict({'Brain': 'Blues',
'Breast': 'Reds',
'Kidney': 'Greens',
'Lung': 'Oranges',
'Thyroid': 'Greys',
'Uterus': 'Purples',
'Prostate': 'BuGn',
'Ovary': 'BuPu',
'Lymph Nodes': 'OrRd',
'Soft Tissue': 'PuRd',
'Esophagus': 'YlGn',
'Stomach': 'YlRd',
'Bone Marrow': 'PuBuGn',
'Skin': 'YlOrRd',
'Adipose Tissue': 'YlOrBr',
'Blood': 'RdPu',
'Pancreas': 'OrRd',
'Testis': 'GnBu'})
for k in palette_map.keys():
if k in site:
return palette_map[k]
current_color = -1
def get_color_cycle():
global current_color
current_color += 1
if current_color >= len(colors_cycle):
current_color = 0
return colors_cycle[current_color]
def get_cluster_given_l(l, directory, algorithm='topsbm'):
df_clusters = pd.read_csv("%s/%s/%s_level_%d_clusters.csv" % (directory, algorithm, algorithm, l), header=[0],
index_col=None)
cluster = {}
for i, c in enumerate(df_clusters.columns):
cluster[i] = df_clusters[c].dropna().values
return cluster
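# Layout note (added for clarity; inferred from the read logic above rather than documented
# anywhere in this file). The "*_level_<l>_clusters.csv" file is expected to be wide, one
# column per cluster, with sample identifiers listed down each column and shorter columns
# padded with blanks, e.g.
#
#   Cluster 1,Cluster 2
#   sample_A,sample_C
#   sample_B,
#
# so get_cluster_given_l(l, directory) returns roughly {0: ['sample_A', 'sample_B'],
# 1: ['sample_C']} (values are numpy arrays after .dropna().values).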
def get_topic_given_l(l, directory, algorithm='topsbm'):
df_topics = pd.read_csv("%s/%s/%s_level_%d_topics.csv" % (directory, algorithm, algorithm, l), header=[0])
topic = {}
for i, c in enumerate(df_topics.columns):
topic[i] = df_topics[c].dropna().values
return topic
def get_fraction_sites(cluster, df_files, label='primary_site', normalise=False):
fraction_sites = {}
c_fraction_site = {}
for site in df_files[label].dropna().unique():
fraction_sites[site] = []
c_fraction_site[site] = 0
for i, c in enumerate(cluster):
for sample in cluster[i]:
foundsample = get_file(sample, df_files)
if foundsample is not None:
c_fraction_site[foundsample[label]] += 1
else:
if 'unknown' in c_fraction_site.keys():
c_fraction_site['unknown'] +=1
else:
c_fraction_site['unknown'] = 1
fraction_sites['unknown']=[]
for site in fraction_sites:
if normalise:
norm = float(len(cluster[i]))
else:
norm = 1
if norm > 0:
fraction_sites[site].append(c_fraction_site[site] / norm)
else:
fraction_sites[site].append(np.nan)
c_fraction_site[site] = 0
df =
|
pd.DataFrame(data=fraction_sites)
|
pandas.DataFrame
|
# Define the collection of helper functions that are used to generate the different
# permutations of the recipes & re-format for stitching.
import pandas as pd
import pkg_resources
import stitches.fx_util as util
import stitches.fx_match as match
def get_num_perms(matched_data):
""" A function to give you the number of potential permutations from a
matched set of data. Ie Taking in the the results of `match_neighborhood(target, archive)`.
:param matched_data: data output from match_neighborhood.
:return: A list with two entries. First, the total number of potential permutations of the
matches that cover 1850-2100 of the target data in the matched_data dataframe. The second, a data frame with
the break down of how many matches are in each period of the target data
"""
# Check inputs
util.check_columns(matched_data, {'target_variable', 'target_experiment', 'target_ensemble',
'target_model', 'target_start_yr', 'target_end_yr', 'target_year',
'target_fx', 'target_dx'})
dat = matched_data.drop_duplicates()
dat_count = dat.groupby(["target_variable", "target_experiment", "target_ensemble", "target_model",
"target_start_yr", "target_end_yr", "target_year", "target_fx",
"target_dx"]).size().reset_index(name='n_matches')
dat_count = dat_count.sort_values(["target_year"])
dat_min = dat_count.groupby(["target_variable", "target_experiment", "target_ensemble", "target_model"])[
'n_matches'].min().reset_index(name='minNumMatches')
dat_prod = dat_count.groupby(["target_variable", "target_experiment", "target_ensemble", "target_model"])[
'n_matches'].prod().reset_index(name='totalNumPerms')
dat_count_merge = dat_min.merge(dat_prod)
out = [dat_count_merge, dat_count]
return out
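# Illustrative sketch (added for clarity; not part of the original module). A toy
# matched_data frame with two target windows, one holding 2 archive matches and the
# other 3, so get_num_perms reports minNumMatches = 2 and totalNumPerms = 2 * 3 = 6.
# Every value below is invented purely for the example.
def _example_get_num_perms():
    toy = pd.DataFrame({
        'target_variable': ['tas'] * 5,
        'target_experiment': ['ssp245'] * 5,
        'target_ensemble': ['r1i1p1f1'] * 5,
        'target_model': ['FakeModel'] * 5,
        'target_start_yr': [2015, 2015, 2024, 2024, 2024],
        'target_end_yr': [2023, 2023, 2032, 2032, 2032],
        'target_year': [2019, 2019, 2028, 2028, 2028],
        'target_fx': [1.0, 1.0, 1.2, 1.2, 1.2],
        'target_dx': [0.1, 0.1, 0.2, 0.2, 0.2],
        # distinct archive years keep drop_duplicates from collapsing the rows
        'archive_year': [1990, 1991, 1992, 1993, 1994],
    })
    summary, per_window = get_num_perms(toy)
    return summary, per_window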
def remove_duplicates(md, archive):
""" A function that makes sure that within a single given matched recipe that
there each archive point used is unique. When two target tgav windows in
the trajectory match to the same archive window, the target window with
smaller Euclidean distance keeps the match, and the other target window
gets re-matched with its nearest-neighbor match from a new archive, the
previous one with all matched points removed.
:param md: A data frame with results of matching for a single
tgav recipe. Either because match_neighborhood was
used specifically to return NN or because the multiple
matches have been permuted into new recipes and then
split with this function being applied to each recipe.
:param archive: data frame object consisting of the tas archive to use
for re-matching duplicate points.
:return: data frame with same structure as raw matched, with duplicate matches replaced.
"""
if len(md["target_year"].unique()) < util.nrow(md):
raise TypeError(f"You have multiple matches to a single target year, this function can only accept a matched "
f"data frame of singular matches between target & archive data.")
    # Initialize everything that gets updated on each iteration of the while loop:
# 1. the data frame of matched_data -> make a copy of the argument md to initialize
# 2. the data frame of duplicates is calculated for the first time.
matched_data = md.copy()
# Check to see if in the matched data frame if there are any repeated values.
md_archive = matched_data[['archive_experiment', 'archive_variable', 'archive_model',
'archive_ensemble', 'archive_start_yr', 'archive_end_yr',
'archive_year', 'archive_fx', 'archive_dx']]
duplicates = matched_data.merge(md_archive[md_archive.duplicated()], how="inner")
# As long as duplicates exist, rematch the target windows with the larger
# dist l2 to each archive chunk, add back in, iterate to be safe.
# By matching on new_archive = archive - matches that were used in md,
# we don't introduce new duplicates when we rematch. So the while loop is
# probably over cautious but it does only execute one iteration.
while util.nrow(duplicates) > 0:
# within each iteration of checking duplicates,
# pull out the one with smallest dist_l2 -
# this is the one that gets to keep the archive match, and we use
# as an index to work on the complement of (in case the same
# archive point gets matched for more than 2 target years)
grouped = duplicates.groupby(['archive_experiment', 'archive_variable',
'archive_model', 'archive_ensemble',
'archive_start_yr', 'archive_end_yr', 'archive_year',
'archive_fx', 'archive_dx'])
# Pick which of the target points will continue to be matched with the archive
# pair.
dat = []
for name, group in grouped:
min_value = min(group['dist_l2'])
dat.append(group.loc[group['dist_l2'] == min_value])
duplicates_min = pd.concat(dat)
# target points contained in duplicates-duplicates_min
# are the ones that need a new archive match.
filter_col = [col for col in duplicates if col.startswith('target_')]
points_to_rematch = duplicates[filter_col].loc[(~duplicates['target_year'].isin(duplicates_min['target_year']))]
new_names = list(map(lambda x: x.replace('target_', ''), points_to_rematch.columns))
points_to_rematch.columns = new_names
# Because we know that none of the archive values can be reused in the match,
# discard the ones already used (eg in matched_data)
# from the updated archive that will be used in the rematching.
cols = [col for col in matched_data if col.startswith('archive_')]
rm_from_archive = matched_data[cols]
new_names = list(map(lambda x: x.replace('archive_', ''), rm_from_archive.columns))
rm_from_archive.columns = new_names
# Use our anti_join utility function to return the rows of archive that are
# not in rm_from_archive
new_archive = util.anti_join(archive, rm_from_archive,
bycols=['model', 'experiment', 'variable', 'ensemble',
'start_yr', 'end_yr', 'year', 'fx', 'dx'])
        # Find new matches for the target data that is missing an archive pair. Because we
# are only interested in completing our singular recipe the tol must be 0.
rematched = match.match_neighborhood(target_data=points_to_rematch, archive_data=new_archive,
tol=0)
# Now, we update our key data frames for the next iteration of the while loop:
# 1. matched_data gets updated to be rematched + (previous matched_data minus the targets
# that were rematched).
# 2. duplicates gets recreated, checking for duplicates in our updated matched_data.
# update matched_data:
# first, drop the target windows that got rematched from the current matched_data:
matched_data_minus_rematched_targ_years = matched_data.loc[
~(matched_data['target_year'].isin(rematched['target_year']))].copy()
matched_data = pd.concat([matched_data_minus_rematched_targ_years, rematched]) \
.sort_values('target_year').reset_index(drop=True)
        # Identify duplicates in the updated matched_data for the next iteration of the while loop
md_archive = matched_data[['archive_experiment', 'archive_variable', 'archive_model',
'archive_ensemble', 'archive_start_yr', 'archive_end_yr',
'archive_year', 'archive_fx', 'archive_dx']]
duplicates = matched_data.merge(md_archive[md_archive.duplicated()], how="inner")
# Clean up for the next while loop iteration
del (duplicates_min, points_to_rematch, rm_from_archive, rematched,
matched_data_minus_rematched_targ_years)
return matched_data
def permute_stitching_recipes(N_matches, matched_data, archive, optional=None, testing=False):
""" A function to sample from input `matched_data` (the the results
of `match_neighborhood(target, archive, tol)` to produce permutations
of possible stitching recipes that will match the target data.
matched_data is the output from match_neighborhood
If matched_data has only one target SSP-ensemble member:
Then you get N_matches of new realizations, there's no collapse
within those but there may be collapse across other ensemble
members.
If matched_data has multiple target SSP-ensemble members (same ssp):
The code _attempts_ to give N_matches generated realizations for
each target ensemble member. Fewer may be possible, etc. There is no collapse
across the generated realizations. This is basically targeting the
SSP as a whole, not necessarily the actual ensemble members.
To look at multiple target SSPs, basically call this function
separately for each because we do not care about collapse.
:param N_matches: int the number of desired stitching recipes.
:param matched_data: data output from match_neighborhood.
:param archive: the archive data to use for re-matching duplicate points
:param optional: a previous output of this function that contains a list of already created recipes
to avoid re-making (this is not implemented).
:param testing: Boolean True/False. Defaults to False. If True, then pd.DataFrame.sample() uses
the argument random_state=stitch_ind so that the behavior can be reliably replicated
without setting global seeds.
:return: data frame with same structure as raw matched, with duplicate matches replaced.
"""
# Check inputs
util.check_columns(matched_data, {'target_variable', 'target_experiment', 'target_ensemble',
'target_model', 'target_start_yr', 'target_end_yr', 'target_year',
'target_fx', 'target_dx', 'archive_experiment', 'archive_variable',
'archive_model', 'archive_ensemble', 'archive_start_yr',
'archive_end_yr', 'archive_year', 'archive_fx', 'archive_dx', 'dist_dx',
'dist_fx', 'dist_l2'})
# Initialize quantities updated on every iteration of the while loop:
# 1. A copy of the matched data
# 2. perm_guide
# Initialize matched_data_int for iteration through the while loop:
# make a copy of the data to work with to be sure we don't touch original argument
matched_data_int = matched_data.drop_duplicates().copy()
# identifying how many target windows are in a trajectory we want to
# create so that we know we have created a full trajectory with no
    # missing windows; basically a reference for us to use in checks.
num_target_windows = util.nrow(matched_data_int["target_year"].unique())
# Initialize perm_guide for iteration through the while loop.
# the permutation guide is one of the factors that the while loop
# will run checks on, must be initialized.
# Perm_guide is basically a dataframe where each target window
# lists the number of archive matches it has.
num_perms = get_num_perms(matched_data_int)
perm_guide = num_perms[1]
# how many target trajectories are we matching to,
# how many collapse-free ensemble members can each
# target support, and order them according to that
# for construction.
targets = num_perms[0].sort_values(["minNumMatches"]).reset_index()
# Add a column of a target id name, differentiate between the different input
# streams we are emulating.
# We specifically emulate starting with the realization that can support
# the fewest collapse-free generated realizations and work in increasing
# order from there. We iterate over the different realizations to facilitate
# checking for duplicates across generated realizations across target
# realizations.
targets['target_ordered_id'] = ['A' + str(x) for x in targets.index]
if util.nrow(num_perms[0]["target_experiment"].unique()) > 1:
raise TypeError(
f"function permute_stitching_recipes should be applied to separate data frames for each target experiment of interest (multiple target ensemble members for a single target experiment is fine)")
# max number of permutations per target without repeating across generated
# ensemble members.
N_data_max = min(num_perms[0]['minNumMatches'])
if N_matches > N_data_max:
# TODO this should be written up as a proper python message statement.
print("You have requested more recipes than possible for at least one target trajectories, returning what can")
# Initialize the number of matches to either 0 or the input read from optional:
if type(optional) is str:
print('initialize to the read-in: has not been translated')
else:
recipe_collection = pd.DataFrame()
# Loop over each target ensemble member, creating N_matches generated
# realizations via a while loop before moving to the next target.
for target_id in targets['target_ordered_id'].unique():
        # subset the target info; the target df contains meta information about the run
        # and the number of permutations and such.
target = targets.loc[targets["target_ordered_id"] == target_id].copy()
# initialize a recipes data frame holder for each target, for
# the while loop to iterate on
recipes_col_by_target = pd.DataFrame()
var_name = target['target_variable'].unique()[0]
exp = target['target_experiment'].unique()[0]
mod = target['target_model'].unique()[0]
ens = target['target_ensemble'].unique()[0]
# While the following conditions are met continue to generate new recipes.
# 1. While we have fewer matches than requested for the target ensemble_member,
# keep going.
# 2. Filter the perm_guide to just the target ensemble member in this loop and
# make sure there are at least num_target_windows of time windows: basically
# make sure there is at least one remaining archive match to draw from for
# each target window in this target ensemble. Note this means the perm_guide
# must be updated at the end of every while loop iteration.
#
# Initialize these conditions so we enter the while loop, then update again at the
# end of each iteration:
if util.nrow(recipes_col_by_target) == 0:
condition1 = True
elif util.nrow(recipes_col_by_target['stitching_id'].unique()) < N_matches:
condition1 = True
else:
condition1 = False
perm_rows = util.nrow(
perm_guide.loc[(perm_guide['target_variable'] == var_name) & (perm_guide['target_experiment'] == exp) &
(perm_guide['target_model'] == mod) & (perm_guide['target_ensemble'] == ens)]
.copy()
.drop_duplicates())
if perm_rows == num_target_windows:
condition2 = True
else:
condition2 = False
# And an integer index to initialize the count of stitched
# trajectories for each target
stitch_ind = 1
# Run the while loop!
while all([condition1, condition2]):
# Group matched data for a single target by the chunks of the target information.
# Right now a single target chunk may have multiple matches with archive points. The
# next several steps of the while loop will create a one to one paring between the
# target and archive data, then check to make sure that the pairing meets the requirements
# for what we call a recipe.
grouped_targets = []
grouped_targets = matched_data_int.loc[(matched_data_int['target_variable'] == var_name) &
(matched_data_int['target_experiment'] == exp) &
(matched_data_int['target_model'] == mod) &
(matched_data_int['target_ensemble'] == ens)].copy().groupby(
['target_variable', 'target_experiment', 'target_ensemble', 'target_model',
'target_start_yr', 'target_end_yr'])
# For each target window group,
# Randomly select one of the archive matches to use.
# This creates one_one_match, a candidate recipe.
one_one_match = []
for name, group in grouped_targets:
if testing:
one_one_match.append(group.sample(1, replace=False, random_state=stitch_ind))
else:
one_one_match.append(group.sample(1, replace=False))
one_one_match = pd.concat(one_one_match)
one_one_match = one_one_match.reset_index(drop=True).copy()
# Before we can accept our candidate recipe, one_one_match,
# we run it through a lot of tests.
# Force one_one_match to meet our first condition,
# that each archive data point in the recipe must be unique.
# Then give it a stitching id
new_recipe = []
new_recipe = remove_duplicates(one_one_match, archive)
stitching_id = exp + '~' + ens + '~' + str(stitch_ind)
new_recipe["stitching_id"] = stitching_id
new_recipe = new_recipe.reset_index(drop=True).copy()
# Make sure the new recipe isn't missing any years:
            if new_recipe.shape[0] != num_target_windows:
raise TypeError(f"problem: the new single recipe is missing years of data!")
# Make sure that no changes were made to the target years.
if sum(~new_recipe['target_start_yr'].isin(set(matched_data_int['target_start_yr']))) > 0:
raise TypeError(f"problem the new single recipe target years!")
# TODO add a check to make sure that the correct number of rows are being returned.
# Compare the new_recipe to the previously drawn recipes across all target
# ensembles.
# There is no collapse within each target ensemble because we remove the constructed
# new_recipe from the matched_data at the end of each iteration of the while loop -
# The sampled points CAN'T be used again for the current target ensemble member
# for loop iteration, or for any other target ensemble members. Meaning we
# avoid envelope collapse when targeting multiple realizations (you don't have
            # realization 1 and realization 4 2070 getting matched to the same archive point).
# The code below is checking to make sure that our new_recipe doesn't exist
# in the saved recipe_collection. This shouldn't be possible with how we update
# our matched_data_int on every loop, but just to be cautious, we check.
# Again, the challenge is seeing if our entire sample has
# been included in recipes before, not just a row or two.
if util.nrow(recipe_collection) != 0:
# If previous recipes exist, we must create a comparison
# data frame that checks each existing recipe in recipe_collection
# against new_recipe and record True/False
#
# Compare the new recipe with the existing collection of all recipes.
cols_to_use = ['target_variable', 'target_experiment',
'target_model', 'target_start_yr', 'target_end_yr', 'archive_experiment',
'archive_variable', 'archive_model', 'archive_ensemble', 'archive_start_yr',
'archive_end_yr']
grouped_collection = recipe_collection.groupby(['stitching_id'])
comparison = []
for name, group in grouped_collection:
df1 = group[cols_to_use].copy().reset_index(drop=True).sort_index(axis=1)
df2 = new_recipe[cols_to_use].copy().reset_index(drop=True).sort_index(axis=1)
comparison.append(df1.equals(df2))
# end for loop
# end if statement
else:
# Otherwise, this is the first recipe we've done at all, so we set comparison manually
# so that the next if statement triggers just like it was appending a new recipe to an
# existing list.
comparison = [False]
# end else
            # If the new_recipe is not unique (aka, any(comparison) == True), then
            # we don't want it and we don't want to do anything else in this iteration of
            # the while loop. We DON'T update the matched_points or conditions, so the
            # while loop is forced to re-run so that another random draw is done to create
            # a new candidate new_recipe.
            # We check for what we want: the new recipe is the first, or none of the
            # comparisons are True. In either case, we are safe to keep new_recipe and
            # update all the data frames for the next iteration of the while loop.
            if not any(comparison):
# add new_recipe to the list of recipes for this target ensemble
recipes_col_by_target = pd.concat([recipes_col_by_target, new_recipe])
# And we remove it from the matched_points_int so the archive
# values used in this new_recipe can't be used to construct
# subsequent realizations for this target ensemble member.
# This updated matched_data_int is used in each iteration
# of the while loop. Since we are removing the constructed
# new_recipe from the matched_data_int at the end of each
# iteration of the while loop, the sample points can't be
# randomly drawn again for the next generated trajectory
# of the current target ensemble member for loop iteration.
# Now each (target_window, archive_window) combination must
# be removed from matched data for all target ensemble members,
# not just the one we are currently operating on.
# This ensures that we don't get collapse in the generated
# envelope across target ensemble members (e.g you don't
# have realization 1 and realization 4 2070 getting matched
# to the same archive point).
# Use an anti-join
matched_data_int = util.anti_join(matched_data_int, new_recipe.drop(['stitching_id'], axis=1).copy(),
bycols=["target_year", "target_start_yr", "target_end_yr",
"archive_experiment", "archive_variable", "archive_model",
"archive_ensemble", "archive_start_yr", "archive_end_yr",
"archive_year"]).copy()
# update permutation count info with the revised matched data so
# the while loop behaves - this makes sure that every target window
# in the perm_guide actually has at least one matched archive point
# available for draws .
# That way, we don't try to construct a trajectory with fewer years
# than the targets.
num_perms = get_num_perms(matched_data_int)
perm_guide = num_perms[1]
# Use the updated perm_guide to update
# the while loop conditions:
# Condition 1:
# If we haven't reached the N_matches goal for this target ensemble
if util.nrow(recipes_col_by_target) == 0:
condition1 = True
elif util.nrow(recipes_col_by_target['stitching_id'].unique()) < N_matches:
condition1 = True
else:
condition1 = False
# end updating Condition 1
# Condition 2:
# make sure each target window in the updated perm guide has at least one archive match available
# to draw on the next iteration.
perm_rows = util.nrow(
perm_guide.loc[
(perm_guide['target_variable'] == var_name) & (perm_guide['target_experiment'] == exp) &
(perm_guide['target_model'] == mod) & (perm_guide['target_ensemble'] == ens)]
.copy()
.drop_duplicates())
if perm_rows == num_target_windows:
condition2 = True
else:
condition2 = False
# end updating condition 2
# Add to the stitch_ind, to update the count of stitched
# trajectories for each target ensemble member.
stitch_ind += 1
# end if statement
# end the while loop for this target ensemble member
# Add the collection of the recipes for each of the targets into single df.
        recipe_collection = pd.concat([recipe_collection, recipes_col_by_target])
# end the for loop over target ensemble members
# do outputs
out = recipe_collection.reset_index(drop=True).copy()
return out[['target_variable', 'target_experiment', 'target_ensemble',
'target_model', 'target_start_yr', 'target_end_yr', 'target_year',
'target_fx', 'target_dx', 'archive_experiment', 'archive_variable',
'archive_model', 'archive_ensemble', 'archive_start_yr',
'archive_end_yr', 'archive_year', 'archive_fx', 'archive_dx', 'dist_dx',
'dist_fx', 'dist_l2', 'stitching_id']]
def handle_transition_periods(rp):
""" Go through the recipe and when there is a transition period, aka the archive years span both the
historical and future scenarios go through and insert in an extra period so that they don't do
this over lap any more.
:param rp: a data frame of the recipe.
:return: a data frame of of the recipe with no over lapping historical/future experiments, this is now ready to join with pangeo information.
"""
util.check_columns(rp, {'target_variable', 'target_experiment', 'target_ensemble',
'target_model', 'target_start_yr', 'target_end_yr', 'target_year',
'target_fx', 'target_dx', 'archive_experiment', 'archive_variable',
'archive_model', 'archive_ensemble', 'archive_start_yr',
'archive_end_yr', 'archive_year', 'archive_fx', 'archive_dx', 'dist_dx',
'dist_fx', 'dist_l2', 'stitching_id'})
def internal_func(x):
# First check to see the archive period spans the historical to future scenario
transition_period = (
(2014 in range(x["archive_start_yr"], x["archive_end_yr"] + 1)) &
(2015 in range(x["archive_start_yr"], x["archive_end_yr"] + 1))
)
if transition_period:
target_yrs = list(range(x['target_start_yr'], x['target_end_yr']+1))
archive_yrs = list(range(x['archive_start_yr'], x['archive_end_yr']+1))
hist_cut_off = 2014 # the final complete year of the historical experiment
historical_yrs = list(filter(lambda x: x <= hist_cut_off, archive_yrs))
future_yrs = list(set(archive_yrs).difference(set(historical_yrs)))
# This is the information that is constant between the historical and future periods.
constant_info = x.loc[x.index.isin({'archive_variable', 'archive_model',
'archive_ensemble', 'stitching_id'})]
# Construct the historical period information
d = {'target_start_yr': min(target_yrs),
'target_end_yr': target_yrs[len(historical_yrs)-1],
'archive_experiment': 'historical',
'archive_start_yr': min(historical_yrs),
'archive_end_yr': max(historical_yrs)}
ser = pd.Series(data=d, index=['target_start_yr', 'target_end_yr', 'archive_experiment',
'archive_start_yr', 'archive_end_yr'])
            historical_period = pd.concat([ser, constant_info]).to_frame().transpose()
            # Check to make sure the lengths of time are correct
            targ_len = historical_period['target_end_yr'].values - historical_period['target_start_yr'].values
            arch_len = historical_period['archive_end_yr'].values - historical_period['archive_start_yr'].values
            if targ_len != arch_len:
                raise TypeError(f"problem with the length of the historical archive & target yrs")
# Now construct the future period information
d = {'target_start_yr': target_yrs[len(historical_yrs)],
'target_end_yr': target_yrs[len(target_yrs) - 1],
'archive_experiment': x['archive_experiment'],
'archive_start_yr': min(future_yrs),
'archive_end_yr': max(future_yrs)}
ser = pd.Series(data=d, index=['target_start_yr', 'target_end_yr', 'archive_experiment',
'archive_start_yr', 'archive_end_yr'])
            future_period = pd.concat([ser, constant_info]).to_frame().transpose()
            # Check to make sure the lengths of time are correct
            targ_len = future_period['target_end_yr'].values - future_period['target_start_yr'].values
            arch_len = future_period['archive_end_yr'].values - future_period['archive_start_yr'].values
            if not targ_len == arch_len:
                raise TypeError(f"problem with the length of the future archive & target yrs")
# Combine the period information
out = pd.concat([historical_period, future_period]).reset_index(drop=True)
else:
out = x.to_frame().transpose().reset_index(drop=True)
out = out[['target_start_yr', 'target_end_yr', 'archive_experiment', 'archive_variable',
'archive_model', 'archive_ensemble', 'stitching_id', 'archive_start_yr',
'archive_end_yr']]
return out
# Note that data frame returned might not be identical in shape to the
# recipe read in because any periods that cover the historical period
# will be split into two rows.
ser = rp.apply(internal_func, axis=1)
out = pd.concat(ser.values.tolist()).reset_index(drop=True)
return out
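# Worked example (added for clarity; it only restates the logic above). If a recipe row maps
# target years 2010-2018 onto archive years 2011-2019 drawn from 'ssp245', the archive span
# crosses the 2014/2015 boundary, so the row is split into
#   target 2010-2013 <- archive 'historical' 2011-2014
#   target 2014-2018 <- archive 'ssp245'     2015-2019
# and those two rows replace the original row in the returned frame.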
def handle_final_period(rp):
""" Go through a recipe and ensure that all of the periods have the same archive
and target period length, if not update to reflect the target period length.
Otherwise you'll end up with extra years in the stitched data. This is really
only an issue for the final period of target data because sometimes that period is somewhat short.
OR if the normal sized target window gets matched to the final period of data from one
    of the archive matches. Since the final period is typically only one year shorter than the
full window target period in this case, we simply repeat the final archive year to get
enough matches.
:param rp: a data frame of the recipe.
:return: a recipe data frame that has target and archive periods of the same length.
"""
# Define an internal function that checks row by row if we are working
# with the final period & if that is a problem, if so handle it.
def internal_func(x):
len_target = x['target_end_yr'] - x['target_start_yr']
len_archive = x['archive_end_yr'] - x['archive_start_yr']
if len_target == len_archive:
# No problem return the the row as is
out = x.to_frame().transpose().reset_index(drop=True)
out = out[['target_start_yr', 'target_end_yr', 'archive_experiment', 'archive_variable',
'archive_model', 'archive_ensemble', 'stitching_id', 'archive_start_yr',
'archive_end_yr']]
elif len_target < len_archive:
# Figure out how much shorter the target period is than the archive period.
out = x.to_frame().transpose().reset_index(drop=True)
out = out[['target_start_yr', 'target_end_yr', 'archive_experiment', 'archive_variable',
'archive_model', 'archive_ensemble', 'stitching_id', 'archive_start_yr',
'archive_end_yr']]
out['archive_end_yr'] = out['archive_end_yr'] - 1
else:
            # TODO: may need to revisit; an extra year was just added to the archive length, which seems questionable.
# Figure out how much shorter the target period is than the archive period.
out = x.to_frame().transpose().reset_index(drop=True)
out = out[['target_start_yr', 'target_end_yr', 'archive_experiment', 'archive_variable',
'archive_model', 'archive_ensemble', 'stitching_id', 'archive_start_yr',
'archive_end_yr']]
out['archive_start_yr'] = out['archive_start_yr'] - 1
return out
ser = rp.apply(internal_func, axis=1)
out = pd.concat(ser.values.tolist()).reset_index(drop=True)
return out
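# Worked example (added for clarity; it only restates the logic above). A target window
# 2097-2100 (len_target = 3) matched to an archive window 2090-2094 (len_archive = 4) falls
# into the len_target < len_archive branch, so the row comes back with archive_end_yr trimmed
# to 2093; rows whose target and archive spans already agree pass through unchanged.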
def generate_gridded_recipe(messy_recipe, res='mon'):
""" Using a messy recipe create the messy recipe that can be used in the
stitching process. TODO I think that when the permuate recipe function
is fixed, I thinkn that funciton should be nested into here so that this function
takes in a matched df & generates the recpies.
:param messy_recipe: a data frame generated by the permuate_recpies
:param res: string mon or day
:return: a recipe data frame
"""
# Check inputs
util.check_columns(messy_recipe, {'target_variable', 'target_experiment', 'target_ensemble',
'target_model', 'target_start_yr', 'target_end_yr', 'target_year',
'target_fx', 'target_dx', 'archive_experiment', 'archive_variable',
'archive_model', 'archive_ensemble', 'archive_start_yr',
'archive_end_yr', 'archive_year', 'archive_fx', 'archive_dx', 'dist_dx',
'dist_fx', 'dist_l2', 'stitching_id'})
if res not in ['mon', 'day']:
# TODO figure out a better way to handle the tas resolution matching, will also need
# to figure out how to translate this to non tas variables as well.
raise TypeError(f"generate_gridded_recipe: does not recognize the res input")
# Clean up the recipe
dat = handle_transition_periods(messy_recipe)
dat = handle_final_period(dat)
# Make sure that if there are historical years of data being used assign
# the experiment name to historical.
dat.loc[dat['archive_end_yr'] <= 2014, "archive_experiment"] = "historical"
# Now that we have the formatted recipe add the pangeo tas information!!
ptable_path = pkg_resources.resource_filename('stitches', 'data/pangeo_table.csv')
pangeo_table = pd.read_csv(ptable_path)
if res == 'mon':
table = 'Amon'
else:
table = 'day'
tas_meta_info = pangeo_table.loc[(pangeo_table['variable'] == 'tas') &
(pangeo_table['domain'] == table)]
tas_meta_info = tas_meta_info[['model', 'experiment', 'ensemble', 'variable', 'zstore']]
tas_meta_info = tas_meta_info.rename(columns={"model": "archive_model",
"experiment": "archive_experiment",
"ensemble": "archive_ensemble",
"variable": "archive_variable"})
out = dat.merge(tas_meta_info, how="inner")
out = out.reset_index(drop=True).copy()
out = out.sort_values(['stitching_id', 'target_start_yr', 'target_end_yr']).copy()
out = out.reset_index(drop=True).copy()
return out
def make_recipe(target_data, archive_data, N_matches, res="mon", tol=0.1, non_tas_variables=None):
""" Generate a stitching recipe.
:param target_data: a pandas data frame of climate information to emulate.
:param archive_data: a pandas data frame of temperature data to use as the archive to match on.
    :param N_matches: an int, the maximum number of matches per target data
:param res: str of mon or day to indicate the resolution of the stitched data
:param tol: float value indicating the tolerance to use in the matching process, default set to 0.1
:param non_tas_variables: a list of variables other than tas to stitch together, when using the default set to None only tas will be stitched together.
    :return: pandas data frame of a formatted recipe
"""
# Check the inputs
util.check_columns(target_data, set(['experiment', 'variable', 'ensemble', 'model', 'start_yr',
'end_yr', 'year', 'fx', 'dx']))
util.check_columns(archive_data, set(['experiment', 'variable', 'ensemble', 'model', 'start_yr',
'end_yr', 'year', 'fx', 'dx']))
if not type(N_matches) is int:
raise TypeError(f"N_matches: must be an integer")
if not type(tol) is float:
raise TypeError(f"tol: must be a float")
#if not type(non_tas_variables) in [None, list]:
# raise TypeError(f"non_tas_variables: must be None or a list")
# If there are non tas variables to be stitched subset the archive to limit
# the coverage to only the entries with the complete coverage.
if type(non_tas_variables) == list:
if res not in ['mon', 'day']:
raise TypeError(f"does not recognize the res input")
if 'tas' in non_tas_variables:
raise TypeError(f"non_tas_variables: cannot contain tas")
pt_path = pkg_resources.resource_filename('stitches', 'data/pangeo_table.csv')
pangeo_table =
|
pd.read_csv(pt_path)
|
pandas.read_csv
|
"""
adjust_coordinates.py
Handles the primary functions
"""
import os
from adjust_graphene_sheet import functions
import sys
import numpy as np
import pandas as pd
import re
def adjust_coordinates(filename="graphene.gro", outfile="graphene_adjusted.gro"):
install_dir=os.path.dirname(functions.__file__)
print(install_dir)
filename_withpath=os.path.join(install_dir,'data',filename)
try:
print("Opening file...\n ",filename_withpath)
f=open(filename_withpath)
except IOError as e:
print("Unable to open" + filename +". Please check the file")
coord=[]
box=[]
n_atoms=0
for idx,line in enumerate(f):
if idx==1:
n_atoms=int(line.split()[0])
if idx>=2:
parts=line.split()
if len(parts)>3:
coord.append([parts[0],parts[1],parts[2],float(parts[3]),float(parts[4]), float(parts[5])])
elif len(parts)==3:
box.append([parts[0],parts[1], parts[2]])
box_x=float(box[0][0])
box_y=float(box[0][1])
print(box_x,box_y)
labels=["resid","atom_type","index","x","y","z"]
df=
|
pd.DataFrame(coord,columns=labels)
|
pandas.DataFrame
|
#!/usr/bin/env python
import os
import sys
import csv
import json
import sqlite3
import pandas as pd
class Databases:
def __init__(self, capture, configfile=None, maxcol=2000):
scriptdir = os.path.dirname(os.path.abspath(__file__))
if configfile is None:
configfile = os.path.join(scriptdir, 'config.py')
self.capture = capture
config = dict()
exec(open(configfile).read(), config)
db_dir = config['dbdir']
db_general = os.path.join(db_dir, config['dbgeneral'])
db_capture = os.path.join(db_dir, '{}.sqlite'.format(capture))
self.table_poscons = config['poscontable']
self.table_badsamples = config['badsampletable']
self.table_badregion = config['badregiontable']
self.conn = sqlite3.connect(db_general)
self.capconn = sqlite3.connect(db_capture)
def create_bad_region_table(self):
"""Create a table to hold empirical exluded regions."""
sql = """CREATE TABLE IF NOT EXISTS {}
(capture text NOT NULL,
target text NOT NULL,
gen text NOT NULL,
PRIMARY KEY(capture, target))
""".format(self.table_badregion)
c = self.conn.cursor()
c.execute(sql)
self.conn.commit()
def create_bad_sample_table(self):
"""Create a table to hold samples to be exluded from archive."""
sql = """CREATE TABLE IF NOT EXISTS {}
(serie text NOT NULL,
sample text NOT NULL,
code text,
PRIMARY KEY(serie,sample))
""".format(self.table_badsamples)
c = self.conn.cursor()
c.execute(sql)
self.conn.commit()
def create_annot_table(self, dfannot):
"""Create a table from the annotation dataframe."""
sql = """CREATE TABLE IF NOT EXISTS {}annot
(target text NOT NULL,
gen text NOT NULL,
PRIMARY KEY(target, gen))
""".format(self.capture.lower())
c = self.capconn.cursor()
c.execute(sql)
[c.execute("INSERT INTO {}annot VALUES ('{}', '{}')".format(
self.capture, i, dfannot.loc[i]['Gen']))
for i in dfannot.index.unique()]
self.capconn.commit()
return
def create_doc_table(self):
"""Create a table to hold DoC data."""
sql = '''CREATE TABLE IF NOT EXISTS {}
(SAMPLE TEXT NOT NULL,
SERIE TEXT NOT NULL,
DATA TEXT NOT NULL,
PRIMARY KEY(SAMPLE, SERIE))
'''.format(self.capture.lower())
c = self.capconn.cursor()
c.execute(sql)
self.capconn.commit()
return
def add_poscontrols(self, inputfile):
"""Add poscontroles from file to table"""
table = self.table_poscons
c = self.conn.cursor()
with open(inputfile, 'r') as f:
for line in f:
if not line:
continue
cap, gen, sample, soort = line.split()
sql = """INSERT INTO {}
VALUES ('{}', '{}', '{}', '{}')
""".format(table, cap, gen, sample, soort)
try:
c.execute(sql)
except sqlite3.IntegrityError:
pass
self.conn.commit()
def add_badsamples(self, inputfile):
"""Add badsamples from file to table"""
table = self.table_badsamples
c = self.conn.cursor()
with open(inputfile, 'r') as f:
for line in f.read().splitlines():
if not line.strip():
continue
serie, sample, code = line.split()[:3]
sql = """INSERT INTO {}
VALUES ('{}', '{}', '{}')
""".format(table, serie, sample, code)
try:
c.execute(sql)
except sqlite3.IntegrityError:
pass
self.conn.commit()
@staticmethod
def parse_docfile(docfile):
target_coverage = list()
with open(docfile) as f:
fin = csv.reader(f, delimiter='\t')
_ = next(fin)
for line in fin:
target, _total, mean, *_ = line
target = target.replace(':', '_')
target = target.replace('-', '_')
target_coverage.append((target, mean))
return target_coverage
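    # Layout note (added for clarity; inferred from parse_docfile above rather than from any
    # external documentation). The DoC file is read as tab-separated with a single header row;
    # only the first column (the target, e.g. "chr1:12345-12456", rewritten to
    # "chr1_12345_12456") and the third column (mean coverage) are kept, and any remaining
    # columns are ignored.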
def add_data_to_db(self, sample, serie, data):
sql = """INSERT INTO {}
(SAMPLE, SERIE, DATA)
VALUES ('{}', '{}', '{}')
""".format(self.capture.lower(), sample, serie, json.dumps(data))
c = self.capconn.cursor()
try:
c.execute(sql)
except sqlite3.IntegrityError:
pass
else:
self.capconn.commit()
return
def get_regions_to_exclude(self):
c = self.conn.cursor()
sql = """SELECT DISTINCT fragments
FROM {}
WHERE (capture='{}')
""".format(self.table_badregion, self.capture)
c.execute(sql)
fragments = [val for tup in c.fetchall() for val in tup]
return fragments
def get_bad_samples(self):
"""Get badsample ID's from table and return a list."""
c = self.conn.cursor()
sql = """SELECT DISTINCT sample
FROM {}""".format(self.table_badsamples)
c.execute(sql)
badsamples = [val for tup in c.fetchall() for val in tup]
return badsamples
def get_positive_controls_dict(self):
"""Get positive control info from table and return a dict."""
c = self.conn.cursor()
sql = """SELECT DISTINCT sample, gene
FROM {}
WHERE (capture='{}')
""".format(self.table_poscons, self.capture)
c.execute(sql)
poscons = {sample: gene for (sample, gene) in c.fetchall()}
return poscons
def get_annot(self):
"""Get annotation(target-gene) from table and return a df."""
sql = """SELECT target, gen
FROM {}annot""".format(self.capture.lower())
annot =
|
pd.read_sql(sql, self.capconn)
|
pandas.read_sql
|
import json
import io
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
import pandas as pd
import numpy as np
import plotly.express as px
from dash.dependencies import Output, Input, State
from datetime import datetime, timedelta
from server import app
import plotly.graph_objects as go
import plotly.express as px
from sqlalchemy import create_engine
from flask import send_file
import os
from joblib import Parallel, delayed
from dash.exceptions import PreventUpdate
import time
import re
def discriminated_antis(all_antis):
try:
df_抗菌药物 = pd.read_csv(r'./抗菌药物字典.csv')
except:
df_抗菌药物 = pd.read_csv(r'./抗菌药物字典.csv', encoding='gbk')
def isanti(x):
df_抗菌药物['药品'] = x.抗菌药物
df1 = df_抗菌药物[df_抗菌药物['规则等级']==1]
if x.抗菌药物 in list(df1['匹配规则'].values):
return df1[df1['匹配规则']==x.抗菌药物].reset_index(drop=True).loc[0]['抗菌药物通用名']
else:
df2 = df_抗菌药物[df_抗菌药物['规则等级']==2]
df2['是否匹配'] = df2.apply(lambda y: y.抗菌药物通用名 if re.match(y.匹配规则, y.药品) else np.nan, axis=1)
df2['匹配长度'] = df2.apply(lambda y: 0 if pd.isnull(y.是否匹配) else len( y.匹配规则 ), axis=1)
if df2[~df2['是否匹配'].isnull()].shape[0]==0:
df3 = df_抗菌药物[df_抗菌药物['规则等级']==3]
df3['是否匹配'] = df3.apply(lambda y: y.抗菌药物通用名 if re.match(y.匹配规则, y.药品) else np.nan, axis=1)
df3['匹配长度'] = df3.apply(lambda y: 0 if pd.isnull(y.是否匹配) else len( y.匹配规则 ), axis=1)
if df3[~df3['是否匹配'].isnull()].shape[0]==0:
df4 = df_抗菌药物[df_抗菌药物['规则等级']==4]
df4['是否匹配'] = df4.apply(lambda y: y.抗菌药物通用名 if re.match(y.匹配规则, y.药品) else np.nan, axis=1)
df4['匹配长度'] = df4.apply(lambda y: 0 if pd.isnull(y.是否匹配) else len( y.匹配规则 ), axis=1)
if df4[~df4['是否匹配'].isnull()].shape[0]==0:
return np.nan
else:
                        return df4[~df4['是否匹配'].isnull()][['抗菌药物通用名','匹配长度']].drop_duplicates().sort_values(by=['匹配长度'], ascending=False).reset_index(drop=True)['抗菌药物通用名'].loc[0]  # return the generic name of the longest successfully matched regex rule
else:
                    return df3[~df3['是否匹配'].isnull()][['抗菌药物通用名','匹配长度']].drop_duplicates().sort_values(by=['匹配长度'], ascending=False).reset_index(drop=True)['抗菌药物通用名'].loc[0]  # return the generic name of the longest successfully matched regex rule
else:
                return df2[~df2['是否匹配'].isnull()][['抗菌药物通用名','匹配长度']].drop_duplicates().sort_values(by=['匹配长度'], ascending=False).reset_index(drop=True)['抗菌药物通用名'].loc[0]  # return the generic name of the longest successfully matched regex rule
all_antis['抗菌药物通用名'] = all_antis.apply(isanti, axis=1)
return all_antis
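# Matching note (added for clarity; translated and summarised from the logic above). The
# dictionary file 抗菌药物字典.csv holds rules at four levels: level 1 is an exact name lookup,
# while levels 2-4 are regular-expression rules tried in order. Within a level, the generic
# name (抗菌药物通用名) of the longest matching rule wins, and drug names that match no rule
# come back as NaN.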
# ----------------------------------------------------------------------------------------------------- Level-1 Figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for the first level-1 figure (antibiotics / bacteria detection / drug susceptibility)
def get_first_lev_first_fig_date(engine):
res_数据时间缺失及汇总 = pd.DataFrame(columns=['业务类型', 'num', 'month' ])
    # Problem categories, counts of problematic records, and counts over all data
bus_dic = {
'给药': "select '给药' as 业务类型 ,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS where BEGINTIME is not null group by substr(BEGINTIME,1,7)",
'菌检出': " select '菌检出' as 业务类型 , count(1) as num ,substr(REQUESTTIME,1,7) as month from BACTERIA where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) ",
'药敏': " select '药敏' as 业务类型 , count(1) as num ,substr(REQUESTTIME,1,7) as month from DRUGSUSCEPTIBILITY where REQUESTTIME is not null group by substr(REQUESTTIME,1,7) ",
}
for bus in bus_dic:
        res_数据时间缺失及汇总 = pd.concat([res_数据时间缺失及汇总, pd.read_sql(bus_dic[bus], con=engine)])
print('抗菌药物-菌检出-药敏一级图一',bus)
return res_数据时间缺失及汇总
# Update the first level-1 figure (antibiotics / bacteria detection / drug susceptibility)
@app.callback(
Output('anti_bar_drug_first_level_first_fig','figure'),
Output('anti_bar_drug_first_level_first_fig_data','data'),
Input('anti_bar_drug_first_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_first_fig(anti_bar_drug_first_level_first_fig_data,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
engine = create_engine(db_con_url['db'])
if anti_bar_drug_first_level_first_fig_data is None:
anti_bar_drug_first_level_first_fig_data = {}
anti_bar_drug_first_level_first_fig = get_first_lev_first_fig_date(engine)
anti_bar_drug_first_level_first_fig_data['anti_bar_drug_first_level_first_fig'] = anti_bar_drug_first_level_first_fig.to_json(orient='split', date_format='iso')
anti_bar_drug_first_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_first_fig_data['btime'] = btime
anti_bar_drug_first_level_first_fig_data['etime'] = etime
anti_bar_drug_first_level_first_fig_data = json.dumps(anti_bar_drug_first_level_first_fig_data)
else:
anti_bar_drug_first_level_first_fig_data = json.loads(anti_bar_drug_first_level_first_fig_data)
if db_con_url['hosname'] != anti_bar_drug_first_level_first_fig_data['hosname']:
anti_bar_drug_first_level_first_fig = get_first_lev_first_fig_date(engine)
anti_bar_drug_first_level_first_fig_data['anti_bar_drug_first_level_first_fig'] = anti_bar_drug_first_level_first_fig.to_json(orient='split',date_format='iso')
anti_bar_drug_first_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_first_fig_data = json.dumps(anti_bar_drug_first_level_first_fig_data)
else:
anti_bar_drug_first_level_first_fig = pd.read_json(anti_bar_drug_first_level_first_fig_data['anti_bar_drug_first_level_first_fig'], orient='split')
anti_bar_drug_first_level_first_fig_data = dash.no_update
#
anti_bar_drug_first_level_first_fig = anti_bar_drug_first_level_first_fig[(anti_bar_drug_first_level_first_fig['month']>=btime) & (anti_bar_drug_first_level_first_fig['month']<=etime)]
anti_bar_drug_first_level_first_fig = anti_bar_drug_first_level_first_fig.sort_values(['month','业务类型'])
fig1 = px.line(anti_bar_drug_first_level_first_fig, x='month', y='num', color='业务类型',
color_discrete_sequence=px.colors.qualitative.Dark24)
# Use a horizontal legend positioned above the plot
fig1.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
fig1.update_yaxes(title_text="业务数据量")
fig1.update_xaxes(title_text="时间")
return fig1,anti_bar_drug_first_level_first_fig_data
# # ----------------------------------------------------------------------------------------------------- Level-1 Figure 2 ----------------------------------------------------------------------------------------------------------------------
# # Fetch the data for the second level-1 figure (antibiotics / bacteria detection / drug susceptibility)
def get_first_lev_second_fig_date(engine,btime,etime):
res_数据关键字缺失及汇总 = pd.DataFrame(columns=['业务类型', '科室', '科室名称', 'num'])
bus_dic = {'8种耐药菌检出': f""" select '8种耐药菌检出' as 业务类型, t1.dept as 科室,t2.label as 科室名称,t1.num from
(select dept,count(1) as num from BACTERIA where BACTERIA in ('大肠埃希菌', '鲍曼不动杆菌', '肺炎克雷伯菌', '金黄色葡萄球菌', '铜绿假单胞菌', '屎肠球菌', '粪肠球菌')
and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and dept is not null
group by dept) t1,s_departments t2
where t1.dept=t2.code(+) order by t1.num desc
""",
"限制级特殊级抗菌药物使用" : f"""select '限制级特殊级抗菌药物使用' as 业务类型,t1.dept as 科室,t2.label as 科室名称,t1.num from
(select dept,count(1) as num from ANTIBIOTICS where ALEVEL in ('限制类', '特殊类')
and substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}' and dept is not null
group by dept) t1,s_departments t2
where t1.dept=t2.code(+) order by t1.num desc
""",
'药敏结果为耐药': f""" select '药敏结果为耐药' as 业务类型,t1.dept as 科室,t2.label as 科室名称,t1.num from
(select dept,count(1) as num from DRUGSUSCEPTIBILITY where SUSCEPTIBILITY like '%耐药%'
and substr(REQUESTTIME,1,7)>='{btime}' and substr(REQUESTTIME,1,7)<='{etime}' and dept is not null
group by dept) t1,s_departments t2
where t1.dept=t2.code(+) order by t1.num desc
"""
}
for bus in bus_dic:
temp = pd.read_sql(bus_dic[bus],con=engine)
temp = temp[0:8]
res_数据关键字缺失及汇总 = res_数据关键字缺失及汇总.append(temp)
return res_数据关键字缺失及汇总
# Update level-1 figure 2
@app.callback(
Output('anti_bar_drug_first_level_second_fig','figure'),
Output('anti_bar_drug_first_level_second_fig_data','data'),
# Output('rank_month_choice','min'),
# Output('rank_month_choice','max'),
# Output('rank_month_choice','value'),
# Output('rank_month_choice','marks'),
Input('anti_bar_drug_first_level_second_fig_data','data'),
# Input('rank_month_choice','value'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# Input('rank_month_choice','marks'),
# prevent_initial_call=True
)
# def update_first_level_second_fig(anti_bar_drug_first_level_second_fig_data,rank_month_choice,db_con_url,count_time,marks):
def update_first_level_second_fig(anti_bar_drug_first_level_second_fig_data,db_con_url,count_time):
# def unixTimeMillis(dt):
# return int(time.mktime(dt.timetuple()))
#
# def unixToDatetime(unix):
# return pd.to_datetime(unix, unit='s')
#
# def getMarks(start, end, Nth=100):
# result = {}
# for i, date in enumerate(daterange):
# result[unixTimeMillis(date)] = str(date.strftime('%Y-%m'))
if db_con_url is None :
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
min = dash.no_update
max = dash.no_update
value = dash.no_update
marks = dash.no_update
if anti_bar_drug_first_level_second_fig_data is None:
anti_bar_drug_first_level_second_fig_data = {}
first_level_second_fig_data = get_first_lev_second_fig_date(engine,btime,etime)
anti_bar_drug_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
anti_bar_drug_first_level_second_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_second_fig_data['btime'] = btime
anti_bar_drug_first_level_second_fig_data['etime'] = etime
anti_bar_drug_first_level_second_fig_data = json.dumps(anti_bar_drug_first_level_second_fig_data)
# end_date = datetime(int(etime[0:4]), int(etime[5:7]), 1)
# start_date = datetime(int(btime[0:4]), int(btime[5:7]), 1)
# daterange = pd.date_range(start=btime+'-01', periods=((end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)), freq='M')
# min = unixTimeMillis(daterange.min())
# max = unixTimeMillis(daterange.max())
# value = [unixTimeMillis(daterange.min()), unixTimeMillis(daterange.max())]
# marks = getMarks(daterange.min(), daterange.max())
else:
anti_bar_drug_first_level_second_fig_data = json.loads(anti_bar_drug_first_level_second_fig_data)
if db_con_url['hosname'] != anti_bar_drug_first_level_second_fig_data['hosname']:
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
anti_bar_drug_first_level_second_fig_data['first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split',date_format='iso')
anti_bar_drug_first_level_second_fig_data['hosname'] = db_con_url['hosname']
anti_bar_drug_first_level_second_fig_data['btime'] = btime
anti_bar_drug_first_level_second_fig_data['etime'] = etime
anti_bar_drug_first_level_second_fig_data = json.dumps( anti_bar_drug_first_level_second_fig_data)
# end_date = datetime(int(etime[0:4]), int(etime[5:7]), 1)
# start_date = datetime(int(btime[0:4]), int(btime[5:7]), 1)
# daterange = pd.date_range(start=btime + '-01', periods=( (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)), freq='M')
# min = unixTimeMillis(daterange.min())
# max = unixTimeMillis(daterange.max())
# value = [unixTimeMillis(daterange.min()), unixTimeMillis(daterange.max())]
# print(value)
# marks = getMarks(daterange.min(), daterange.max())
else:
if anti_bar_drug_first_level_second_fig_data['btime'] != btime or anti_bar_drug_first_level_second_fig_data['etime'] != etime:
# if rank_month_choice is not None and len(rank_month_choice)>0:
# print(rank_month_choice)
# btime1 = time.gmtime(rank_month_choice[0])
# etime1 = time.gmtime(rank_month_choice[1])
# btime = f"{btime1.tm_year}-0{btime1.tm_mon}" if btime1.tm_mon<10 else f"{btime1.tm_year}-{btime1.tm_mon}"
# etime = f"{etime1.tm_year}-0{etime1.tm_mon}" if etime1.tm_mon<10 else f"{etime1.tm_year}-{etime1.tm_mon}"
# print(btime,etime)
first_level_second_fig_data = get_first_lev_second_fig_date(engine, btime, etime)
anti_bar_drug_first_level_second_fig_data[ 'first_level_second_fig_data'] = first_level_second_fig_data.to_json(orient='split', date_format='iso')
anti_bar_drug_first_level_second_fig_data['btime'] = btime
anti_bar_drug_first_level_second_fig_data['etime'] = etime
anti_bar_drug_first_level_second_fig_data = json.dumps(anti_bar_drug_first_level_second_fig_data)
else:
first_level_second_fig_data = pd.read_json(anti_bar_drug_first_level_second_fig_data['first_level_second_fig_data'], orient='split')
anti_bar_drug_first_level_second_fig_data = dash.no_update
# print("一级第二张图数据:")
# print(rank_month_choice)
# print(marks)
bar = first_level_second_fig_data[first_level_second_fig_data['业务类型']=='8种耐药菌检出']
anti = first_level_second_fig_data[first_level_second_fig_data['业务类型']=='限制级特殊级抗菌药物使用']
drug = first_level_second_fig_data[first_level_second_fig_data['业务类型']=='药敏结果为耐药']
bar = bar.sort_values(['num'], ascending=True)
anti = anti.sort_values(['num'], ascending=True)
drug = drug.sort_values(['num'], ascending=True)
fig = make_subplots(rows=1,cols=3)
fig.add_trace(
go.Bar(x=anti['num'], y=anti['科室名称'], orientation='h', name='给药', marker_color=px.colors.qualitative.Dark24[0]),
row=1, col=1
)
fig.add_trace(
go.Bar(x=drug['num'], y=drug['科室名称'], orientation='h', name='药敏',
marker_color=px.colors.qualitative.Dark24[1]),
row=1, col=2,
)
fig.add_trace(
go.Bar(x=bar['num'],y=bar['科室名称'],orientation='h',name='菌检出', marker_color=px.colors.qualitative.Dark24[2]),
row=1,col=3
)
# Use a horizontal legend positioned above the plot
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
))
return fig,anti_bar_drug_first_level_second_fig_data
# return fig,anti_bar_drug_first_level_second_fig_data,min ,max ,value ,marks
# # ----------------------------------------------------------------------------------------------------- Level-2 Figure 1 ----------------------------------------------------------------------------------------------------------------------
# Fetch the data for the first level-2 antibiotics figure
def get_second_lev_first_fig_date(engine,btime,etime):
res_数据科室信息缺失及汇总 = pd.DataFrame(columns=['业务类型', 'num', 'month' ])
bus_dic = {'用药目的': f" select '用药目的缺失' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') group by substr(BEGINTIME,1,7) ",
'药物等级': f" select '药物等级缺失' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS t1 where (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') group by substr(BEGINTIME,1,7) ",
'医嘱开始时间大于结束时间': f" select '医嘱开始时间大于结束时间' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS t1 where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') and BEGINTIME is not null and ENDTIME is not null and BEGINTIME>endtime group by substr(BEGINTIME,1,7) ",
'医嘱时间在出入院时间之外' : f""" select '医嘱时间在出入院时间之外' as 业务类型,count(1) as num ,substr(BEGINTIME,1,7) as month from ANTIBIOTICS t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
group by substr(BEGINTIME,1,7)
""",
}
for bus in bus_dic:
res_数据科室信息缺失及汇总 = res_数据科室信息缺失及汇总.append(pd.read_sql(bus_dic[bus],con=engine))
return res_数据科室信息缺失及汇总
# Update level-2 figure 1
@app.callback(
Output('anti_second_level_first_fig','figure'),
Output('anti_second_level_first_fig_data','data'),
Input('anti_second_level_first_fig_data','data'),
Input("db_con_url", "data"),
Input("count_time", "data"),
# prevent_initial_call=True
)
def update_first_level_third_fig(anti_second_level_first_fig_data,db_con_url,count_time):
if db_con_url is None:
return dash.no_update
else:
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
if anti_second_level_first_fig_data is None:
anti_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
anti_second_level_first_fig_data={}
anti_second_level_first_fig_data['anti_second_level_first_fig'] = anti_second_level_first_fig.to_json(orient='split', date_format='iso')
anti_second_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_first_fig_data['btime'] = btime
anti_second_level_first_fig_data['etime'] = etime
anti_second_level_first_fig_data = json.dumps(anti_second_level_first_fig_data)
else:
anti_second_level_first_fig_data = json.loads(anti_second_level_first_fig_data)
if db_con_url['hosname'] != anti_second_level_first_fig_data['hosname']:
anti_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
anti_second_level_first_fig_data['anti_second_level_first_fig'] = anti_second_level_first_fig.to_json(orient='split',date_format='iso')
anti_second_level_first_fig_data['hosname'] = db_con_url['hosname']
anti_second_level_first_fig_data['btime'] = btime
anti_second_level_first_fig_data['etime'] = etime
anti_second_level_first_fig_data = json.dumps(anti_second_level_first_fig_data)
else:
if anti_second_level_first_fig_data['btime'] != btime or anti_second_level_first_fig_data['etime'] != etime:
anti_second_level_first_fig = get_second_lev_first_fig_date(engine, btime, etime)
anti_second_level_first_fig_data['anti_second_level_first_fig'] = anti_second_level_first_fig.to_json(orient='split',date_format='iso')
anti_second_level_first_fig_data['btime'] = btime
anti_second_level_first_fig_data['etime'] = etime
anti_second_level_first_fig_data = json.dumps(anti_second_level_first_fig_data)
else:
anti_second_level_first_fig = pd.read_json(anti_second_level_first_fig_data['anti_second_level_first_fig'], orient='split')
anti_second_level_first_fig_data = dash.no_update
fig_概览一级_科室映射缺失 = go.Figure()
bus_opts = anti_second_level_first_fig[['业务类型']].drop_duplicates().reset_index(drop=True)
# res_数据科室信息缺失及汇总 = anti_second_level_first_fig.sort_values(['month','业务类型'])
print(anti_second_level_first_fig)
for tem,bus in bus_opts.iterrows():
print(tem,)
print(bus,)
temp = anti_second_level_first_fig[anti_second_level_first_fig['业务类型']==bus['业务类型']]
print(temp)
temp = temp.sort_values(['month'])
if temp.shape[0]>0:
fig_概览一级_科室映射缺失.add_trace(
go.Scatter(x=temp['month'], y=temp['num'], name=bus['业务类型'] ,marker_color=px.colors.qualitative.Dark24[tem] )
)
fig_概览一级_科室映射缺失.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
legend=dict(
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1
)
)
fig_概览一级_科室映射缺失.update_yaxes(title_text="问题数量")
fig_概览一级_科室映射缺失.update_xaxes(title_text="月份")
return fig_概览一级_科室映射缺失,anti_second_level_first_fig_data
# Download the detail data behind level-2 figure 1
@app.callback(
Output('anti_second_level_first_fig_date_detail', 'data'),
Input('anti_second_level_first_fig_data_detail_down','n_clicks'),
Input("db_con_url", "data"),
Input("count_time", "data"),
prevent_initial_call=True,
)
def download_first_level_third_fig_data_detail(n_clicks,db_con_url,count_time):
if db_con_url is None :
return dash.no_update
else:
if n_clicks is not None and n_clicks>0:
n_clicks = 0
db_con_url = json.loads(db_con_url)
count_time = json.loads(count_time)
engine = create_engine(db_con_url['db'])
btime = count_time['btime'][0:7]
etime = count_time['etime'][0:7]
bus_dic = {
'用药目的缺失': f" select * from ANTIBIOTICS where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') ",
'药物等级缺失': f" select t1.* from ANTIBIOTICS t1 where (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}') ",
'医嘱开始时间大于结束时间': f" select t1.* from ANTIBIOTICS t1 where (substr(BEGINTIME,1,7)>='{btime}' and substr(BEGINTIME,1,7)<='{etime}') and BEGINTIME is not null and ENDTIME is not null and BEGINTIME>endtime ",
'医嘱时间在出入院时间之外': f""" select t1.* from ANTIBIOTICS t1,overall t2 where
( t1.BEGINTIME is not null and t1.ENDTIME is not null and t2.in_time is not null and t2.out_time is not null)
and t1.caseid = t2.caseid
and (t1.BEGINTIME<t2.IN_TIME or t1.BEGINTIME > t2.OUT_TIME or t1.ENDTIME<t2.IN_TIME or t1.ENDTIME > t2.OUT_TIME )
                                        and (substr(t1.BEGINTIME,1,7)>='{btime}' and substr(t1.BEGINTIME,1,7)<='{etime}')
""",
}
output = io.BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
for key in bus_dic.keys():
try:
temp =
|
pd.read_sql(bus_dic[key], con=engine)
|
pandas.read_sql
|
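# A minimal, self-contained sketch of the dcc.Store caching pattern the callbacks above rely on:
# serialize a DataFrame into the store as JSON with to_json(orient='split'), keep a few cache keys
# (hospital name, time window) alongside it, and rehydrate later with pd.read_json. All names and
# values here are illustrative, not taken from the snippet.
import json
import pandas as pd

df = pd.DataFrame({'month': ['2021-01', '2021-02'], 'num': [10, 20]})
cache = {
    'data': df.to_json(orient='split', date_format='iso'),  # JSON-safe payload for the store
    'hosname': 'demo-hospital',                              # extra keys used to decide cache validity
    'btime': '2021-01',
    'etime': '2021-02',
}
payload = json.dumps(cache)                                  # what the dcc.Store would hold
restored = json.loads(payload)
df_back = pd.read_json(restored['data'], orient='split')     # the same frame, rebuilt from the store
print(df_back)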
import requests
from bs4 import BeautifulSoup
import pandas as pd
from difflib import SequenceMatcher
desired_width = 320
pd.set_option('display.width', desired_width)
|
pd.set_option('display.max_columns', 10)
|
pandas.set_option
|
from flask import Flask, request, jsonify
import pandas as pd
import pickle
app = Flask(__name__)
pkl_model = pickle.load(open('model.pkl', 'rb'))
@app.route('/predict',methods=['POST'])
def predict():
data = pd.DataFrame(request.json)
index = data.index
prediction = pkl_model.predict(data)
return
|
pd.Series(prediction,name='target',index=index)
|
pandas.Series
|
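# The snippet above is truncated at its return statement; as a hedged illustration only, one way to
# turn a pandas Series of predictions into a JSON response body is Series.to_json(), which keeps the
# original index as keys. The names below are placeholders, not the snippet's actual implementation.
import pandas as pd

def predictions_to_json(prediction, index):
    # keeping the caller's index preserves row identity in the JSON payload
    return pd.Series(prediction, name='target', index=index).to_json()

print(predictions_to_json([0, 1, 1], index=[10, 11, 12]))  # {"10":0,"11":1,"12":1}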
import json
from datetime import date
import boto3
import pandas as pd
from pandas.io.json import json_normalize
#date = date.today()
def aggregate(date):
s3_client = boto3.client('s3')
merged = pd.DataFrame()
object_list = []
s3_objects = s3_client.list_objects_v2(Bucket='sdd-s3-basebucket',
Prefix='airquality/{}/{}/{}/'.format(str(date.year).zfill(4),
str(date.month).zfill(2),
str(date.day).zfill(2)))
if 'Contents' not in s3_objects:
return []
print("Found " + str(len(s3_objects['Contents'])) + " elements")
for key in s3_objects['Contents']:
airqualityObject = s3_client.get_object(Bucket='sdd-s3-basebucket', Key=key['Key'])
object_body = str(airqualityObject["Body"].read(), 'utf-8')
airquality_json = json_normalize(json.loads(object_body))
object_list.append(pd.DataFrame(airquality_json))
merged = pd.concat(object_list)
merged['airquality.aqi'] =
|
pd.to_numeric(merged['airquality.aqi'], errors='coerce')
|
pandas.to_numeric
|
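# A small standalone example of the pd.to_numeric(errors='coerce') call shown above: non-numeric
# AQI strings become NaN instead of raising, so they can be dropped or filled explicitly later.
# The values are made up.
import pandas as pd

merged = pd.DataFrame({'airquality.aqi': ['42', '17', 'n/a', None]})
merged['airquality.aqi'] = pd.to_numeric(merged['airquality.aqi'], errors='coerce')
print(merged['airquality.aqi'].tolist())  # [42.0, 17.0, nan, nan]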
import pandas as pd
import pytest
from rdtools.normalization import normalize_with_expected_power
from pandas import Timestamp
import numpy as np
@pytest.fixture()
def times_15():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='15T')
@pytest.fixture()
def times_30():
return pd.date_range(start='20200101 12:00', end='20200101 13:00', freq='30T')
@pytest.fixture()
def pv_15(times_15):
return pd.Series([1.0, 2.5, 3.0, 2.2, 2.1], index=times_15)
@pytest.fixture()
def expected_15(times_15):
return pd.Series([1.2, 2.3, 2.8, 2.1, 2.0], index=times_15)
@pytest.fixture()
def irradiance_15(times_15):
return pd.Series([1000.0, 850.0, 950.0, 975.0, 890.0], index=times_15)
@pytest.fixture()
def pv_30(times_30):
return pd.Series([1.0, 3.0, 2.1], index=times_30)
@pytest.fixture()
def expected_30(times_30):
return pd.Series([1.2, 2.8, 2.0], index=times_30)
@pytest.fixture()
def irradiance_30(times_30):
return pd.Series([1000.0, 950.0, 890.0], index=times_30)
def test_normalize_with_expected_power_uniform_frequency(pv_15, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_15, expected_15, irradiance_15)
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:15:00', freq='15T'): 1.0,
Timestamp('2020-01-01 12:30:00', freq='15T'): 1.0784313725490198,
Timestamp('2020-01-01 12:45:00', freq='15T'): 1.0612244897959184,
Timestamp('2020-01-01 13:00:00', freq='15T'): 1.0487804878048783}
)
expected_norm.name = 'energy_Wh'
expected_norm.index.freq = '15T'
expected_insol = pd.Series(
{Timestamp('2020-01-01 12:15:00', freq='15T'): 231.25,
Timestamp('2020-01-01 12:30:00', freq='15T'): 225.0,
Timestamp('2020-01-01 12:45:00', freq='15T'): 240.625,
Timestamp('2020-01-01 13:00:00', freq='15T'): 233.125}
)
expected_insol.name = 'energy_Wh'
expected_insol.index.freq = '15T'
pd.testing.assert_series_equal(norm, expected_norm)
pd.testing.assert_series_equal(insol, expected_insol)
def test_normalize_with_expected_power_energy_option(pv_15, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_15, expected_15, irradiance_15, pv_input='energy')
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:00:00', freq='15T'): np.nan,
Timestamp('2020-01-01 12:15:00', freq='15T'): 5.714285714285714,
Timestamp('2020-01-01 12:30:00', freq='15T'): 4.705882352941177,
Timestamp('2020-01-01 12:45:00', freq='15T'): 3.5918367346938775,
Timestamp('2020-01-01 13:00:00', freq='15T'): 4.097560975609756}
)
expected_norm.name = 'energy_Wh'
expected_norm.index.freq = '15T'
expected_insol = pd.Series(
{Timestamp('2020-01-01 12:00:00', freq='15T'): np.nan,
Timestamp('2020-01-01 12:15:00', freq='15T'): 231.25,
Timestamp('2020-01-01 12:30:00', freq='15T'): 225.0,
Timestamp('2020-01-01 12:45:00', freq='15T'): 240.625,
Timestamp('2020-01-01 13:00:00', freq='15T'): 233.125}
)
expected_insol.name = 'energy_Wh'
expected_insol.index.freq = '15T'
pd.testing.assert_series_equal(norm, expected_norm)
pd.testing.assert_series_equal(insol, expected_insol)
def test_normalize_with_expected_power_low_freq_pv(pv_30, expected_15, irradiance_15):
norm, insol = normalize_with_expected_power(
pv_30, expected_15, irradiance_15)
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:30:00', freq='30T'): 0.9302325581395349,
Timestamp('2020-01-01 13:00:00', freq='30T'): 1.1333333333333333}
)
expected_norm.name = 'energy_Wh'
expected_norm.index.freq = '30T'
expected_insol = pd.Series(
{Timestamp('2020-01-01 12:30:00', freq='30T'): 456.25,
Timestamp('2020-01-01 13:00:00', freq='30T'): 473.75}
)
expected_insol.name = 'energy_Wh'
expected_insol.index.freq = '30T'
pd.testing.assert_series_equal(norm, expected_norm)
pd.testing.assert_series_equal(insol, expected_insol)
def test_normalized_with_expected_power_low_freq_expected(pv_15, expected_30, irradiance_30):
norm, insol = normalize_with_expected_power(
pv_15, expected_30, irradiance_30)
expected_norm = pd.Series(
{Timestamp('2020-01-01 12:15:00', freq='15T'): 1.09375,
|
Timestamp('2020-01-01 12:30:00', freq='15T')
|
pandas.Timestamp
|
import pandas as pd
import numpy as np
import h5py, os, json, sys, shutil
from uuid import uuid4
from pythologist_image_utilities import map_image_ids
from pythologist_reader.qc import QC
from pythologist import CellDataFrame
"""
These are classes to help deal with cell-level image data
"""
class CellFrameGeneric(object):
"""
A generic CellFrameData object
"""
def __init__(self):
self._processed_image_id = None
self._images = {} # Database of Images
self._id = uuid4().hex
self.frame_name = None
self.data_tables = {
'cells':{'index':'cell_index',
'columns':['x','y','phenotype_index',
'region_index']},
'cell_tags':{'index':'db_id',
'columns':['tag_index','cell_index']},
'cell_measurements':{'index':'measurement_index',
'columns':['cell_index','statistic_index','feature_index','channel_index','value']},
'measurement_features':{'index':'feature_index',
'columns':['feature_label']},
'measurement_channels':{'index':'channel_index',
'columns':['channel_label','channel_abbreviation','image_id']},
'measurement_statistics':{'index':'statistic_index',
'columns':['statistic_label']},
'phenotypes':{'index':'phenotype_index',
'columns':['phenotype_label']},
'segmentation_images':{'index':'db_id',
'columns':['segmentation_label','image_id']},
'regions':{'index':'region_index',
'columns':['region_label','region_size','image_id']},
'cell_interactions':{'index':'db_id',
'columns':['cell_index','neighbor_cell_index','pixel_count','touch_distance']},
'tags':{'index':'tag_index',
'columns':['tag_label']}
}
self._data = {} # Do not access directly. Use set_data and get_data to access.
for x in self.data_tables.keys():
self._data[x] = pd.DataFrame(columns=self.data_tables[x]['columns'])
self._data[x].index.name = self.data_tables[x]['index']
@property
def id(self):
"""
Returns the project UUID4
"""
return self._id
@property
def shape(self):
"""
Returns the (tuple) shape of the image (rows,columns)
"""
return self.processed_image.shape
@property
def processed_image_id(self):
"""
Returns (str) id of the frame object
"""
return self._processed_image_id
@property
def processed_image(self):
"""
Returns (numpy.array) of the processed_image
"""
return self._images[self._processed_image_id].copy()
def set_processed_image_id(self,image_id):
"""
Args:
image_id (str): set the id of the frame object
"""
self._processed_image_id = image_id
@property
def table_names(self):
"""
Return a list of data table names
"""
return list(self.data_tables.keys())
def set_data(self,table_name,table):
"""
Set the data table
Args:
table_name (str): the table name
table (pd.DataFrame): the input table
"""
# Assign data to the standard tables. Do some column name checking to make sure we are getting what we expect
if table_name not in self.data_tables: raise ValueError("Error table name doesn't exist in defined formats")
if set(list(table.columns)) != set(self.data_tables[table_name]['columns']): raise ValueError("Error column names don't match defined format\n"+\
str(list(table.columns))+"\n"+\
str(self.data_tables[table_name]['columns']))
if table.index.name != self.data_tables[table_name]['index']: raise ValueError("Error index name doesn't match defined format")
self._data[table_name] = table.loc[:,self.data_tables[table_name]['columns']].copy() # Auto-sort, and assign a copy so we aren't ever assigning by reference
def set_regions(self,regions,use_processed_region=True,unset_label='undefined',verbose=False):
"""
Alter the regions in the frame
Args:
regions (dict): a dictionary of mutually exclusive region labels and binary masks
if the provided regions do not cover the whole workable area, the uncovered
area is assigned to 'unset_label' as an additional region
use_processed_region (bool): default True keep the processed region subtracted
unset_label (str): name of unset regions default (undefined)
"""
# delete our current regions
regions = regions.copy()
image_ids = list(self.get_data('mask_images')['image_id'])
image_ids = [x for x in image_ids if x != self.processed_image_id]
for image_id in image_ids: del self._images[image_id]
labels = list(regions.keys())
ids = [uuid4().hex for x in labels]
sizes = [regions[x].sum() for x in labels]
remainder = np.ones(self.processed_image.shape)
if use_processed_region: remainder = self.processed_image
for i,label in enumerate(labels):
my_image = regions[label]
if use_processed_region: my_image = my_image&self.processed_image
self._images[ids[i]] = my_image
remainder = remainder & (~my_image)
if verbose: sys.stderr.write("Remaining areas after setting are "+str(remainder.sum().sum())+"\n")
if remainder.sum().sum() > 0:
labels += [unset_label]
sizes += [remainder.sum().sum()]
ids += [uuid4().hex]
self._images[ids[-1]] = remainder
regions[unset_label] = remainder
regions2 = pd.DataFrame({'region_label':labels,
'region_size':sizes,
'image_id':ids
})
regions2.index.name = 'region_index'
self.set_data('regions',regions2)
def get_label(x,y,regions_dict):
for label in regions_dict:
if regions_dict[label][y][x] == 1: return label
return np.nan
recode = self.get_data('cells').copy()
recode['new_region_label'] = recode.apply(lambda x: get_label(x['x'],x['y'],regions),1)
## see how many we need to drop because the centroid fall in an unprocessed region
if verbose: sys.stderr.write(str(recode.loc[recode['new_region_label'].isna()].shape[0])+" cells with centroids beyond the processed region are being dropped\n")
recode = recode.loc[~recode['new_region_label'].isna()].copy()
recode = recode.drop(columns='region_index').reset_index().\
merge(regions2[['region_label']].reset_index(),
left_on='new_region_label',right_on='region_label').\
drop(columns=['region_label','new_region_label']).set_index('cell_index')
self.set_data('cells',recode)
return
def get_data(self,table_name):
"""
Get the data table
Args:
table_name (str): the name of the table to retrieve
"""
return self._data[table_name].copy()
def read_hdf(self,h5file,location=''):
if location != '': location = location.split('/')
else: location = []
f = h5py.File(h5file,'r')
subgroup = f
for x in location:
subgroup = subgroup[x]
table_names = [x for x in subgroup['data']]
for table_name in table_names:
loc = '/'.join(location+['data',table_name])
#print(loc)
self.set_data(table_name,pd.read_hdf(h5file,loc))
# now get images
image_names = [x for x in subgroup['images']]
for image_name in image_names:
self._images[image_name] = np.array(subgroup['images'][image_name])
self.frame_name = subgroup['meta'].attrs['frame_name']
self._id = subgroup['meta'].attrs['id']
self.set_processed_image_id(subgroup['meta'].attrs['processed_image_id'])
return
def to_hdf(self,h5file,location='',mode='w'):
f = h5py.File(h5file,mode)
f.create_group(location+'/data')
f.create_group(location+'/images')
#f.create_group(location+'/meta')
f.close()
for table_name in self.data_tables.keys():
data_table = self.get_data(table_name)
data_table.to_hdf(h5file,
location+'/data/'+table_name,
mode='a',
format='table',
complib='zlib',
complevel=9)
f = h5py.File(h5file,'a')
for image_id in self._images.keys():
f.create_dataset(location+'/images/'+image_id,data=self._images[image_id],compression='gzip',compression_opts=9)
dset = f.create_dataset(location+'/meta', (100,), dtype=h5py.special_dtype(vlen=str))
dset.attrs['frame_name'] = self.frame_name
dset.attrs['processed_image_id'] = self.processed_image_id
dset.attrs['id'] = self._id
f.close()
def cell_map(self):
"""
Return a dataframe of cell ID's and locations
"""
if 'cell_map' not in list(self.get_data('segmentation_images')['segmentation_label']): return None
cmid = self.get_data('segmentation_images').set_index('segmentation_label').loc['cell_map','image_id']
return map_image_ids(self.get_image(cmid)).rename(columns={'id':'cell_index'})
def cell_map_image(self):
"""
Return the image of cells by ID
"""
if 'cell_map' not in list(self.get_data('segmentation_images')['segmentation_label']): return None
cmid = self.get_data('segmentation_images').set_index('segmentation_label').loc['cell_map','image_id']
return self.get_image(cmid)
def edge_map(self):
"""
Return a dataframe of cells by ID's of coordinates only on the edge of the cells
"""
if 'edge_map' not in list(self.get_data('segmentation_images')['segmentation_label']): return None
cmid = self.get_data('segmentation_images').set_index('segmentation_label').loc['edge_map','image_id']
return map_image_ids(self.get_image(cmid)).\
rename(columns={'id':'cell_index'})
def edge_map_image(self):
"""
Return an image of edges of integers by ID
"""
if 'edge_map' not in list(self.get_data('segmentation_images')['segmentation_label']): return None
cmid = self.get_data('segmentation_images').set_index('segmentation_label').loc['edge_map','image_id']
return self.get_image(cmid)
def segmentation_info(self):
"""
Return a dataframe with info about segmentation like cell areas and circumferences
"""
# handle the case where there is no edge data
if self.edge_map() is None:
return pd.DataFrame(index=self.get_data('cells').index,columns=['edge_pixels','area_pixels'])
return self.edge_map().reset_index().groupby(['cell_index']).count()[['x']].rename(columns={'x':'edge_pixels'}).\
merge(self.cell_map().reset_index().groupby(['cell_index']).count()[['x']].rename(columns={'x':'area_pixels'}),
left_index=True,
right_index=True).reset_index().set_index('cell_index')
def interaction_map(self):
"""
Returns:
pandas.DataFrame: return a dataframe of which cells are in contact with one another
"""
return self.get_data('cell_interactions')
def set_interaction_map(self,touch_distance=1):
"""
Measure the cell-cell contact interactions
Args:
touch_distance (int): optional default is 1 distance to look away from a cell for another cell
"""
full = self.cell_map()
edge = self.edge_map()
if full is None or edge is None: return None
d1 = edge.reset_index()
d1['key'] = 1
d2 = pd.DataFrame({'mod':[-1*touch_distance,0,touch_distance]})
d2['key'] = 1
d3 = d1.merge(d2,on='key').merge(d2,on='key')
d3['x'] = d3['x'].add(d3['mod_x'])
d3['y'] = d3['y'].add(d3['mod_y'])
d3 = d3[['x','y','cell_index','key']].rename(columns={'cell_index':'neighbor_cell_index'})
im = full.reset_index().merge(d3,on=['x','y']).\
query('cell_index!=neighbor_cell_index').\
drop_duplicates().groupby(['cell_index','neighbor_cell_index']).count()[['key']].reset_index().\
rename(columns={'key':'pixel_count'})
im['touch_distance'] = touch_distance
im.index.name='db_id'
self.set_data('cell_interactions',im)
@property
def thresholds(self):
raise ValueError('Override this to use it.')
def get_channels(self,all=False):
"""
Return a dataframe of the Channels
Args:
all (bool): default False; if True, also include excluded channels (like autofluorescence)
Returns:
pandas.DataFrame: channel information
"""
if all: return self.get_data('measurement_channels')
d = self.get_data('measurement_channels')
return d.loc[~d['channel_label'].isin(self.excluded_channels)]
def get_regions(self):
return self.get_data('regions')
def get_labeled_raw(self,feature_label,statistic_label,all=False,channel_abbreviation=True):
"""
Like get raw but add frame labels
"""
df = self.get_raw(feature_label,statistic_label,all=all,channel_abbreviation=channel_abbreviation).reset_index()
df['frame_name'] = self.frame_name
df['frame_id'] = self.id
return df.set_index(['frame_name','frame_id','cell_index'])
def get_raw(self,feature_label,statistic_label,all=False,channel_abbreviation=True):
"""
Get the raw data
Args:
feature_label (str): name of the feature
statistic_label (str): name of the statistic to extract
all (bool): default False if True put out everything including excluded channels
channel_abbreviation (bool): default True means use the abbreviations if available
Returns:
pandas.DataFrame: the dataframe
"""
stats = self.get_data('measurement_statistics').reset_index()
stats = stats.loc[stats['statistic_label']==statistic_label,'statistic_index'].iloc[0]
feat = self.get_data('measurement_features').reset_index()
feat = feat.loc[feat['feature_label']==feature_label,'feature_index'].iloc[0]
#region = self.get_data('regions').reset_index()
#region = region.loc[region['region_label']==region_label,'region_index'].iloc[0]
measure = self.get_data('cell_measurements')
measure = measure.loc[(measure['statistic_index']==stats)&(measure['feature_index']==feat)]
channels = self.get_data('measurement_channels')
if not all: channels = channels.loc[~channels['channel_label'].isin(self.excluded_channels)]
measure = measure.merge(channels,left_on='channel_index',right_index=True)
measure = measure.reset_index().pivot(index='cell_index',columns='channel_label',values='value')
if not channel_abbreviation: return measure
temp = dict(zip(self.get_data('measurement_channels')['channel_label'],
self.get_data('measurement_channels')['channel_abbreviation']))
return measure.rename(columns=temp)
def default_raw(self):
# override this
return None
def copy(self):
mytype = type(self)
them = mytype()
for x in self.data_tables.keys():
them._data[x] = self._data[x].copy()
return them
@property
def excluded_channels(self):
raise ValueError("Must be overridden")
def binary_calls(self):
"""
Return all the binary feature calls (alias)
"""
return self.phenotype_calls()
def phenotype_calls(self):
"""
Return all the binary feature calls
"""
phenotypes = self.get_data('phenotypes')['phenotype_label'].dropna().tolist()
temp = pd.DataFrame(index=self.get_data('cells').index,columns=phenotypes)
temp = temp.fillna(0)
temp = temp.merge(self.cell_df()[['phenotype_label']],left_index=True,right_index=True)
for phenotype in phenotypes:
temp.loc[temp['phenotype_label']==phenotype,phenotype]=1
return temp.drop(columns='phenotype_label').astype(np.int8)
def scored_calls(self):
# Must be overridden
return None
@property
def cdf(self):
"""
Return the pythologist.CellDataFrame of the frame
"""
# get our region sizes
region_count = self.get_data('regions').groupby('region_label').count()['region_size']
if region_count[region_count>1].shape[0]>0: raise ValueError("duplicate region labels not supported") # add a safety check
region_sizes = self.get_data('regions').set_index('region_label')['region_size'].astype(int).to_dict()
# get our cells
temp1 = self.get_data('cells').drop(columns='phenotype_index').\
merge(self.get_data('regions'),
left_on='region_index',
right_index=True).drop(columns=['image_id','region_index','region_size'])
temp1['regions'] = temp1.apply(lambda x: region_sizes,1)
temp2 = self.scored_calls()
if temp2 is not None:
temp2 = temp2.apply(lambda x:
dict(zip(
list(x.index),
list(x)
))
,1).reset_index().rename(columns={0:'scored_calls'}).set_index('cell_index')
temp1 = temp1.merge(temp2,left_index=True,right_index=True)
else:
temp1['scored_calls'] = temp1.apply(lambda x: {},1)
temp3 = self.phenotype_calls().apply(lambda x:
dict(zip(
list(x.index),
list(x)
))
,1).reset_index().rename(columns={0:'phenotype_calls'}).set_index('cell_index')
temp1 = temp1.merge(temp3,left_index=True,right_index=True)
#temp1['phenotypes_present'] = json.dumps(list(
# sorted([x for x in self.get_data('phenotypes')['phenotype_label'] if x is not np.nan])
# ))
temp4 = None
# extract default values only if we have whole cell
#if "Whole Cell" in self.get_data('measurement_features')['feature_label'].tolist():
temp4 = self.default_raw()
if temp4 is not None:
temp4 = temp4.apply(lambda x:
dict(zip(
list(x.index),
list(x)
))
,1).reset_index().rename(columns={0:'channel_values'}).set_index('cell_index')
temp1 = temp1.merge(temp4,left_index=True,right_index=True)
else:
temp1['channel_values'] = np.nan
#temp5 = self.interaction_map().groupby('cell_index').\
# apply(lambda x: json.dumps(list(sorted(x['neighbor_cell_index'])))).reset_index().\
# rename(columns={0:'neighbor_cell_index'}).set_index('cell_index')
# Get neighbor data .. may not be available for all cells
# Set a default of a null frame and only try and set if there are some neighbors present
neighbors = pd.DataFrame(index=self.get_data('cells').index,columns=['neighbors'])
if self.interaction_map().shape[0] > 0:
neighbors = self.interaction_map().groupby('cell_index').\
apply(lambda x:
dict(zip(
x['neighbor_cell_index'].astype(int),x['pixel_count'].astype(int)
))
).reset_index().rename(columns={0:'neighbors'}).set_index('cell_index')
# only do edges if we have them by setting a null value for default
edge_length = pd.DataFrame(index=self.get_data('cells').index,columns=['edge_length'])
if self.edge_map() is not None:
edge_length = self.edge_map().reset_index().groupby('cell_index').count()[['x']].\
rename(columns={'x':'edge_length'})
edge_length['edge_length'] = edge_length['edge_length'].astype(int)
cell_area = pd.DataFrame(index=self.get_data('cells').index,columns=['cell_area'])
if self.cell_map() is not None:
cell_area = self.cell_map().reset_index().groupby('cell_index').count()[['x']].\
rename(columns={'x':'cell_area'})
cell_area['cell_area'] = cell_area['cell_area'].astype(int)
temp5 = cell_area.merge(edge_length,left_index=True,right_index=True).merge(neighbors,left_index=True,right_index=True,how='left')
temp5.loc[temp5['neighbors'].isna(),'neighbors'] = temp5.loc[temp5['neighbors'].isna(),'neighbors'].apply(lambda x: {}) # these are ones we actually have measured
temp1 = temp1.merge(temp5,left_index=True,right_index=True,how='left')
temp1.loc[temp1['neighbors'].isna(),'neighbors'] = np.nan # These we were not able to measure
temp1['frame_name'] = self.frame_name
temp1['frame_id'] = self.id
temp1 = temp1.reset_index()
temp1 = temp1.sort_values('cell_index').reset_index(drop=True)
temp1['sample_name'] = 'undefined'
temp1['project_name'] = 'undefined'
temp1['sample_id'] = 'undefined'
temp1['project_id'] = 'undefined'
def _get_phenotype(d):
if d!=d: return np.nan # set to null if there is nothing in phenotype calls
vals = [k for k,v in d.items() if v == 1]
return np.nan if len(vals) == 0 else vals[0]
temp1['phenotype_label'] = temp1.apply(lambda x:
_get_phenotype(x['phenotype_calls'])
,1)
# Let's tack on the image shape
temp1['frame_shape'] = temp1.apply(lambda x: self.shape,1)
return CellDataFrame(temp1)
def binary_df(self):
temp1 = self.phenotype_calls().stack().reset_index().\
rename(columns={'level_1':'binary_phenotype',0:'score'})
temp1.loc[temp1['score']==1,'score'] = '+'
temp1.loc[temp1['score']==0,'score'] = '-'
temp1['gated'] = 0
temp1.index.name = 'db_id'
return temp1
def cell_df(self):
celldf = self.get_data('cells').\
merge(self.get_data('regions').rename(columns={'image_id':'region_image_id'}),
left_on='region_index',
right_index=True).\
merge(self.get_data('phenotypes'),left_on='phenotype_index',right_index=True).\
merge(self.segmentation_info(),left_index=True,right_index=True,how='left')
return celldf.drop(columns=['phenotype_index','region_index'])
def complete_df(self):
# a dataframe for every cell that has everything
return
def get_image(self,image_id):
"""
Args:
image_id (str): get the image by this id
Returns:
numpy.array: an image representing a 2d array
"""
return self._images[image_id].copy()
class CellSampleGeneric(object):
def __init__(self):
self._frames = {}
self._key = None
self._id = uuid4().hex
self.sample_name = np.nan
return
@property
def id(self):
"""
Return the UUID4 str
"""
return self._id
def create_cell_frame_class(self):
return CellFrameGeneric()
@property
def frame_ids(self):
"""
Return the list of frame IDs
"""
return sorted(list(self._frames.keys()))
@property
def key(self):
"""
Return a pandas.DataFrame of info about the sample
"""
return self._key
def get_frame(self,frame_id):
"""
Args:
frame_id (str): the ID of the frame you want to access
Returns:
CellFrameGeneric: the cell frame
"""
return self._frames[frame_id]
@property
def cdf(self):
"""
Return the pythologist.CellDataFrame of the sample
"""
output = []
for frame_id in self.frame_ids:
temp = self.get_frame(frame_id).cdf
temp['sample_name'] = self.sample_name
temp['sample_id'] = self.id
output.append(temp)
output = pd.concat(output).reset_index(drop=True)
output.index.name = 'db_id'
output['project_name'] = 'undefined'
output['project_id'] = 'undefined'
return CellDataFrame(pd.DataFrame(output))
def to_hdf(self,h5file,location='',mode='w'):
#print(mode)
f = h5py.File(h5file,mode)
#f.create_group(location+'/meta')
#f.create_dataset(location+'/meta/id',data=self.id)
#f.create_dataset(location+'/meta/sample_name',data=self.sample_name)
if location+'/meta' in f:
del f[location+'/meta']
dset = f.create_dataset(location+'/meta', (100,), dtype=h5py.special_dtype(vlen=str))
dset.attrs['sample_name'] = self.sample_name
dset.attrs['id'] = self._id
if location+'/frames' in f:
del f[location+'/frames']
f.create_group(location+'/frames')
f.close()
for frame_id in self.frame_ids:
frame = self._frames[frame_id]
frame.to_hdf(h5file,
location+'/frames/'+frame_id,
mode='a')
self._key.to_hdf(h5file,location+'/info',mode='r+',format='table',complib='zlib',complevel=9)
def read_hdf(self,h5file,location=''):
if location != '': location = location.split('/')
else: location = []
f = h5py.File(h5file,'r')
subgroup = f
for x in location:
subgroup = subgroup[x]
self._id = subgroup['meta'].attrs['id']
self.sample_name = subgroup['meta'].attrs['sample_name']
frame_ids = [x for x in subgroup['frames']]
for frame_id in frame_ids:
cellframe = self.create_cell_frame_class()
loc = '/'.join(location+['frames',frame_id])
#print(loc)
cellframe.read_hdf(h5file,location=loc)
self._frames[frame_id] = cellframe
#self.frame_name = str(subgroup['frames'][frame_id]['meta']['frame_name'])
#self._id = str(subgroup['frames'][frame_id]['meta']['id'])
loc = '/'.join(location+['info'])
#print(loc)
self._key = pd.read_hdf(h5file,loc)
f.close()
return
def cell_df(self):
frames = []
for frame_id in self.frame_ids:
frame = self.get_frame(frame_id).cell_df().reset_index()
key_line = self.key.set_index('frame_id').loc[[frame_id]].reset_index()
key_line['key'] = 1
frame['key'] = 1
frame = key_line.merge(frame,on='key').drop(columns = 'key')
frames.append(frame)
frames =
|
pd.concat(frames)
|
pandas.concat
|
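# A minimal illustration of the key=1 cross-join idiom used above (in set_interaction_map and in
# cell_df): give both frames a constant key, merge on it to pair every row with every row, then
# drop the key. The data below is made up.
import pandas as pd

left = pd.DataFrame({'frame_id': ['f1'], 'sample': ['s1']})
right = pd.DataFrame({'cell_index': [1, 2, 3]})
left['key'] = 1
right['key'] = 1
cross = left.merge(right, on='key').drop(columns='key')  # 1 x 3 -> 3 rows, one per cell
print(cross)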
#!/usr/bin/env python
# coding: utf-8
# In[22]:
import numpy as np
import pandas as pd
from scipy.stats import mannwhitneyu, ttest_ind
from statsmodels.stats.multitest import multipletests
import matplotlib.pyplot as plt
import seaborn as sns
import random
import os
from functools import reduce
sns.set(rc={'figure.figsize':(25,20)})
plt.rcParams.update({'font.size': 22})
plt.rcParams['xtick.labelsize'] = 15
plt.rcParams['ytick.labelsize'] = 15
pd.set_option('display.max_columns',None)
pd.set_option('display.max_colwidth', None)
random.seed(0)
np.random.seed(0)
# helper functions:
def parse_df_from_tree_on(data_dir,on,use_dir_as_on= True):
dict_df_sp = {}
for sp in os.listdir(data_dir):
sp_path = os.path.join(data_dir, sp)
dict_df_sp[sp] = reduce(
lambda df1, df2:
|
pd.merge(df1, df2, on=on)
|
pandas.merge
|
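# A self-contained sketch of the reduce + pd.merge pattern the helper above is building: fold a
# list of DataFrames into one by repeatedly merging on a shared key column. Column names here are
# illustrative.
from functools import reduce
import pandas as pd

dfs = [
    pd.DataFrame({'gene': ['a', 'b'], 'x': [1, 2]}),
    pd.DataFrame({'gene': ['a', 'b'], 'y': [3, 4]}),
    pd.DataFrame({'gene': ['a', 'b'], 'z': [5, 6]}),
]
combined = reduce(lambda df1, df2: pd.merge(df1, df2, on='gene'), dfs)
print(combined)  # one row per gene, with columns x, y and z side by side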
import luigi
import os
import pandas as pd
from db import extract
from db import sql
from forecast import util
import shutil
import luigi.contrib.hadoop
from sqlalchemy import create_engine
from pysandag.database import get_connection_string
from pysandag import database
from db import log
class EmpPopulation(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return None
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
db_connection_string = database.get_connection_string('model_config.yml', 'in_db')
sql_in_engine = create_engine(db_connection_string)
in_query = getattr(sql, 'max_run_id')
db_run_id = pd.read_sql(in_query, engine, index_col=None)
# db_run_id = log.new_run(name='emp_run_log', run_id=db_run_id['max'].iloc[0])
run_id = pd.Series([db_run_id['id'].iloc[0]])
run_id.to_hdf('temp/data.h5', 'run_id', mode='a')
tables = util.yaml_to_dict('model_config.yml', 'db_tables')
dem_sim_rates = extract.create_df('dem_sim_rates', 'dem_sim_rates_table',
rate_id=self.dem_id, index=None)
dem_sim_rates.to_hdf('temp/data.h5', 'dem_sim_rates', mode='a')
econ_sim_rates = extract.create_df('econ_sim_rates', 'econ_sim_rates_table',
rate_id=self.econ_id, index=None)
econ_sim_rates.to_hdf('temp/data.h5', 'econ_sim_rates', mode='a')
in_query = getattr(sql, 'inc_pop') % (tables['inc_pop_table'], run_id[0])
in_query2 = getattr(sql, 'inc_mil_hh_pop') % (tables['population_table'], dem_sim_rates.base_population_id[0])
pop = pd.read_sql(in_query, engine, index_col=['age', 'race_ethn', 'sex', 'mildep'])
pop_mil = pd.read_sql(in_query2, sql_in_engine, index_col=['age', 'race_ethn', 'sex', 'mildep'])
pop = pop.join(pop_mil)
pop['persons'] = (pop['persons'] - pop['mil_mildep'])
pop = pop.reset_index(drop=False)
pop['age_cat'] = ''
pop.loc[pop['age'].isin(list(range(0, 5))), ['age_cat']] = '00_04'
pop.loc[pop['age'].isin(list(range(5, 10))), ['age_cat']] = '05_09'
pop.loc[pop['age'].isin(list(range(10, 15))), ['age_cat']] = '10_14'
pop.loc[pop['age'].isin(list(range(15, 18))), ['age_cat']] = '15_17'
pop.loc[pop['age'].isin(list(range(18, 20))), ['age_cat']] = '18_19'
pop.loc[pop['age'].isin(list(range(20, 21))), ['age_cat']] = '20_20'
pop.loc[pop['age'].isin(list(range(21, 22))), ['age_cat']] = '21_21'
pop.loc[pop['age'].isin(list(range(22, 25))), ['age_cat']] = '22_24'
pop.loc[pop['age'].isin(list(range(25, 30))), ['age_cat']] = '25_29'
pop.loc[pop['age'].isin(list(range(30, 35))), ['age_cat']] = '30_34'
pop.loc[pop['age'].isin(list(range(35, 40))), ['age_cat']] = '35_39'
pop.loc[pop['age'].isin(list(range(40, 45))), ['age_cat']] = '40_44'
pop.loc[pop['age'].isin(list(range(45, 50))), ['age_cat']] = '45_49'
pop.loc[pop['age'].isin(list(range(50, 55))), ['age_cat']] = '50_54'
pop.loc[pop['age'].isin(list(range(55, 60))), ['age_cat']] = '55_59'
pop.loc[pop['age'].isin(list(range(60, 62))), ['age_cat']] = '60_61'
pop.loc[pop['age'].isin(list(range(62, 65))), ['age_cat']] = '62_64'
pop.loc[pop['age'].isin(list(range(65, 67))), ['age_cat']] = '65_66'
pop.loc[pop['age'].isin(list(range(67, 70))), ['age_cat']] = '67_69'
pop.loc[pop['age'].isin(list(range(70, 75))), ['age_cat']] = '70_74'
pop.loc[pop['age'].isin(list(range(75, 80))), ['age_cat']] = '75_79'
pop.loc[pop['age'].isin(list(range(80, 85))), ['age_cat']] = '80_84'
pop.loc[pop['age'].isin(list(range(85, 103))), ['age_cat']] = '85_99'
pop = pd.DataFrame(pop['persons'].groupby([pop['yr'], pop['age_cat'], pop['sex'], pop['race_ethn']]).sum())
pop.to_hdf('temp/data.h5', 'pop', mode='a')
class MilPopulation(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return EmpPopulation(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
db_connection_string = database.get_connection_string('model_config.yml', 'in_db')
sql_in_engine = create_engine(db_connection_string)
in_query = getattr(sql, 'max_run_id')
db_run_id = pd.read_sql(in_query, engine, index_col=None)
run_id = pd.Series([db_run_id['id'].iloc[0]])
run_id.to_hdf('temp/data.h5', 'run_id', mode='a')
tables = util.yaml_to_dict('model_config.yml', 'db_tables')
dem_sim_rates = pd.read_hdf('temp/data.h5', 'dem_sim_rates')
in_query = getattr(sql, 'inc_mil_gc_pop') % (tables['inc_pop_table'], run_id[0])
in_query2 = getattr(sql, 'inc_mil_hh_pop') % (tables['population_table'], dem_sim_rates.base_population_id[0])
pop = pd.read_sql(in_query, engine, index_col=['age', 'race_ethn', 'sex'])
pop_mil = pd.read_sql(in_query2, sql_in_engine, index_col=['age', 'race_ethn', 'sex'])
pop_mil = pop_mil.loc[pop_mil['mildep'] == 'Y']
pop = pop.join(pop_mil)
pop.rename(columns={'persons': 'mil_gc_pop'}, inplace=True)
pop.rename(columns={'mil_mildep': 'mil_hh_pop'}, inplace=True)
pop = pop.reset_index(drop=False)
pop = pd.DataFrame(pop[['mil_gc_pop', 'mil_hh_pop']].groupby([pop['yr']]).sum())
pop.to_hdf('temp/data.h5', 'mil_pop', mode='a')
class LaborForceParticipationRates(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return EmpPopulation(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
lfpr = extract.create_df('lfp_rates', 'lfp_rates_table', rate_id=econ_sim_rates.lfpr_id[0], index=['yr', 'age_cat', 'sex', 'race_ethn'])
lfpr.to_hdf('temp/data.h5', 'lfpr', mode='a')
class LaborForce(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return LaborForceParticipationRates(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
pop = pd.read_hdf('temp/data.h5', 'pop')
lfpr = pd.read_hdf('temp/data.h5', 'lfpr')
labor_force = pop.join(lfpr)
labor_force['labor_force'] = (labor_force['persons'] * labor_force['lfpr']).round()
labor_force = labor_force.iloc[~labor_force.index.get_level_values('age_cat').isin(['00_04', '05_09', '10_14'])]
labor_force.to_hdf('temp/data.h5', 'labor_force', mode='a')
class CohortUrRate(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return LaborForce(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
cohort_ur = extract.create_df('cohort_ur', 'cohort_ur_table', rate_id=econ_sim_rates.ur1_id[0], index=['yr', 'age_cat', 'sex', 'race_ethn'])
cohort_ur.to_hdf('temp/data.h5', 'cohort_ur', mode='a')
yearly_ur = extract.create_df('yearly_ur', 'yearly_ur_table', rate_id=econ_sim_rates.ur2_id[0], index=['yr'])
yearly_ur.to_hdf('temp/data.h5', 'yearly_ur', mode='a')
class WorkForce(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return CohortUrRate(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
labor_force = pd.read_hdf('temp/data.h5', 'labor_force')
cohort_ur = pd.read_hdf('temp/data.h5', 'cohort_ur')
yearly_ur = pd.read_hdf('temp/data.h5', 'yearly_ur')
work_force = labor_force.join(cohort_ur)
work_force['unemployed'] = (work_force['labor_force'] * work_force['ur2']).round()
computed_ur = work_force.reset_index(drop=False)
computed_ur = pd.DataFrame(computed_ur[['labor_force', 'unemployed']].groupby([computed_ur['yr']]).sum())
computed_ur['computed_ur'] = (computed_ur['unemployed'] / computed_ur['labor_force'])
computed_ur = computed_ur.join(yearly_ur)
computed_ur['adjustment'] = (computed_ur['ur1'] / computed_ur['computed_ur'])
work_force = work_force.join(computed_ur['adjustment'])
work_force['unemployed'] = (work_force['unemployed'] * work_force['adjustment']).round()
work_force['work_force'] = (work_force['labor_force'] - work_force['unemployed'])
work_force.to_hdf('temp/data.h5', 'work_force', mode='a')
# Code to check if after adjustment ur matches target
'''
computed_ur = work_force.reset_index(drop=False)
computed_ur = pd.DataFrame(computed_ur[['labor_force', 'unemployed']].groupby([computed_ur['yr']]).sum())
computed_ur['computed_ur'] = (computed_ur['unemployed'] / computed_ur['labor_force'])
print computed_ur
'''
class LocalWorkForce(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return WorkForce(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
out_commuting = extract.create_df('out_commuting', 'out_commuting_table', rate_id=econ_sim_rates.oc_id[0], index=['yr'])
work_force =
|
pd.read_hdf('temp/data.h5', 'work_force')
|
pandas.read_hdf
|
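# A minimal sketch of the HDF5 hand-off the luigi tasks above use to pass intermediate DataFrames
# between steps (to_hdf/read_hdf need the optional PyTables dependency). The file and key names are
# placeholders.
import pandas as pd

pop = pd.DataFrame({'yr': [2020, 2021], 'persons': [100, 110]})
pop.to_hdf('example_data.h5', 'pop', mode='w')    # an upstream task writes its result under a key
pop_back = pd.read_hdf('example_data.h5', 'pop')  # the downstream task reads it back by that key
print(pop_back)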
#!/usr/local/bin/python3
__author__ = "<NAME>"
import pandas as pd
import numpy as np
from normalize_comments import *
COMMENT_WORDS_THRESHOLD = 4
CONSTRUCTIVENESS_SCORE_THRESHOLD = 0.6
class ConstructivenessDataCollector:
'''
A class to collect training and test data for constructiveness
from different resources
'''
def __init__(self):
'''
'''
# initialize a dataframe for the training data
self.training_df = pd.DataFrame(columns=['comment_text', 'constructive', 'source'])
self.test_df = pd.DataFrame(columns=['comment_counter', 'comment_text', 'constructive'])
self.training_df_normalized = None
self.test_df_normalized = None
def get_positive_examples(self):
'''
:return:
'''
positive_df = self.training_df[self.training_df['constructive'] == 1]
return positive_df
def get_negative_examples(self):
'''
:return:
'''
negative_df = self.training_df[self.training_df['constructive'] == 0]
return negative_df
def normalize_comment_text(self, column = 'comment_text', mode = 'train'):
#start = timer()
#print('Start time: ', start)
if mode.startswith('train'):
df = self.training_df
else:
df = self.test_df
df_processed = parallelize(df, run_normalize)
#end = timer()
#print('Total time taken: ', end - start)
if mode.startswith('train'):
self.training_df_normalized = df_processed
else:
self.test_df_normalized = df_processed
def collect_training_data_from_CSV(self, data_csv, frac = 1.0, source = 'SOCC',
cols_dict={'constructive': 'constructive',
'comment_text': 'comment_text',
'comment_word_count': 'commentWordCount'}):
'''
:param data_csv:
:param frac:
:param cols_dict:
:return:
'''
df = pd.read_csv(data_csv, skipinitialspace=True)
df = df.sample(frac = frac)
if not cols_dict['comment_word_count'] in df:
df[cols_dict['comment_word_count']] = df[cols_dict['comment_text']].apply(lambda x: len(x.split()))
df.rename(columns={cols_dict['comment_text']: 'comment_text',
cols_dict['comment_word_count']:'comment_word_count',
cols_dict['constructive']: 'constructive'}, inplace = True)
df['source'] = source
# Binarize the constructiveness score: treat anything above CONSTRUCTIVENESS_SCORE_THRESHOLD as constructive
df['constructive'] = df['constructive'].apply(lambda x: 1 if x > CONSTRUCTIVENESS_SCORE_THRESHOLD else 0)
self.training_df = pd.concat([self.training_df, df[['comment_text', 'constructive', 'source',
'crowd_toxicity_level',
'constructive_characteristics',
'non_constructive_characteristics',
'toxicity_characteristics']]])
self.normalize_comment_text(mode='train')
#self.write_csv(output_csv)
def collect_positive_examples(self, positive_examples_csv, frac = 1.0, source = 'NYTPicks',
cols_dict = {'constructive':'editorsSelection',
'comment_text':'commentBody',
'comment_word_count': 'commentWordCount'}):
'''
:param positive_examples_csv:
:param frac:
:param cols_dict:
:return:
'''
df = pd.read_csv(positive_examples_csv, skipinitialspace=True)
df = df.sample(frac = frac)
if not cols_dict['comment_word_count'] in df:
df[cols_dict['comment_word_count']] = df[cols_dict['comment_text']].apply(lambda x: len(x.split()))
df.rename(columns={cols_dict['comment_text']: 'comment_text',
cols_dict['comment_word_count']:'comment_word_count',
cols_dict['constructive']: 'constructive'}, inplace = True)
df['source'] = source
df['crowd_toxicity_level'] = np.NaN
df['constructive_characteristics'] = np.NaN
df['non_constructive_characteristics'] = np.NaN
df['toxicity_characteristics'] = np.NaN
# Select comments selected by NYT moderators as NYT pick and where the length
# of the comment is > COMMENT_WORDS_THRESHOLD
positive_df = df[
(df['constructive'] == 1) & (df['comment_word_count'] > COMMENT_WORDS_THRESHOLD)]
self.training_df = pd.concat([self.training_df, positive_df[['comment_text', 'constructive', 'source',
'crowd_toxicity_level',
'constructive_characteristics',
'non_constructive_characteristics',
'toxicity_characteristics'
]]])
self.normalize_comment_text(mode='train')
return positive_df
def collect_negative_examples(self, negative_examples_csv, frac = 1.0, source = 'YNACC',
cols_dict = {'comment_text': 'text',
'constructive': 'constructiveclass'}):
'''
:param negative_examples_csv:
:param frac:
:param cols_dict:
:return:
'''
if negative_examples_csv.endswith('tsv'):
df =
|
pd.read_csv(negative_examples_csv, sep='\t')
|
pandas.read_csv
|
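# A short sketch of the extension-based loading done in collect_negative_examples above: pick the
# separator for pd.read_csv from the file suffix. The path handling below is illustrative only.
import pandas as pd

def load_examples(path):
    # tab-separated for .tsv exports, comma-separated otherwise
    sep = '\t' if path.endswith('tsv') else ','
    return pd.read_csv(path, sep=sep, skipinitialspace=True)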
import warnings
from functools import lru_cache
import networkx as nx
import numpy as np
import pandas as pd
from kgextension.endpoints import DBpedia
from kgextension.sparql_helper import endpoint_wrapper
from kgextension.utilities import link_validator
from kgextension.uri_helper import uri_querier
def get_result_df(df, result_type, prefix, merged_df, column):
"""Helper function for unqualified and qualified relation generator. It
helps to create the result dataframe and reduce the duplicated codes from
the two main functions.
Arguments:
df (pd.DataFrame):The result dataframe dummies.
result_type (str): The type of result chosen from boolean, count,
relative count or tf-idf.
prefix (str): Prefix set automatically by the generator.
merged_df (pd.DataFrame): The original dataframe inputed by users.
column (str): Name of the attribute containing entities that should
be found.
Returns:
pd.DataFrame: The final dataframe.
"""
# group values based on different tasks
finaldf = []
if result_type == "boolean":
finaldf = df.groupby("value").any().iloc[:,1:]
elif result_type in ["count", "relative", "tfidf"]:
finaldf = df.groupby("value").sum()
# If result_type is "relative" or "tfidf", calculate the relative counts per row
if result_type in ["relative", "tfidf"]:
# Calculate the relative counts by dividing each row by its sum, fillna(0) to replace missings created by division by zero (when sum=0)
finaldf_relative = finaldf.copy()
finaldf_relative = finaldf.div(finaldf.sum(axis=1), axis=0).fillna(0)
# If result_type is "tfidf", use the table of relative counts to create the table of tfidf-values
if result_type == "tfidf":
# Calculate idf values
N = len(merged_df)
nt = finaldf[finaldf >= 1].count(axis=0)
idf = np.log(N/nt).replace(np.inf, 0)
# Multiply relative counts with idf values
finaldf_relative = finaldf_relative.multiply(idf, axis="columns")
finaldf = finaldf_relative.copy()
else:
raise AttributeError('Wrong result_type, try "boolean", "count", "relative" or "tfidf".')
# add column prefix
finaldf = finaldf.add_prefix(prefix)
# adjust type
type_ = "int64" if result_type == "count" else ("float" if result_type in ["relative", "tfidf"] else "bool")
finaldf = finaldf.astype(type_)
final_col = finaldf.columns
# merge with original df; then find and replace all NaN to 0/False
finaldf = pd.merge(
merged_df, finaldf, left_on=column, right_on="value", how="outer")
if result_type == "boolean":
finaldf[final_col] = finaldf[final_col].fillna(False)
else:
finaldf[final_col] = finaldf[final_col].fillna(0)
return finaldf
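# Minimal sketch of the aggregation above on a toy dummies frame (column
# names are made up; the first non-"value" column mimics the column that
# .iloc[:, 1:] drops in the "boolean" branch; N would be len(merged_df)
# in the function above).
import numpy as np
import pandas as pd

_dummies = pd.DataFrame({
    "value": ["a", "a", "b"],
    "uri": [1, 1, 1],
    "rel_x": [1, 0, 1],
    "rel_y": [0, 1, 0],
})
_bool = _dummies.groupby("value").any().iloc[:, 1:]           # result_type == "boolean"
_count = _dummies.groupby("value").sum()                      # "count", basis for "relative"/"tfidf"
_relative = _count.div(_count.sum(axis=1), axis=0).fillna(0)  # row-wise relative counts
_idf = np.log(len(_dummies) / _count[_count >= 1].count(axis=0)).replace(np.inf, 0)
_tfidf = _relative.multiply(_idf, axis="columns")             # tf-idf style weighting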
def hierarchy_query_creator(
col, hierarchy_relation, max_hierarchy_depth, uri_data_model):
"""Creates a Sparql query to retrieve the hierarchy of classes/categories.
Args:
col (pd.Series): pd.Series containing the URIs.
hierarchy_relation (str): A hierarchy relation, e.g.
http://www.w3.org/2004/02/skos/core#broader.
max_hierarchy_depth (int): The maximum number of hierarchy levels
added based on the original resources. If None is passed,
transitive hierarchies are created, which may lead to a timeout.
uri_data_model (bool): If false formulates query for endpoints.
Returns:
str: The SPARQL Query for hierarchy retrieval.
"""
# Create Sparql "list" of the resources
values = "(<"+col.str.cat(sep=">) (<")+">) "
# create the hierarchy variables and add them to SELECT
if max_hierarchy_depth and not uri_data_model:
# create as many variables as needed for the specified depth of the query
hierarchy_selectors = ["?hierarchy_selector" + str(i+1)
for i in range(max_hierarchy_depth)]
variables = ["?value"] + hierarchy_selectors
query = "SELECT "+ " ".join(variables)
if uri_data_model:
query += " WHERE {VALUES (?value) { (<**URI**>)} "
else:
query += " WHERE {VALUES (?value) {" + values + "} "
# search for an optional superclass for each depth step
for i in range(max_hierarchy_depth):
query += "OPTIONAL { "+ variables[i] + " <"
query += hierarchy_relation + "> " + variables[i+1]+ " . } "
query += "}"
# else, if max_hierarchy_depth isn't specified, search transitively.
else:
if uri_data_model:
query = "SELECT ?value ?hierarchy_selector"
query += " WHERE {VALUES (?value) { (<**URI**>)} "
else:
query = "SELECT ?value ?hierarchy_selector"
query += " WHERE {VALUES (?value) {" + values + "} "
query += "OPTIONAL { ?value <" +hierarchy_relation
query += "> ?hierarchy_selector . } }"
return query
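# Sketch (not library output) of the transitive endpoint query built above,
# for two made-up URIs and the default subClassOf relation.
import pandas as pd

_uris = pd.Series(["http://example.org/ClassA", "http://example.org/ClassB"])
_values = "(<" + _uris.str.cat(sep=">) (<") + ">) "
_sketch_query = (
    "SELECT ?value ?hierarchy_selector"
    " WHERE {VALUES (?value) {" + _values + "} "
    "OPTIONAL { ?value <http://www.w3.org/2000/01/rdf-schema#subClassOf"
    "> ?hierarchy_selector . } }"
)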
def create_graph_from_raw(
DG, results, max_hierarchy_depth, current_level, uri_data_model):
"""Converts the XML obtained by the endpoint wrapper into a hierarchical
directed graph.
Args:
DG (Directed Graph): The empty or preprocessed graph to be appended.
results (DOM/pd.DataFrame): The raw results of the SPARQL query
max_hierarchy_depth (int): The maximum number of hierarchy levels when
the direct search is used.
current_level (pd.Series): In case of iterative hierarchy generation
the values of the current hierarchy level.
uri_data_model (bool): If enabled, the URI is directly queried
instead of a SPARQL endpoint.
Returns:
nx.DirectedGraph: Graph where edges point to direct superclasses of
nodes.
current_level: In case of iterative hierarchy generation the updated
hierarchy level.
"""
# the uri_querier returns a dataframe with two columns, the row-wise pairs
# are edges to be inserted into the graph
if uri_data_model:
# if there are no results, make sure that next level is empty and not
# full of NA values instead
if results.empty:
current_level = pd.Series()
# if no parents are found in the first iterations simply add the child
# nodes to the graph
elif results['hierarchy_selector'].isna().all():
if nx.is_empty(DG):
DG.add_nodes_from(results['value'])
current_level = pd.Series()
# each row-wise value-hierarchy-selector pair creates a directed edge
# in the graph. The current level is set to the currently highest
# parent layer.
else:
to_append = nx.convert_matrix.from_pandas_edgelist(
results, 'value', 'hierarchy_selector', create_using=nx.DiGraph())
DG = nx.compose(DG, to_append)
current_level = results['hierarchy_selector']
# if endpoint_wrapper is used, the graph is generated from the XML data
else:
for result_node in results.getElementsByTagName("result"):
for binding in result_node.getElementsByTagName("binding"):
attr_name = binding.getAttribute("name")
for childnode in binding.childNodes:
if childnode.firstChild is not None:
# get the attribute name and add it as node to the graph
value = childnode.firstChild.nodeValue
DG.add_node(value)
if max_hierarchy_depth:
# as long as the attribute is not the base
# class/category, add an edge from the preceding
# attribute
if not attr_name == "value":
DG.add_edge(predecessing_value, value)
predecessing_value = value
else:
if isinstance(current_level, pd.Series):
current_level = list(current_level)
if attr_name == "value":
current_value = value
elif attr_name == "hierarchy_selector":
# add an edge from the lower hierarchy value to the
# upper hierarchy value
if not DG.has_edge(current_value, value):
DG.add_edge(current_value, value)
current_level += [value]
# in case of the iterative search, update the values to the current
# hierarchy level
if not max_hierarchy_depth:
current_level = pd.Series(list(dict.fromkeys(current_level)))
return DG, current_level
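# Sketch of the uri_data_model branch above: row-wise value/hierarchy pairs
# become directed edges and are composed into the running graph (toy rows).
import networkx as nx
import pandas as pd

_results = pd.DataFrame({"value": ["child_1", "child_2"],
                         "hierarchy_selector": ["parent", "parent"]})
_DG = nx.DiGraph()
_to_append = nx.convert_matrix.from_pandas_edgelist(
    _results, "value", "hierarchy_selector", create_using=nx.DiGraph())
_DG = nx.compose(_DG, _to_append)
_next_level = _results["hierarchy_selector"]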
def hierarchy_graph_generator(
col,
hierarchy_relation = "http://www.w3.org/2000/01/rdf-schema#subClassOf",
max_hierarchy_depth = None, endpoint = DBpedia, uri_data_model = False, progress=False, caching=True):
"""Computes a hierarchy graph from an original set of features, where
directed edges symbolise a hierarchy relation from subclass to superclass.
Args:
col (pd.Series): The classes/categories for which the hierarchy graph
is generated.
hierarchy_relation (str, optional): The hierarchy relation to be used.
Defaults to "http://www.w3.org/2000/01/rdf-schema#subClassOf".
max_hierarchy_depth (int, optional): Number of jumps in hierarchy. If
None, transitive jumps are used. Defaults to None.
endpoint (Endpoint, optional): Link to the SPARQL endpoint that should
be queried. Defaults to DBpedia.
uri_data_model (bool, optional): whether to use sparql querier or the
uri data model. Defaults to False.
progress (bool, optional): If True, progress bars will be shown to
inform the user about the progress made by the process (if
"uri_data_model" = True). Defaults to False.
caching (bool, optional): Turn result-caching for queries issued during
the execution on or off.
Returns:
nx.DirectedGraph: Graph where edges point to direct superclasses of
nodes.
"""
# warn if wrong configurations are used and correct them
cond_subclass = hierarchy_relation ==\
"http://www.w3.org/2000/01/rdf-schema#subClassOf"
if cond_subclass and max_hierarchy_depth:
warnings.warn("""If you use subClass with a maximum hierarchy depth,
meaningless superclasses are generated.
Max_hierarchy_depth is set to None instead""")
max_hierarchy_depth = None
cond_broader= hierarchy_relation ==\
"http://www.w3.org/2004/02/skos/core#broader"
if cond_broader and max_hierarchy_depth is None:
warnings.warn("""Transitive superclass generation does not work for
categories. Max_hierarchy_depth is set to 1. For higher depths, set
max_hierarchy_depth to a higher integer""")
max_hierarchy_depth = 1
# Initialise the graph
DG = nx.DiGraph()
# if column contains only missings return empty graph
if col.isna().all():
return DG
current_level = col.copy()
# in this case the query contains all future hierarchy levels and queries
# them directly
if max_hierarchy_depth and not uri_data_model:
query = hierarchy_query_creator(
col, hierarchy_relation, max_hierarchy_depth, uri_data_model)
results = endpoint_wrapper(query, endpoint, return_XML=True, caching=caching)
DG, _ = create_graph_from_raw(
DG, results, max_hierarchy_depth, None, uri_data_model)
# here the "broader" steps have to be added sequentially from level to
# level until the max_hierarchy_depth is reached
elif max_hierarchy_depth and uri_data_model:
hierarchy_level = 0
while not current_level.empty and hierarchy_level<max_hierarchy_depth:
query = hierarchy_query_creator(
current_level, hierarchy_relation, max_hierarchy_depth,
uri_data_model)
temp_frame = pd.DataFrame(current_level)
results = uri_querier(
temp_frame, current_level.name, query, progress=progress, caching=caching)
current_level=list()
DG, current_level = create_graph_from_raw(
DG, results, max_hierarchy_depth, current_level,
uri_data_model)
hierarchy_level += 1
# iteratively loop from hierarchy level to hierarchy level until no
# more superclasses are found --> transitive without maximum
else:
while not current_level.empty:
query = hierarchy_query_creator(
current_level, hierarchy_relation, max_hierarchy_depth,
uri_data_model)
if uri_data_model:
temp_frame =
|
pd.DataFrame(current_level)
|
pandas.DataFrame
|
"""
@brief test log(time=6s)
"""
import sys
import unittest
from logging import getLogger
import numpy
import pandas
from pyquickhelper.pycode import ExtTestCase, skipif_circleci, ignore_warnings
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from skl2onnx.common.data_types import (
StringTensorType, FloatTensorType, Int64TensorType,
BooleanTensorType)
from mlprodict.onnxrt import OnnxInference
from mlprodict.onnx_conv import register_converters, to_onnx
from mlprodict.tools.asv_options_helper import get_ir_version_from_onnx
class TestOnnxrtRuntimeLightGbm(ExtTestCase):
def setUp(self):
logger = getLogger('skl2onnx')
logger.disabled = True
register_converters()
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
def test_missing(self):
from mlprodict.onnx_conv.parsers.parse_lightgbm import WrappedLightGbmBooster
r = WrappedLightGbmBooster._generate_classes( # pylint: disable=W0212
dict(num_class=1))
self.assertEqual(r.tolist(), [0, 1])
r = WrappedLightGbmBooster._generate_classes( # pylint: disable=W0212
dict(num_class=3))
self.assertEqual(r.tolist(), [0, 1, 2])
@skipif_circleci('stuck')
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
@ignore_warnings((RuntimeWarning, UserWarning))
def test_onnxrt_python_lightgbm_categorical(self):
from lightgbm import LGBMClassifier
X = pandas.DataFrame({"A": numpy.random.permutation(['a', 'b', 'c', 'd'] * 75), # str
# int
"B": numpy.random.permutation([1, 2, 3] * 100),
# float
"C": numpy.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60),
# bool
"D": numpy.random.permutation([True, False] * 150),
"E": pandas.Categorical(numpy.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60),
ordered=True)}) # str and ordered categorical
y = numpy.random.permutation([0, 1] * 150)
X_test = pandas.DataFrame({"A": numpy.random.permutation(['a', 'b', 'e'] * 20), # unseen category
"B": numpy.random.permutation([1, 3] * 30),
"C": numpy.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
"D": numpy.random.permutation([True, False] * 30),
"E": pandas.Categorical(numpy.random.permutation(['z', 'y'] * 30),
ordered=True)})
cat_cols_actual = ["A", "B", "C", "D"]
X[cat_cols_actual] = X[cat_cols_actual].astype('category')
X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category')
gbm0 = LGBMClassifier().fit(X, y)
exp = gbm0.predict(X_test, raw_scores=False)
self.assertNotEmpty(exp)
init_types = [('A', StringTensorType()),
('B', Int64TensorType()),
('C', FloatTensorType()),
('D', BooleanTensorType()),
('E', StringTensorType())]
self.assertRaise(lambda: to_onnx(gbm0, initial_types=init_types), RuntimeError,
"at most 1 input(s) is(are) supported")
X = X[['C']].values.astype(numpy.float32)
X_test = X_test[['C']].values.astype(numpy.float32)
gbm0 = LGBMClassifier().fit(X, y, categorical_feature=[0])
exp = gbm0.predict_proba(X_test, raw_scores=False)
model_def = to_onnx(gbm0, X)
self.assertIn('ZipMap', str(model_def))
oinf = OnnxInference(model_def)
y = oinf.run({'X': X_test})
self.assertEqual(list(sorted(y)),
['output_label', 'output_probability'])
df = pandas.DataFrame(y['output_probability'])
self.assertEqual(df.shape, (X_test.shape[0], 2))
self.assertEqual(exp.shape, (X_test.shape[0], 2))
# self.assertEqualArray(exp, df.values, decimal=6)
@skipif_circleci('stuck')
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
@ignore_warnings((RuntimeWarning, UserWarning))
def test_onnxrt_python_lightgbm_categorical3(self):
from lightgbm import LGBMClassifier
X = pandas.DataFrame({"A": numpy.random.permutation(['a', 'b', 'c', 'd'] * 75), # str
# int
"B": numpy.random.permutation([1, 2, 3] * 100),
# float
"C": numpy.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60),
# bool
"D": numpy.random.permutation([True, False] * 150),
"E": pandas.Categorical(numpy.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60),
ordered=True)}) # str and ordered categorical
y = numpy.random.permutation([0, 1, 2] * 100)
X_test = pandas.DataFrame({"A": numpy.random.permutation(['a', 'b', 'e'] * 20), # unseen category
"B": numpy.random.permutation([1, 3] * 30),
"C": numpy.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
"D": numpy.random.permutation([True, False] * 30),
"E": pandas.Categorical(numpy.random.permutation(['z', 'y'] * 30),
ordered=True)})
cat_cols_actual = ["A", "B", "C", "D"]
X[cat_cols_actual] = X[cat_cols_actual].astype('category')
X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category')
gbm0 = LGBMClassifier().fit(X, y)
exp = gbm0.predict(X_test, raw_scores=False)
self.assertNotEmpty(exp)
init_types = [('A', StringTensorType()),
('B', Int64TensorType()),
('C', FloatTensorType()),
('D', BooleanTensorType()),
('E', StringTensorType())]
self.assertRaise(lambda: to_onnx(gbm0, initial_types=init_types), RuntimeError,
"at most 1 input(s) is(are) supported")
X = X[['C']].values.astype(numpy.float32)
X_test = X_test[['C']].values.astype(numpy.float32)
gbm0 = LGBMClassifier().fit(X, y, categorical_feature=[0])
exp = gbm0.predict_proba(X_test, raw_scores=False)
model_def = to_onnx(gbm0, X)
self.assertIn('ZipMap', str(model_def))
oinf = OnnxInference(model_def)
y = oinf.run({'X': X_test})
self.assertEqual(list(sorted(y)),
['output_label', 'output_probability'])
df = pandas.DataFrame(y['output_probability'])
self.assertEqual(df.shape, (X_test.shape[0], 3))
self.assertEqual(exp.shape, (X_test.shape[0], 3))
# self.assertEqualArray(exp, df.values, decimal=6)
@skipif_circleci('stuck')
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
@ignore_warnings((RuntimeWarning, UserWarning))
def test_onnxrt_python_lightgbm_categorical_iris(self):
from lightgbm import LGBMClassifier, Dataset, train as lgb_train
iris = load_iris()
X, y = iris.data, iris.target
X = (X * 10).astype(numpy.int32)
X_train, X_test, y_train, _ = train_test_split(
X, y, random_state=11)
other_x = numpy.random.randint(
0, high=10, size=(1500, X_train.shape[1]))
X_train = numpy.vstack([X_train, other_x]).astype(dtype=numpy.int32)
y_train = numpy.hstack(
[y_train, numpy.zeros(500) + 3, numpy.zeros(500) + 4,
numpy.zeros(500) + 5]).astype(dtype=numpy.int32)
self.assertEqual(y_train.shape, (X_train.shape[0], ))
y_train = y_train % 2
# Classic
gbm = LGBMClassifier()
gbm.fit(X_train, y_train)
exp = gbm.predict_proba(X_test)
onx = to_onnx(gbm, initial_types=[
('X', Int64TensorType([None, X_train.shape[1]]))])
self.assertIn('ZipMap', str(onx))
oif = OnnxInference(onx)
got = oif.run({'X': X_test})
values = pandas.DataFrame(got['output_probability']).values
self.assertEqualArray(exp, values, decimal=5)
# categorical_feature=[0, 1]
train_data = Dataset(
X_train, label=y_train,
feature_name=['c1', 'c2', 'c3', 'c4'],
categorical_feature=['c1', 'c2'])
params = {
"boosting_type": "gbdt",
"learning_rate": 0.05,
"n_estimators": 2,
"objective": "binary",
"max_bin": 5,
"min_child_samples": 100,
'verbose': -1,
}
booster = lgb_train(params, train_data)
exp = booster.predict(X_test)
onx = to_onnx(booster, initial_types=[
('X', Int64TensorType([None, X_train.shape[1]]))])
self.assertIn('ZipMap', str(onx))
oif = OnnxInference(onx)
got = oif.run({'X': X_test})
values = pandas.DataFrame(got['output_probability']).values
self.assertEqualArray(exp, values[:, 1], decimal=5)
@skipif_circleci('stuck')
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
@ignore_warnings((RuntimeWarning, UserWarning))
def test_onnxrt_python_lightgbm_categorical_iris_booster3(self):
from lightgbm import LGBMClassifier, Dataset, train as lgb_train
iris = load_iris()
X, y = iris.data, iris.target
X = (X * 10).astype(numpy.int32)
X_train, X_test, y_train, _ = train_test_split(
X, y, random_state=11)
other_x = numpy.random.randint(
0, high=10, size=(1500, X_train.shape[1]))
X_train = numpy.vstack([X_train, other_x]).astype(dtype=numpy.int32)
y_train = numpy.hstack(
[y_train, numpy.zeros(500) + 3, numpy.zeros(500) + 4,
numpy.zeros(500) + 5]).astype(dtype=numpy.int32)
self.assertEqual(y_train.shape, (X_train.shape[0], ))
# Classic
gbm = LGBMClassifier()
gbm.fit(X_train, y_train)
exp = gbm.predict_proba(X_test)
onx = to_onnx(gbm, initial_types=[
('X', Int64TensorType([None, X_train.shape[1]]))])
self.assertIn('ZipMap', str(onx))
oif = OnnxInference(onx)
got = oif.run({'X': X_test})
values = pandas.DataFrame(got['output_probability']).values
self.assertEqualArray(exp, values, decimal=5)
# categorical_feature=[0, 1]
train_data = Dataset(
X_train, label=y_train,
feature_name=['c1', 'c2', 'c3', 'c4'],
categorical_feature=['c1', 'c2'])
params = {
"boosting_type": "gbdt",
"learning_rate": 0.05,
"n_estimators": 2,
"objective": "binary",
"max_bin": 5,
"min_child_samples": 100,
'verbose': -1,
}
booster = lgb_train(params, train_data)
exp = booster.predict(X_test)
onx = to_onnx(booster, initial_types=[
('X', Int64TensorType([None, X_train.shape[1]]))])
self.assertIn('ZipMap', str(onx))
oif = OnnxInference(onx)
got = oif.run({'X': X_test})
values = pandas.DataFrame(got['output_probability']).values
self.assertEqualArray(exp, values[:, 1], decimal=5)
@skipif_circleci('stuck')
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
@ignore_warnings((RuntimeWarning, UserWarning))
def test_onnxrt_python_lightgbm_categorical_iris_booster3_real(self):
from lightgbm import LGBMClassifier, Dataset, train as lgb_train
iris = load_iris()
X, y = iris.data, iris.target
X = (X * 10).astype(numpy.float32)
X_train, X_test, y_train, _ = train_test_split(
X, y, random_state=11)
# Classic
gbm = LGBMClassifier()
gbm.fit(X_train, y_train)
exp = gbm.predict_proba(X_test)
onx = to_onnx(gbm.booster_, initial_types=[
('X', FloatTensorType([None, X_train.shape[1]]))])
self.assertIn('ZipMap', str(onx))
oif = OnnxInference(onx)
got = oif.run({'X': X_test})
values = pandas.DataFrame(got['output_probability']).values
self.assertEqualArray(exp, values, decimal=5)
# categorical_feature=[0, 1]
train_data = Dataset(
X_train, label=y_train,
feature_name=['c1', 'c2', 'c3', 'c4'],
categorical_feature=['c1', 'c2'])
params = {
"boosting_type": "gbdt",
"learning_rate": 0.05,
"n_estimators": 2,
"objective": "multiclass",
"max_bin": 5,
"min_child_samples": 100,
'verbose': -1,
'num_class': 3,
}
booster = lgb_train(params, train_data)
exp = booster.predict(X_test)
onx = to_onnx(booster, initial_types=[
('X', FloatTensorType([None, X_train.shape[1]]))])
self.assertIn('ZipMap', str(onx))
oif = OnnxInference(onx)
got = oif.run({'X': X_test})
values = pandas.DataFrame(got['output_probability']).values
self.assertEqualArray(exp, values, decimal=5)
@skipif_circleci('stuck')
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
@ignore_warnings((RuntimeWarning, UserWarning))
def test_onnxrt_python_lightgbm_categorical_iris_dataframe(self):
from lightgbm import Dataset, train as lgb_train
iris = load_iris()
X, y = iris.data, iris.target
X = (X * 10).astype(numpy.int32)
X_train, X_test, y_train, _ = train_test_split(
X, y, random_state=11)
other_x = numpy.random.randint(
0, high=10, size=(1500, X_train.shape[1]))
X_train = numpy.vstack([X_train, other_x]).astype(dtype=numpy.int32)
y_train = numpy.hstack(
[y_train, numpy.zeros(500) + 3, numpy.zeros(500) + 4,
numpy.zeros(500) + 5]).astype(dtype=numpy.int32)
self.assertEqual(y_train.shape, (X_train.shape[0], ))
y_train = y_train % 2
df_train = pandas.DataFrame(X_train)
df_train.columns = ['c1', 'c2', 'c3', 'c4']
df_train['c1'] = df_train['c1'].astype('category')
df_train['c2'] = df_train['c2'].astype('category')
df_train['c3'] = df_train['c3'].astype('category')
df_train['c4'] = df_train['c4'].astype('category')
df_test = pandas.DataFrame(X_test)
df_test.columns = ['c1', 'c2', 'c3', 'c4']
df_test['c1'] = df_test['c1'].astype('category')
df_test['c2'] = df_test['c2'].astype('category')
df_test['c3'] = df_test['c3'].astype('category')
df_test['c4'] = df_test['c4'].astype('category')
# categorical_feature=[0, 1]
train_data = Dataset(
df_train, label=y_train)
params = {
"boosting_type": "gbdt",
"learning_rate": 0.05,
"n_estimators": 2,
"objective": "binary",
"max_bin": 5,
"min_child_samples": 100,
'verbose': -1,
}
booster = lgb_train(params, train_data)
exp = booster.predict(X_test)
onx = to_onnx(booster, df_train)
self.assertIn('ZipMap', str(onx))
oif = OnnxInference(onx)
got = oif.run(df_test)
values = pandas.DataFrame(got['output_probability']).values
self.assertEqualArray(exp, values[:, 1], decimal=5)
onx.ir_version = get_ir_version_from_onnx()
oif = OnnxInference(onx, runtime='onnxruntime1')
got = oif.run(df_test)
values = pandas.DataFrame(got['output_probability']).values
self.assertEqualArray(exp, values[:, 1], decimal=5)
onx = to_onnx(booster, df_train,
options={booster.__class__: {'cast': True}})
self.assertIn('op_type: "Cast"', str(onx))
oif = OnnxInference(onx)
got = oif.run(df_test)
values =
|
pandas.DataFrame(got['output_probability'])
|
pandas.DataFrame
|
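# Sketch of the completion above: the ZipMap output is typically a sequence
# of {label: probability} mappings, which pandas.DataFrame spreads into one
# column per class label (toy probabilities, not real model output).
import pandas as pd

_output_probability = [{0: 0.9, 1: 0.1}, {0: 0.2, 1: 0.8}]
_probs = pd.DataFrame(_output_probability)
assert _probs.shape == (2, 2)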
import pandas as pd
import numpy as np
import os, sys
from os import listdir
import json
from scipy.signal import find_peaks, savgol_filter, hilbert, wiener
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
from .plotter import Plotter
class Clustering:
def encode(self, data):
data = data.drop(['type', 'hour', 'minute', 'dayofweek'], axis=1)
return data
def clusterize(self, data, ep=0.01):
data_formated = self.encode(data.copy())
clustering = DBSCAN(eps=ep, min_samples=3).fit_predict(data_formated)
data['cluster'] = clustering
return data.sort_values('cluster')
class ContextMapping:
crimes_chicago = ['ASSAULT', 'BATTERY', 'BURGLARY', 'CRIMINAL DAMAGE',
'DECEPTIVE PRACTICE', 'MOTOR VEHICLE THEFT', 'ROBBERY',
'THEFT']
crimes_austin = ['ASSAULT', 'AUTO', 'BURGLARY', 'CRIMINAL', 'FAMILY',
'POSS', 'THEFT']
def __init__(self):
self.MONTHS = {
1 : 'January',
2 : 'February',
3 : 'March',
4 : 'April',
5 : 'May',
6 : 'June',
7 : 'July',
8 : 'August',
9 : 'September',
10 : 'October',
11 : 'November',
12 : 'December',
}
self.plotter = Plotter()
def remove_invalid_coord(self, df):
return df.query('lat != 0 & lon != 0')
def read_data(self, folder, file):
data_file = open(folder + file, 'r')
context_list = []
for line in data_file:
line = line.strip().split('\t')
item = {}
item['datetime'] = pd.to_datetime(str(line[0]))
item['hour'] = pd.to_datetime(str(line[0])).hour
item['minute'] = pd.to_datetime(str(line[0])).minute
item['lat'] = float(line[1])
item['lon'] = float(line[2])
item['type'] = line[3].strip().split()[0]
context_list.append(item)
data_file.close()
df =
|
pd.DataFrame(context_list)
|
pandas.DataFrame
|
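# Sketch of the completion above: read_data builds a list of per-line dicts
# and pandas.DataFrame turns it into one row per record (made-up values).
import pandas as pd

_context_list = [
    {"datetime": pd.to_datetime("2020-01-01 13:45"), "hour": 13, "minute": 45,
     "lat": 41.88, "lon": -87.63, "type": "THEFT"},
    {"datetime": pd.to_datetime("2020-01-01 02:10"), "hour": 2, "minute": 10,
     "lat": 41.77, "lon": -87.61, "type": "ASSAULT"},
]
_df_ctx = pd.DataFrame(_context_list)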
# Core imports
import os
import time
from datetime import datetime
import random
# Third party imports
import geopandas as gpd
import pandas as pd
import yaml
#import gptables as gpt
# Module imports
import geospatial_mods as gs
import data_ingest as di
import data_transform as dt
import ftp_get_files_logic as fpts
import data_output as do
start_time = time.time()
# get current working directory
CWD = os.getcwd()
# TODO: find out best practice on CWD
# Load config
with open(os.path.join(CWD, "config.yaml")) as yamlfile:
config = yaml.load(yamlfile, Loader=yaml.FullLoader)
module = os.path.basename(__file__)
print(f"Config loaded in {module}")
# Retrieve Missing Data Files via FTP
#fpts.execute_file_grab(CWD)
# Constants
DEFAULT_CRS = config["DEFAULT_CRS"]
DATA_DIR = config["DATA_DIR"]
EXT_ORDER = config['EXT_ORDER']
# Years
# Getting the year for population data
pop_year = str(config["calculation_year"])
# Getting the year for centroid data
centroid_year = str(config["centroid_year"])
# Get the pandas dataframe for the stops data
stops_df = di.get_stops_file(url=config["NAPTAN_API"],
dir=os.path.join(os.getcwd(),
"data",
"stops"))
# filter out on inactive stops
filtered_stops = dt.filter_stops(stops_df=stops_df)
# coverts from pandas df to geo df
stops_geo_df = (di.geo_df_from_pd_df(pd_df=filtered_stops,
geom_x='Easting',
geom_y='Northing',
crs=DEFAULT_CRS))
# adds in high/low capacity column
stops_geo_df=dt.add_stop_capacity_type(stops_df=stops_geo_df)
# define the LA column, which is LADXXNM where XX is the last 2 digits of the year, e.g. 21 from 2021
lad_col = f'LAD{pop_year[-2:]}NM'
# getting path for .shp file for LA's
uk_la_path = di.get_shp_file_name(dir=os.path.join(os.getcwd(),
"data",
"LA_shp",
pop_year))
# getting the coordinates for all LA's
uk_la_file = di.geo_df_from_geospatialfile(path_to_file=uk_la_path)
# Get list of all pop_estimate files for target year
pop_files = os.listdir(os.path.join(os.getcwd(),
"data/population_estimates",
pop_year
)
)
# Get the population data for the whole nation for the specified year
whole_nation_pop_df = di.get_whole_nation_pop_df(pop_files, pop_year)
# Get population weighted centroids into a dataframe
uk_pop_wtd_centr_df = (di.geo_df_from_geospatialfile
(os.path.join
(DATA_DIR,
'pop_weighted_centroids',
centroid_year)))
# Get output area boundaries
# OA_df = pd.read_csv(config["OA_boundaries_csv"])
# Links were changed at the source site which made the script fail.
# Manually downloading the csv for now
OA_boundaries_df = pd.read_csv(
os.path.join("data",
"Output_Areas__December_2011__Boundaries_EW_BGC.csv"))
# Merge with uk population df
uk_pop_wtd_centr_df = uk_pop_wtd_centr_df.merge(
OA_boundaries_df, on="OA11CD", how='left')
# Clean after merge
uk_pop_wtd_centr_df.drop('OBJECTID_y', axis=1, inplace=True)
uk_pop_wtd_centr_df.rename(columns={'OBJECTID_x': 'OBJECTID'}, inplace=True)
# Getting the urban-rural classification by OA for England and Wales
Urb_Rur_ZIP_LINK = config["Urb_Rur_ZIP_LINK"]
URB_RUR_TYPES = config["URB_RUR_TYPES"]
# Make a df of the urban-rural classification
urb_rur_df = (di.any_to_pd("RUC11_OA11_EW",
Urb_Rur_ZIP_LINK,
['csv'],
URB_RUR_TYPES))
# These are the codes (RUC11CD) mapping to rural and urban descriptions (RUC11)
# I could make this more succinct, but leaving here
# for clarity and maintainability
urban_dictionary = {'A1': 'Urban major conurbation',
'C1': 'Urban city and town',
'B1': 'Urban minor conurbation',
'C2': 'Urban city and town in a sparse setting'}
# mapping to a simple urban or rural classification
urb_rur_df["urb_rur_class"] = (urb_rur_df.RUC11CD.map
(lambda x: "urban"
if x in urban_dictionary.keys()
else "rural"))
# filter the df. We only want OA11CD and an urban/rural classification
urb_rur_df = urb_rur_df[['OA11CD', 'urb_rur_class']]
# joining urban rural classification onto the pop df
uk_pop_wtd_centr_df = (uk_pop_wtd_centr_df.merge
(urb_rur_df,
on="OA11CD",
how='left'))
# Joining the population dataframe to the centroids dataframe,
whole_nation_pop_df = whole_nation_pop_df.join(
other=uk_pop_wtd_centr_df.set_index('OA11CD'), on='OA11CD', how='left')
# Map OA codes to Local Authority Names
oa_la_lookup_path = di.get_oa_la_file_name(os.path.join(os.getcwd(),
"data/oa_la_mapping",
pop_year))
LA_df = pd.read_csv(oa_la_lookup_path, usecols=["OA11CD", lad_col])
whole_nation_pop_df = pd.merge(
whole_nation_pop_df, LA_df, how="left", on="OA11CD")
# Unique list of LA's to iterate through
list_local_auth = uk_la_file[lad_col].unique()
# selecting random LA for dev purposes
# eventually will iterate through all LA's
random_la=random.choice(list_local_auth)
list_local_auth=[random_la]
# define output dicts to capture dfs
total_df_dict = {}
sex_df_dict = {}
urb_rur_df_dict={}
disab_df_dict = {}
age_df_dict = {}
for local_auth in list_local_auth:
print(f"Processing: {local_auth}")
# Get a polygon of la based on the Location Code
la_poly = (gs.get_polygons_of_loccode(
geo_df=uk_la_file,
dissolveby=lad_col,
search=local_auth))
# Creating a Geo Dataframe of only stops in la
la_stops_geo_df = (gs.find_points_in_poly
(geo_df=stops_geo_df,
polygon_obj=la_poly))
# Make LA LSOA just containing local auth
uk_la_file = uk_la_file[[lad_col, 'geometry']]
# merge the two dataframes limiting to just the la
la_pop_df = whole_nation_pop_df.merge(uk_la_file,
how='right',
left_on=lad_col,
right_on=lad_col,
suffixes=('_pop', '_LA'))
# subset by the local authority name needed
la_pop_df = la_pop_df.loc[la_pop_df[lad_col] == local_auth]
# rename the "All Ages" column to pop_count as it's the population count
la_pop_df.rename(columns={"All Ages": "pop_count"}, inplace=True)
# Get a list of ages from config
age_lst = config['age_lst']
# Get a dataframe limited to the age columns only
age_df = dt.slice_age_df(la_pop_df, age_lst)
# Create a list of tuples of the start and finish indexes for the age bins
age_bins = dt.get_col_bins(age_lst)
# get the ages in the age_df binned, and drop the original columns
age_df = dt.bin_pop_ages(age_df, age_bins, age_lst)
# Ridding the la_pop df of the same cols
la_pop_df.drop(age_lst, axis=1, inplace=True)
# merging summed+grouped ages back in
la_pop_df =
|
pd.merge(la_pop_df, age_df, left_index=True, right_index=True)
|
pandas.merge
|
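# Sketch of the completion above: merging the binned-age frame back onto the
# population frame on the shared row index (toy frames, made-up numbers).
import pandas as pd

_la_pop = pd.DataFrame({"OA11CD": ["E00000001", "E00000002"], "pop_count": [310, 290]})
_age_bins = pd.DataFrame({"0-4": [20, 15], "5-9": [25, 30]})
_merged = pd.merge(_la_pop, _age_bins, left_index=True, right_index=True)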
import os
import logging
import warnings
import pathlib as pl
import scrapbook as sb
import papermill as pm
import numpy as np
import pandas as pd
from scipy.stats import norm
from scipy.integrate import trapz
from scipy.interpolate import interp1d
from IPython.core.display import display
import matplotlib as mpl
import plotly.graph_objs as go
from plotly.offline import iplot
from matplotlib import pyplot as plt
logging.basicConfig(level=logging.ERROR)
plib = "pathlib.Path"
# --------------------------------------------------------------------------#
"""Functions called by PM_Sampler_Ops.ipynb, SSP_to_Mean_Curve.ipynb,
Stratified_Sampler.ipynb, and Make_Production_Run_List.ipynb. These
notebooks calculate the mean flow frequency curve, specify a range of
annual exceedance probabilities with corresponding weights, and assign
those weights to discharge events.
"""
# --------------------------------------------------------------------------#
def make_directories(dir_lst: list, verbose: bool = True) -> None:
"""Check if each directory within the passed list exists and create any
missing directories.
"""
for directory in dir_lst:
if not os.path.isdir(directory):
os.makedirs(directory)
if verbose:
print('{0} - created\n'.format(str(directory)))
else:
if verbose:
print('{0} - already exists\n'.format(str(directory)))
return None
def list_ssp_files(path: plib, max_cl: float = 0.999,
verbose: bool = True) -> list:
"""Identify all .rpt files whose upper confidence limit is greater than
0.5 and less than or equal to the maximum defined upper confidence
limit.
"""
ssp_results = []
for file in pl.Path(path).glob('**/*.rpt'):
filename = file.name
assert '_' in filename, 'Filename does not include an "_" separating' \
' the gage ID from the confidence limit, ' \
'should be "GageID_UpperConfidenceLimit"'
split_stem = file.stem.split('_')
assert len(split_stem) == 2, 'Filename contains more than two ' \
'elements, should be ' \
'"GageID_UpperConfidenceLimit"'
cl = float(split_stem[1])
if cl > 100.0:
cl = cl/10.0
if 50.0 < cl <= (max_cl*100):
ssp_results.append(file)
if verbose:
print('{0} added to list'.format(filename))
elif cl > (max_cl*100):
if verbose:
print('{0} not added to list, above maximum confidence'
' limit'.format(filename))
elif cl <= 50:
if verbose:
print('{0} not added to list, below minimum confidence '
'limit'.format(filename))
else:
if verbose:
print('{0} not added to list, check naming '
'convention'.format(filename))
assert len(ssp_results) > 0, 'No .rpt files identified in {0}'.format(path)
if verbose:
print('')
return ssp_results
def make_ssp_table(ssp_results: list, version: str = '2_2') -> pd.DataFrame:
"""Create a table summarizing the SSP results where the index is the
annual exceedance probability, each column is a confidence limit,
and each cell is the corresponding discharge.
"""
df = pd.DataFrame()
assert_len = 'The .rpt files do not have the same number of annual ' \
'exceedance probabilities'
for i, file in enumerate(ssp_results):
if i == 0:
df = GetFreqCurves(file, version=version)
else:
tmp = GetFreqCurves(file, version=version)
cols = list(set(tmp.columns) - {'0.5', 'AEP'})
tmp = tmp[cols].copy()
assert df.shape[0] == tmp.shape[0], assert_len
df = df.merge(tmp, left_index=True, right_index=True)
df = df.set_index('AEP')
df = df.reindex(sorted(df.columns), axis=1)
cl_totals = [np.sum(df.iloc[:, i]) for i in range(df.shape[1])]
assert np.all(np.diff(cl_totals) > 0), 'Q not increasing with CL as ' \
'expected, check data'
col_lst = [float(col) for col in df.columns]
assert len(col_lst) == len(set(col_lst)), 'Duplicate columns'
return df
def GetFreqCurves(f: plib, version: str = '2_2') -> pd.DataFrame:
"""Read the passed .rpt file and extract the annual exceedance
probability, median flow frequency curve, and the upper and lower
confidence limits. Note that the user must specify the HEC-SSP version
used to create the .rpt files.
"""
assert version in ['2_1', '2_2'], 'GetFreqCurve can only read .rpt files' \
' from versions 2.1 and 2.2 of HEC-SSP'
if version == '2_1':
line_breaks = [(1, 13), (14, 26), (27, 40), (41, 53), (54, 65)]
else:
line_breaks = [(1, 15), (16, 29), (34, 42), (46, 60), (61, 74)]
read_results = False
aep = []
with open(f) as file:
lines = file.readlines()
for i, line in enumerate(lines):
if 'Upper Confidence Level:' in line:
high = float(line.split(':')[1].replace('\n', ''))
if 'Lower Confidence Level:' in line:
low = float(line.split(':')[1].replace('\n', ''))
if 'Frequency:' in line:
aep.append(float(line.split(':')[1].replace('\n', '')) / 100.0)
if 'Final Results' in line:
read_results = True
elif '<< Frequency Curve >>' in line and read_results:
skiprows = i + 7
assert (float(high) + float(low)) == 1.0, 'In {0} the upper and lower ' \
'confidence limit values do not' \
' add to 1.0, check the user ' \
'defined confidence limits in ' \
'HEC-SSP'.format(f)
cols = ['0.5', 'Variance', 'AEP', str(low), str(high)]
df = pd.read_fwf(f, skiprows=skiprows, colspecs=line_breaks, names=cols)
df = df[['0.5', str(low), str(high)]][0:len(aep)].copy()
for col in df.columns:
df[col] = df[col].apply(lambda x: float(x.replace(',', '')))
df['AEP'] = aep
return df
def monotonic_test(df: pd.DataFrame, adj_amount: float = 1.0, verbose: bool = True) -> pd.DataFrame:
"""Test that the discharge increases with decreasing annual exceedance
probability and adjust the discharge for the smallest annual exceedance
probabilities if not.
"""
no_adjust = True
for col in df.columns:
if np.diff(df[col]).max() >= 0:
no_adjust = False
maximum = df[col].max()
idx = df[col].idxmax()
diff = round(maximum - df.iloc[0][col], 1)
adj_df_idx = df.loc[:idx][col].index
num = len(adj_df_idx)
val = np.arange(maximum, maximum + num * adj_amount, adj_amount)
for i, v in enumerate(adj_df_idx):
df.loc[v, col] = val[num - 1 - i]
cl = float(col) * 100.0
aep = df.iloc[0].name
warnings.warn('Values not increasing with decreasing AEP for the {0}% '
'CL: difference of {1} between {2} and {3}. '
'Adjusting values\n'.format(cl, diff, aep, idx))
if no_adjust and verbose:
print('Values increase with decreasing annual exceedance '
'probability for all confidence limits as expected\n')
return df
def zvar(cl: list) -> np.ndarray:
"""Used to calculate the standard normal z variate of the passed
confidence limits or annual exceedance probabilities.
"""
clz = np.array([norm.ppf((1 - clim)) for clim in cl])
return clz
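# Quick numeric check of zvar: for symmetric confidence limits the z variates
# are symmetric around zero (values below are approximate).
import numpy as np
from scipy.stats import norm

_clz_example = np.array([norm.ppf(1 - c) for c in [0.05, 0.5, 0.95]])
# approximately [ 1.645, 0.000, -1.645]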
def binq(df: pd.DataFrame) -> np.ndarray:
"""Determines the minimum and maximum discharge value for the passed
dataframe and constructs an array of equally spaced discharge between
these two values.
"""
qmin = df.min().min()
qmax = df.max().max()
q = np.linspace(qmin, qmax, num=len(df))
return q
def interp_aep(df: pd.DataFrame, q: np.ndarray, clz: np.ndarray,
aepz: np.ndarray, extrapolate: bool = True) -> pd.DataFrame:
"""Apply linear interpolation/extrapolation to calculate AEP(z) for
each CL(z) and binned flow.
"""
df1 = df.copy()
df1['Q'] = q
df1.set_index('Q', inplace=True)
df1.columns = clz
if not extrapolate:
for cl in np.arange(len(clz)):
q_min = df.iloc[:, cl].min()
q_max = df.iloc[:, cl].max()
f = interp1d(df.iloc[:, cl], aepz)
for i in np.arange(len(q)):
q_val = q[i]
if q_val < q_min:
df1.iloc[i, cl] = aepz[-1]
if (q_val >= q_min) & (q_val <= q_max):
df1.iloc[i, cl] = f(q_val)
if q_val > q_max:
df1.iloc[i, cl] = aepz[0]
if extrapolate:
for cl in np.arange(len(clz)):
f = interp1d(df.iloc[:, cl], aepz, fill_value='extrapolate')
for i in np.arange(len(q)):
q_val = q[i]
df1.iloc[i, cl] = f(q_val)
return df1
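# Quick sketch of the extrapolate branch above: scipy's interp1d with
# fill_value='extrapolate' extends the linear fit beyond the sampled range.
import numpy as np
from scipy.interpolate import interp1d

_f_demo = interp1d(np.array([1.0, 2.0, 3.0]), np.array([10.0, 20.0, 30.0]),
                   fill_value='extrapolate')
assert np.isclose(float(_f_demo(4.0)), 40.0)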
def zvar_inv(df: pd.DataFrame, cl: list) -> pd.DataFrame:
"""Calculate the the inverse of the standard normal Z variate for each
annual exceedance probability and confidence limit.
"""
df.columns = cl
for clim in cl:
df[clim] = 1 - norm.cdf(df[clim])
return df
def mean_aep(df: pd.DataFrame, exclude_tails: bool = True) -> list:
"""Calculate the mean (expected) value of the annual exceedance
probability for each flow; the mean is equal to the area under the
CDF, which is calculated using the trapezoidal rule. If exclude_tails
is True, then the integration only includes the area between the lower
and upper confidence limits, else the integration includes the entire
distribution, i.e. from 0.0 to the lower confidence limit and from the
upper confidence limit to 1.0.
"""
aepm_lst = []
cl_arr = df.columns.values
for logq in df.index:
aep_arr = df.loc[logq].values
aepm = trapz(aep_arr, x=cl_arr)
if exclude_tails:
scale_val = cl_arr[-1] - cl_arr[0]
aepm = aepm/scale_val
else:
leftside = aep_arr[0] * cl_arr[0]
rightside = aep_arr[-1] * cl_arr[0]
aepm = aepm + leftside + rightside
aepm_lst.append(aepm)
return aepm_lst
def ffc_summary(standard_aep: list, standard_aepz: np.ndarray,
add_ri: bool = False, verbose: bool = False) -> pd.DataFrame:
"""Initialize a summary table to store the mean and median flow frequency
curves.
"""
df = pd.DataFrame(data={'AEPz': standard_aepz}, index=standard_aep)
df.index.name = 'AEP'
if add_ri:
df['RI'] = 1.0/df.index
if verbose:
display(df.head(2))
return df
def interp_q(aepz: np.ndarray, q: np.ndarray):
"""Create a function for calculating the log flow given the normal z
variate of the annual exceedance probability.
"""
f = interp1d(aepz, q, fill_value='extrapolate')
return f
def format_mean_curve(dic: dict, verbose: bool = True) -> pd.DataFrame:
"""Convert the mean flow frequency curve dictionary into a pandas
dataframe.
"""
df = pd.DataFrame.from_dict(dic)
df.reset_index(inplace=True)
df = df.rename(columns={'index': 'AEP'})
for col in df.columns:
df[col] =
|
pd.to_numeric(df[col], errors='coerce')
|
pandas.to_numeric
|
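# Sketch of the completion above: errors='coerce' converts values that cannot
# be parsed as numbers to NaN instead of raising (toy series).
import pandas as pd

_raw = pd.Series(["1.5", "2", "n/a"])
_coerced = pd.to_numeric(_raw, errors="coerce")  # 1.5, 2.0, NaN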
import os
import sys
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, compat
from pandas.util import testing as tm
class TestToCSV:
@pytest.mark.xfail((3, 6, 5) > sys.version_info >= (3, 5),
reason=("Python csv library bug "
"(see https://bugs.python.org/issue32255)"))
def test_to_csv_with_single_column(self):
# see gh-18676, https://bugs.python.org/issue32255
#
# Python's CSV library adds an extraneous '""'
# before the newline when the NaN-value is in
# the first row. Otherwise, only the newline
# character is added. This behavior is inconsistent
# and was patched in https://bugs.python.org/pull_request4672.
df1 =
|
DataFrame([None, 1])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import datetime
from data.dataloader.base import BaseLoader
class JHULoader(BaseLoader):
"""Dataloader that outputs time series case data for US states, counties, and US from
the JHU github repo 'https://www.github.com/CSSEGISandData/COVID-19/'
Allows the user to do fitting on US states, US counties, and all countries
Args:
BaseLoader (abstract class): Abstract Data Loader Class
"""
def __init__(self):
super().__init__()
def _modify_dataframe(self, df, column_name='RecoveredCases', province_info_column_idx=4):
"""Helper function for modifying the dataframes such that each row is a
snapshot of a country on a particular day
Args:
df (pd.DataFrame): dataframe to be modified
column_name (str, optional): Modification to be done for which column. Defaults to 'RecoveredCases'.
province_info_column_idx (int, optional): What is the column index of the
province info column. Defaults to 4.
Returns:
pd.DataFrame: Modified dataframe
"""
cases_matrix = df.to_numpy()[:, province_info_column_idx:]
cases_array = cases_matrix.reshape(-1, 1)
province_info_matrix = df.to_numpy()[:, :province_info_column_idx]
province_info_array = np.repeat(province_info_matrix, cases_matrix.shape[1], axis=0)
province_info_columns = df.columns[:province_info_column_idx]
date = pd.to_datetime(df.columns[province_info_column_idx:])
date_array = np.tile(date, cases_matrix.shape[0]).reshape(-1, 1)
data = np.concatenate((province_info_array, date_array, cases_array), axis=1)
df = pd.DataFrame(data=data, columns=province_info_columns.to_list() + ['Date', column_name])
df['Date'] = pd.to_datetime(df['Date'])
return df
def _load_from_daily_reports(self):
"""
This function parses the CSVs from JHU's daily_reports module
and converts them to pandas dataframes
This returns case counts for all US counties, states, and the US as a whole
Important to note that this returns `deceased`, `total`, `active` and `recovered` numbers
Returns:
pd.DataFrame: dataframe of case counts
"""
starting_date = datetime.datetime.strptime('04-12-2020', "%m-%d-%Y")
main_url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/' + \
'master/csse_covid_19_data/csse_covid_19_daily_reports_us/{}.csv'
total_days = (datetime.datetime.today() - starting_date).days
df_master = pd.read_csv(main_url.format(starting_date.strftime("%m-%d-%Y")))
df_master['date'] = starting_date
for i in range(1, total_days+1):
curr_date = starting_date + datetime.timedelta(days=i)
try:
df = pd.read_csv(main_url.format(curr_date.strftime("%m-%d-%Y")))
df['date'] = curr_date
df_master =
|
pd.concat([df_master, df], ignore_index=True)
|
pandas.concat
|
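# Sketch of the completion above: stacking the per-day frames with
# pandas.concat(ignore_index=True) so the result gets a fresh RangeIndex
# (toy frames, made-up numbers).
import pandas as pd

_day1 = pd.DataFrame({"Province_State": ["Alabama"], "Confirmed": [100]})
_day2 = pd.DataFrame({"Province_State": ["Alabama"], "Confirmed": [120]})
_df_master = pd.concat([_day1, _day2], ignore_index=True)
assert list(_df_master.index) == [0, 1]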
import os
import numpy as np
import pandas as pd
import streamlit as st
import time
from datetime import datetime
from glob import glob
from omegaconf import OmegaConf
from pandas.api.types import is_numeric_dtype
from streamlit_autorefresh import st_autorefresh
from dataloader import read_csv, clear_data
from preprocessing.filter import apply_filter
from preprocessing.target import apply_target, target_encode_numeric, target_encode_category
from preprocessing import delete_nan, replace_nan, delete_outlier, encode_category
from model import split_data, get_best_model
from analysis import get_shap_value, get_importance, simulation_1d, simulation_2d
from graph.evaluation import plot_reg_evaluation, plot_confusion_matrix
from graph.importance import plot_importance
from graph.explanation import plot_shap, plot_simulation_1d, plot_simulation_2d
from graph.matplot import plot_simulation_1d as matplotlib_simulation_1d
from graph.matplot import plot_shap as matplotlib_shap
from helper import get_session_id, encode, convert_figs2zip
# Warning
import warnings
warnings.filterwarnings('ignore')
# # Korean
# import matplotlib
# from matplotlib import font_manager, rc
# font_name = font_manager.FontProperties(fname="c:/Windows/Fonts/malgun.ttf").get_name()
# rc('font', family=font_name)
# matplotlib.rcParams['axes.unicode_minus'] = False
# Create Session
if 'config' not in st.session_state:
st.session_state['config'] = OmegaConf.load('config.yaml')
if 'files' not in st.session_state:
st.session_state['files'] = np.sort(glob(
os.path.join(
st.session_state['config']['file']['root'],
'*.csv'
)
))
if 'train_file_path' not in st.session_state:
st.session_state['train_file_path'] = None
if 'filter' not in st.session_state:
st.session_state['filter'] = None
if 'encoder' not in st.session_state:
st.session_state['encoder'] = None
if 'target' not in st.session_state:
st.session_state['target'] = None
if 'feature_all' not in st.session_state:
st.session_state['feature_all'] = None
if 'feature_selected' not in st.session_state:
st.session_state['feature_selected'] = None
if 'data_quality' not in st.session_state:
st.session_state['data_quality'] = None
if 'mode' not in st.session_state:
st.session_state['mode'] = None
if 'model' not in st.session_state:
st.session_state['model'] = None
if 'state_0' not in st.session_state:
st.session_state['state_0'] = None
if '_df_0' not in st.session_state:
st.session_state['_df_0'] = None
if 'state_1' not in st.session_state:
st.session_state['state_1'] = None
if '_df_1' not in st.session_state:
st.session_state['_df_1'] = None
if 'state_2' not in st.session_state:
st.session_state['state_2'] = None
if '_df_2' not in st.session_state:
st.session_state['_df_2'] = None
if 'state_3' not in st.session_state:
st.session_state['state_3'] = None
if '_df_3' not in st.session_state:
st.session_state['_df_3'] = None
# Title
st.markdown('# XAI for tree models')
st.write(f'SESSION ID: {get_session_id()}')
# STEP 1.
st.markdown('### STEP 1. Data preparation')
# Start Time
start_time = time.time()
# State 0: _df_0
state_0 = {}
# Select Train
train_file_path = st.selectbox(
label = 'Train Data',
options = st.session_state['files'],
index = 0
)
state_0['train_file_path'] = train_file_path
# update _df_0
if (
state_0 != st.session_state['state_0']
):
df = read_csv(
path = state_0['train_file_path'],
max_len = st.session_state['config']['data']['max_len'],
add_random_noise = st.session_state['config']['data']['add_random_noise'],
random_state = st.session_state['config']['setup']['random_state'],
)
df = clear_data(df)
# Update session state
st.session_state['train_file_path'] = state_0['train_file_path']
st.session_state['_df_0'] = df
st.session_state['model'] = None
# Print Options
st.sidebar.write('Options')
# State 1: _df_1
state_1 = {}
# Get Filter Number
num_filter = st.sidebar.number_input(
label = 'Filter',
value = 0,
min_value = 0,
max_value = len(st.session_state['_df_0'].columns),
step=1
)
# Get Filter Value
filter = {}
if num_filter > 0:
for i in range(num_filter):
column = st.selectbox(
label = f'Filtered column #{i+1}',
options = [None]+list(st.session_state['_df_0'].columns),
)
if column is not None:
values = list(
np.sort(st.session_state['_df_0'][column].dropna().unique())
)
selected_values = st.multiselect(
label = f'Select values #{i+1}',
options = values,
default = values
)
filter[column] = selected_values
state_1['filter'] = filter
# Get Mode
mode = st.selectbox(
label = 'Type',
options = ['Regression', 'Binary Classification']
)
state_1['mode'] = mode
# Get Target
target = st.selectbox(
label = 'Target',
options = list(st.session_state['_df_0'].columns)
)
state_1['target'] = target
# Target Encoding
if mode == 'Binary Classification':
values = st.session_state['_df_0'][target].dropna()
if is_numeric_dtype(values):
column_c0, column_i0, column_c1, column_i1 = st.columns(4)
with column_c0:
l_q = st.number_input(
label = 'Label 0 Upper Limit (%)',
value = 20,
min_value = 0,
max_value = 100,
step = 1
)
state_1['l_q'] = l_q
with column_c1:
h_q = st.number_input(
label = 'Label 0 Lower Limit (%)',
value = 80,
min_value = 0,
max_value = 100,
step = 1
)
state_1['h_q'] = h_q
with column_i0:
st.metric(
label = 'Label 0 Maximum',
value = f"{np.percentile(values, q=l_q):.4f}"
)
with column_i1:
st.metric(
label = 'Label 1 Minimum',
value = f"{np.percentile(values, q=h_q):.4f}"
)
else:
uniques = list(np.sort(np.unique(values)))
col_0, col_1 = st.columns(2)
with col_0:
label_0 = st.selectbox(
label = 'Label 0',
options = uniques,
index = 0
)
state_1['label_0'] = label_0
with col_1:
label_1 = st.selectbox(
label = 'Label 1',
options = [column for column in uniques if column != label_0],
index = 0
)
state_1['label_1'] = label_1
# update _df_1
if (
state_0 != st.session_state['state_0'] or
state_1 != st.session_state['state_1']
):
# Get DF
df = st.session_state['_df_0'].copy()
# Apply Filter
df = apply_filter(
df = df,
filter = filter
)
# Apply Target
df = apply_target(
df = df,
target = target
)
# Encode target if the mode is binary classification
if state_1['mode'] == 'Binary Classification':
if ('l_q' in state_1) and ('h_q' in state_1):
df = target_encode_numeric(
df = df,
target = state_1['target'],
l_q = state_1['l_q'],
h_q = state_1['h_q']
)
elif ('label_0' in state_1) and ('label_1' in state_1):
df = target_encode_category(
df = df,
target = state_1['target'],
label_0 = state_1['label_0'],
label_1 = state_1['label_1']
)
# Update session state
st.session_state['filter'] = state_1['filter']
st.session_state['target'] = state_1['target']
st.session_state['feature_all'] = [column for column in df.columns if column != state_1['target']]
st.session_state['data_quality'] = df.notnull().sum() / len(df)
st.session_state['mode'] = state_1['mode']
if ('l_q' in state_1) and ('h_q' in state_1):
st.session_state['l_q'] = state_1['l_q']
st.session_state['h_q'] = state_1['h_q']
st.session_state['label_0'] = None
st.session_state['label_1'] = None
elif ('label_0' in state_1) and ('label_1' in state_1):
st.session_state['l_q'] = None
st.session_state['h_q'] = None
st.session_state['label_0'] = state_1['label_0']
st.session_state['label_1'] = state_1['label_1']
else:
st.session_state['l_q'] = None
st.session_state['h_q'] = None
st.session_state['label_0'] = None
st.session_state['label_1'] = None
st.session_state['_df_1'] = df
st.session_state['model'] = None
# State 2: _df_2
state_2 = {}
# NaN Data
nan_data = st.sidebar.selectbox(
label = 'NaN Data',
options = ['Delete', 'Replace']
)
state_2['nan_data'] = nan_data
# Auto Feature Selection
auto_feature_selection = st.sidebar.selectbox(
label = 'Auto Feature Selection',
options = [False, True]
)
state_2['auto_feature_selection'] = auto_feature_selection
# update _df_2
if (
state_0 != st.session_state['state_0'] or
state_1 != st.session_state['state_1'] or
state_2 != st.session_state['state_2']
):
# Get DF
df = st.session_state['_df_1'].copy()
# Encode Data
df, encoder = encode_category(df)
# Update session state
st.session_state['nan_data'] = state_2['nan_data']
st.session_state['auto_feature_selection'] = auto_feature_selection
st.session_state['encoder'] = encoder
st.session_state['_df_2'] = df.reset_index(drop=True)
st.session_state['model'] = None
# State 3: _df_3
state_3 = {}
# Select Features
st.sidebar.markdown("""---""")
st.sidebar.write('Features')
st.sidebar.text(f'Data quality | name')
index = [
st.sidebar.checkbox(
label = f"{st.session_state['data_quality'][column]:.2f} | {column}",
key = f"_{column}",
value = True,
) for column in st.session_state['feature_all']
]
feature_selected = list(np.array(st.session_state['feature_all'])[index])
state_3['feature_selected'] = feature_selected
# Magage Features
def uncheck():
for column in st.session_state['feature_all']:
st.session_state[f'_{column}'] = False
def check():
for column in st.session_state['feature_all']:
st.session_state[f'_{column}'] = True
_, col_1, col_2 = st.sidebar.columns([1, 4, 5])
with col_1:
st.button(
label = 'Check All',
on_click = check
)
with col_2:
st.button(
label = 'Uncheck All',
on_click = uncheck
)
# update _df_3
if (
state_0 != st.session_state['state_0'] or
state_1 != st.session_state['state_1'] or
state_2 != st.session_state['state_2'] or
state_3 != st.session_state['state_3']
):
# Get DF
df = st.session_state['_df_2'].copy()
# Select columns
columns = state_3['feature_selected'] + [st.session_state['target']]
df = df[columns]
# Update session state
st.session_state['feature_selected'] = state_3['feature_selected']
st.session_state['_df_3'] = df
st.session_state['model'] = None
# Update states
st.session_state['state_0'] = state_0
st.session_state['state_1'] = state_1
st.session_state['state_2'] = state_2
st.session_state['state_3'] = state_3
# Data wall time
wall_time = time.time() - start_time
# Print Information
st.sidebar.markdown("""---""")
st.sidebar.write(f"Wall time: {wall_time:.4f} sec")
st.sidebar.write(f"Data Num: {len(st.session_state['_df_3'])}")
st.sidebar.write(f"Target: {st.session_state['target']}")
st.sidebar.write(f"Feature Num: {len(feature_selected)}")
# Print Encoder
columns = st.session_state['feature_selected'] + [st.session_state['target']]
encoder = {}
if len(st.session_state['encoder']) > 0:
for column in columns:
if column in st.session_state['encoder']:
encoder[column] = st.session_state['encoder'][column]
if len(encoder) > 0:
st.sidebar.write('Encoded Features')
st.sidebar.write(encoder)
# Print DF
st.write('Sample Data (5)')
st.write(st.session_state['_df_3'].iloc[:5])
# Train Model
if st.session_state['model'] is None:
st.markdown("""---""")
if st.button('Start Model Training'):
# Log
time_now = str(datetime.now())[:19]
print(f'START | {time_now} | {get_session_id()} | {st.session_state["train_file_path"]}')
# Load Data
df = st.session_state['_df_3'].copy()
features = st.session_state['feature_selected']
target = st.session_state['target']
if st.session_state['mode'] == 'Regression':
mode = 'reg'
if st.session_state['mode'] == 'Binary Classification':
mode = 'clf'
# NaN Data
df = df[features+[target]].copy()
if df.isna().sum().sum() == 0:
st.session_state['nan_processed'] = False
else:
if st.session_state['nan_data'] == 'Delete':
df = delete_nan(df)
elif st.session_state['nan_data'] == 'Replace':
df = replace_nan(
df = df,
random_state = st.session_state['config']['setup']['random_state']
)
st.session_state['nan_processed'] = True
st.session_state['data_num'] = len(df)
# Dataset
datasets = split_data(
df = df,
features = features,
target = target,
mode = mode,
n_splits = st.session_state['config']['split']['n_splits'],
shuffle = True,
random_state = st.session_state['config']['setup']['random_state']
)
# Best Model
best_model, history = get_best_model(
datasets = datasets,
mode = mode,
random_state = st.session_state['config']['setup']['random_state'],
n_jobs = st.session_state['config']['setup']['n_jobs']
)
best_model['features'] = features
best_model['target'] = target
best_model['datasets'] = datasets
# SHAP
source, shap_value = get_shap_value(
config = best_model,
max_num = st.session_state['config']['shap']['max_num']
)
output = get_importance(
shap_value,
sort = st.session_state['config']['importance']['sort'],
normalize = st.session_state['config']['importance']['normalize']
)
shap = {}
shap['features'] = output['features']
shap['importance'] = output['importance']
shap['source'] = source
shap['shap_value'] = shap_value
if (
st.session_state['auto_feature_selection'] and
'random_noise' in shap['features']
):
features = shap['features']
index = np.where(np.array(features)=='random_noise')[0][0]
if index != 0:
# Print Info
st.write('Auto Feature Selection is ON.')
# Set new features
features = features[:index]
# Dataset
datasets = split_data(
df = df,
features = features,
target = target,
mode = mode,
n_splits = st.session_state['config']['split']['n_splits'],
shuffle = True,
random_state = st.session_state['config']['setup']['random_state']
)
# Best Model
best_model, history = get_best_model(
datasets = datasets,
mode = mode,
random_state = st.session_state['config']['setup']['random_state'],
n_jobs = st.session_state['config']['setup']['n_jobs']
)
best_model['features'] = features
best_model['target'] = target
best_model['datasets'] = datasets
# SHAP
source, shap_value = get_shap_value(
config = best_model,
max_num = st.session_state['config']['shap']['max_num']
)
output = get_importance(
shap_value,
sort = st.session_state['config']['importance']['sort'],
normalize = st.session_state['config']['importance']['normalize']
)
shap = {}
shap['features'] = output['features']
shap['importance'] = output['importance']
shap['source'] = source
shap['shap_value'] = shap_value
# Update session state
st.session_state['history'] = history
st.session_state['model'] = best_model
st.session_state['shap'] = shap
# Refresh page
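# A short auto-refresh reruns the script so the evaluation branch below is rendered with the newly stored model.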
st_autorefresh(interval=100, limit=2)
# Result
else:
# STEP 2. Evaluation
st.markdown('### STEP 2. Evaluation')
# NaN Data
if st.session_state['nan_processed']:
st.write(f"NaN Data process mode is {st.session_state['nan_data']}.")
# Data number
st.write(f"Data Number: {st.session_state['data_num']}")
# Print Best Model
best = {}
best['name'] = st.session_state['model']['name']
best.update(st.session_state['model']['score'])
st.write('Best Model')
st.write(best)
# Print Score
st.write(st.session_state['history'])
# Graph
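# Regression plots true vs. predicted values; binary classification plots a confusion matrix, both from the stored oob_true/oob_pred arrays.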
if st.session_state['mode'] == 'Regression':
st.altair_chart(
plot_reg_evaluation(
true = st.session_state['model']['oob_true'],
pred = st.session_state['model']['oob_pred'],
target = st.session_state['model']['target']
),
use_container_width = True
)
elif st.session_state['mode'] == 'Binary Classification':
st.pyplot(
plot_confusion_matrix(
true = st.session_state['model']['oob_true'],
pred = st.session_state['model']['oob_pred'],
target = st.session_state['model']['target']
)
)
# STEP 3. Feature Importance
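# Importances come from the SHAP values computed at training time; the number input controls how many of the top features are plotted.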
features = st.session_state['shap']['features']
importance = st.session_state['shap']['importance']
col_1, col_2 = st.columns([3, 1])
with col_1:
st.markdown('### STEP 3. Feature Importance')
with col_2:
show_number = st.number_input(
label = 'Number',
value = np.minimum(10, len(features)),
min_value = 1,
max_value = len(features),
step = 1
)
st.altair_chart(
plot_importance(
features = features,
importance = importance,
target = st.session_state['model']['target'],
num = show_number
),
use_container_width=True
)
# Download CSV
df_importance = pd.DataFrame()
df_importance['feature'] = features
df_importance['importance'] = importance
st.download_button(
label = 'Download (.csv)',
data = df_importance.to_csv(index=False).encode('utf-8-sig'),
file_name = 'importance.csv',
mime = 'text/csv'
)
# STEP 4. Local Explanation
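# Three explanation views: per-feature SHAP values, and 1D/2D simulations that sweep one or two features through the trained models.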
df = st.session_state['_df_3']
source = st.session_state['shap']['source']
shap_value = st.session_state['shap']['shap_value']
col_1, col_2 = st.columns([3, 1])
with col_1:
st.markdown('### STEP 4. Local Explanation')
with col_2:
type_name = st.selectbox(
label = 'Type',
options = ['SHAP', '1D Simulation', '2D Simulation']
)
if type_name == 'SHAP':
feature = st.selectbox(
label = 'Feature',
options = features
)
st.altair_chart(
plot_shap(
x = source[feature].values,
y = shap_value[feature].values,
x_all = df[feature].dropna().values,
feature = feature,
target = st.session_state['model']['target'],
mean = np.mean(st.session_state['model']['oob_true'])
),
use_container_width = True
)
# Print Encode
if feature in st.session_state['encoder']:
st.write(feature)
st.write(st.session_state['encoder'][feature])
# Download CSV
df_shap = pd.DataFrame()
df_shap[feature] = source[feature].values
df_shap['SHAP Value'] = shap_value[feature].values
st.download_button(
label = 'Download (.csv)',
data = df_shap.to_csv(index=False).encode('utf-8-sig'),
file_name = 'shap.csv',
mime = 'text/csv'
)
# Download figures
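# Rebuild each per-feature SHAP chart with matplotlib and bundle all of them into a single zip for download.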
col_0, col_1 = st.columns(2)
with col_0:
if st.button('Extract all figures'):
progress = st.progress(0)
figs = []
for i, feature in enumerate(features):
# get figure
figs.append(
matplotlib_shap(
x = source[feature].values,
y = shap_value[feature].values,
x_all = df[feature].dropna().values,
feature = feature,
target = st.session_state['model']['target'],
mean = np.mean(st.session_state['model']['oob_true'])
)
)
# Update progress
progress.progress((i+1)/len(features))
# convert to zip
with col_1:
st.download_button(
label = 'Download (.zip)',
data = convert_figs2zip(figs),
file_name = 'shap.zip',
mime="application/octet-stream"
)
elif type_name == '1D Simulation':
feature = st.selectbox(
label = 'Feature',
options = features
)
x, y = simulation_1d(
datasets = st.session_state['model']['datasets'],
models = st.session_state['model']['models'],
features = st.session_state['model']['features'],
feature = feature,
mode = st.session_state['model']['type'],
num = st.session_state['config']['simulation']['num']
)
st.altair_chart(
plot_simulation_1d(
x = x,
y = y,
x_all = df[feature].dropna().values,
feature = feature,
target = st.session_state['model']['target']
),
use_container_width = True
)
# Print Encode
if feature in st.session_state['encoder']:
st.write(feature)
st.write(st.session_state['encoder'][feature])
# Download CSV
df_1d = pd.DataFrame()
df_1d[feature] = x
df_1d['Prediction'] = y
st.download_button(
label = 'Download (.csv)',
data = df_1d.to_csv(index=False).encode('utf-8-sig'),
file_name = '1d_simulation.csv',
mime = 'text/csv'
)
# Download figures
col_0, col_1 = st.columns(2)
with col_0:
if st.button('Extract all figures'):
progress = st.progress(0)
figs = []
for i, feature in enumerate(features):
# get x and y
x, y = simulation_1d(
datasets = st.session_state['model']['datasets'],
models = st.session_state['model']['models'],
features = st.session_state['model']['features'],
feature = feature,
mode = st.session_state['model']['type'],
num = st.session_state['config']['simulation']['num']
)
# get figure
figs.append(
matplotlib_simulation_1d(
x = x,
y = y,
x_all = df[feature].dropna().values,
feature = feature,
target = st.session_state['model']['target']
)
)
# Update progress
progress.progress((i+1)/len(features))
# convert to zip
with col_1:
st.download_button(
label = 'Download (.zip)',
data = convert_figs2zip(figs),
file_name = '1d_simulation.zip',
mime="application/octet-stream"
)
elif type_name == '2D Simulation':
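# The 2D simulation varies two features at once; the second selector excludes the feature chosen first.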
col_0, col_1 = st.columns(2)
with col_0:
feature_0 = st.selectbox(
label = 'Feature #1',
options = features
)
with col_1:
feature_1 = st.selectbox(
label = 'Feature #2',
options = [feature for feature in features if feature != feature_0]
)
x_0, x_1, y = simulation_2d(
datasets = st.session_state['model']['datasets'],
models = st.session_state['model']['models'],
features = st.session_state['model']['features'],
feature_0 = feature_0,
feature_1 = feature_1,
mode = st.session_state['model']['type'],
num = st.session_state['config']['simulation']['num']
)
st.altair_chart(
plot_simulation_2d(
x_0 = x_0,
x_1 = x_1,
y = y,
feature_0 = feature_0,
feature_1 = feature_1,
target = st.session_state['model']['target']
),
use_container_width = True
)
# Print Encode
if feature_0 in st.session_state['encoder']:
st.write(feature_0)
st.write(st.session_state['encoder'][feature_0])
if feature_1 in st.session_state['encoder']:
st.write(feature_1)
st.write(st.session_state['encoder'][feature_1])
# Download CSV
df_2d =
completion: pd.DataFrame()
api: pandas.DataFrame
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 7 13:45:11 2020
@author: anniewong
"""
import pandas as pd
import pickle
import sys
sys.path.insert(1, '../src/MyAIGuide/data')
from fitbitDataGatheredFromAPI import fitbitDataGatheredFromAPI
from store_diary_participant8 import store_retrieve_diary
from complaintsData import complaintsData
from storePainIntensitiesForParticipants2to9 import storePainIntensitiesForParticipants2to9
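# Helper loaders for Fitbit API data, diary entries, complaints and pain intensities (participants 2 to 9).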
foldername = "../data/raw/ParticipantData/Participant8Anonymized/"
diary = "../data/external/myaiguideconfidentialdata/Participant8/Participant8diaries.json"
# Create empty dataframe to fill
data =
completion: pd.DataFrame()
api: pandas.DataFrame