prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90) |
---|---|---|
import pandas as pd
from datetime import datetime, timedelta
from typing import Optional, Union
def check_prices(**kwargs) -> bool:
'''checks if one or more series of prices are of correct types'''
for key, value in kwargs.items():
if not isinstance(value, pd.Series):
print(f'{key} must be a pandas.Series')
return False
elif not isinstance(value.index, pd.DatetimeIndex):
print(f'index of {key} must be a pandas.DatetimeIndex')
return False
return True
def check_time(**kwargs) -> bool:
'''checks if one or more timestamps are of correct types'''
for key, value in kwargs.items():
if (not isinstance(value, pd.Timestamp)) and (not isinstance(value, datetime)):
print(f'{key} must be a pandas.Timestamp or datetime.datetime')
return False
return True
def convert_ticker(ticker) -> str:
'''converts ticker symbol for easier complementary API usage'''
assert ('XSHG' in ticker) or ('XSHE' in ticker) or ('SH' in ticker) or ('SZ' in ticker), 'Invalid ticker symbol'
if 'XSHG' in ticker:
ticker = ticker.replace('.XSHG', '.SH')
elif 'XSHE' in ticker:
ticker = ticker.replace('.XSHE', '.SZ')
elif 'SH' in ticker:
ticker = ticker.replace('.SH', '.XSHG')
elif 'SZ' in ticker:
ticker = ticker.replace('.SZ', '.XSHE')
return ticker
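# Minimal sanity checks (added for illustration, not part of the original module):
# the mapping is symmetric, so .XSHG/.XSHE suffixes become .SH/.SZ and vice versa.
assert convert_ticker('600000.XSHG') == '600000.SH'
assert convert_ticker('000001.SZ') == '000001.XSHE'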
def add_suffix(ticker:str) -> str:
'''
Adds Shanghai or Shenzhen stock exchange suffix (.SH or .SZ) to a ticker
Parameters
----------
ticker : str
the ticker symbol to add suffix onto
Examples
--------
>>> from xquant.util import add_suffix
>>> add_suffix('1')
'000001.SZ'
>>> add_suffix('300001')
'300001.SZ'
>>> add_suffix('600001')
'600001.SH'
'''
ticker = ticker.zfill(6)
if len(ticker) > 6:
raise Exception('Cannot interpret ticker symbol')
if ticker[0] == '6':
ticker += '.SH'
else:
ticker += '.SZ'
return ticker
def closest_trading_day(date, df_index, method: Optional[str] = None) -> pd.Timestamp:
'''gets the closest trading day according to a provided index'''
assert check_time(date=date)
assert isinstance(df_index, pd.DatetimeIndex), 'df_index must be a pandas.DatetimeIndex'
assert method in [None, 'ffill','bfill'], "method must be one of None, 'ffill', or 'bfill'"
date = pd.to_datetime(date)
open_days = df_index
try:
idx = open_days.get_loc(date)
except KeyError:
idx = open_days.get_loc(date, method=method)
day = open_days[idx]
return day
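def _closest_trading_day_example():
    # Illustrative sketch with a hypothetical calendar. It assumes a pandas
    # version (< 2.0) in which DatetimeIndex.get_loc still accepts `method=`,
    # which the helpers above rely on.
    trading_days = pd.DatetimeIndex(['2021-01-04', '2021-01-05', '2021-01-07'])
    missing = pd.Timestamp('2021-01-06')  # not a trading day
    before = closest_trading_day(missing, trading_days, method='ffill')  # 2021-01-05
    after = closest_trading_day(missing, trading_days, method='bfill')   # 2021-01-07
    return before, after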
def next_trading_day(date, df_index) -> pd.Timestamp:
'''gets the next trading day after a certain date according to a provided index'''
assert check_time(date=date)
assert isinstance(df_index, pd.DatetimeIndex), 'df_index must be a pandas.DatetimeIndex'
date = pd.to_datetime(date)
open_days = df_index
try:
idx = open_days.get_loc(date) + 1
next_day = open_days[idx]
except KeyError:
idx = open_days.get_loc(date, method='bfill')
next_day = open_days[idx]
return next_day
def last_trading_day(date, df_index) -> pd.Timestamp:
'''gets the last trading day before a certain date according to a provided index'''
assert check_time(date=date)
assert isinstance(df_index, pd.DatetimeIndex), 'df_index must be a pandas.DatetimeIndex'
date = pd.to_datetime(date)
open_days = df_index
try:
idx = open_days.get_loc(date) - 1
last_day = open_days[idx]
except KeyError:
idx = open_days.get_loc(date, method='ffill')
last_day = open_days[idx]
return last_day
def business_days(month, df_index) -> pd.DatetimeIndex:
    '''returns all business days in a month according to a provided index'''
    assert isinstance(df_index, pd.DatetimeIndex), 'df_index must be a pandas.DatetimeIndex'
    month = pd.to_datetime(month)
    return df_index[(df_index.year == month.year) & (df_index.month == month.month)]
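def _business_days_example():
    # Illustrative sketch with a hypothetical trading calendar: only the days
    # that fall inside the requested month are returned.
    calendar = pd.DatetimeIndex(['2021-01-29', '2021-02-01', '2021-02-02', '2021-03-01'])
    return business_days('2021-02', calendar)  # DatetimeIndex(['2021-02-01', '2021-02-02'])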
def quarter_generator(start, end) -> None:
'''
A generator that yields beginnings of quarters
'''
assert check_time(start=start, end=end)
date = start - timedelta(1)
end = end - | pd.tseries.offsets.BQuarterBegin(startingMonth=1) | pandas.tseries.offsets.BQuarterBegin |
import json
import numpy as np
import pytest
from pandas import DataFrame, Index, json_normalize
import pandas._testing as tm
from pandas.io.json._normalize import nested_to_record
@pytest.fixture
def deep_nested():
# deeply nested data
return [
{
"country": "USA",
"states": [
{
"name": "California",
"cities": [
{"name": "San Francisco", "pop": 12345},
{"name": "Los Angeles", "pop": 12346},
],
},
{
"name": "Ohio",
"cities": [
{"name": "Columbus", "pop": 1234},
{"name": "Cleveland", "pop": 1236},
],
},
],
},
{
"country": "Germany",
"states": [
{"name": "Bayern", "cities": [{"name": "Munich", "pop": 12347}]},
{
"name": "Nordrhein-Westfalen",
"cities": [
{"name": "Duesseldorf", "pop": 1238},
{"name": "Koeln", "pop": 1239},
],
},
],
},
]
@pytest.fixture
def state_data():
return [
{
"counties": [
{"name": "Dade", "population": 12345},
{"name": "Broward", "population": 40000},
{"name": "<NAME>", "population": 60000},
],
"info": {"governor": "<NAME>"},
"shortname": "FL",
"state": "Florida",
},
{
"counties": [
{"name": "Summit", "population": 1234},
{"name": "Cuyahoga", "population": 1337},
],
"info": {"governor": "<NAME>"},
"shortname": "OH",
"state": "Ohio",
},
]
@pytest.fixture
def author_missing_data():
return [
{"info": None},
{
"info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"},
"author_name": {"first": "Jane", "last_name": "Doe"},
},
]
@pytest.fixture
def missing_metadata():
return [
{
"name": "Alice",
"addresses": [
{
"number": 9562,
"street": "Morris St.",
"city": "Massillon",
"state": "OH",
"zip": 44646,
}
],
},
{
"addresses": [
{
"number": 8449,
"street": "Spring St.",
"city": "Elizabethton",
"state": "TN",
"zip": 37643,
}
]
},
]
@pytest.fixture
def max_level_test_input_data():
"""
input data to test json_normalize with max_level param
"""
return [
{
"CreatedBy": {"Name": "User001"},
"Lookup": {
"TextField": "Some text",
"UserField": {"Id": "ID001", "Name": "Name001"},
},
"Image": {"a": "b"},
}
]
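def _max_level_example():
    # Illustrative helper (not one of the original tests): json_normalize
    # flattens nested dicts into dotted column names, and max_level limits the
    # depth. With max_level=1 the record above keeps "Lookup.UserField" as a
    # dict instead of expanding it into "Lookup.UserField.Id" / ".Name".
    record = [
        {
            "CreatedBy": {"Name": "User001"},
            "Lookup": {
                "TextField": "Some text",
                "UserField": {"Id": "ID001", "Name": "Name001"},
            },
            "Image": {"a": "b"},
        }
    ]
    return json_normalize(record, max_level=1)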
class TestJSONNormalize:
def test_simple_records(self):
recs = [
{"a": 1, "b": 2, "c": 3},
{"a": 4, "b": 5, "c": 6},
{"a": 7, "b": 8, "c": 9},
{"a": 10, "b": 11, "c": 12},
]
result = json_normalize(recs)
expected = DataFrame(recs)
tm.assert_frame_equal(result, expected)
def test_simple_normalize(self, state_data):
result = json_normalize(state_data[0], "counties")
expected = DataFrame(state_data[0]["counties"])
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, "counties")
expected = []
for rec in state_data:
expected.extend(rec["counties"])
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, "counties", meta="state")
expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_empty_array(self):
result = json_normalize([])
expected = DataFrame()
tm.assert_frame_equal(result, expected)
def test_simple_normalize_with_separator(self, deep_nested):
# GH 14883
result = | json_normalize({"A": {"A": 1, "B": 2}}) | pandas.io.json.json_normalize |
import requests
from bs4 import BeautifulSoup
import json
import pandas as pd
def get_list_of_youtube_channels(term,n):
# initialize list of links
links = []
# get a list of links for channels while searching for a given term
for i in range(0,n,10):
r = requests.get("https://www.bing.com/search?q="+term+"%20site%3A%20https%3A%2F%2Fwww.youtube.com%2Fchannel%2F&first="+str(i))
soup = BeautifulSoup(r.text,'html.parser')
link_elements = soup.find_all('h2')
for l in link_elements:
try:
a = l.find('a')
links = links + [a['href']]
except:
pass
# get user ids from urls
channel_ids_rough = [l.split("/")[l.split("/").index('channel')+1] for l in links if "channel" in l.split("/")]
channel_ids = [c.split("?")[0] for c in channel_ids_rough]
# remove duplicates
channel_ids = list(set(channel_ids))
return channel_ids
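def _demo_channel_search():
    # Hypothetical usage sketch: this hits the live Bing endpoint and depends
    # on its current HTML layout, so treat the output as illustrative only.
    ids = get_list_of_youtube_channels("data science", 30)
    print(f"Found {len(ids)} unique channel ids, e.g. {ids[:3]}")
    return ids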
# get API key for the YouTube Data v3 API -- you can create one at https://console.developers.google.com/apis/credentials
def get_youtube_api_key():
with open('authentications/youtube_api_key.txt', 'r') as f:
api_key=f.read()
return api_key
def get_data_string_for_youtube_channels(channel_ids):
# initialize dataframe
df = | pd.DataFrame() | pandas.DataFrame |
from jug import TaskGenerator, CachedFunction
import os
from os import path
from jug.hooks.exit_checks import exit_if_file_exists, exit_env_vars
exit_env_vars()
exit_if_file_exists('jug.exit.marker')
def get_sample(f):
return path.split(f)[-1].split('_')[0]
BASE = '/g/scb2/bork/ralves/projects/genecat/outputs/'
HEADERS_FILE = 'GMGC.95nr.fna.sam.header'
def list_samfiles():
from glob import glob
samfiles = glob(BASE + '/*.iter2.*')
blacklist = set([get_sample(s) for s in samfiles if s.endswith('.sam.gz')])
return [s for s in samfiles if s.endswith('minimap.iter2.noseq.sam.xz') and get_sample(s) not in blacklist]
_gene_blacklist= None
def expand_sort(ifile, ofile):
import lzma
import subprocess
from os import unlink
from tempfile import TemporaryDirectory
global _gene_blacklist
if _gene_blacklist is None:
_gene_blacklist = set(line.strip() for line in open('redundant.complete.sorted.txt'))
print("Loaded blacklist with {} elements.".format(len(_gene_blacklist )))
BUFSIZE = 16*1024*1024
block = []
partials = []
def write_block(out_base):
nonlocal block
if not block:
return
block.sort()
out_name = out_base + '/' + str(len(partials)) + '.sam'
print(f'Writing to {out_name}')
with open(out_name, 'wt') as output:
for line in block:
output.write(line)
block = []
partials.append(out_name)
block = []
with TemporaryDirectory() as tdir:
for line in lzma.open(ifile, 'rt'):
if line[0] == '@':
continue
if line.split('\t')[2] in _gene_blacklist :
continue
block.append(line)
if len(block) == BUFSIZE:
write_block(tdir)
write_block(tdir)
subprocess.check_call(['sort', '--merge', '-o', ofile] + partials)
@TaskGenerator
def count1(samfile):
from ngless import NGLess
import tempfile
ofname = 'outputs/{}.txt'.format(get_sample(samfile))
ofname_u = 'outputs/{}.unique.txt'.format(get_sample(samfile))
sname = tempfile.NamedTemporaryFile(suffix='.sam', delete=False)
sname.close()
try:
sname = sname.name
expand_sort(samfile, sname)
sc = NGLess.NGLess('0.7')
e = sc.env
e.sam = sc.samfile_(sname, headers=HEADERS_FILE)
sc.write_(sc.count_(e.sam, features=['seqname'], multiple='{unique_only}', discard_zeros=True),
ofile=ofname_u + '.gz')
sc.write_(sc.count_(e.sam, features=['seqname'], discard_zeros=True),
ofile=ofname + '.gz')
sc.run(auto_install=False, ncpus='auto')
return ofname
finally:
os.unlink(sname)
_rename = None
def load_rename_table():
global _rename
if _rename is None:
_rename = {}
for line in open('/g/bork1/coelho/DD_DeCaF/genecats.cold/GMGC10.rename.table.txt'):
n, name = line.split()
_rename[name] = int(n.split('.')[1], 10)
return _rename
def load_gene2bactNOG():
gene2bactNOG = {}
for line in open('cold/annotations/GMGC.95nr.emapper.annotations'):
tokens = line.strip('\n').split('\t')
bactNOG = [b for b in tokens[9].split(',') if 'bactNOG' in b]
if len(bactNOG):
gene2bactNOG[tokens[0]] = bactNOG[0]
return gene2bactNOG
def load_gene2ko():
return load_annotation(6)
def load_gene2highNOG():
return load_annotation(11)
def load_annotation(ix):
group = {}
for line in open('cold/annotations/GMGC.95nr.emapper.annotations'):
tokens = line.strip('\n').split('\t')
tok = tokens[ix]
if len(tok):
group[tokens[0]] = tok
return group
_sizes = None
def scale(g, sizes):
total = g.sum()
gn = g / sizes[g.index]
gn *= total /gn.sum()
return gn
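def _scale_example():
    # Toy sketch with hypothetical numbers: counts are divided by gene length
    # and then rescaled so the per-sample total is preserved.
    import pandas as pd
    counts = pd.Series({'geneA': 10.0, 'geneB': 10.0})
    lengths = pd.Series({'geneA': 100.0, 'geneB': 400.0})
    # geneA is four times shorter than geneB, so it gets four times the weight:
    # geneA -> 16.0, geneB -> 4.0, and the total is still 20.0.
    return scale(counts, lengths)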
@TaskGenerator
def scale_combine(f):
f = f + '.gz'
import pandas as pd
import numpy as np
global _sizes
if _sizes is None:
_sizes = pd.read_table('tables/GMGC.95nr.sizes', index_col=0, squeeze=True)
_rename = load_rename_table()
non_unique = pd.read_table(f, index_col=0, squeeze=True)
fu = f.replace('.txt.gz', '.unique.txt.gz')
unique = pd.read_table(fu, index_col=0, squeeze=True)
unique_scaled = scale(unique, _sizes)
non_unique_scaled = scale(non_unique, _sizes)
pasted = pd.DataFrame({'raw_unique': unique, 'raw': non_unique, 'unique_scaled' : unique_scaled, 'scaled' : non_unique_scaled})
pasted.fillna(0, inplace=True)
pasted.rename(index=_rename, inplace=True)
pasted.reset_index(inplace=True)
pasted['index'] = pasted['index'].astype(np.int32)
fname = f.replace('.txt.gz', '.feather')
pasted.to_feather(fname)
return fname
@TaskGenerator
def compute_totals(counts):
import pandas as pd
totals = {}
for f in counts:
print(f"Loading {f}")
sample = f.split('/')[1].split('.')[0]
f = pd.read_feather(f.replace('txt', 'feather'))
totals[sample] = pd.Series({'total': f['raw'].values.sum(), 'total_unique': f['raw_unique'].values.sum()})
return pd.DataFrame(totals)
@TaskGenerator
def save_totals(totals):
totals.T.astype(int).to_csv('tables/total.tsv', sep='\t')
def prepare_gene2functional(annotation, oname_feather, oname_list):
import pandas as pd
rename = load_rename_table()
gi2func = {}
for k,v in annotation.items():
gi2func[rename[k]] = v
gi = pd.DataFrame({'gi2func': | pd.Series(gi2func) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
replacement_comparison.py
Functions to compare openSMILE outputs for various noise replacement methods
for each waveform in the sample.
Authors:
– <NAME>, 2017 (<EMAIL>)
© 2017, Child Mind Institute, Apache v2.0 License
@author: jon.clucas
"""
import numpy as np, os, pandas as pd, sys
if os.path.abspath('../../') not in sys.path:
if os.path.isdir(os.path.join(os.path.abspath('../..'), 'SM_openSMILE')):
sys.path.append(os.path.abspath('../..'))
elif os.path.isdir(os.path.join(os.path.abspath('..'), 'SM_openSMILE')):
sys.path.append(os.path.abspath('..'))
elif os.path.isdir('SM_openSMILE'):
sys.path.append(os.path.abspath('.'))
from SM_openSMILE.openSMILE_preprocessing.noise_replacement import \
condition_comparison_nr_all as cc, nr_box_plots as nbp
from SM_openSMILE.cfg import conditions, oSdir, configs
adults_replaced = os.path.dirname(oSdir)
def main():
# collect_mad_ranks()
plot_mad_ranks()
def collect_mad_ranks():
"""
Function to collect median absolute deviations for all [replacement ×
condition]s for each configuration for each URSI
Parameters
----------
None
Returns
-------
None
Outputs
-------
csv files
two csv files per URSI per configuration file: one for totals, and one
for summaries
"""
    replacements = ['removed', 'replaced_clone', 'replaced_pink', 'timeshifted']
for i, replacement in enumerate(replacements):
replacements[i] = '_'.join(['adults', replacement])
replacements = [*replacements, 'adults']
list_of_dataframes = []
for URSI in os.listdir(adults_replaced):
if URSI not in ['.DS_Store', 'summary']:
for condition in conditions:
for config_file in configs:
URSI_files = []
for method in replacements:
method_dir = os.path.join(adults_replaced, URSI,
'openSMILE_outputs', config_file,
method)
if os.path.isdir(method_dir):
if len(URSI_files) == 0:
or_dir = os.path.join(adults_replaced, URSI,
'openSMILE_outputs', config_file,
'original')
if os.path.isdir(or_dir):
for or_file in os.listdir(or_dir):
if condition in or_file:
URSI_files.append(os.path.join(
or_dir, or_file))
for csv_file in os.listdir(method_dir):
csv_path = os.path.join(method_dir, csv_file)
if condition in csv_file and csv_path not in \
URSI_files:
URSI_files.append(csv_path)
if len(URSI_files) == 6:
print(''.join(["Processing ", URSI, ", ", condition,
" : ", config_file]))
list_of_dataframes.append(["_".join([URSI, config_file]
), cc.build_dataframe(URSI,
['original', *replacements],
config_file, URSI_files)])
cc.mad_rank_analyses([list_of_dataframes], adults_replaced)
def plot_mad_ranks():
"""
Function to build boxplots for the outputs of collect_mad_ranks().
Parameters
----------
None
Returns
-------
None
Outputs
-------
svg files
one svg file for each config file
"""
for config in configs:
for y in ["sum(MAD)", "mean(MAD)"]:
out_path = os.path.join(oSdir, 'mad_ranks', config)
if not os.path.exists(out_path):
os.makedirs(out_path)
nbp.plot(sum_mad_ranks(config), out_path, y, "csv")
nbp.plot(sum_mad_ranks(config), out_path, y)
def sum_mad_ranks(config):
"""
Function to build boxplots for the outputs of collect_mad_ranks().
Parameters
----------
config : string
openSMILE config file
Returns
-------
df : pandas dataframe
pandas dataframe totalling all relevant summaries
Outputs
-------
csv files
one csv file for each config file
"""
df = | pd.DataFrame() | pandas.DataFrame |
""" Functions that load downloaded emoji data and prepare train/dev/test sets for NNs """
from math import ceil
import os
import string
import pandas as pd
import numpy as np
# import emoji
CHARACTERS = """ '",.\\/|?:;@'~#[]{}-=_+!"£$%^&*()abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890"""
def read_tweet_data(path):
"""" loads the csv (path) containing text and emoji data
returns a pandas dataframe containing line number, text, and emoji """
data = pd.read_csv(path, dtype='object')
data = data.loc[:, ['text', 'emoji']] # should contain two labelled columns
    # filter out column headers (rows where text='text' emoji='emoji')
filt = data['emoji'] != 'emoji'
return data[filt]
def filter_tweets_min_count(tweets, min_count=1000):
""" loads an m x 3 pandas dataframe (cols line number, text, emoji) and returns
filtered list with only emojis with >min_count examples """
return tweets.groupby('emoji').filter(lambda c: len(c) > min_count)
def filter_text_for_handles(text, chars=CHARACTERS):
""" takes an pd.Series of text, removes twitter handles from
text data - all text preceded by @ and then all characters not contained in
universal set"""
def filter_handles(txt): return ' '.join(
word for word in txt.split(' ') if not word.startswith('@'))
def filter_chars(txt): return ''.join([c for c in txt if c in chars])
def filter(txt): return filter_chars(filter_handles(txt))
return text.apply(filter)
def pad_text(text, length=160):
""" pads text with preceding whitespace and/or truncates tweet to 160 characters """
if len(text) > length:
return text[0:length]
padded_text = ' ' * (length - len(text)) + text
return padded_text
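# Quick illustration (added for clarity): short strings are left-padded with
# spaces, long strings are truncated to `length`.
assert pad_text('abc', length=5) == '  abc'
assert pad_text('abcdefgh', length=5) == 'abcde'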
def get_series_data_from_tweet(tweet, length=160, window_size=40, step=3):
""" input (tweet) is a pd.Series, a row of a pd.DataFrame
returns corresponding lists sentences (of length window_size)
and next_chars (single character). """
sentences = []
next_chars = []
# pad all tweets to 160 characters
# padded_text = ' ' * (160-tweet_length) + tweet['text']
padded_text = pad_text(tweet['text'], length=length)
for i in range(0, length - window_size, step):
sentences.append(padded_text[i:i+window_size])
next_chars.append(padded_text[i+window_size])
return (sentences, next_chars)
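def _windowing_example():
    # Worked sketch with hypothetical tiny sizes: length=10, window_size=4,
    # step=3 yields windows starting at positions 0 and 3, plus the character
    # that follows each window.
    tweet = pd.Series({'text': 'hello bird'})
    sentences, next_chars = get_series_data_from_tweet(
        tweet, length=10, window_size=4, step=3)
    # sentences == ['hell', 'lo b'], next_chars == ['o', 'i']
    return sentences, next_chars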
def get_emoji_and_series_data_from_tweet(tweet, length=160, window_size=40, step=3):
""" input (tweet) is a pd.Series, a row of a pd.DataFrame
returns corresponding lists sentences (of length window_size),
emoji and next_chars (both single characters). """
sentences = []
next_chars = []
emoji = []
# pad all tweets to 160 characters
# padded_text = ' ' * (160-tweet_length) + tweet['text']
padded_text = pad_text(tweet['text'], length=length)
for i in range(0, length - window_size, step):
sentences.append(padded_text[i:i+window_size])
next_chars.append(padded_text[i+window_size])
emoji.append(tweet['emoji'])
return (sentences, emoji, next_chars)
def get_unique_chars_list(list_strings):
""" takes list of strings, returns dict of all characters """
one_big_string = ' '.join(list_strings)
chars = sorted(list(set(one_big_string)))
# print('Unique chars: ', len(chars))
char_indices = dict((char, chars.index(char)) for char in chars)
return chars, char_indices
def get_emojis_list(emoji_pandas_series):
""" Gets a sorted list of unique emojis, and a dictionary of inverses"""
emojis = sorted(list(set(emoji_pandas_series)))
emoji_indices = dict((emoji, emojis.index(emoji)) for emoji in emojis)
return emojis, emoji_indices
def get_universal_chars_list():
""" gets a universal set of text characters and basic punctuation, suitable for using
on all tweets. returns set of characters and the index. """
return get_unique_chars_list(CHARACTERS)
def get_x_y_bool_arrays(sentences, next_chars):
""" takes the list of strings (sentences) and list of next_chars, and
one-hot encodes them using Boolean type, returns as arrays of x, y.
Now replaced by get_x_bool_array and get_y_bool_array as vectorisable versions
that work over a pd.Series"""
print("Deprecated! Use get_x_bool_array or get_y_bool_array instead")
chars, char_index = get_universal_chars_list()
text_x = np.zeros((len(sentences), len(sentences[0]),
len(chars)), dtype=np.bool)
text_y = np.zeros((len(sentences), len(char_index)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for pos, char in enumerate(sentence):
text_x[i, pos, char_index[char]] = 1
text_y[i, char_index[next_chars[i]]] = 1
return (text_x, text_y)
def get_x_bool_array(sentence, chars, char_index):
""" similar to get_x_y_bool_arrays() but operates on a single
sentence and returns a one-hot encoded bool array (dims len(sentence) x len(chars)).
Series chars is a list of recognised characters and char_index is the corresponding index"""
# chars, char_index = get_unique_chars_list(sentence)
text_x = np.zeros((len(sentence), len(sentence[0]),
len(chars)), dtype=np.bool)
# text_y = np.zeros((len(sentences), len(char_index)), dtype=np.bool)
for i, s in enumerate(sentence):
for pos, char in enumerate(s):
text_x[i, pos, char_index[char]] = 1
# text_y[i, char_index[next_chars[i]]] = 1
return np.asarray(text_x)
def get_y_bool_array(next_chars, char_index):
""" similar to get_x_y_bool_arrays() but operates on a single
sentence and returns a one-hot encoded bool array only (one dimension of size len(chars)).
Series chars is a list of recognised characters and char_index is the corresponding index"""
# Pass in a global list/index of characters so it's the same encoding for all tweets
# chars, char_index = get_unique_chars_list(sentence)
# text_x = np.zeros((len(sentence), len(sentence[0]),
# len(chars)), dtype=np.bool)
text_y = np.zeros((len(next_chars), len(char_index)), dtype=np.bool)
for i in range(len(next_chars)):
text_y[i, char_index[next_chars[i]]] = 1
return np.asarray(text_y)
def get_emoji_bool_array(emoji, emoji_index):
""" gets the one-hot encoded array for emojis, exactly like get_y_bool_array"""
#emoji_one_hot = np.zeros((1, len(emoji_index)), dtype=np.bool)
#emoji_one_hot[0, emoji_index[emoji]] = 1
emoji_one_hot = np.zeros((len(emoji), len(emoji_index)), dtype=np.bool)
for i in range(len(emoji)):
emoji_one_hot[i, emoji_index[emoji[i]]] = 1
return np.asarray(emoji_one_hot)
def x_y_bool_array_to_sentence(text_x, text_y, chars, position=0, separator=False):
""" converts one-hot encoded arrays text_x, text_y back to human
readable, for debug purposes """
def bool_array_to_char(bool_array, chars):
return chars[np.argmax(bool_array.astype(int))]
def decode_line(text_x, chars):
string = []
for i in range(text_x.shape[0]):
string.append(bool_array_to_char(text_x[i], chars))
return string
def decode_example(text_x, text_y):
# decodes x, y from array type back into english
if separator:
sep = ':'
else:
sep = ''
return(''.join(decode_line(text_x, chars)) + # decode x
sep + bool_array_to_char(text_y, chars)) # decode y
return decode_example(text_x[position], text_y[position])
def convert_tweet_to_xy(tweet, length=160, window_size=40, step=3):
""" converts a tweet (pd DataFrame with 'text' field) to x, y text pairs, where x is
window_size character moving window over the text, and y is the expected next character.
outputs an ndarray of dims (m, window_size, characters) where m is the final number of
training examples and characters is the number of characters in the set (78 by default) """
# apply the function to split each tweet into multiple windows of 40 chars and
# a corresponding n_char
# spits out a list of (x, y) tuples which is a real headache but we can fix it
assert length > window_size
zipped = tweet.apply(
lambda x: get_series_data_from_tweet(x, length=length, window_size=window_size, step=step),
axis=1)
(x_tuple, y_tuple) = zip(*zipped) # unzips the tuples into separate tuples of x, y
# get the universal character set and the corresponding index
chars_univ, char_idx_univ = get_universal_chars_list()
x_bool = pd.Series(x_tuple).apply(lambda x: get_x_bool_array(x, chars_univ, char_idx_univ))
y_bool = pd.Series(y_tuple).apply(lambda x: get_y_bool_array(x, char_idx_univ))
x_dims = (len(x_bool), # indexes over tweets
# indexes over different sentence windows ((160 - window_size) / step = 40)
x_bool[0].shape[0],
x_bool[0].shape[1], # indexes over characters in the window (window_size = 40)
x_bool[0].shape[2]) # one-hot encoding for each character (78)
y_dims = (len(y_bool), # indexes over tweets
y_bool[0].shape[0], # indexes over different sentence windows (40)
y_bool[0].shape[1]) # one-hot encoding for each character (78)
# allocate space for the array
x_arr = np.zeros(shape=x_dims)
y_arr = np.zeros(shape=y_dims)
for i, twit in enumerate(x_bool):
x_arr[i] = twit
for i, nchar in enumerate(y_bool):
y_arr[i] = nchar
# temp for profiling
del x_bool, y_bool
# finally, reshape into a (m, w, c) array
# where m is training example, w is window size,
# c is one-hot encoded character
x_fin = x_arr.reshape(x_arr.shape[0] * x_arr.shape[1], x_arr.shape[2], x_arr.shape[3])
# y is a (m, c) array, where m is training example and c is one-hot encoded character
y_fin = y_arr.reshape(y_arr.shape[0] * y_arr.shape[1], y_arr.shape[2])
# temp
del x_arr, y_arr
return x_fin, y_fin
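def _xy_shape_example():
    # Shape sketch on a hypothetical two-tweet frame. With the defaults
    # (length=160, window_size=40, step=3) each tweet contributes 40 windows,
    # so x has shape (2*40, 40, n_chars) and y has shape (2*40, n_chars),
    # where n_chars is the size of the universal character set (78 per the
    # comments above). Assumes a NumPy version where np.bool is still
    # available, as the encoding helpers above use it.
    tweets = pd.DataFrame({'text': ['hello world', 'the quick brown fox']})
    x, y = convert_tweet_to_xy(tweets)
    return x.shape, y.shape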
def convert_tweet_to_xy_generator(tweet, length=160, window_size=40,
step=3, batch_size=64, emoji_set=None):
""" generator function that batch converts tweets (from pd DataFrame of tweets) to tuple of (x,y)
data, (where x is (m, window_size, character_set_size) ndarray and y is an (m,character_set_size)
dimensional array) suitable for feeding to keras fit_generator.
If set of all emojis is passed in as emoji_set, then the x return
value is a list containing m,emoji_size matrix as well as the text.
Num training examples per tweet given by math.ceil((length - window_size)/step)"""
assert length > window_size
batch_num = 0
n_batches = int(tweet.shape[0] / batch_size) # terminate after last full batch for now
# calculate num training examples per tweet
m_per_tweet = int(ceil((length - window_size) / step))
# get the universal character set and its index
chars_univ, char_idx_univ = get_universal_chars_list()
if emoji_set:
emoji_idx = dict((emoji, emoji_set.index(emoji)) for emoji in emoji_set)
# allocate ndarray to contain one-hot encoded batch
x_dims = (batch_size, # num tweets
m_per_tweet,
window_size,
len(chars_univ)) # length of the one-hot vector
y_dims = (batch_size, # num tweets
m_per_tweet,
len(chars_univ)) # length of the one-hot vector
x_arr = np.zeros(shape=x_dims)
y_arr = np.zeros(shape=y_dims)
if emoji_set:
emoji_dims = (batch_size,
m_per_tweet,
len(emoji_set))
emoji_arr = np.zeros(shape=emoji_dims)
while batch_num < n_batches: # in case tweet < batch_size
# slice the batch
this_batch = tweet.iloc[(batch_num*batch_size):(batch_num+1)*batch_size]
# expand out all the tweets
if emoji_set:
zipped = this_batch.apply(
lambda x: get_emoji_and_series_data_from_tweet(
x, length=length, window_size=window_size, step=step),
axis=1)
# unzips the tuples into separate tuples of x, y
(x_tuple, emoji_tuple, y_tuple) = zip(*zipped)
else:
zipped = this_batch.apply(
lambda x: get_series_data_from_tweet(
x, length=length, window_size=window_size, step=step),
axis=1)
# unzips the tuples into separate tuples of x, y
(x_tuple, y_tuple) = zip(*zipped)
# turn each tuple into an series and then one-hot encode it
x_bool = pd.Series(x_tuple).apply(lambda x: get_x_bool_array(x, chars_univ, char_idx_univ))
y_bool = | pd.Series(y_tuple) | pandas.Series |
import os, glob, sys, io
import numpy as np
import pandas as pd # Timeseries data
import datetime as dt # Time manipulation
import yaml
try:
    import xarray as xr  # optional; only needed for the regional reanalysis data
except ImportError:
    pass  # init() falls back to a warning via its NameError handler below
from matplotlib.dates import date2num # Convert dates to matplotlib axis coords
from matplotlib import dates
from scipy import fftpack
from scipy import stats
from bin.tools import *
def init(config_file):
# Read configuration
with open(r'%s' % config_file) as file:
config_list = yaml.load(file, Loader=yaml.FullLoader)
src = config_list['sources']['ebas_ozone']
src_svanvik_OzoNorClim = config_list['sources']['svanvik_ozone']
src_rra = config_list['sources']['regional_ozone']
station_list = config_list['station_list']
workflow = config_list['workflow']
file.close()
# Read data
try:
data = {}
for station in station_list:
if station=='Barrow':
data.update({station:load_data(src+station+'/*', type="Barrow")})
else:
data.update({station:load_data(src+station+'/*.nas')})
except NameError:
sys.exit("Can't load ozone station data please check your source directory!")
# Concate Jergul and Karasjok data
data.update({'jergkara':pd.concat((data['Jergul'], data['Karasjok']))})
# Read and convert xls file data
data_svanvik_OzoNorClim = []
for file in sorted(glob.glob(src_svanvik_OzoNorClim)):
tmp_data_svanvik = pd.read_excel(file, index_col=0, header=0)
data_svanvik_OzoNorClim.append(tmp_data_svanvik['O3_mugm-3'].where(tmp_data_svanvik['O3_mugm-3']>=0.5).dropna()/2.)
# Concat data Svanvik data
data.update({'svanvik_OzoNorClim':pd.concat(data_svanvik_OzoNorClim)})
# Load regional model reanalysis 2018 and set time axis
try:
data_rra = xr.open_dataset(src_rra)
data_rra['time'] = pd.date_range("2018-01-01", periods=365*24, freq='H')
data.update({'rra':data_rra})
except NameError:
print("Warning: Can't load regional data please check your source directory!")
return(data, workflow)
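# A minimal configuration file that init() can parse might look like the
# hypothetical sketch below (all paths are placeholders, not the real data
# locations used in the study):
EXAMPLE_CONFIG_YAML = """
sources:
    ebas_ozone: /data/ebas_ozone/
    svanvik_ozone: /data/svanvik/*.xls
    regional_ozone: /data/rra/ozone_2018.nc
station_list:
    - Esrange
    - Pallas
    - Jergul
    - Karasjok
    - Svanvik
workflow: default
"""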
def extract_station_data(data, station_list):
from bin.station_info import station_location
local_rra = {}
for each in station_list:
local_rra.update({each:data['rra'].sel(lat=station_location[each].lat, lon=station_location[each].lon, method='nearest', time='2018-07')['O3']*0.5})
return(local_rra)
def compute_time_lag(data):
time_lag = range(-32,33)
lag_jergkara_esrange = []
lag_jergkara_pallas = []
lag_svanvik_esrange = []
lag_svanvik_pallas = []
lag_svanvik_jergkara = []
lag_label = ("jergkara_esrange","jergkara_pallas","svanvik_esrange","svanvik_pallas","svanvik_jergkara")
for i in time_lag:
lag_jergkara_esrange.append(time_lagged_corr(data['jergkara'], data['Esrange'], lag=i, pandas=True))
lag_jergkara_pallas.append(time_lagged_corr(data['jergkara'], data['Pallas'], lag=i, pandas=True))
lag_svanvik_esrange.append(time_lagged_corr(data['Svanvik'], data['Esrange'], lag=i, pandas=True))
lag_svanvik_pallas.append(time_lagged_corr(data['Svanvik'], data['Pallas'], lag=i, pandas=True))
lag_svanvik_jergkara.append(time_lagged_corr(data['Svanvik'], data['jergkara'], lag=i, pandas=True))
# Print maximum in lag
lag_max = {}
print("Lag correlation")
for i,lag in zip(lag_label,(lag_jergkara_esrange, lag_jergkara_pallas, lag_svanvik_esrange, lag_svanvik_pallas, lag_svanvik_jergkara)):
lag_max.update({i:np.array(time_lag)[np.where(np.array(lag)==np.array(lag).max())[0]][0]})
print("%s max at %d h" % (i, lag_max[i]))
return(lag_max)
def compute_clim(data):
doys = np.arange(1,367)
# Climatology from Esrange, Pallas, Jergul/Karasjok data
climatology = pd.concat((data['Esrange'][:'2012'], data['Pallas'][:'2012'], data['jergkara'][:'2012']))
# Daily mean climatology from Esrange, Pallas, Jergul/Karasjok data
yozone, yerr, yerr_mean = compute_climatology(climatology)
yozone_max, yerr_max, yerr_mean_max = compute_climatology(climatology, mode='max')
yozone_min, yerr_min, yerr_mean_min = compute_climatology(climatology, mode='min')
# Svanvik climatology
yozone_svanvik, yerr_svanvik, yerr_mean_svanvik = compute_climatology(data['Svanvik'])
yozone_max_svanvik, yerr_max_svanvik, yerr_mean_max_svanvik = compute_climatology(data['Svanvik'], mode='max')
yozone_min_svanvik, yerr_min_svanvik, yerr_mean_min_svanvik = compute_climatology(data['Svanvik'], mode='min')
# Hourly climatology
clim_hourly, clim_hourly_err, clim_hourly_err_mean = compute_climatology(climatology, mode='hourly')
clim_hourly_svanvik, clim_hourly_err_svanvik, clim_hourly_err_mean_svanvik = compute_climatology(data['Svanvik'], mode='hourly')
# Compute spline fits
from scipy.interpolate import UnivariateSpline
# Fennoscandic climatology
w = 1/yerr_mean
fitSpl_dmean = UnivariateSpline(doys, climatology.groupby(climatology.index.dayofyear).apply(np.nanmean), w=w)
dmax = climatology.resample('1d').apply(np.nanmax)
fitSpl_dmax = UnivariateSpline(doys, dmax.groupby(dmax.index.dayofyear).apply(np.nanmean))
# Svanvik
w_svanvik = 1/yerr_mean_svanvik
fitSpl_dmean_svanvik = UnivariateSpline(doys, data['Svanvik'].groupby(data['Svanvik'].index.dayofyear).apply(np.nanmean), w=w_svanvik)
dmax_svanvik = data['Svanvik'].resample('1d').apply(np.nanmax)
fitSpl_dmax_svanvik = UnivariateSpline(doys, dmax_svanvik.groupby(dmax_svanvik.index.dayofyear).apply(np.nanmean))
# Pickle splines for comparison with other data
import pickle
with open('obs_climatologies.pkl','wb') as output:
pickle.dump(fitSpl_dmean, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(fitSpl_dmean_svanvik, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(yerr_mean, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(yerr_mean_svanvik, output, pickle.HIGHEST_PROTOCOL)
return({'clim':clim_hourly, 'clim_err':clim_hourly_err, 'clim_err_mean':clim_hourly_err_mean},
{'clim':clim_hourly_svanvik, 'clim_err':clim_hourly_err_svanvik, 'clim_err_mean':clim_hourly_err_mean_svanvik})
def sample_climatology(clim, clim_svanvik):
# Sample from houerly climatology
sample_clim_svanvik = pd.DataFrame(pd.concat((clim_svanvik.iloc[:(31+28)*24],clim_svanvik.iloc[(31+29)*24:])).values, index=pd.date_range("2018-01-01 0:0", "2018-12-31 23:0", freq='H'))
sample_clim = pd.DataFrame(pd.concat((clim.iloc[:(31+28)*24],clim.iloc[(31+29)*24:])).values, index= | pd.date_range("2018-01-01 0:0", "2018-12-31 23:0", freq='H') | pandas.date_range |
"""
Functions for comparing and visualizing model performance. Most of these functions rely on ATOM's model tracker and
datastore services, which are not part of the standard AMPL installation, but a few functions will work on collections of
models saved as local files.
"""
import os
import sys
import pdb
import pandas as pd
import numpy as np
import matplotlib
import logging
import json
import shutil
import tarfile
import tempfile
from collections import OrderedDict
from atomsci.ddm.utils import datastore_functions as dsf
from atomsci.ddm.pipeline import model_tracker as trkr
import atomsci.ddm.pipeline.model_pipeline as mp
import atomsci.ddm.pipeline.parameter_parser as parse
import atomsci.ddm.pipeline.model_wrapper as mw
import atomsci.ddm.pipeline.featurization as feat
from tensorflow.python.keras.utils.layer_utils import count_params
logger = logging.getLogger('ATOM')
mlmt_supported = True
try:
from atomsci.clients import MLMTClient
except (ModuleNotFoundError, ImportError):
logger.debug("Model tracker client not supported in your environment; can look at models in filesystem only.")
mlmt_supported = False
matplotlib.rc('xtick', labelsize=12)
matplotlib.rc('ytick', labelsize=12)
matplotlib.rc('axes', labelsize=12)
logging.basicConfig(format='%(asctime)-15s %(message)s')
nan = np.float32('nan')
#------------------------------------------------------------------------------------------------------------------
def del_ignored_params(dictionary, ignored_params):
"""
Deletes ignored parameters from the dictionary if they exist
Args:
dictionary (dict): A dictionary with parameters
ignored_parameters (list(str)): A list of keys potentially in the dictionary
Returns:
None
"""
for ip in ignored_params:
if ip in dictionary:
del dictionary[ip]
#------------------------------------------------------------------------------------------------------------------
def get_collection_datasets(collection_name):
"""
Returns a list of unique training datasets used for all models in a given collection.
Args:
collection_name (str): Name of model tracker collection to search for models.
Returns:
list: List of model training (dataset_key, bucket) tuples.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
dataset_set = set()
mlmt_client = dsf.initialize_model_tracker()
dset_dicts = mlmt_client.model.query_datasets(collection_name=collection_name, metrics_type='training').result()
# Convert to a list of (dataset_key, bucket) tuples
for dset_dict in dset_dicts:
dataset_set.add((dset_dict['dataset_key'], dset_dict['bucket']))
return sorted(dataset_set)
#------------------------------------------------------------------------------------------------------------------
def extract_collection_perf_metrics(collection_name, output_dir, pred_type='regression'):
"""
Obtain list of training datasets with models in the given collection. Get performance metrics for
models on each dataset and save them as CSV files in the given output directory.
Args:
collection_name (str): Name of model tracker collection to search for models.
output_dir (str): Directory where tables of performance metrics will be written.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
None
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return
datasets = get_collection_datasets(collection_name)
os.makedirs(output_dir, exist_ok=True)
for dset_key, bucket in datasets:
dset_perf_df = get_training_perf_table(dset_key, bucket, collection_name, pred_type=pred_type)
dset_perf_file = '%s/%s_%s_model_perf_metrics.csv' % (output_dir, os.path.basename(dset_key).replace('.csv', ''), collection_name)
dset_perf_df.to_csv(dset_perf_file, index=False)
print('Wrote file %s' % dset_perf_file)
#------------------------------------------------------------------------------------------------------------------
def get_training_perf_table(dataset_key, bucket, collection_name, pred_type='regression', other_filters = {}):
"""
Load performance metrics from model tracker for all models saved in the model tracker DB under
a given collection that were trained against a particular dataset. Identify training parameters
that vary between models, and generate plots of performance vs particular combinations of
parameters.
Args:
dataset_key (str): Training dataset key.
bucket (str): Training dataset bucket.
collection_name (str): Name of model tracker collection to search for models.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
other_filters (dict): Other filter criteria to use in querying models.
Returns:
pd.DataFrame: Table of models and performance metrics.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
print("Finding models trained on %s dataset %s" % (bucket, dataset_key))
mlmt_client = dsf.initialize_model_tracker()
query_params = {
"match_metadata": {
"training_dataset.bucket": bucket,
"training_dataset.dataset_key": dataset_key,
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
query_params['match_metadata'].update(other_filters)
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
).result()
if metadata_list == []:
print("No matching models returned")
return
else:
print("Found %d matching models" % len(metadata_list))
model_uuid_list = []
model_type_list = []
max_epochs_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
featurizer_list = []
splitter_list = []
rf_estimators_list = []
rf_max_features_list = []
rf_max_depth_list = []
xgb_learning_rate_list = []
xgb_gamma_list = []
best_epoch_list = []
max_epochs_list = []
subsets = ['train', 'valid', 'test']
score_dict = {}
for subset in subsets:
score_dict[subset] = []
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
for metadata_dict in metadata_list:
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get model metrics for this model
metrics_dicts = metadata_dict['training_metrics']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
subset_metrics = {}
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
model_type = model_params['model_type']
model_type_list.append(model_type)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
split_params = metadata_dict['splitting_parameters']
splitter_list.append(split_params['splitter'])
dataset_key = metadata_dict['training_dataset']['dataset_key']
if model_type == 'NN':
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
xgb_learning_rate_list.append(nan)
xgb_gamma_list.append(nan)
if model_type == 'RF':
rf_params = metadata_dict['rf_specific']
rf_estimators_list.append(rf_params['rf_estimators'])
rf_max_features_list.append(rf_params['rf_max_features'])
rf_max_depth_list.append(rf_params['rf_max_depth'])
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
xgb_learning_rate_list.append(nan)
xgb_gamma_list.append(nan)
if model_type == 'xgboost':
xgb_params = metadata_dict['xgb_specific']
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
xgb_learning_rate_list.append(xgb_params["xgb_learning_rate"])
xgb_gamma_list.append(xgb_params["xgb_gamma"])
for subset in subsets:
score_dict[subset].append(subset_metrics[subset][metric_type])
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
model_type=model_type_list,
dataset_key=dataset_key,
featurizer=featurizer_list,
splitter=splitter_list,
max_epochs=max_epochs_list,
best_epoch=best_epoch_list,
learning_rate=learning_rate_list,
layer_sizes=layer_sizes_list,
dropouts=dropouts_list,
rf_estimators=rf_estimators_list,
rf_max_features=rf_max_features_list,
rf_max_depth=rf_max_depth_list,
xgb_learning_rate = xgb_learning_rate_list,
xgb_gamma = xgb_gamma_list))
for subset in subsets:
metric_col = '%s_%s' % (metric_type, subset)
perf_df[metric_col] = score_dict[subset]
sort_metric = '%s_valid' % metric_type
perf_df = perf_df.sort_values(sort_metric, ascending=False)
return perf_df
# -----------------------------------------------------------------------------------------------------------------
def extract_model_and_feature_parameters(metadata_dict):
"""
    Given a model metadata dictionary, extract model and feature parameters. Looks for parameter names
    that end in *_specific, e.g. nn_specific, auto_featurizer_specific.
    Args:
        metadata_dict (dict): Dictionary containing NON-FLATTENED metadata for an AMPL model
    Returns:
        dictionary containing featurizer and model parameters. Most contain the following
        keys: ['max_epochs', 'best_epoch', 'learning_rate', 'layer_sizes', 'dropouts',
        'rf_estimators', 'rf_max_features', 'rf_max_depth', 'xgb_gamma', 'xgb_learning_rate',
        'feat_parameters_dict', 'model_parameters_dict']
"""
model_params = metadata_dict['model_parameters']
model_type = model_params['model_type']
required = ['max_epochs', 'best_epoch', 'learning_rate', 'layer_sizes', 'dropouts',
'rf_estimators', 'rf_max_features', 'rf_max_depth', 'xgb_gamma', 'xgb_learning_rate']
model_info = {}
model_info['model_uuid'] = metadata_dict['model_uuid']
if model_type == 'NN':
nn_params = metadata_dict['nn_specific']
model_info['max_epochs'] = nn_params['max_epochs']
model_info['best_epoch'] = nn_params['best_epoch']
model_info['learning_rate'] = nn_params['learning_rate']
model_info['layer_sizes'] = ','.join(['%d' % s for s in nn_params['layer_sizes']])
model_info['dropouts'] = ','.join(['%.2f' % d for d in nn_params['dropouts']])
elif model_type == 'RF':
rf_params = metadata_dict['rf_specific']
model_info['rf_estimators'] = rf_params['rf_estimators']
model_info['rf_max_features'] = rf_params['rf_max_features']
model_info['rf_max_depth'] = rf_params['rf_max_depth']
elif model_type == 'xgboost':
xgb_params = metadata_dict['xgb_specific']
model_info['xgb_gamma'] = xgb_params['xgb_gamma']
model_info['xgb_learning_rate'] = xgb_params['xgb_learning_rate']
for r in required:
if r not in model_info:
# all fields must be filled in
model_info[r] = nan
# the new way of extracting model parameters is to simply save them in json
if 'nn_specific' in metadata_dict:
model_metadata = metadata_dict['nn_specific']
# include learning rate, max_epochs, and best_epoch for convenience
model_info['max_epochs'] = model_metadata['max_epochs']
model_info['best_epoch'] = model_metadata['best_epoch']
learning_rate_col = [c for c in model_metadata.keys() if c.endswith('learning_rate')]
if len(learning_rate_col) == 1:
model_info['learning_rate'] = model_metadata[learning_rate_col[0]]
# delete several parameters that aren't normally saved
ignored_params = ['batch_size','bias_init_consts','optimizer_type',
'weight_decay_penalty','weight_decay_penalty_type','weight_init_stddevs']
del_ignored_params(model_metadata, ignored_params)
elif 'rf_specific' in metadata_dict:
model_metadata = metadata_dict['rf_specific']
elif 'xgb_specific' in metadata_dict:
model_metadata = metadata_dict['xgb_specific']
# delete several parameters that aren't normally saved
ignored_params = ['xgb_colsample_bytree','xgb_max_depth',
'xgb_min_child_weight','xgb_n_estimators','xgb_subsample']
del_ignored_params(model_metadata, ignored_params)
else:
# no model parameters found
model_metadata = {}
model_info['model_parameters_dict'] = json.dumps(model_metadata)
if 'ecfp_specific' in metadata_dict:
feat_metadata = metadata_dict['ecfp_specific']
elif 'auto_featurizer_specific' in metadata_dict:
feat_metadata = metadata_dict['auto_featurizer_specific']
elif 'autoencoder_specific' in metadata_dict:
feat_metadata = metadata_dict['autoencoder_specific']
else:
# no model parameters found
feat_metadata = {}
model_info['feat_parameters_dict'] = json.dumps(feat_metadata)
return model_info
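def _example_extract_params():
    # Hypothetical minimal metadata dict for a random forest model. Fields that
    # do not apply to RF come back as nan, and the parameter dictionaries are
    # returned as JSON strings.
    meta = {
        'model_uuid': 'uuid-1234',
        'model_parameters': {'model_type': 'RF'},
        'rf_specific': {'rf_estimators': 500, 'rf_max_features': 32, 'rf_max_depth': None},
    }
    info = extract_model_and_feature_parameters(meta)
    # info['rf_estimators'] == 500, info['max_epochs'] is nan,
    # info['model_parameters_dict'] is meta['rf_specific'] serialized with json.dumps,
    # and info['feat_parameters_dict'] == '{}' because no featurizer block is present.
    return info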
# ------------------------------------------------------------------------------------------------------------------
def get_best_perf_table(metric_type, col_name=None, result_dir=None, model_uuid=None, metadata_dict=None, PK_pipe=False):
"""
Extract parameters and training run performance metrics for a single model. The model may be
specified either by a metadata dictionary, a model_uuid or a result directory; in the model_uuid case, the function
queries the model tracker DB for the model metadata. For models saved in the filesystem, can query the performance
data from the original result directory, but not from a saved tarball.
Args:
metric_type (str): Performance metric to include in result dictionary.
col_name (str): Collection name containing model, if model is specified by model_uuid.
result_dir (str): result directory of the model, if Model tracker is not supported and metadata_dict not provided.
model_uuid (str): UUID of model to query, if metadata_dict is not provided.
metadata_dict (dict): Full metadata dictionary for a model, including training metrics and
dataset metadata.
PK_pipe (bool): If True, include some additional parameters in the result dictionary specific to PK models.
Returns:
model_info (dict): Dictionary of parameter or metric name - value pairs.
Todo:
Add support for models saved as local tarball files.
"""
if not mlmt_supported and not result_dir:
print("Model tracker not supported in your environment; can examine models saved in filesystem only, 'result_dir' needs to be provided.")
return None
elif mlmt_supported and col_name:
mlmt_client = dsf.initialize_model_tracker()
if metadata_dict is None:
if model_uuid is None:
print("Have to specify either metadata_dict or model_uuid")
return
query_params = {
"match_metadata": {
"model_uuid": model_uuid,
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
metadata_list = list(mlmt_client.model.query_model_metadata(
collection_name=col_name,
query_params=query_params
).result())
if len(metadata_list) == 0:
print("No matching models returned")
return None
metadata_dict = metadata_list[0]
elif result_dir:
model_dir = ""
for dirpath, dirnames, filenames in os.walk(result_dir):
if model_uuid in dirnames:
model_dir = os.path.join(dirpath, model_uuid)
break
if model_dir:
with open(os.path.join(model_dir, 'model_metadata.json')) as f:
metadata_dict = json.load(f)
else:
print(f"model_uuid ({model_uuid}) not exist in {result_dir}.")
return None
model_info = {}
model_info['model_uuid'] = metadata_dict['model_uuid']
model_info['collection_name'] = col_name
# Get model metrics for this model
metrics_dicts = [d for d in metadata_dict['training_metrics'] if d['label'] == 'best']
if len(metrics_dicts) != 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
return None
model_params = metadata_dict['model_parameters']
model_info['model_type'] = model_params['model_type']
model_info['featurizer'] = model_params['featurizer']
split_params = metadata_dict['splitting_parameters']
model_info['splitter'] = split_params['splitter']
if 'split_uuid' in split_params:
model_info['split_uuid'] = split_params['split_uuid']
model_info['dataset_key'] = metadata_dict['training_dataset']['dataset_key']
model_info['bucket'] = metadata_dict['training_dataset']['bucket']
dset_meta = metadata_dict['training_dataset']['dataset_metadata']
if PK_pipe:
model_info['assay_name'] = dset_meta.get('assay_category', 'NA')
model_info['response_col'] = dset_meta.get('response_cols', dset_meta.get('response_col', 'NA'))
try:
model_info['descriptor_type'] = metadata_dict['descriptor_specific']['descriptor_type']
except KeyError:
model_info['descriptor_type'] = 'NA'
try:
model_info['num_samples'] = dset_meta['num_row']
except:
# KSM: Commented out because original dataset may no longer be accessible.
#tmp_df = dsf.retrieve_dataset_by_datasetkey(model_info['dataset_key'], model_info['bucket'])
#model_info['num_samples'] = tmp_df.shape[0]
model_info['num_samples'] = nan
# add model and feature params
# model_uuid appears in model_feature_params and will overwrite the one in model_info
# it's the same uuid, so it should be ok
model_feature_params = extract_model_and_feature_parameters(metadata_dict)
model_info.update(model_feature_params)
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
metric_col = '%s_%s' % (metric_type, subset)
model_info[metric_col] = metrics_dict['prediction_results'][metric_type]
if (model_params['prediction_type'] == 'regression') and (metric_type != 'rms_score'):
metric_col = 'rms_score_%s' % subset
model_info[metric_col] = metrics_dict['prediction_results']['rms_score']
return model_info
# ---------------------------------------------------------------------------------------------------------
def get_best_models_info(col_names=None, bucket='public', pred_type="regression", result_dir=None, PK_pipeline=False,
output_dir='/usr/local/data',
shortlist_key=None, input_dset_keys=None, save_results=False, subset='valid',
metric_type=None, selection_type='max', other_filters={}):
"""
Tabulate parameters and performance metrics for the best models, according to a given metric, trained against
each specified dataset.
Args:
col_names (list of str): List of model tracker collections to search.
bucket (str): Datastore bucket for training datasets.
pred_type (str): Type of models (regression or classification).
result_dir (list of str): Result directories of the models, if model tracker is not supported.
PK_pipeline (bool): Are we being called from PK pipeline?
output_dir (str): Directory to write output table to.
shortlist_key (str): Datastore key for table of datasets to query models for.
input_dset_keys (str or list of str): List of datastore keys for datasets to query models for. Either shortlist_key
or input_dset_keys must be specified, but not both.
save_results (bool): If True, write the table of results to a CSV file.
subset (str): Input dataset subset ('train', 'valid', or 'test') for which metrics are used to select best models.
metric_type (str): Type of performance metric (r2_score, roc_auc_score, etc.) to use to select best models.
selection_type (str): Score criterion ('max' or 'min') to use to select best models.
other_filters (dict): Additional selection criteria to include in model query.
Returns:
top_models_df (DataFrame): Table of parameters and metrics for best models for each dataset.
"""
if not mlmt_supported and not result_dir:
print("Model tracker not supported in your environment; can examine models saved in filesystem only, 'result_dir' needs to be provided.")
return None
top_models_info = []
sort_order = {'max': -1, 'min': 1}
sort_ascending = {'max': False, 'min': True}
if metric_type is None:
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
if other_filters is None:
other_filters = {}
# define dset_keys
if input_dset_keys is not None and shortlist_key is not None:
raise ValueError("You can specify either shortlist_key or input_dset_keys but not both.")
elif input_dset_keys is not None and shortlist_key is None:
if type(input_dset_keys) == str:
dset_keys = [input_dset_keys]
else:
dset_keys = input_dset_keys
elif input_dset_keys is None and shortlist_key is None:
raise ValueError('Must specify either input_dset_keys or shortlist_key')
else:
dset_keys = dsf.retrieve_dataset_by_datasetkey(shortlist_key, bucket)
if dset_keys is None:
# define dset_keys, col_names and buckets from shortlist file
shortlist = pd.read_csv(shortlist_key)
if 'dataset_key' in shortlist.columns:
dset_keys = shortlist['dataset_key'].unique()
elif 'task_name' in shortlist.columns:
dset_keys = shortlist['task_name'].unique()
else:
dset_keys = shortlist.values
if 'collection' in shortlist.columns:
col_names = shortlist['collection'].unique()
if 'bucket' in shortlist.columns:
bucket = shortlist['bucket'].unique()
if mlmt_supported and col_names is not None:
mlmt_client = dsf.initialize_model_tracker()
if type(col_names) == str:
col_names = [col_names]
if type(bucket) == str:
bucket=[bucket]
# Get the best model over all collections for each dataset
for dset_key in dset_keys:
dset_key = dset_key.strip()
dset_model_info = []
for col_name in col_names:
for buck in bucket:
try:
query_params = {
"match_metadata": {
"training_dataset.dataset_key": dset_key,
"training_dataset.bucket": buck,
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
"subset": subset,
"$sort": [{"prediction_results.%s" % metric_type : sort_order[selection_type]}]
},
}
query_params['match_metadata'].update(other_filters)
try:
print('Querying collection %s for models trained on dataset %s, %s' % (col_name, buck, dset_key))
metadata_list = list(mlmt_client.model.query_model_metadata(
collection_name=col_name,
query_params=query_params,
limit=1
).result())
except Exception as e:
print("Error returned when querying the best model for dataset %s in collection %s" % (dset_key, col_name))
print(e)
continue
if len(metadata_list) == 0:
print("No models returned for dataset %s in collection %s" % (dset_key, col_name))
continue
print('Query returned %d models' % len(metadata_list))
model = metadata_list[0]
model_info = get_best_perf_table(metric_type, col_name, metadata_dict=model, PK_pipe=PK_pipeline)
if model_info is not None:
res_df = pd.DataFrame.from_records([model_info])
dset_model_info.append(res_df)
except Exception as e:
print(e)
continue
metric_col = '%s_%s' % (metric_type, subset)
if len(dset_model_info) > 0:
dset_model_df = pd.concat(dset_model_info, ignore_index=True).sort_values(
by=metric_col, ascending=sort_ascending[selection_type])
top_models_info.append(dset_model_df.head(1))
print('Adding data for bucket %s, dset_key %s' % (dset_model_df.bucket.values[0], dset_model_df.dataset_key.values[0]))
elif result_dir:
metric_col = '%s_%s' % (subset, metric_type)
for rd in result_dir:
temp_perf_df = get_filesystem_perf_results(result_dir = rd, pred_type = pred_type).sort_values(
by=metric_col, ascending=sort_ascending[selection_type])
top_models_info.append(temp_perf_df.head(1))
print(f"Adding data from '{rd}' ")
if len(top_models_info) == 0:
print("No metadata found")
return None
top_models_df = pd.concat(top_models_info, ignore_index=True)
if save_results:
os.makedirs(output_dir, exist_ok=True)
if shortlist_key is not None:
# Not including shortlist key right now because some are weirdly formed and have .csv in the middle
top_models_df.to_csv(os.path.join(output_dir, 'best_models_metadata.csv'), index=False)
else:
for dset_key in input_dset_keys:
# TODO: This doesn't make sense; why output multiple copies of the same table?
# Note: rstrip('.csv') strips any trailing 'c', 's' or 'v' characters, so remove the suffix explicitly
shortened_key = dset_key[:-len('.csv')] if dset_key.endswith('.csv') else dset_key
top_models_df.to_csv(os.path.join(output_dir, 'best_models_metadata_%s.csv' % shortened_key), index=False)
return top_models_df
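# Hedged usage sketch (not part of the original module): the collection name, bucket and
# dataset key below are hypothetical placeholders; keyword names are taken from the
# parameters referenced in the function body above.
#
#   best_df = get_best_models_info(col_names='pilot_models', bucket='public',
#                                  pred_type='regression',
#                                  input_dset_keys='dsets/solubility_curated.csv',
#                                  subset='valid', metric_type='r2_score',
#                                  selection_type='max', save_results=False)
#
# If the model tracker is unavailable, pass result_dir=['/path/to/results'] instead of
# collection names to rank models saved on the filesystem.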
# TODO: This function looks like work in progress, should we delete it?
'''
#---------------------------------------------------------------------------------------------------------
def _get_best_grouped_models_info(collection='pilot_fixed', pred_type='regression', top_n=1, subset='test'):
"""
Get results for models in the given collection.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return
res_dir = '/usr/local/data/%s_perf' % collection
plt_dir = '%s/Plots' % res_dir
os.makedirs(plt_dir, exist_ok=True)
res_files = os.listdir(res_dir)
suffix = '_%s_model_perf_metrics.csv' % collection
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
for res_file in res_files:
try:
if not res_file.endswith(suffix):
continue
res_path = os.path.join(res_dir, res_file)
res_df = pd.read_csv(res_path, index_col=False)
res_df['combo'] = ['%s/%s' % (m,f) for m, f in zip(res_df.model_type.values, res_df.featurizer.values)]
dset_name = res_file.replace(suffix, '')
datasets.append(dset_name)
res_df['dataset'] = dset_name
print(dset_name)
res_df = res_df.sort_values('{0}_{1}'.format(metric_type, subset), ascending=False)
res_df['model_type/feat'] = ['%s/%s' % (m,f) for m, f in zip(res_df.model_type.values, res_df.featurizer.values)]
res_df = res_df.sort_values('{0}_{1}'.format(metric_type, subset), ascending=False)
grouped_df = res_df.groupby('model_type/feat').apply(
lambda t: t.head(top_n)
).reset_index(drop=True)
top_grouped_models.append(grouped_df)
top_combo = res_df['model_type/feat'].values[0]
top_combo_dsets.append(top_combo + dset_name.lstrip('ATOM_GSK_dskey'))
top_score = res_df['{0}_{1}'.format(metric_type, subset)].values[0]
top_model_feat.append(top_combo)
top_scores.append(top_score)
num_samples.append(res_df['Dataset Size'][0])
'''
#------------------------------------------------------------------------------------------------------------------
def get_umap_nn_model_perf_table(dataset_key, bucket, collection_name, pred_type='regression', other_filters=None):
"""
Load performance metrics from model tracker for all NN models with the given prediction_type saved in
the model tracker DB under a given collection that were trained against a particular dataset. Show
parameter settings for UMAP transformer for models where they are available.
Args:
dataset_key (str): Dataset key for training dataset.
bucket (str): Dataset bucket for training dataset.
collection_name (str): Name of model tracker collection to search for models.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
other_filters (dict): Optional additional metadata filter criteria to apply to the model query.
Returns:
pd.DataFrame: Table of model performance metrics.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
query_params = {
"match_metadata": {
"training_dataset.bucket": bucket,
"training_dataset.dataset_key": dataset_key,
"model_parameters.model_type" : "NN",
"model_parameters.prediction_type" : pred_type
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
if other_filters:
query_params['match_metadata'].update(other_filters)
print("Finding models trained on %s dataset %s" % (bucket, dataset_key))
mlmt_client = dsf.initialize_model_tracker()
metadata_list = list(mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
).result())
if metadata_list == []:
print("No matching models returned")
return
else:
print("Found %d matching models" % len(metadata_list))
model_uuid_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
featurizer_list = []
best_epoch_list = []
max_epochs_list = []
feature_transform_type_list = []
umap_dim_list = []
umap_targ_wt_list = []
umap_neighbors_list = []
umap_min_dist_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
sort_metric = 'r2_score'
metrics = ['r2_score', 'rms_score', 'mae_score']
else:
sort_metric = 'roc_auc_score'
metrics = ['roc_auc_score', 'prc_auc_score', 'matthews_cc', 'kappa', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
for metadata_dict in metadata_list:
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get model metrics for this model
metrics_dicts = metadata_dict['training_metrics']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
if len(metrics_dicts) > 3:
raise Exception('Got more than one set of best epoch metrics for model %s' % model_uuid)
subset_metrics = {}
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_params = metadata_dict['model_parameters']
model_type = model_params['model_type']
if model_type != 'NN':
continue
# Append to the per-model lists only after the model type check, so the lists stay in sync
model_uuid_list.append(model_uuid)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
feature_transform_type = metadata_dict['training_dataset']['feature_transform_type']
feature_transform_type_list.append(feature_transform_type)
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
if 'umap_specific' in metadata_dict:
umap_params = metadata_dict['umap_specific']
umap_dim_list.append(umap_params['umap_dim'])
umap_targ_wt_list.append(umap_params['umap_targ_wt'])
umap_neighbors_list.append(umap_params['umap_neighbors'])
umap_min_dist_list.append(umap_params['umap_min_dist'])
else:
umap_dim_list.append(np.nan)
umap_targ_wt_list.append(np.nan)
umap_neighbors_list.append(np.nan)
umap_min_dist_list.append(np.nan)
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
learning_rate=learning_rate_list,
dropouts=dropouts_list,
layer_sizes=layer_sizes_list,
featurizer=featurizer_list,
best_epoch=best_epoch_list,
max_epochs=max_epochs_list,
feature_transform_type=feature_transform_type_list,
umap_dim=umap_dim_list,
umap_targ_wt=umap_targ_wt_list,
umap_neighbors=umap_neighbors_list,
umap_min_dist=umap_min_dist_list ))
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (metric, subset)
perf_df[metric_col] = score_dict[subset][metric]
sort_by = '%s_valid' % sort_metric
perf_df = perf_df.sort_values(sort_by, ascending=False)
return perf_df
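# Hedged usage sketch (hypothetical dataset key, bucket and collection name):
#
#   umap_perf_df = get_umap_nn_model_perf_table(
#       dataset_key='dsets/solubility_curated.csv', bucket='public',
#       collection_name='pilot_models', pred_type='regression')
#
# The returned table is sorted by the validation-set metric (r2_score or roc_auc_score),
# with the UMAP transformer parameters set to NaN for models that did not use a UMAP transform.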
#------------------------------------------------------------------------------------------------------------------
def get_tarball_perf_table(model_tarball, pred_type='classification'):
"""
Retrieve model metadata and performance metrics for a model saved as a tarball (.tar.gz) file.
Args:
model_tarball (str): Path of model tarball file, named as model.tar.gz.
pred_type (str): Prediction type ('classification' or 'regression') of model.
Returns:
tuple (pd.DataFrame, dict): Table of performance metrics and a dictionary of model metadata.
"""
tarf_content = tarfile.open(model_tarball, "r")
metadata_file = tarf_content.getmember("./model_metadata.json")
ext_metadata = tarf_content.extractfile(metadata_file)
meta_json = json.load(ext_metadata)
ext_metadata.close()
tarf_content.close()
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['r2_score', 'rms_score', 'mae_score']
else:
metrics = ['roc_auc_score', 'prc_auc_score', 'precision', 'recall_score',
'accuracy_score', 'npv', 'matthews_cc', 'kappa', 'cross_entropy', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = [0,0]
for emet in meta_json["training_metrics"]:
label = emet["label"]
score_ix = 0 if label == "best" else 1
subset = emet["subset"]
for metric in metrics:
score_dict[subset][metric][score_ix] = emet["prediction_results"][metric]
perf_df = pd.DataFrame()
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (subset, metric)
perf_df[metric_col] = score_dict[subset][metric]
return perf_df, meta_json
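# Hedged usage sketch (hypothetical tarball path). Each metric column holds two rows:
# index 0 for the 'best'-labeled epoch metrics and index 1 for the other label found in
# the tarball's training_metrics list.
#
#   perf_df, meta = get_tarball_perf_table('/path/to/model.tar.gz', pred_type='classification')
#   print(perf_df['valid_roc_auc_score'])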
#------------------------------------------------------------------------------------------------------------------
def get_filesystem_perf_results(result_dir, pred_type='classification'):
"""
Retrieve metadata and performance metrics for models stored in the filesystem from a hyperparameter search run.
Args:
result_dir (str): Root directory for results from a hyperparameter search training run.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
pd.DataFrame: Table of metadata fields and performance metrics.
"""
ampl_version_list = []
model_uuid_list = []
model_type_list = []
featurizer_list = []
dataset_key_list = []
splitter_list = []
model_score_type_list = []
feature_transform_type_list = []
# model type specific lists
param_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['r2_score', 'rms_score', 'mae_score', 'num_compounds']
else:
metrics = ['roc_auc_score', 'prc_auc_score', 'precision', 'recall_score', 'num_compounds',
'accuracy_score', 'bal_accuracy', 'npv', 'matthews_cc', 'kappa', 'cross_entropy', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
score_dict['valid']['model_choice_score'] = []
# Navigate the results directory tree
model_list = []
metrics_list = []
tar_list = []
for dirpath, dirnames, filenames in os.walk(result_dir):
# collect all tars for later
tar_list = tar_list + [os.path.join(dirpath, f) for f in filenames if f.endswith('.tar.gz')]
if ('model_metadata.json' in filenames) and ('model_metrics.json' in filenames):
meta_path = os.path.join(dirpath, 'model_metadata.json')
with open(meta_path, 'r') as meta_fp:
meta_dict = json.load(meta_fp)
if meta_dict['model_parameters']['prediction_type']==pred_type:
model_list.append(meta_dict)
metrics_path = os.path.join(dirpath, 'model_metrics.json')
with open(metrics_path, 'r') as metrics_fp:
metrics_dicts = json.load(metrics_fp)
metrics_list.append(metrics_dicts)
print("Found data for %d models under %s" % (len(model_list), result_dir))
# build dictionary of tarball names
tar_dict = {os.path.basename(tf):tf for tf in tar_list}
path_list = []
for metadata_dict, metrics_dicts in zip(model_list, metrics_list):
model_uuid = metadata_dict['model_uuid']
dataset_key = metadata_dict['training_dataset']['dataset_key']
dataset_name = mp.build_tarball_name(mp.build_dataset_name(dataset_key), model_uuid)
if dataset_name in tar_dict:
path_list.append(tar_dict[dataset_name])
else:
# unable to find saved tar file
path_list.append('')
# Get list of training run metrics for this model
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
ampl_version = model_params['ampl_version']
ampl_version_list.append(ampl_version)
model_type = model_params['model_type']
model_type_list.append(model_type)
model_score_type = model_params['model_choice_score_type']
model_score_type_list.append(model_score_type)
featurizer = model_params['featurizer']
# Replace the generic 'descriptors'/'computed_descriptors' featurizer label with the specific
# descriptor type (e.g. moe, mordred, rdkit) for a more informative representation
if featurizer in ["computed_descriptors", "descriptors"]:
featurizer = metadata_dict["descriptor_specific"]["descriptor_type"]
featurizer_list.append(featurizer)
split_params = metadata_dict['splitting_parameters']
splitter_list.append(split_params['splitter'])
dataset_key_list.append(metadata_dict['training_dataset']['dataset_key'])
feature_transform_type = metadata_dict['training_dataset']['feature_transform_type']
feature_transform_type_list.append(feature_transform_type)
param_list.append(extract_model_and_feature_parameters(metadata_dict))
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
score_dict['valid']['model_choice_score'].append(subset_metrics['valid']['model_choice_score'])
param_df = pd.DataFrame(param_list)
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
model_path = path_list,
ampl_version=ampl_version_list,
model_type=model_type_list,
dataset_key=dataset_key_list,
featurizer=featurizer_list,
splitter=splitter_list,
model_score_type=model_score_type_list,
feature_transform_type=feature_transform_type_list))
perf_df = perf_df.merge(param_df, on='model_uuid', how='inner')
perf_df['model_choice_score'] = score_dict['valid']['model_choice_score']
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (subset, metric)
perf_df[metric_col] = score_dict[subset][metric]
sort_by = 'model_choice_score'
perf_df = perf_df.sort_values(sort_by, ascending=False)
return perf_df
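# Hedged usage sketch (hypothetical results directory):
#
#   perf_df = get_filesystem_perf_results('/path/to/hyperparam_results', pred_type='regression')
#   # Rows are already sorted by model_choice_score, so the best model is first
#   best_model = perf_df.iloc[0]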
def get_filesystem_models(result_dir, pred_type):
"""
Identify all models under result_dir and build a performance results table with 'tarball_names' and
'tarball_paths' columns giving the name and location of each model tarball found under result_dir.
Args:
result_dir (str): Root directory for results from a hyperparameter search training run.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
pd.DataFrame: Table of metadata fields and performance metrics, with tarball names and paths.
"""
perf_df = get_filesystem_perf_results(result_dir, pred_type)
if pred_type == 'regression':
metric = 'valid_r2_score'
else:
metric = 'valid_roc_auc_score'
#best_df = perf_df.sort_values(by=metric, ascending=False).drop_duplicates(subset='dataset_key').copy()
perf_df['dataset_names'] = perf_df['dataset_key'].apply(lambda f: os.path.splitext(os.path.basename(f))[0])
perf_df['tarball_names'] = perf_df.apply(lambda x: '%s_model_%s.tar.gz' % (x['dataset_names'], x['model_uuid']), axis=1)
tarball_names = set(perf_df['tarball_names'].values)
all_filenames = []
for dirpath, dirnames, filenames in os.walk(result_dir):
for fn in filenames:
if fn in tarball_names:
all_filenames.append((fn, os.path.join(dirpath, fn)))
found_files_df = pd.DataFrame({'tarball_names':[f[0] for f in all_filenames],
'tarball_paths':[f[1] for f in all_filenames]})
perf_df = perf_df.merge(found_files_df, on='tarball_names', how='outer')
return perf_df
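# Hedged usage sketch (hypothetical results directory). Models whose tarballs could not be
# located will have NaN in the 'tarball_paths' column because of the outer merge above.
#
#   models_df = get_filesystem_models('/path/to/hyperparam_results', pred_type='classification')
#   missing = models_df[models_df.tarball_paths.isna()]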
#------------------------------------------------------------------------------------------------------------------
def copy_best_filesystem_models(result_dir, dest_dir, pred_type, force_update=False):
"""
Identify the best models for each dataset within a result directory tree (e.g. from a hyperparameter search).
Copy the associated model tarballs to a destination directory.
Args:
result_dir (str): Path to model training result directory.
dest_dir (str): Path of directory where model tarballs will be copied to.
pred_type (str): Prediction type ('classification' or 'regression') of models to copy.
force_update (bool): If true, overwrite tarball files that already exist in dest_dir.
Returns:
pd.DataFrame: Table of performance metrics for best models.
"""
perf_df = get_filesystem_perf_results(result_dir, pred_type)
if pred_type == 'regression':
metric = 'valid_r2_score'
else:
metric = 'valid_roc_auc_score'
best_df = perf_df.sort_values(by=metric, ascending=False).drop_duplicates(subset='dataset_key').copy()
dataset_names = [os.path.splitext(os.path.basename(f))[0] for f in best_df.dataset_key.values]
model_uuids = best_df.model_uuid.values
tarball_names = ['%s_model_%s.tar.gz' % (dset_name, model_uuid) for dset_name, model_uuid in zip(dataset_names, model_uuids)]
for dirpath, dirnames, filenames in os.walk(result_dir):
for fn in filenames:
if (fn in tarball_names) and (force_update or not os.path.exists(os.path.join(dest_dir, fn))):
shutil.copy2(os.path.join(dirpath, fn), dest_dir)
print('Copied %s' % fn)
return best_df
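# Hedged usage sketch (hypothetical paths). dest_dir is assumed to exist already; create it
# beforehand if necessary.
#
#   import os
#   os.makedirs('/path/to/best_models', exist_ok=True)
#   best_df = copy_best_filesystem_models('/path/to/hyperparam_results', '/path/to/best_models',
#                                         pred_type='regression', force_update=False)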
#------------------------------------------------------------------------------------------------------------------
def get_summary_perf_tables(collection_names=None, filter_dict={}, result_dir=None, prediction_type='regression', verbose=False):
"""
Load model parameters and performance metrics from model tracker for all models saved in the model tracker DB under
the given collection names (or result directory if Model tracker is not available) with the given prediction type.
Tabulate the parameters and metrics including:
dataset (assay name, target, parameter, key, bucket)
dataset size (train/valid/test/total)
number of training folds
model type (NN or RF)
featurizer
transformation type
metrics: r2_score, mae_score and rms_score for regression, or ROC AUC for classification
Args:
collection_names (list): Names of model tracker collections to search for models.
filter_dict (dict): Additional filter criteria to use in model query.
result_dir (str or list): Directories to search for models; must be provided if the model tracker DB is not available.
prediction_type (str): Type of models (classification or regression) to query.
verbose (bool): If true, print status messages as collections are processed.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
if not mlmt_supported and not result_dir:
print("Model tracker not supported in your environment; can examine models saved in filesystem only, 'result_dir' is needed.")
return None
collection_list = []
ampl_version_list=[]
model_uuid_list = []
time_built_list = []
model_type_list = []
dataset_key_list = []
bucket_list = []
param_list = []
featurizer_list = []
desc_type_list = []
transform_list = []
dset_size_list = []
splitter_list = []
split_strategy_list = []
split_uuid_list = []
umap_dim_list = []
umap_targ_wt_list = []
umap_neighbors_list = []
umap_min_dist_list = []
model_feat_param_list = []
if prediction_type == 'regression':
score_types = ['r2_score', 'mae_score', 'rms_score']
else:
# TODO: add more classification metrics later
score_types = ['roc_auc_score', 'prc_auc_score', 'accuracy_score', 'bal_accuracy', 'precision', 'recall_score', 'npv', 'matthews_cc', 'kappa']
subsets = ['train', 'valid', 'test']
score_dict = {}
ncmpd_dict = {}
for subset in subsets:
score_dict[subset] = {}
for score_type in score_types:
score_dict[subset][score_type] = []
ncmpd_dict[subset] = []
metadata_list_dict = {}
if mlmt_supported and collection_names:
mlmt_client = dsf.initialize_model_tracker()
filter_dict['model_parameters.prediction_type'] = prediction_type
for collection_name in collection_names:
print("Finding models in collection %s" % collection_name)
query_params = {
"match_metadata": filter_dict,
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
).result()
metadata_list_dict[collection_name] = metadata_list
elif result_dir:
if isinstance(result_dir, str):
result_dir = [result_dir]
for rd in result_dir:
if rd not in metadata_list_dict:
metadata_list_dict[rd] = []
for dirpath, dirnames, filenames in os.walk(rd):
if "model_metadata.json" in filenames:
with open(os.path.join(dirpath, 'model_metadata.json')) as f:
metadata_dict = json.load(f)
metadata_list_dict[rd].append(metadata_dict)
for ss in metadata_list_dict:
for i, metadata_dict in enumerate(metadata_list_dict[ss]):
if (i % 10 == 0) and verbose:
print('Processing collection %s model %d' % (ss, i))
# Check that model has metrics before we go on
if not 'training_metrics' in metadata_dict:
continue
collection_list.append(ss)
model_uuid = metadata_dict['model_uuid']
model_uuid_list.append(model_uuid)
time_built = metadata_dict['time_built']
time_built_list.append(time_built)
model_params = metadata_dict['model_parameters']
ampl_version = model_params.get('ampl_version', 'probably 1.0.0')
ampl_version_list.append(ampl_version)
model_type = model_params['model_type']
model_type_list.append(model_type)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
if 'descriptor_specific' in metadata_dict:
desc_type = metadata_dict['descriptor_specific']['descriptor_type']
elif featurizer in ['graphconv', 'ecfp']:
desc_type = featurizer
else:
desc_type = ''
desc_type_list.append(desc_type)
dataset_key = metadata_dict['training_dataset']['dataset_key']
bucket = metadata_dict['training_dataset']['bucket']
dataset_key_list.append(dataset_key)
bucket_list.append(bucket)
dset_metadata = metadata_dict['training_dataset']['dataset_metadata']
param = metadata_dict['training_dataset']['response_cols'][0]
param_list.append(param)
transform_type = metadata_dict['training_dataset']['feature_transform_type']
transform_list.append(transform_type)
split_params = metadata_dict['splitting_parameters']
splitter_list.append(split_params['splitter'])
split_uuid_list.append(split_params.get('split_uuid', ''))
split_strategy = split_params['split_strategy']
split_strategy_list.append(split_strategy)
if 'umap_specific' in metadata_dict:
umap_params = metadata_dict['umap_specific']
umap_dim_list.append(umap_params['umap_dim'])
umap_targ_wt_list.append(umap_params['umap_targ_wt'])
umap_neighbors_list.append(umap_params['umap_neighbors'])
umap_min_dist_list.append(umap_params['umap_min_dist'])
else:
umap_dim_list.append(np.nan)
umap_targ_wt_list.append(np.nan)
umap_neighbors_list.append(np.nan)
umap_min_dist_list.append(np.nan)
model_feat_param_list.append(extract_model_and_feature_parameters(metadata_dict))
# Get model metrics for this model
metrics_dicts = metadata_dict['training_metrics']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
if split_strategy == 'k_fold_cv':
dset_size = subset_metrics['train']['num_compounds'] + subset_metrics['test']['num_compounds']
else:
dset_size = subset_metrics['train']['num_compounds'] + subset_metrics['valid']['num_compounds'] + subset_metrics['test']['num_compounds']
for subset in subsets:
subset_size = subset_metrics[subset]['num_compounds']
for score_type in score_types:
try:
score = subset_metrics[subset][score_type]
except KeyError:
score = float('nan')
score_dict[subset][score_type].append(score)
ncmpd_dict[subset].append(subset_size)
dset_size_list.append(dset_size)
col_dict = dict(
collection=collection_list,
ampl_version=ampl_version_list,
model_uuid=model_uuid_list,
time_built=time_built_list,
model_type=model_type_list,
featurizer=featurizer_list,
features=desc_type_list,
transformer=transform_list,
splitter=splitter_list,
split_strategy=split_strategy_list,
split_uuid=split_uuid_list,
umap_dim=umap_dim_list,
umap_targ_wt=umap_targ_wt_list,
umap_neighbors=umap_neighbors_list,
umap_min_dist=umap_min_dist_list,
dataset_bucket=bucket_list,
dataset_key=dataset_key_list,
dataset_size=dset_size_list,
parameter=param_list
)
perf_df = pd.DataFrame(col_dict)
param_df = pd.DataFrame(model_feat_param_list)
perf_df = perf_df.merge(param_df, on='model_uuid', how='inner')
for subset in subsets:
ncmpds_col = '%s_size' % subset
perf_df[ncmpds_col] = ncmpd_dict[subset]
for score_type in score_types:
metric_col = '%s_%s' % (subset, score_type)
perf_df[metric_col] = score_dict[subset][score_type]
return perf_df
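# Hedged usage sketch. Either query model tracker collections (hypothetical names below) or,
# without the tracker, point result_dir at one or more filesystem result trees.
#
#   summary_df = get_summary_perf_tables(collection_names=['pilot_models'],
#                                        filter_dict={}, prediction_type='regression')
#   # or, filesystem only:
#   summary_df = get_summary_perf_tables(result_dir=['/path/to/results'],
#                                        prediction_type='regression')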
#------------------------------------------------------------------------------------------------------------------
def get_summary_metadata_table(uuids, collections=None):
"""
Tabulate metadata fields and performance metrics for a set of models identified by specific model_uuids.
Args:
uuids (list): List of model UUIDs to query.
collections (list or str): Names of collections in model tracker DB to get models from. If collections is
a string, it must identify one collection to search for all models. If a list, it must be of the same
length as `uuids`. If not provided, all collections will be searched.
Returns:
pd.DataFrame: Table of metadata fields and performance metrics for models.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
if isinstance(uuids,str):
uuids = [uuids]
if isinstance(collections,str):
collections = [collections] * len(uuids)
mlist = []
mlmt_client = dsf.initialize_model_tracker()
for idx,uuid in enumerate(uuids):
if collections is not None:
collection_name = collections[idx]
else:
collection_name = trkr.get_model_collection_by_uuid(uuid)
model_meta = trkr.get_full_metadata_by_uuid(uuid, collection_name=collection_name)
mdl_params = model_meta['model_parameters']
data_params = model_meta['training_dataset']
# Get model metrics for this model
metrics = pd.DataFrame(model_meta['training_metrics'])
metrics = metrics[metrics['label']=='best']
train_metrics = metrics[metrics['subset']=='train']['prediction_results'].values[0]
valid_metrics = metrics[metrics['subset']=='valid']['prediction_results'].values[0]
test_metrics = metrics[metrics['subset']=='test']['prediction_results'].values[0]
# Try to name the model something intelligible in the table
name = 'NA'
if 'target' in data_params['dataset_metadata']:
name = data_params['dataset_metadata']['target']
if (name == 'NA') & ('assay_endpoint' in data_params['dataset_metadata']):
name = data_params['dataset_metadata']['assay_endpoint']
if (name == 'NA') & ('response_col' in data_params['dataset_metadata']):
name = data_params['dataset_metadata']['response_col']
if name != 'NA':
if 'param' in data_params['dataset_metadata'].keys():
name = name + ' ' + data_params['dataset_metadata']['param']
else:
name = 'unknown'
transform = 'None'
if 'transformation' in data_params['dataset_metadata'].keys():
transform = data_params['dataset_metadata']['transformation']
if mdl_params['featurizer'] == 'computed_descriptors':
featurizer = model_meta['descriptor_specific']['descriptor_type']
else:
featurizer = mdl_params['featurizer']
try:
split_uuid = model_meta['splitting_parameters']['split_uuid']
except KeyError:
split_uuid = 'Not Available'
if mdl_params['prediction_type'] == 'regression':
if mdl_params['model_type'] == 'NN':
nn_params = model_meta['nn_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Layer Sizes': nn_params['layer_sizes'],
'Optimizer': nn_params['optimizer_type'],
'Learning Rate': nn_params['learning_rate'],
'Dropouts': nn_params['dropouts'],
'Best Epoch (Max)': '%i (%i)' % (nn_params['best_epoch'],nn_params['max_epochs']),
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'RF':
rf_params = model_meta['rf_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Max Depth': rf_params['rf_max_depth'],
'Max Features': rf_params['rf_max_features'],
'RF Estimators': rf_params['rf_estimators'],
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'xgboost':
xgb_params = model_meta['xgb_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Gamma': xgb_params['xgb_gamma'],
'Learning rate': xgb_params['xgb_learning_rate'],
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
else:
# Unrecognized model type: skip this model rather than referencing an undefined minfo below
print('Unrecognized model type %s for model %s; skipping' % (mdl_params['model_type'], uuid))
continue
elif mdl_params['prediction_type'] == 'classification':
if mdl_params['model_type'] == 'NN':
nn_params = model_meta['nn_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'ROC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['roc_auc_score'], valid_metrics['roc_auc_score'], test_metrics['roc_auc_score']),
'PRC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['prc_auc_score'], valid_metrics['prc_auc_score'], test_metrics['prc_auc_score']),
'Balanced accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics.get('bal_accuracy', np.nan), valid_metrics.get('bal_accuracy',np.nan), test_metrics.get('bal_accuracy', np.nan)),
'Accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['accuracy_score'], valid_metrics['accuracy_score'], test_metrics['accuracy_score']),
'Precision (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['precision'], valid_metrics['precision'], test_metrics['precision']),
'Recall (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['recall_score'], valid_metrics['recall_score'], test_metrics['recall_score']),
'NPV (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['npv'], valid_metrics['npv'], test_metrics['npv']),
'Kappa (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['kappa'], valid_metrics['kappa'], test_metrics['kappa']),
'Matthews CC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['matthews_cc'], valid_metrics['matthews_cc'], test_metrics['matthews_cc']),
'Cross entropy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['cross_entropy'], valid_metrics['cross_entropy'], test_metrics['cross_entropy']),
'Confusion matrices (Train/Valid/Test)': f"{str(train_metrics['confusion_matrix'])}/{str(valid_metrics['confusion_matrix'])}/{str(test_metrics['confusion_matrix'])}",
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Layer Sizes': nn_params['layer_sizes'],
'Optimizer': nn_params['optimizer_type'],
'Learning Rate': nn_params['learning_rate'],
'Dropouts': nn_params['dropouts'],
'Best Epoch (Max)': '%i (%i)' % (nn_params['best_epoch'],nn_params['max_epochs']),
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'RF':
rf_params = model_meta['rf_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Max Depth': rf_params['rf_max_depth'],
'Max Features': rf_params['rf_max_features'],
'RF Estimators': rf_params['rf_estimators'],
'ROC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['roc_auc_score'], valid_metrics['roc_auc_score'], test_metrics['roc_auc_score']),
'PRC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['prc_auc_score'], valid_metrics['prc_auc_score'], test_metrics['prc_auc_score']),
'Balanced accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics.get('bal_accuracy', np.nan), valid_metrics.get('bal_accuracy',np.nan), test_metrics.get('bal_accuracy', np.nan)),
'Accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['accuracy_score'], valid_metrics['accuracy_score'], test_metrics['accuracy_score']),
'Precision (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['precision'], valid_metrics['precision'], test_metrics['precision']),
'Recall (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['recall_score'], valid_metrics['recall_score'], test_metrics['recall_score']),
'NPV (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['npv'], valid_metrics['npv'], test_metrics['npv']),
'Kappa (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['kappa'], valid_metrics['kappa'], test_metrics['kappa']),
'Matthews CC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['matthews_cc'], valid_metrics['matthews_cc'], test_metrics['matthews_cc']),
'Cross entropy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['cross_entropy'], valid_metrics['cross_entropy'], test_metrics['cross_entropy']),
'Confusion matrices (Train/Valid/Test)': f"{train_metrics['confusion_matrix']}/{valid_metrics['confusion_matrix']}/{test_metrics['confusion_matrix']}",
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'xgboost':
xgb_params = model_meta['xgb_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Gamma': xgb_params['xgb_gamma'],
'XGB Learning rate': xgb_params['xgb_learning_rate'],
'ROC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['roc_auc_score'], valid_metrics['roc_auc_score'], test_metrics['roc_auc_score']),
'PRC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['prc_auc_score'], valid_metrics['prc_auc_score'], test_metrics['prc_auc_score']),
'Balanced accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics.get('bal_accuracy', np.nan), valid_metrics.get('bal_accuracy',np.nan), test_metrics.get('bal_accuracy', np.nan)),
'Accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['accuracy_score'], valid_metrics['accuracy_score'], test_metrics['accuracy_score']),
'Precision (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['precision'], valid_metrics['precision'], test_metrics['precision']),
'Recall (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['recall_score'], valid_metrics['recall_score'], test_metrics['recall_score']),
'NPV (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['npv'], valid_metrics['npv'], test_metrics['npv']),
'Kappa (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['kappa'], valid_metrics['kappa'], test_metrics['kappa']),
'Matthews CC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['matthews_cc'], valid_metrics['matthews_cc'], test_metrics['matthews_cc']),
'Cross entropy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['cross_entropy'], valid_metrics['cross_entropy'], test_metrics['cross_entropy']),
'Confusion matrices (Train/Valid/Test)': f"{train_metrics['confusion_matrix']}/{valid_metrics['confusion_matrix']}/{test_metrics['confusion_matrix']}",
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
else:
# Unrecognized model type: skip this model rather than referencing an undefined minfo below
print('Unrecognized model type %s for model %s; skipping' % (mdl_params['model_type'], uuid))
continue
mlist.append(OrderedDict(minfo))
return pd.DataFrame(mlist).set_index('Name').transpose()
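# Hedged usage sketch (hypothetical UUID and collection name). The returned table is
# transposed: one column per model keyed by its 'Name' field, one row per metadata field.
#
#   meta_df = get_summary_metadata_table(
#       uuids=['0d7b7c3e-aaaa-bbbb-cccc-000000000000'],
#       collections='pilot_models')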
#------------------------------------------------------------------------------------------------------------------
def get_training_datasets(collection_names):
"""
Query the model tracker DB for all the unique dataset keys and buckets used to train models in the given
collections.
Args:
collection_names (list): List of names of model tracker collections to search for models.
Returns:
dict: Dictionary mapping collection names to lists of (dataset_key, bucket) tuples for training sets.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
result_dict = {}
mlmt_client = dsf.initialize_model_tracker()
for collection_name in collection_names:
dset_list = mlmt_client.model.get_training_datasets(collection_name=collection_name).result()
result_dict[collection_name] = dset_list
return result_dict
#------------------------------------------------------------------------------------------------------------------
def get_dataset_models(collection_names, filter_dict={}):
"""
Query the model tracker for all models saved in the model tracker DB under the given collection names. Returns a dictionary
mapping (dataset_key,bucket) pairs to the list of (collection,model_uuid) pairs trained on the corresponding datasets.
Args:
collection_names (list): List of names of model tracker collections to search for models.
filter_dict (dict): Additional filter criteria to use in model query.
Returns:
dict: Dictionary mapping training set (dataset_key, bucket) tuples to (collection, model_uuid) pairs.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
result_dict = {}
coll_dset_dict = get_training_datasets(collection_names)
mlmt_client = dsf.initialize_model_tracker()
for collection_name in collection_names:
dset_list = coll_dset_dict[collection_name]
for dset_dict in dset_list:
dset_key = dset_dict['dataset_key']
bucket = dset_dict['bucket']
query_filter = {
'training_dataset.bucket': bucket,
'training_dataset.dataset_key': dset_key
}
query_filter.update(filter_dict)
query_params = {
"match_metadata": query_filter
}
print('Querying models in collection %s for dataset %s, %s' % (collection_name, bucket, dset_key))
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
include_fields=['model_uuid']
).result()
for i, metadata_dict in enumerate(metadata_list):
if i % 50 == 0:
print('Processing collection %s model %d' % (collection_name, i))
model_uuid = metadata_dict['model_uuid']
result_dict.setdefault((dset_key,bucket), []).append((collection_name, model_uuid))
return result_dict
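# Hedged usage sketch (hypothetical collection names). get_training_datasets() returns
# {collection: [{'dataset_key': ..., 'bucket': ...}, ...]}, and get_dataset_models() inverts
# that mapping into (dataset_key, bucket) -> [(collection, model_uuid), ...].
#
#   dset_models = get_dataset_models(['pilot_models', 'scaffold_models'])
#   for (dkey, buck), models in dset_models.items():
#       print(dkey, buck, len(models))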
#-------------------------------------------------------------------------------------------------------------------
def get_multitask_perf_from_files(result_dir, pred_type='regression'):
"""
Retrieve model metadata and performance metrics stored in the filesystem from a multitask hyperparameter search.
Format the per-task performance metrics in a table with a row for each task and columns for each model/subset
combination.
Args:
result_dir (str): Path to root result directory containing output from a hyperparameter search run.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
model_uuid_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
best_epoch_list = []
max_epochs_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['num_compounds', 'r2_score', 'task_r2_scores']
else:
metrics = ['num_compounds', 'roc_auc_score', 'task_roc_auc_scores']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
# Navigate the results directory tree
model_list = []
metrics_list = []
for dirpath, dirnames, filenames in os.walk(result_dir):
if ('model_metadata.json' in filenames) and ('model_metrics.json' in filenames):
meta_path = os.path.join(dirpath, 'model_metadata.json')
with open(meta_path, 'r') as meta_fp:
meta_dict = json.load(meta_fp)
model_list.append(meta_dict)
metrics_path = os.path.join(dirpath, 'model_metrics.json')
with open(metrics_path, 'r') as metrics_fp:
metrics_dicts = json.load(metrics_fp)
metrics_list.append(metrics_dicts)
print("Found data for %d models under %s" % (len(model_list), result_dir))
for metadata_dict, metrics_dicts in zip(model_list, metrics_list):
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get list of training run metrics for this model
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
# Alternatively, print a warning and continue instead of raising
raise Exception("Got no or incomplete metrics for model %s" % model_uuid)
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
dset_params = metadata_dict['training_dataset']
response_cols = dset_params['response_cols']
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
# Format the data as a table with groups of 3 columns for each model
num_models = len(model_uuid_list)
if pred_type == 'regression':
model_params = ['model_uuid', 'learning_rate', 'layer_sizes', 'dropouts', 'max_epochs', 'best_epoch',
'subset', 'num_compounds', 'mean_r2_score']
else:
model_params = ['model_uuid', 'learning_rate', 'layer_sizes', 'dropouts', 'max_epochs', 'best_epoch',
'subset', 'num_compounds', 'mean_roc_auc_score']
param_list = model_params + response_cols
perf_df = pd.DataFrame(dict(col_0=param_list))
colnum = 0
for i in range(num_models):
for subset in subsets:
vals = []
if subset == 'train':
vals.append(model_uuid_list[i])
vals.append(learning_rate_list[i])
vals.append(layer_sizes_list[i])
vals.append(dropouts_list[i])
vals.append('%d' % max_epochs_list[i])
vals.append('%d' % best_epoch_list[i])
else:
vals = vals + ['']*6
vals.append(subset)
vals.append('%d' % score_dict[subset]['num_compounds'][i])
if pred_type == 'regression':
vals.append('%.3f' % score_dict[subset]['r2_score'][i])
vals = vals + ['%.3f' % v for v in score_dict[subset]['task_r2_scores'][i]]
else:
vals.append('%.3f' % score_dict[subset]['roc_auc_score'][i])
vals = vals + ['%.3f' % v for v in score_dict[subset]['task_roc_auc_scores'][i]]
colnum += 1
colname = 'col_%d' % colnum
perf_df[colname] = vals
return perf_df
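# Hedged usage sketch (hypothetical results directory). The returned table is laid out
# column-wise: 'col_0' holds the row labels (model parameters, subset, compound count,
# mean score and one row per task), and each subsequent col_N holds one model/subset combination.
#
#   mt_df = get_multitask_perf_from_files('/path/to/multitask_results', pred_type='regression')
#   mt_df.to_csv('multitask_perf.csv', index=False)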
#-------------------------------------------------------------------------------------------------------------------
def get_multitask_perf_from_files_new(result_dir, pred_type='regression'):
"""
Retrieve model metadata and performance metrics stored in the filesystem from a multitask hyperparameter search.
Format the per-task performance metrics in a table with a row for each task and columns for each model/subset
combination.
Args:
result_dir (str): Path to root result directory containing output from a hyperparameter search run.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
model_uuid_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
best_epoch_list = []
max_epochs_list = []
featurizer_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['num_compounds', 'r2_score', 'task_r2_scores',
'task_rms_scores']
else:
metrics = ['num_compounds', 'roc_auc_score', 'task_roc_auc_scores']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
# Navigate the results directory tree
model_list = []
metrics_list = []
for dirpath, dirnames, filenames in os.walk(result_dir):
if ('model_metadata.json' in filenames) and ('model_metrics.json' in filenames):
meta_path = os.path.join(dirpath, 'model_metadata.json')
with open(meta_path, 'r') as meta_fp:
meta_dict = json.load(meta_fp)
model_list.append(meta_dict)
metrics_path = os.path.join(dirpath, 'model_metrics.json')
with open(metrics_path, 'r') as metrics_fp:
metrics_dicts = json.load(metrics_fp)
metrics_list.append(metrics_dicts)
print("Found data for %d models under %s" % (len(model_list), result_dir))
for metadata_dict, metrics_dicts in zip(model_list, metrics_list):
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get list of training run metrics for this model
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
# Alternatively, print a warning and continue instead of raising
raise Exception("Got no or incomplete metrics for model %s" % model_uuid)
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
dset_params = metadata_dict['training_dataset']
response_cols = dset_params['response_cols']
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
featurizer_list.append(model_params["featurizer"])
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
# Format the data as a table with groups of 3 columns for each model
num_models = len(model_uuid_list)
data = {
"model_uuid": model_uuid_list,
"learning_rate": learning_rate_list,
"layer_sizes": layer_sizes_list,
"dropouts": dropouts_list,
"featurizer": featurizer_list
}
for i in range(num_models):
for subset in subsets:
for ix, task in enumerate(response_cols):
if pred_type == "regression":
colr2 = f"{subset}_{task}_r2"
colrms = f"{subset}_{task}_rms"
if colr2 not in data:
data[colr2] = []
data[colrms] = []
data[colr2].append(score_dict[subset]["task_r2_scores"][i][ix])
data[colrms].append(score_dict[subset]["task_rms_scores"][i][ix])
else:
colauc = f"{subset}_{task}_roc_auc"
if colauc not in data:
data[colauc] = []
data[colauc].append(score_dict[subset]["task_roc_auc_scores"][i][ix])
perf_df = pd.DataFrame(data)
return perf_df
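# Hedged usage sketch (hypothetical results directory). Unlike the function above, this variant
# returns one row per model with per-task score columns named like '<subset>_<task>_r2' /
# '<subset>_<task>_rms' (regression) or '<subset>_<task>_roc_auc' (classification).
#
#   mt_df = get_multitask_perf_from_files_new('/path/to/multitask_results', pred_type='regression')
#   valid_cols = [c for c in mt_df.columns if c.startswith('valid_')]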
#-------------------------------------------------------------------------------------------------------------------
def get_multitask_perf_from_tracker(collection_name, response_cols=None, expand_responses=None, expand_subsets='test',
exhaustive=False):
"""
Retrieve full metadata and metrics from model tracker for all models in a collection and format them
into a table, including per-task performance metrics for multitask models.
Meant for multitask NN models, but works for single task models as well.
By AKP. Works for model tracker as of 10/2020
Args:
collection_name (str): Name of model tracker collection to search for models.
response_cols (list, str or None): Names of tasks (response columns) to query performance results for.
If None, checks whether the entire collection shares the same response cols and uses those;
otherwise it raises an exception listing the possible choices. If not None, should be a list
of strings or a comma-separated string. Note: make sure response cols are listed in the same
order as in the metadata. Recommended: run with None first, then rerun with the response cols specified.
expand_responses (list, str or None): Names of tasks / response columns you want to include results for in
the final dataframe. Useful if you have a lot of tasks and only want to look at the performance of a
few of them. Must also be a list or comma separated string, and must be a subset of response_cols.
If None, will expand all responses.
expand_subsets (list, str or None): Dataset subsets ('train', 'valid' and/or 'test') to show metrics for.
Again, must be list or comma separated string, or None to expand all.
exhaustive (bool): If True, return large dataframe with all model tracker metadata minus any columns not
in expand_responses. If False, return trimmed dataframe with most relevant columns.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
# check inputs are correct
if collection_name.startswith('old_'):
raise Exception("This function is not implemented for the old format of metadata.")
if isinstance(response_cols, list):
pass
elif response_cols is None:
pass
elif isinstance(response_cols, str):
response_cols=[x.strip() for x in response_cols.split(',')]
else:
raise Exception("Please input response cols as None, list or comma separated string.")
if isinstance(expand_responses, list):
pass
elif expand_responses is None:
pass
elif isinstance(expand_responses, str):
expand_responses=[x.strip() for x in expand_responses.split(',')]
else:
raise Exception("Please input expand response col(s) as list or comma separated string.")
if isinstance(expand_subsets, list):
pass
elif expand_subsets is None:
pass
elif isinstance(expand_subsets, str):
expand_subsets=[x.strip() for x in expand_subsets.split(',')]
else:
raise Exception("Please input subset(s) as list or comma separated string.")
# get metadata
if response_cols is not None:
filter_dict={'training_dataset.response_cols': response_cols}
else:
filter_dict={}
models = trkr.get_full_metadata(filter_dict, collection_name)
if len(models)==0:
raise Exception("No models found with these response cols in this collection. To get a list of possible response cols, pass response_cols=None.")
models = pd.DataFrame.from_records(models)
# expand model metadata - deal with NA descriptors / NA other fields
alldat=models[['model_uuid', 'time_built']]
models=models.drop(['model_uuid', 'time_built'], axis = 1)
for column in models.columns:
if column == 'training_metrics':
continue
nai=models[models[column].isna()].index
nonas=models[~models[column].isna()]
tempdf=pd.DataFrame.from_records(nonas[column].tolist(), index=nonas.index)
tempdf=pd.concat([tempdf, pd.DataFrame(np.nan, index=nai, columns=tempdf.columns)])
alldat=alldat.join(tempdf)
# assign response cols
if len(alldat.response_cols.astype(str).unique())==1:
response_cols=alldat.response_cols[0]
print("Response cols:", response_cols)
else:
raise Exception(f"There is more than one set of response cols in this collection. Please choose from these lists: {alldat.response_cols.astype(str).unique()}")
# expand training metrics - deal with NA's in columns
metrics=pd.DataFrame.from_dict(models['training_metrics'].tolist())
allmet=alldat[['model_uuid']]
for column in metrics.columns:
nai=metrics[metrics[column].isna()].index
nonas=metrics[~metrics[column].isna()]
tempdf=pd.DataFrame.from_records(nonas[column].tolist(), index=nonas.index)
tempdf=pd.concat([tempdf, pd.DataFrame(np.nan, index=nai, columns=tempdf.columns)])
label=tempdf['label'][nonas.index[0]]
metrics_type=tempdf['metrics_type'][nonas.index[0]]
subset=tempdf['subset'][nonas.index[0]]
nai=tempdf[tempdf['prediction_results'].isna()].index
nonas=tempdf[~tempdf['prediction_results'].isna()]
tempdf=pd.DataFrame.from_records(nonas['prediction_results'].tolist(), index=nonas.index)
tempdf=pd.concat([tempdf, pd.DataFrame(np.nan, index=nai, columns=tempdf.columns)])
tempdf=tempdf.add_prefix(f'{label}_{subset}_')
allmet=allmet.join(tempdf, lsuffix='', rsuffix="_2")
alldat=alldat.merge(allmet, on='model_uuid')
# expand task level training metrics for subset(s) of interest - deal w/ NA values
if expand_subsets is None:
expand_subsets=['train', 'valid', 'test']
for sub in expand_subsets:
listcols=alldat.columns[alldat.columns.str.contains("task")& alldat.columns.str.contains(sub)]
for column in listcols:
colnameslist=[]
for task in response_cols:
colnameslist.append(f'{column}_{task}')
nai=alldat[alldat[column].isna()].index
nonas=alldat[~alldat[column].isna()]
if isinstance(nonas.loc[nonas.index[0],column], list):
tempdf=pd.DataFrame.from_records(nonas[column].tolist(), index= nonas.index, columns=colnameslist)
tempdf=pd.concat([tempdf, pd.DataFrame(np.nan, index=nai, columns=tempdf.columns)])
import argparse
import pandas as pd
import numpy as np
GENE = 'Hugo_Symbol'
PROTEIN = 'Protein_Change'
CHROMOSOME = 'Chromosome'
ALT = 'Alteration'
START_POSITION = 'Start_position'
END_POSITION = 'End_position'
REF_ALLELE = 'Reference_Allele'
ALT_ALLELE = 'Tumor_Seq_Allele2'
REF_COUNT = 't_ref_count'
ALT_COUNT = 't_alt_count'
VAR_CLASS = 'Variant_Classification'
sample_id = 'sample_id'
maf_handle = 'maf_handle'
exac_handle = 'exac_handle'
whitelist_handle = 'whitelist_handle'
filter_syn = 'filter_syn'
min_exac_ac = 'min_exac_ac'
min_depth = 'min_depth'
boolean_filter_noncoding = 'boolean_filter_noncoding'
boolean_whitelist = 'boolean_disable_whitelist'
EXAC_CHR = 'CHROM'
EXAC_POS = 'POS'
EXAC_REF = 'REF'
EXAC_ALT = 'ALT'
EXAC_AF = 'AF'
EXAC_AC = 'AC'
EXAC_AC_AFR = 'AC_AFR'
EXAC_AC_AMR = 'AC_AMR'
EXAC_AC_EAS = 'AC_EAS'
EXAC_AC_FIN = 'AC_FIN'
EXAC_AC_NFE = 'AC_NFE'
EXAC_AC_OTH = 'AC_OTH'
EXAC_AC_SAS = 'AC_SAS'
EXAC_AN = 'AN'
EXAC_AN_AFR = 'AN_AFR'
EXAC_AN_AMR = 'AN_AMR'
EXAC_AN_EAS = 'AN_EAS'
EXAC_AN_FIN = 'AN_FIN'
EXAC_AN_NFE = 'AN_NFE'
EXAC_AN_OTH = 'AN_OTH'
EXAC_AN_SAS = 'AN_SAS'
MAPPED_GENE = 'gene'
MAPPED_CHR = 'chromosome'
MAPPED_REF = 'ref_allele'
MAPPED_ALT = 'alt_allele'
MAPPED_POS = 'start_position'
MAPPED_AA = 'protein_change'
MAPPED_VAR_CLASS = 'variant_classification'
MAPPED_REF_COUNT = 'ref_count'
MAPPED_ALT_COUNT = 'alt_count'
EXAC_COMMON = 'exac_common'
WL = 'whitelist'
DEPTH = 'read_depth'
LOW_DEPTH = 'low_read_depth'
CODING = 'coding'
COMMON = 'common_variant'
maf_column_map = {
GENE: MAPPED_GENE,
CHROMOSOME: MAPPED_CHR,
PROTEIN: MAPPED_AA,
START_POSITION: MAPPED_POS,
REF_ALLELE: MAPPED_REF,
ALT_ALLELE: MAPPED_ALT,
VAR_CLASS: MAPPED_VAR_CLASS,
REF_COUNT: MAPPED_REF_COUNT,
ALT_COUNT: MAPPED_ALT_COUNT
}
output_column_map = {v: k for k, v in maf_column_map.items()}
exac_column_map = {
EXAC_CHR: MAPPED_CHR,
EXAC_POS: MAPPED_POS,
EXAC_REF: MAPPED_REF,
EXAC_ALT: MAPPED_ALT,
EXAC_AF: 'exac_af',
EXAC_AC: 'exac_ac',
EXAC_AC_AFR: 'exac_ac_afr',
EXAC_AC_AMR: 'exac_ac_amr',
EXAC_AC_EAS: 'exac_ac_eas',
EXAC_AC_FIN: 'exac_ac_fin',
EXAC_AC_NFE: 'exac_ac_nfe',
EXAC_AC_OTH: 'exac_ac_oth',
EXAC_AC_SAS: 'exac_ac_sas',
EXAC_AN: 'exac_an',
EXAC_AN_AFR: 'exac_an_afr',
EXAC_AN_AMR: 'exac_an_amr',
EXAC_AN_EAS: 'exac_an_eas',
EXAC_AN_FIN: 'exac_an_fin',
EXAC_AN_NFE: 'exac_an_nfe',
EXAC_AN_OTH: 'exac_an_oth',
EXAC_AN_SAS: 'exac_an_sas',
}
whitelist_column_map = {0: MAPPED_CHR, 1: MAPPED_POS, 2: END_POSITION, 3:ALT}
population_keys = [EXAC_AC_AFR, EXAC_AC_AMR, EXAC_AC_EAS, EXAC_AC_FIN, EXAC_AC_NFE, EXAC_AC_OTH, EXAC_AC_SAS]
populations = [exac_column_map[x] for x in population_keys]
def check_column_names(df, map):
for column_name in map.keys():
assert column_name in df.columns, \
'Expected column %s not found among %s' % (column_name, df.columns)
def read(handle, **kwargs):
return pd.read_csv(handle, sep='\t', comment='#', dtype='object', **kwargs)
def standard_read(handle, column_map, **kwargs):
check_column_names(read(handle, nrows=3), column_map)
return read(handle, encoding='latin-1', **kwargs).rename(columns=column_map)
def apply_str(x):
try:
return x.astype(int).astype(str)
except ValueError:
return x.astype(str)
def annotate_read_depth(series_alt_count, series_ref_count):
return series_alt_count.astype(int).add(series_ref_count.astype(int))
def get_idx_low_depth(series_depth, min_depth):
return series_depth[series_depth.astype(int).lt(int(min_depth))].index
def get_idx_coding_classifications(series_classification):
coding_classifications = [
'Missense_Mutation', 'Nonsense_Mutation', 'Nonstop_Mutation', 'Splice_Site',
'Frame_Shift_Ins', 'Frame_Shift_Del', 'In_Frame_Ins', 'In_Frame_Del']
return series_classification[series_classification.isin(coding_classifications)].index
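# Illustrative sketch added for clarity -- not part of the original filter script.
# It shows how the depth/coding helpers above behave on a toy MAF-like frame; the
# values below are made up purely for demonstration and the function is never called.
def _example_depth_and_coding_helpers():
    toy = pd.DataFrame({
        MAPPED_ALT_COUNT: ['10', '1'],
        MAPPED_REF_COUNT: ['20', '2'],
        MAPPED_VAR_CLASS: ['Missense_Mutation', 'Silent'],
    })
    depth = annotate_read_depth(toy[MAPPED_ALT_COUNT], toy[MAPPED_REF_COUNT])
    assert list(depth) == [30, 3]
    # only row 1 falls below a hypothetical min_depth of 10
    assert list(get_idx_low_depth(depth, 10)) == [1]
    # only the Missense_Mutation row counts as coding
    assert list(get_idx_coding_classifications(toy[MAPPED_VAR_CLASS])) == [0]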
def rename_exac_cols(df):
colmap = {}
old_columns = df.columns[df.columns.str.lower().str.contains('exac')]
new_columns = ['_'.join([col, 'previous_annotation']) for col in old_columns]
for old, new in zip(old_columns, new_columns):
colmap[old] = new
return df.rename(columns=colmap)
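# Illustrative sketch (not part of the original script): rename_exac_cols tags any
# ExAC columns already present in the input MAF so they are not confused with the
# columns added by the fresh ExAC merge in main(); the toy column names are made up.
def _example_rename_exac_cols():
    toy = pd.DataFrame(columns=['gene', 'exac_af'])
    assert list(rename_exac_cols(toy).columns) == ['gene', 'exac_af_previous_annotation']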
def write_integer(number, filename):
with open(filename, 'w') as f:
f.write('%d' % number)
def main(inputs):
df = standard_read(inputs[maf_handle], maf_column_map, low_memory=False)
df = rename_exac_cols(df)
exac = standard_read(inputs[exac_handle], exac_column_map, low_memory=False)
merge_cols = [MAPPED_CHR, MAPPED_POS, MAPPED_REF, MAPPED_ALT]
df = df.merge(exac, on=merge_cols, how='left')
df.loc[:, populations] = df.loc[:, populations].fillna(0.0)
df.loc[:, LOW_DEPTH] = np.nan
df.loc[:, CODING] = np.nan
df.loc[:, WL] = np.nan
df.loc[:, EXAC_COMMON] = 0.0
idx_original = df.index
df[MAPPED_ALT_COUNT] = df[MAPPED_ALT_COUNT].fillna(0.0)
df[MAPPED_REF_COUNT] = df[MAPPED_REF_COUNT].fillna(0.0)
df[DEPTH] = annotate_read_depth(df[MAPPED_ALT_COUNT], df[MAPPED_REF_COUNT])
df.loc[:, LOW_DEPTH] = 0.0
idx_read_depth = get_idx_low_depth(df[DEPTH], inputs[min_depth])
df.loc[:, CODING] = 0.0
idx_coding = get_idx_coding_classifications(df[MAPPED_VAR_CLASS])
idx_noncoding = idx_original.difference(idx_coding)
if not inputs[boolean_whitelist]:
df.loc[:, WL] = 0.0
whitelist = read(inputs[whitelist_handle], header=None).rename(columns=whitelist_column_map)
df[whitelist_column_map[3]] = df[MAPPED_GENE].astype(str) + ':' + \
df[MAPPED_AA].str.split('p.', expand=True).loc[:, 1].astype(str)
df[whitelist_column_map[3]] = df[whitelist_column_map[3]].fillna('')
idx_whitelist = df[df[whitelist_column_map[3]].isin(whitelist[whitelist_column_map[3]])].index
else:
idx_whitelist = | pd.DataFrame() | pandas.DataFrame |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into the HDF file and rereads it
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": | pd.Series([10, 9, 8]) | pandas.Series |
import json
import os
import re
from typing import Any, Dict, Optional
import pandas as pd
from pandas import DataFrame
import geopandas as gpd
from network_wrangler import ProjectCard
from network_wrangler import RoadwayNetwork
from .transit import CubeTransit, StandardTransit
from .logger import WranglerLogger
from .parameters import Parameters
from .roadway import ModelRoadwayNetwork
class Project(object):
"""A single or set of changes to the roadway or transit system.
Compares a base and a build transit network or a base and build
highway network and produces project cards.
.. highlight:: python
Typical usage example:
::
test_project = Project.create_project(
base_transit_source=os.path.join(CUBE_DIR, "transit.LIN"),
build_transit_source=os.path.join(CUBE_DIR, "transit_route_shape_change"),
)
test_project.evaluate_changes()
test_project.write_project_card(
os.path.join(SCRATCH_DIR, "t_transit_shape_test.yml")
)
Attributes:
DEFAULT_PROJECT_NAME: a class-level constant that defines what
the project name will be if none is set.
STATIC_VALUES: a class-level constant which defines values that
are not evaluated when assessing changes.
card_data (dict): {"project": <project_name>, "changes": <list of change dicts>}
roadway_changes (DataFrame): pandas dataframe of CUBE roadway changes.
transit_changes (CubeTransit):
base_roadway_network (RoadwayNetwork):
base_transit_network (CubeTransit):
build_transit_network (CubeTransit):
project_name (str): name of the project, set to DEFAULT_PROJECT_NAME if not provided
"""
DEFAULT_PROJECT_NAME = "USER TO define"
STATIC_VALUES = [
"model_link_id",
"area_type",
"county",
#"assign_group",
"centroidconnect",
]
def __init__(
self,
roadway_changes: Optional[DataFrame] = None,
transit_changes: Optional[CubeTransit] = None,
base_roadway_network: Optional[RoadwayNetwork] = None,
base_transit_network: Optional[CubeTransit] = None,
build_transit_network: Optional[CubeTransit] = None,
project_name: Optional[str] = "",
evaluate: bool = False,
parameters={},
):
"""
constructor
"""
self.card_data = Dict[str, Dict[str, Any]]
self.roadway_changes = roadway_changes
self.base_roadway_network = base_roadway_network
self.base_transit_network = base_transit_network
self.build_transit_network = build_transit_network
self.transit_changes = transit_changes
self.project_name = (
project_name if project_name else Project.DEFAULT_PROJECT_NAME
)
self.parameters = Parameters(**parameters)
if base_roadway_network != None:
self.determine_roadway_network_changes_compatability()
if evaluate:
self.evaluate_changes()
def write_project_card(self, filename):
"""
Writes project cards.
Args:
filename (str): File path to output .yml
Returns:
None
"""
ProjectCard(self.card_data).write(filename)
WranglerLogger.info("Wrote project card to: {}".format(filename))
@staticmethod
def create_project(
roadway_log_file: Optional[str] = None,
roadway_shp_file: Optional[str] = None,
roadway_csv_file: Optional[str] = None,
base_roadway_dir: Optional[str] = None,
base_transit_source: Optional[str] = None,
build_transit_source: Optional[str] = None,
roadway_changes: Optional[DataFrame] = None,
transit_changes: Optional[CubeTransit] = None,
base_roadway_network: Optional[RoadwayNetwork] = None,
base_transit_network: Optional[CubeTransit] = None,
build_transit_network: Optional[CubeTransit] = None,
project_name=None,
parameters={},
):
"""
Constructor for a Project instance.
Args:
roadway_log_file (str): File path to the CUBE logfile to consume.
roadway_shp_file (str): File path to the shapefile of roadway changes to consume.
roadway_csv_file (str): File path to the csv file of roadway changes to consume.
base_roadway_dir (str): Folder path to base roadway network.
base_transit_source (str): Folder or file path to the base transit network.
build_transit_source (str): Folder or file path to the build transit network.
roadway_changes (DataFrame): pandas dataframe of CUBE roadway changes.
transit_changes (CubeTransit): build transit changes.
base_roadway_network (RoadwayNetwork): Base roadway network object.
base_transit_network (CubeTransit): Base transit network object.
build_transit_network (CubeTransit): Build transit network object.
Returns:
A Project instance.
"""
if base_transit_source:
base_transit_network = CubeTransit.create_from_cube(base_transit_source)
WranglerLogger.debug(
"Base network has {} lines".format(len(base_transit_network.lines))
)
if len(base_transit_network.lines) <= 10:
WranglerLogger.debug(
"Base network lines: {}".format(
"\n - ".join(base_transit_network.lines)
)
)
else:
msg = "No base transit network."
WranglerLogger.info(msg)
base_transit_network = None
if build_transit_source and transit_changes:
msg = "Method takes only one of 'build_transit_source' and 'transit_changes' but both given"
WranglerLogger.error(msg)
raise ValueError(msg)
if build_transit_source:
WranglerLogger.debug("build")
build_transit_network = CubeTransit.create_from_cube(build_transit_source)
WranglerLogger.debug(
"Build network has {} lines".format(len(build_transit_network.lines))
)
if len(build_transit_network.lines) <= 10:
WranglerLogger.debug(
"Build network lines: {}".format(
"\n - ".join(build_transit_network.lines)
)
)
else:
msg = "No transit changes given or processed."
WranglerLogger.info(msg)
transit_changes = None
if roadway_log_file and roadway_changes:
msg = "Method takes only one of 'roadway_log_file' and 'roadway_changes' but both given"
WranglerLogger.error(msg)
raise ValueError(msg)
if roadway_shp_file and roadway_changes:
msg = "Method takes only one of 'roadway_shp_file' and 'roadway_changes' but both given"
WranglerLogger.error(msg)
raise ValueError(msg)
if roadway_csv_file and roadway_changes:
msg = "Method takes only one of 'roadway_csv_file' and 'roadway_changes' but both given"
WranglerLogger.error(msg)
raise ValueError(msg)
if roadway_log_file and roadway_csv_file:
msg = "Method takes only one of 'roadway_log_file' and 'roadway_csv_file' but both given"
WranglerLogger.error(msg)
raise ValueError(msg)
if roadway_shp_file and roadway_csv_file:
msg = "Method takes only one of 'roadway_shp_file' and 'roadway_csv_file' but both given"
WranglerLogger.error(msg)
raise ValueError(msg)
if roadway_log_file and roadway_shp_file:
msg = "Method takes only one of 'roadway_log_file' and 'roadway_shp_file' but both given"
WranglerLogger.error(msg)
raise ValueError(msg)
if roadway_log_file:
roadway_changes = Project.read_logfile(roadway_log_file)
elif roadway_shp_file:
roadway_changes = gpd.read_file(roadway_shp_file)
roadway_changes = DataFrame(roadway_changes.drop("geometry", axis = 1))
roadway_changes["model_node_id"] = 0
elif roadway_csv_file:
roadway_changes = pd.read_csv(roadway_csv_file)
roadway_changes["model_node_id"] = 0
else:
msg = "No roadway changes given or processed."
WranglerLogger.info(msg)
roadway_changes = pd.DataFrame({})
if base_roadway_network and base_roadway_dir:
msg = "Method takes only one of 'base_roadway_network' and 'base_roadway_dir' but both given"
WranglerLogger.error(msg)
raise ValueError(msg)
if base_roadway_dir:
base_roadway_network = ModelRoadwayNetwork.read(
os.path.join(base_roadway_dir, "link.json"),
os.path.join(base_roadway_dir, "node.geojson"),
os.path.join(base_roadway_dir, "shape.geojson"),
True,
)
base_roadway_network.create_calculated_variables()
base_roadway_network.calculate_distance(overwrite = True)
base_roadway_network.fill_na()
base_roadway_network.convert_int()
base_roadway_network.split_properties_by_time_period_and_category()
else:
msg = "No base roadway network."
WranglerLogger.info(msg)
base_roadway_network = None
project = Project(
roadway_changes=roadway_changes,
transit_changes=transit_changes,
base_roadway_network=base_roadway_network,
base_transit_network=base_transit_network,
build_transit_network=build_transit_network,
evaluate=True,
project_name=project_name,
parameters=parameters,
)
return project
@staticmethod
def read_logfile(logfilename: str) -> DataFrame:
"""
Reads a Cube log file and returns a dataframe of roadway_changes
Args:
logfilename (str): File path to CUBE logfile.
Returns:
A DataFrame representation of the log file.
"""
WranglerLogger.info("Reading logfile: {}".format(logfilename))
with open(logfilename) as f:
content = f.readlines()
if not content[0].startswith("HighwayLayerLogX"):
WranglerLogger.info("Returning an empty dataframe")
return DataFrame()
NodeLines = [x.strip() for x in content if x.startswith("N")]
LinkLines = [x.strip() for x in content if x.startswith("L")]
linkcol_names = ["OBJECT", "OPERATION", "GROUP"] + LinkLines[0].split(",")[1:]
nodecol_names = ["OBJECT", "OPERATION", "GROUP"] + NodeLines[0].split(",")[1:]
link_df = DataFrame(
data=[re.split(",|;", x) for x in LinkLines[1:]], columns=linkcol_names
)
node_df = DataFrame(
data=[re.split(",|;", x) for x in NodeLines[1:]], columns=nodecol_names
)
log_df = | pd.concat([link_df, node_df], ignore_index=True, sort=False) | pandas.concat |
"""
Train on manipulated files only
"""
import argparse
import data_loader
import models
import numpy as np
import utils
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from pathlib import Path
import transforms as albu_trans
from torchvision.transforms import ToTensor, Normalize, Compose
from PIL import Image
import pandas as pd
data_path = Path('data')
class_map = {'HTC-1-M7': 0,
'LG-Nexus-5x': 1,
'Motorola-Droid-Maxx': 2,
'Motorola-Nexus-6': 3,
'Motorola-X': 4,
'Samsung-Galaxy-Note3': 5,
'Samsung-Galaxy-S4': 6,
'Sony-NEX-7': 7,
'iPhone-4s': 8,
'iPhone-6': 9}
target_size = 256
def validation(model, criterion, valid_loader):
model.eval()
losses = []
accuracy_scores = []
for inputs, targets in valid_loader:
inputs = utils.variable(inputs, volatile=True)
targets = utils.variable(targets)
outputs = model(inputs)
loss = criterion(outputs, targets)
losses.append(loss.data[0])
accuracy_scores += list(targets.data.cpu().numpy() == np.argmax(outputs.data.cpu().numpy(), axis=1))
valid_loss = np.mean(losses) # type: float
valid_accuracy = np.mean(accuracy_scores) # type: float
print('Valid loss: {:.4f}, accuracy: {:.4f}'.format(valid_loss, valid_accuracy))
return {'valid_loss': valid_loss, 'accuracy': valid_accuracy}
def is_image(file_path):
img = Image.open(str(file_path))
try:
img.size
except Exception:
return False
return True
def get_df(mode=None):
if mode == 'train':
train_path = data_path / 'train'
train_file_names = list(train_path.glob('**/*.*'))
train_file_names = [x.absolute() for x in train_file_names if is_image(x)]
main_df = pd.DataFrame({'file_name': train_file_names})
main_df['fname'] = main_df['file_name'].apply(lambda x: x.name, 1)
main_df = main_df[main_df['fname'] != '(MotoNex6)8.jpg']
main_df['target'] = main_df['file_name'].apply(lambda x: x.parent.name, 1)
main_df['is_manip'] = 0
flickr_path = data_path / 'new_flickr'
flickr_file_names = list(flickr_path.glob('**/*.*'))
flickr_file_names = [x.absolute() for x in flickr_file_names]
flickr_df = | pd.DataFrame({'file_name': flickr_file_names}) | pandas.DataFrame |
import unittest
import backtest_pkg as bt
import pandas as pd
import numpy as np
from math import sqrt, log
from pandas.util.testing import assert_frame_equal
def cal_std(data):
if len(data)<=1:
return np.nan
data_mean = sum(data)/len(data)
data_var = sum((i-data_mean)**2 for i in data)/(len(data)-1)
return sqrt(data_var)
def cal_mean(data):
return sum(data)/len(data)
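# Editor's illustrative sketch (not part of the original test helpers): demonstrates
# the sample (ddof=1) convention used by cal_std alongside cal_mean. Never invoked.
def _example_cal_helpers():
    assert cal_mean([1, 2, 3]) == 2.0
    assert cal_std([1, 2, 3]) == 1.0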
class TestMarketSingleAsset(unittest.TestCase):
def setUp(self):
def construct_market(data):
ticker = ['Test Ticker']
index = pd.date_range('2020-01-01', periods=len(data), freq='D')
data_dict = dict(
adj_close_price = pd.DataFrame(data, index=index, columns=ticker),
open_price = pd.DataFrame(data, index=index, columns=ticker),
high_price = pd.DataFrame([i*1.1 for i in data], index=index, columns=ticker),
low_price = pd.DataFrame([i*0.9 for i in data], index=index, columns=ticker),
close_price = pd.DataFrame(data, index=index, columns=ticker),
)
return bt.market(**data_dict)
data_trend = [1, 2, 3, 4, 5]
self.index = pd.date_range('2020-01-01', periods=len(data_trend), freq='D')
self.ticker = ['Test Ticker']
self.market = construct_market(data_trend)
self.market_down = construct_market(data_trend[::-1])
data_sin = [3, 5, 3, 1, 3]
data_convex = [3, 2, 1, 2, 3]
data_concave = [1, 2, 3, 2, 1]
self.market_sin = construct_market(data_sin)
self.market_convex = construct_market(data_convex)
self.market_concave = construct_market(data_concave)
# Daily return: np.log([np.nan, 2/1, 3/2, 4/3, 5/4])
def test_market_daily_ret(self):
expect = pd.DataFrame(log(5/4), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.daily_ret(), expect)
def test_market_daily_ret_given_date(self):
date_str = '2020-01-03'
date = pd.to_datetime(date_str)
expect = pd.DataFrame(log(3/2), index=[date], columns=self.ticker)
assert_frame_equal(self.market.daily_ret(date=date), expect)
assert_frame_equal(self.market.daily_ret(date=date_str), expect)
def test_market_daily_ret_given_lag(self):
lag = 1
expect = pd.DataFrame(log(4/3), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.daily_ret(lag=lag), expect)
def test_market_daily_ret_given_date_lag(self):
date = pd.to_datetime('2020-01-03')
lag = 1
expect = pd.DataFrame(log(2/1), index=[date], columns=self.ticker)
assert_frame_equal(self.market.daily_ret(date=date, lag=lag), expect)
def test_market_daily_ret_out_range_date(self):
late_date = pd.to_datetime('2020-01-20')
early_date = pd.to_datetime('2019-01-01')
with self.assertRaises(AssertionError):
self.market.daily_ret(date=early_date)
with self.assertRaises(AssertionError):
self.market.daily_ret(date=late_date)
def test_market_daily_ret_large_lag(self):
lag = 100
expect = pd.DataFrame(np.nan, index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.daily_ret(lag=lag), expect)
def test_market_daily_ret_negative_lag(self):
lag = -1
with self.assertRaises(AssertionError):
self.market.daily_ret(lag=lag)
def test_market_total_ret(self):
expect = pd.DataFrame(log(5), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.total_ret(), expect)
def test_market_total_ret_given_date(self):
date_str = '2020-01-03'
date = pd.to_datetime(date_str)
expect = pd.DataFrame(log(3), index=[date], columns=self.ticker)
assert_frame_equal(self.market.total_ret(date=date), expect)
assert_frame_equal(self.market.total_ret(date=date_str), expect)
def test_market_total_ret_given_period(self):
expect = pd.DataFrame(log(5/3), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.total_ret(period=2), expect)
def test_market_total_ret_given_date_period(self):
date_str = '2020-01-04'
date = pd.to_datetime(date_str)
expect = pd.DataFrame(log(4/2), index=[date], columns=self.ticker)
assert_frame_equal(self.market.total_ret(date = date, period=2), expect)
def test_market_total_ret_out_range_date(self):
late_date = pd.to_datetime('2020-01-20')
early_date = pd.to_datetime('2019-01-01')
with self.assertRaises(AssertionError):
self.market.total_ret(date=early_date)
with self.assertRaises(AssertionError):
self.market.total_ret(date=late_date)
def test_market_total_ret_large_period(self):
with self.assertRaises(AssertionError):
self.market.total_ret(period=100)
def test_market_total_ret_negative_period(self):
with self.assertRaises(AssertionError):
self.market.total_ret(period=0)
with self.assertRaises(AssertionError):
self.market.total_ret(period=-1)
def test_market_vol(self):
data = [log(i) for i in [2/1, 3/2, 4/3, 5/4]]
expect = pd.DataFrame(cal_std(data), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.volatility(), expect)
def test_market_vol_given_date(self):
date_str = '2020-01-03'
date = pd.to_datetime(date_str)
data = [log(i) for i in [2/1, 3/2]]
expect = pd.DataFrame(cal_std(data), index=[date], columns=self.ticker)
assert_frame_equal(self.market.volatility(date=date), expect)
assert_frame_equal(self.market.volatility(date=date_str), expect)
def test_market_vol_given_period(self):
data = [log(i) for i in [4/3, 5/4]]
expect = pd.DataFrame(cal_std(data), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.volatility(period=2), expect)
def test_market_vol_given_date_period(self):
date_str = '2020-01-04'
date = pd.to_datetime(date_str)
data = [log(i) for i in [3/2, 4/3]]
expect = pd.DataFrame(cal_std(data), index=[date], columns=self.ticker)
assert_frame_equal(self.market.volatility(date=date, period=2), expect)
def test_market_vol_period_1(self):
expect = pd.DataFrame(np.nan, index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.volatility(period=1), expect)
def test_market_vol_out_range_period(self):
with self.assertRaises(AssertionError):
self.market.volatility(period=10)
def test_market_bollinger(self):
data_std = cal_std(list(range(1, 6)))
expect = pd.DataFrame((5-3)/data_std, index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.bollinger(), expect)
def test_market_bollinger_given_date(self):
date_str = '2020-01-03'
date = pd.to_datetime(date_str)
data = [1, 2, 3]
expect = pd.DataFrame((3-2)/cal_std(data), index=[date], columns=self.ticker)
assert_frame_equal(self.market.bollinger(date=date), expect)
assert_frame_equal(self.market.bollinger(date=date_str), expect)
def test_market_bollinger_given_period(self):
data = [3, 4, 5]
expect = pd.DataFrame((5-4)/cal_std(data), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market.bollinger(period=3), expect)
def test_market_bollinger_given_date_period(self):
date_str = '2020-01-04'
date = pd.to_datetime(date_str)
data = [2, 3, 4]
expect = pd.DataFrame((4-3)/cal_std(data), index=[date], columns=self.ticker)
assert_frame_equal(self.market.bollinger(date=date, period=3), expect)
def test_market_bollinger_down(self):
data = [5, 4, 3, 2, 1]
expect = pd.DataFrame((data[-1] - cal_mean(data))/cal_std(data), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market_down.bollinger(), expect)
def test_market_bollinger_sin(self):
data = [3, 5, 3, 1, 3]
expect = pd.DataFrame((data[-1] - cal_mean(data))/cal_std(data), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market_sin.bollinger(), expect)
def test_market_bollinger_convex(self):
data = [3, 2, 1, 2, 3]
expect = pd.DataFrame((data[-1] - cal_mean(data))/cal_std(data), index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market_convex.bollinger(), expect)
def test_market_RSI(self):
expect = pd.DataFrame(0.5, index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market_convex.RSI(), expect)
def test_market_RSI_given_date(self):
date_str = '2020-01-03'
date = pd.to_datetime(date_str)
expect = pd.DataFrame(0., index=[date], columns=self.ticker)
assert_frame_equal(self.market_convex.RSI(date=date), expect)
assert_frame_equal(self.market_convex.RSI(date=date_str), expect)
def test_market_RSI_given_period(self):
expect = pd.DataFrame(1., index=[self.index[-1]], columns=self.ticker)
assert_frame_equal(self.market_convex.RSI(period=2), expect)
def test_market_RSI_given_date_period(self):
date_str = '2020-01-04'
date = pd.to_datetime(date_str)
expect = pd.DataFrame(0.5, index=[date], columns=self.ticker)
assert_frame_equal(self.market_convex.RSI(date=date, period=2), expect)
def test_market_RSI_up(self):
expect = | pd.DataFrame(1., index=[self.index[-1]], columns=self.ticker) | pandas.DataFrame |
import pandas as pd
import string
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from scipy import signal
from scipy import constants
from scipy.integrate import cumtrapz
from numba import vectorize, jit
import os
import sys
import seaborn as sns
rc = {'legend.frameon': True, 'legend.fancybox': True, 'patch.facecolor': 'white', 'patch.edgecolor': 'black',
'axes.formatter.useoffset': False, 'text.usetex': True, 'font.weight': 'bold', 'mathtext.fontset': 'stix'}
sns.set(context='poster', style='white', font_scale=1.7, font='serif', rc=rc)
sns.set_style("ticks")
import exa
import exatomic
from exatomic import qe
import notebook as nb
import math
from nuc import *
nuc_df = pd.DataFrame.from_dict(nuc)
def read_efg_data(file_path,ensemble_average=False):
rawdf = | pd.io.parsers.read_csv(file_path) | pandas.io.parsers.read_csv |
"""
This script calls the networkSimulator to create voxelwise synapse counts and
can be used as a starting point for integrating existing inference algorithms.
"""
################################################################################
### LOAD LIBRARIES
################################################################################
import os
import json
import subprocess
import numpy as np
import pandas as pd
import statsmodels.api as sm
# import sys
# import csv
################################################################################
### SET FILE NAMES
################################################################################
specFile = "synapseSpec.json"
simulator = "networkSimulator.exe"
featuresFile = "features.csv"
synapseFile = "synapses.csv"
################################################################################
### GENERATE RULE-BASED SYNAPSE COUNTS
################################################################################
# Load default spec file as template
with open(specFile) as f:
spec = json.load(f)
# Set theta in temporary spec file. Example with fixed theta [0, 1, 1, -1]:
true_beta = [0, 1, 1, -1]
spec["CONNECTIVITY_RULE_PARAMETERS"] = true_beta
newSpecFilePath = os.path.join(os.path.dirname(specFile),
"generatedSpecFile.json")
with open(newSpecFilePath, 'w') as generatedSpecFile:
json.dump(spec, generatedSpecFile)
# Call the simulator and remove temporary spec file.
subprocess.run([simulator, "SYNAPSE", newSpecFilePath])
os.remove(newSpecFilePath)
################################################################################
### MAXIMUM LIKELIHOOD ESTIMATION (MLE)
################################################################################
# Read data
data = | pd.read_csv("synapses.csv") | pandas.read_csv |
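# Editor's sketch (purely illustrative, not taken from the original script, which
# continues beyond this excerpt): one way the MLE step could proceed with statsmodels,
# assuming the voxelwise features in features.csv act as regressors for
# Poisson-distributed synapse counts. The column names "feature_1", "feature_2",
# "feature_3" and "synapse_count" are hypothetical placeholders, not real file columns.
# features = pd.read_csv(featuresFile)
# X = sm.add_constant(features[["feature_1", "feature_2", "feature_3"]])
# fit = sm.GLM(data["synapse_count"], X, family=sm.families.Poisson()).fit()
# print(fit.params)  # parameter estimates to compare against true_beta = [0, 1, 1, -1]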
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from datetime import datetime
from io import StringIO
import os
import platform
from tempfile import TemporaryFile
from urllib.error import URLError
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas import DataFrame, Index, MultiIndex, Series, compat, concat
import pandas._testing as tm
from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser
def test_override_set_noconvert_columns():
# see gh-17351
#
# Usecols needs to be sorted in _set_noconvert_columns based
# on the test_usecols_with_parse_dates test from test_usecols.py
class MyTextFileReader(TextFileReader):
def __init__(self):
self._currow = 0
self.squeeze = False
class MyCParserWrapper(CParserWrapper):
def _set_noconvert_columns(self):
if self.usecols_dtype == "integer":
# self.usecols is a set, which is documented as unordered
# but in practice, a CPython set of integers is sorted.
# In other implementations this assumption does not hold.
# The following code simulates a different order, which
# before GH 17351 would cause the wrong columns to be
# converted via the parse_dates parameter
self.usecols = list(self.usecols)
self.usecols.reverse()
return | CParserWrapper._set_noconvert_columns(self) | pandas.io.parsers.CParserWrapper._set_noconvert_columns |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
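# Standalone sketch of the tz-compat rule exercised above: equality between tz-aware
# values and a tz-naive scalar is element-wise False, while ordering comparisons raise.
import pandas as pd
_aware = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
_naive_ts = pd.Timestamp("2016-01-01")
assert not (_aware == _naive_ts).any()
assert (_aware != _naive_ts).all()
try:
    _aware < _naive_ts
except TypeError:
    pass  # invalid comparison between tz-aware and tz-naive datetimes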
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fall back to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
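# Quick standalone check of the behaviour above: adding a timedelta-like scalar shifts
# every element of the datetime64 data by that amount.
import pandas as pd
_rng = pd.date_range("2000-01-01", periods=3, tz="UTC")
assert (_rng + pd.Timedelta(hours=2))[0] == pd.Timestamp("2000-01-01 02:00", tz="UTC")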
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
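# Quick standalone check of the NaT-propagation rule above: adding or subtracting
# timedelta64("NaT") turns every element into NaT.
import numpy as np
import pandas as pd
_dti = pd.date_range("1994-04-01", periods=3)
assert (_dti + np.timedelta64("NaT")).isna().all()
assert (_dti - np.timedelta64("NaT")).isna().all()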
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
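# Standalone illustration of the invalid case above: adding two datetime-likes has no
# defined meaning, so pandas raises TypeError.
import pandas as pd
_idx = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
try:
    _idx + _idx[0]
except TypeError:
    pass  # "cannot add DatetimeArray and Timestamp"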
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
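# Quick standalone check mirroring the expectation above: MonthEnd rolls a timestamp
# forward to the last day of its month (2000 is a leap year, hence Feb 29).
import pandas as pd
_ts = pd.Timestamp("2000-02-15", tz="US/Central")
assert _ts + pd.offsets.MonthEnd() == pd.Timestamp("2000-02-29", tz="US/Central")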
@pytest.mark.parametrize(
"other",
[
np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]),
np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]),
np.array( # matching offsets
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
),
],
)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize("box_other", [True, False])
def test_dt64arr_add_sub_offset_array(
self, tz_naive_fixture, box_with_array, box_other, op, other
):
# GH#18849
# GH#10699 array of offsets
tz = tz_naive_fixture
dti = date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))])
expected = tm.box_expected(expected, box_with_array)
if box_other:
other = tm.box_expected(other, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dtarr, other)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100Y", periods=4)
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", Timestamp.max])
dtimin = pd.to_datetime(["now", Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
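# Standalone illustration of the overflow guard tested above: the gap between
# Timestamp.max and a mid-20th-century date does not fit in int64 nanoseconds.
import pandas as pd
try:
    pd.to_datetime([pd.Timestamp.max]) - pd.to_datetime(["1950-01-01"])
except OverflowError:
    pass  # "Overflow in int64 addition"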
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
Timestamp("20111230"),
Timestamp("20120101"),
Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
Timestamp("20111231"),
Timestamp("20120102"),
Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
# datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
# timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
# these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
# ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = Series(dti)
expected = Series(TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([NaT, Timestamp("19900315")]),
Series([NaT, NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
def test_dt64_series_add_intlike(self, tz_naive_fixture):
# GH#19123
tz = tz_naive_fixture
dti = DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
assert_invalid_addsub_type(ser, 1, msg)
assert_invalid_addsub_type(ser, other, msg)
assert_invalid_addsub_type(ser, np.array(other), msg)
assert_invalid_addsub_type(ser, pd.Index(other), msg)
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
assert td2._values.freq is None
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "|".join(
["Addition/subtraction of integers", "cannot subtract DatetimeArray from"]
)
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz)
expected = expected._with_freq(None)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
expected = expected._with_freq(None)
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
# DTA.__isub__ GH#43904
dta = dti._data.copy()
dta -= tdi
tm.assert_datetime_array_equal(dta, expected._data)
out = dti._data.copy()
np.subtract(out, tdi, out=out)
tm.assert_datetime_array_equal(out, expected._data)
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract DatetimeArray from ndarray"
with pytest.raises(TypeError, match=msg):
tdi.values -= dti
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi._values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple of other tests belong in this section. Move them in
# a PR where there isn't already a giant diff.
@pytest.mark.parametrize(
"addend",
[
datetime(2011, 1, 1),
DatetimeIndex(["2011-01-01", "2011-01-02"]),
DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
np.datetime64("2011-01-01"),
Timestamp("2011-01-01"),
],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
# GH#9631
dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add DatetimeArray and"
assert_cannot_add(dtarr, addend, msg)
# -------------------------------------------------------------
def test_dta_add_sub_index(self, tz_naive_fixture):
# Check that DatetimeArray defers to Index classes
dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
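# Standalone sketch of what the truncated test above verifies: arithmetic on the
# underlying DatetimeArray defers to the Index classes, so DatetimeArray minus
# DatetimeIndex comes back as a TimedeltaIndex rather than a bare ndarray.
import pandas as pd
_dti = pd.date_range("20130101", periods=3)
_result = _dti.array - _dti
assert isinstance(_result, pd.TimedeltaIndex)
assert (_result == pd.Timedelta(0)).all()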
# -*- coding: utf-8 -*-
"""
Reading data for WB and PRO for the kennisimpulse project:
reads data from provinces, water companies, and any other sources.
Created on Sun Jul 26 21:55:57 2020
@author: <NAME>
"""
import pytest
import numpy as np
import pandas as pd
from pathlib import Path
import pickle as pckl
from hgc import ner
from hgc import io
import tests
# import xlsxwriter
def test_province():
# WD = Path(tests.__file__).parent / 'provincie_data_long_preprocessed.csv'
WD = r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse'+'/provincie_data_long_preprocessed.csv'
df_temp = pd.read_csv(WD, encoding='ISO-8859-1', header=None)
# define the nrow here
n_row = None
feature_map, feature_unmapped, df_feature_map = ner.generate_feature_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 25].dropna()))
unit_map, unit_unmapped, df_unit_map = ner.generate_unit_map(entity_orig=list(df_temp.iloc[slice(2, n_row), 26].dropna()))
# create a df to record what has been mapped and what has not
df_map = pd.DataFrame((feature_map.keys(),feature_map.values(),unit_map.keys(),unit_map.values()), index=['Feature','Mapped feature','Unit','Mapped unit']).transpose()
if feature_unmapped:
df_map = df_map.join(pd.DataFrame(feature_unmapped, columns=['Unmapped feature']))
if unit_unmapped:
df_map = df_map.join(pd.DataFrame(unit_unmapped, columns=['Unmapped unit']))
dct2_arguments = {
'file_path': WD,
'sheet_name': 'stacked',
'shape': 'stacked',
'slice_header': [1, slice(1, None)],
'slice_data': [slice(1, n_row), slice(1, None)],
'map_header': {
**io.default_map_header(),
'MeetpuntId': 'LocationID',
'parameter':'Feature',
'eenheid': 'Unit',
'waarde': 'Value',
'Opgegeven bemonstering datum': 'Datetime',
'Monsternummer': 'SampleID', # "SampleID" already exists as header, but contains wrong date. Use "Sample number" as "SampleID"
# 'SampleID': None # otherwise exists twice in output file
},
'map_features': {**feature_map,'pH(1)':'pH'},
'map_units': {**unit_map, 'oC':'°C'},
}
df2 = io.import_file(**dct2_arguments)[0]
df2_hgc = io.stack_to_hgc(df2)
with pd.ExcelWriter(r'C:\Users\beta6\Documents\Dropbox\008KWR\0081Projects\kennisimpulse'+r'/provincie_processed.xlsx') as writer:
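# Assumed body for the writer block (the source file is truncated here): which frames
# are exported and the sheet names below are guesses, not taken from the original.
df_map.to_excel(writer, sheet_name='mapping')
df2.to_excel(writer, sheet_name='stacked')
df2_hgc.to_excel(writer, sheet_name='hgc')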
################################################################################################
# NOTE: I started this code to get better matching results than matching by address,
# but I never finished and thus this code hasn't actually been used yet.
################################################################################################
import pandas as pd
import numpy as np
import re
valid_street_types = [
'ST', 'AVE', 'CT', 'CIR', 'BLVD', 'WAY', 'DR', 'TER', 'HWY', 'HL',
'PL', 'LN', 'RD', 'PARK', 'ALY', 'PLZ', 'ROW', 'WALK', 'SQ', 'SW']
def clean_eviction_data(ev):
if 'index' not in ev.columns:
ev.reset_index(inplace=True)
ev = ev[(~pd.isnull(ev['address'])) & (ev['address'] != 'UNKNOWN')
& (ev['address'].str.contains('^[0-9]'))]
ev.address = ev.address.str.upper()
ev.loc[ev['address'].str.contains('APT\.'), 'apt'] = ev.loc[
ev['address'].str.contains('APT\.'), 'address'].str.split('\s#?APT\.?').str[1]
ev.loc[ev['address'].str.contains('APT\.'), 'address'] = ev.loc[
ev['address'].str.contains('APT\.'), 'address'].str.split('\s#?APT\.?').str[0]
ev.loc[ev['address'].str.contains('#'), 'apt'] = ev.loc[
ev['address'].str.contains('#'), 'address'].str.split('\s#').str[1]
ev.loc[ev['address'].str.contains('#'), 'address'] = ev.loc[
ev['address'].str.contains('#'), 'address'].str.split('\s#').str[0]
ev['address'] = ev['address'].str.replace(
'|'.join(['STRETT', 'STRRET', 'STREET31', 'SREET', 'DTREET', 'STREEET']), 'ST')
ev['address'] = ev['address'].str.replace(
'|'.join(['STRET', 'STRRE', 'STREE$']), 'ST')
ev['address'] = ev['address'].str.replace(
'|'.join(['AVENEU', 'AVENE', 'AVENE', 'AVNEUE', 'AAVE']), 'AVE')
ev['address'] = ev['address'].str.replace('BOUELVARD', 'BLVD')
ev['address'] = ev['address'].str.replace('VAN VAN', 'VAN')
ev['address'] = ev['address'].str.replace('ST ST', 'ST')
ev['address'] = ev['address'].str.replace('AVE AVE', 'AVE')
ev['address'] = ev['address'].str.replace('MERCED MERCED', 'MERCED')
ev['address'] = ev['address'].str.replace('POSTREET', 'POST ST')
ev['address'] = ev['address'].str.replace('21STREET', '21ST ST')
ev['address'] = ev['address'].str.replace('JOOSTREET', 'JOOST AVE')
ev['address'] = ev['address'].str.replace('LOCUSTREET', 'LOCUS ST')
ev['address'] = ev['address'].str.replace('BUSTREET', 'BUSH ST')
ev['address'] = ev['address'].str.replace('1STREET', '1ST ST')
ev['address'] = ev['address'].str.replace('AMHERSTREET', 'AMHERST ST')
ev['address'] = ev['address'].str.replace('TURKSTREET', 'TURK ST')
ev['address'] = ev['address'].str.replace('HARRISOIN', 'HARRISON')
ev['address'] = ev['address'].str.replace('BOARDWAY', 'BROADWAY')
ev['address'] = ev['address'].str.replace("檀蘝涛虧檀迦", "")
ev['address'] = ev['address'].str.replace("涛宕", "")
ev['address'] = ev['address'].str.replace("'", "")
ev.loc[ev['address'] == "20 FRANKLIN STREET", 'address'] = "1580-1598 MARKET ST"
ev.loc[ev['address'] == "57 TAYLOR STREET", 'address'] = "101-105 TURK ST"
ev.loc[ev['address'] == "455 EDDY STREET", 'address'] = "350 TURK ST"
ev.loc[ev['address'] == "790 VALLEJO STREET", 'address'] = "1500-1506 POWELL ST"
ev.loc[ev['address'] == "2 EMERY LANE", 'address'] = "734-752 VALLEJO ST"
ev.loc[ev['address'] == "1091 BUSH STREET", 'address'] = "850 LEAVENWORTH ST"
ev.loc[ev['address'] == "795 20TH AVENUE", 'address'] = "4400 FULTON ST"
ev.loc[ev['address'] == "440 DAVIS COURT", 'address'] = "100 WASHINGTON ST"
ev.loc[ev['address'] == "405 12TH AVENUE", 'address'] = "4801 GEARY BLVD"
ev.loc[ev['address'] == "4 BECKETT STREET", 'address'] = "670 JACKSON ST"
ev.loc[ev['address'] == "874 SACRAMENTO STREET", 'address'] = "800 STOCKTON ST"
ev.loc[ev['address'] == "265 NORTH POINT STREET", 'address'] = "2310-2390 POWELL ST"
ev.loc[ev['address'] == "20 12TH STREET", 'address'] = "1613 MARKET ST"
ev.loc[ev['address'] == "609 ASHBURY STREET", 'address'] = "1501-1509 HAIGHT ST"
ev.loc[ev['address'] == "22 VANDEWATER STREET", 'address'] = "333 BAY ST"
ev.loc[ev['address'] == "160 BAY STREET", 'address'] = "2210-2290 STOCKTON ST"
ev.loc[ev['address'] == "505 26TH AVENUE", 'address'] = "6201-6209 GEARY BLVD"
ev.loc[ev['address'] == "3410 22ND STREET", 'address'] = "994-998 GUERRERO ST"
ev.loc[ev['address'] == "1312 UTAH STREET", 'address'] = "2601-2611 24TH ST"
ev.loc[ev['address'] == "1290 HAYES STREET", 'address'] = "600-604 DIVISADERO ST"
ev.loc[ev['address'] == "130 COSO AVE", 'address'] = "1 LUNDY'S LN"
ev.loc[ev['address'] == '3444 16TH STREET', 'address'] = "3440 16TH ST"
ev.loc[ev['address'] == '603 NATOMA STREET', 'address'] = "170 7TH ST"
ev.loc[ev['address'].str.contains('[0-9]02ND'), 'address'] = ev.loc[
ev['address'].str.contains('[0-9]02ND'), 'address'].str.replace('02ND', ' 2ND')
ev.loc[ev['address'].str.contains('\s[0-9A-Z]$'), 'address'] = ev.loc[
ev['address'].str.contains('\s[0-9A-Z]$'), 'address'].str.split(' ').str[:-1].str.join(' ')
ev.loc[ev['address'].str.contains('BROADWAY'), 'street_type'] = 'ST'
# ev.loc[ev['address'].str.contains('CESAR CHAVEZ'), 'street_type'] = 'BLVD'
ev.loc[ev['address'].str.contains('RUSSIA'), 'street_type'] = 'AVE'
ev.loc[ev['petition'] == 'M101171', 'address'] = '531 GONZALEZ DRIVE'
ev.loc[ev['petition'] == 'M111009', 'address'] = '55 CHUMASERO DRIVE'
ev.loc[ev['petition'] == 'M112072', 'address'] = '125 CAMBON DRIVE'
ev.loc[ev['petition'] == 'M131872', 'address'] = '1921 ELLIS STREET'
ev.loc[ev['petition'] == 'M140347', 'address'] = '326 LONDON STREET'
ev.loc[ev['petition'] == 'E980001', 'address'] = '1551A 20TH AVE'
ev.loc[ev['petition'] == 'E991754', 'address'] = '1271 FILBERT ST'
ev.loc[ev['petition'] == 'M2K0279', 'address'] = '2364 FULTON ST'
ev.loc[ev['petition'] == 'S000521', 'address'] = '431 SOMERSET ST'
ev.loc[ev['petition'] == 'S000417', 'address'] = '1201 GUERRERO ST'
# parkmerced
ev.loc[ev['address'].str.contains(
'GONZALEZ|FONT|SERRANO|CHUMASERO|ARBALLO|GARCES|CAMBON|VIDAL|GRIJALVA|TAPIA|BUCARELI|RIVAS|CRESPI|CARDENAS|HIGUERA'),
'address'] = '3711 19TH AVE'
ev = ev[ev['address'] != 'NO ADDRESS PROVIDED']
# clean street types
ev['street_type'] = ev['address'].str.split(' ').str[-1]
st_typ_dict = {'STREET': 'ST', 'AVENUE': 'AVE', 'DRIVE': 'DR', 'BOULEVARD': 'BLVD', 'COURT': 'CT',
'TERRACE': 'TER', 'PLACE': 'PL', 'HIGHWAY': 'HWY', 'LANE': 'LN', 'ROAD': 'RD', 'ALLEY': 'ALY',
'CIRCLE': 'CIR', 'SQUARE': 'SQ', 'PLAZA': 'PLZ', 'HILLS': 'HL', 'HILL': 'HL'
}
ev = ev.replace({'street_type': st_typ_dict})
ev.loc[~ev['street_type'].isin(valid_street_types), 'street_type'] = None
# clean street numbers
ev['street_num'] = ev['address'].str.split(' ').str[0]
ev['house_1'] = ''
ev['house_2'] = ev['street_num']
ev.loc[ev['street_num'].str.contains('-'), 'house_1'] = ev.loc[
ev['street_num'].str.contains('-'), 'street_num'].str.split('-').str[0]
ev.loc[ev['street_num'].str.contains('-'), 'house_2'] = ev.loc[
ev['street_num'].str.contains('-'), 'street_num'].str.split('-').str[1]
ev['house_1'] = ev['house_1'].str.replace('\D', '')
ev['house_2'] = ev['house_2'].str.replace('\D', '')
# clean street names
ev['street_name'] = None
ev.loc[~pd.isnull(ev['street_type']), 'street_name'] = ev.loc[
~pd.isnull(ev['street_type']), 'address'].str.split(' ').str[1:-1].str.join(' ')
# assumed completion: where no street type was recognised, take everything after the
# house number as the street name
ev.loc[pd.isnull(ev['street_name']), 'street_name'] = ev.loc[
pd.isnull(ev['street_name']), 'address'].str.split(' ').str[1:].str.join(' ')
'''
Scripts for loading various experimental datasets.
Created on Jul 6, 2017
@author: <NAME>
'''
import os
import pandas as pd
import numpy as np
from evaluation.experiment import data_root_dir
all_root_dir = data_root_dir#os.path.expanduser('~/data/bayesian_sequence_combination')
data_root_dir = os.path.join(all_root_dir, 'data')
def _load_bio_folder(anno_path_root, folder_name):
'''
Loads one data directory out of the complete collection.
:return: dataframe containing the data from this folder.
'''
from data.pico.corpus import Corpus
DOC_PATH = os.path.join(data_root_dir, "bio-PICO/docs/")
ANNOTYPE = 'Participants'
anno_path = anno_path_root + folder_name
anno_fn = anno_path + '/PICO-annos-crowdsourcing.json'
gt_fn = anno_path + '/PICO-annos-professional.json'
corpus = Corpus(doc_path=DOC_PATH, verbose=False)
corpus.load_annotations(anno_fn, docids=None)
if os.path.exists(gt_fn):
corpus.load_groundtruth(gt_fn)
# get a list of the docids
docids = []
workerids = np.array([], dtype=str)
all_data = None
#all_fv = _load_pico_feature_vectors_from_file(corpus)
for d, docid in enumerate(corpus.docs):
docids.append(docid)
annos_d = corpus.get_doc_annos(docid, ANNOTYPE)
spacydoc = corpus.get_doc_spacydoc(docid)
text_d = spacydoc #all_fv[d]
doc_length = len(text_d)
doc_data = None
for workerid in annos_d:
print('Processing data for doc %s and worker %s' % (docid, workerid))
if workerid not in workerids:
workerids = np.append(workerids, workerid)
# add the worker to the dataframe if not already there
if doc_data is None or workerid not in doc_data:
doc_data_w = np.ones(doc_length, dtype=int) # O tokens
if doc_data is None:
doc_data = pd.DataFrame(doc_data_w, columns=[workerid])
else:
doc_data_w = doc_data[workerid]
for span in annos_d[workerid]:
start = span[0]
fin = span[1]
doc_data_w[start] = 2
doc_data_w[start + 1:fin] = 0
doc_data[workerid] = doc_data_w
if os.path.exists(gt_fn):
gold_d = corpus.get_doc_groundtruth(docid, ANNOTYPE)
if 'gold' not in doc_data:
doc_data['gold'] = np.ones(doc_length, dtype=int)
for spans in gold_d:
start = spans[0]
fin = spans[1]
doc_data['gold'][start] = 2
doc_data['gold'][start + 1:fin] = 0
else:
doc_data['gold'] = np.zeros(doc_length, dtype=int) - 1 # -1 for missing gold values
text_d = [spacytoken.text for spacytoken in text_d]
doc_data['features'] = text_d
doc_start = np.zeros(doc_length, dtype=int)
doc_start[0] = 1
doc_gaps = doc_data['features'] == '\n\n' # sentence breaks
doc_start[doc_gaps[doc_gaps].index[:-1] + 1] = 1
doc_data['doc_start'] = doc_start
# doc_data = doc_data.replace(r'\n', ' ', regex=True)
doc_data = doc_data[np.invert(doc_gaps)]
doc_data['docid'] = docid
if all_data is None:
all_data = doc_data
else:
all_data = pd.concat([all_data, doc_data], axis=0)
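# Sketch (not part of the loader above): growing `all_data` with pd.concat inside the
# loop re-copies the accumulated frame on every document; collecting the per-document
# frames in a list and concatenating once after the loop is the usual, faster idiom.
# `frames` is an illustrative name, not from the original code.
def _concat_docs(frames):
    import pandas as pd
    return pd.concat(frames, axis=0) if frames else None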
"""Load a model and evaluate its performance against an unknown test set"""
import glob
import logging
import os
import re
import sqlite3
from pathlib import Path
import configargparse
import keras.models
import numpy as np
import pandas
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import classification_report, confusion_matrix
IMG_DIM = (201, 201)
def evaluate(
model_file: str,
test_dir: str,
output_dir: str,
sample_lable_lst: str,
slices_per_structure: int = 60,
rgb: bool = False,
):
# Load model
try:
model = keras.models.load_model(model_file)
except Exception:
logging.error(f"Failed to load model from {model_file}")
raise
logging.info(f"Model loaded from {model_file}")
# Get test files prepared
# Load files
try:
test_files = glob.glob(f"{test_dir}/*")
logging.info(f"Found {len(test_files)} files for testing")
assert len(test_files) > 0, f"Could not find files at {test_dir}"
assert (
len(test_files) % slices_per_structure == 0
), f"Number of test files is not an exact multiple of slices per structure"
except AssertionError as e:
logging.error(e)
raise
except Exception as e:
logging.error(e)
raise
# Read table into pandas dataframe
# Load data CSV file with filenames and labels
print(sample_lable_lst)
data = pandas.read_csv(sample_lable_lst)
# remove image number from file name
names = [re.findall("(.*)(?=_[0-9]+)", Path(file).stem)[0] for file in test_files]
test_labels = []
for name in names:
sample = data.loc[data["file_path"].str.contains(name)]
label = sample["map_class_autobuild"].values[0]
test_labels.append(label)
print(test_labels)
# Create training dataframe
# testing_dataframe = pandas.DataFrame({"Files": test_files, "Labels": test_labels})
testing_dataframe = pandas.DataFrame(
{"Files": test_files, "Labels": [str(label) for label in test_labels]}
)
print(testing_dataframe.head())
testing_dataframe.set_index("Files")
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_batch_size = slices_per_structure
# Add color mode selection, this is necessary for testing pretrained models which are expecting RGB images
if rgb:
color_mode = "rgb"
else:
color_mode = "grayscale"
test_generator = test_datagen.flow_from_dataframe(
testing_dataframe,
x_col="Files",
y_col="Labels",
target_size=IMG_DIM,
class_mode=None,
color_mode=color_mode,
batch_size=test_batch_size,
shuffle=False,
)
logging.info("Getting predictions")
try:
predictions = model.predict(
test_generator, steps=int(len(testing_dataframe["Files"]) / test_batch_size)
)
except ValueError:
logging.exception(
"Ensure the RGB option is set correctly for your model - "
"Some models expect 3 channel data"
)
raise
# Per image analysis
predictions_1 = [x for x in predictions if x[1] > x[0]]
predictions_0 = [x for x in predictions if x[1] < x[0]]
logging.info(f"Predicted good value {len(predictions_1)} times")
logging.info(f"Predicted bad value {len(predictions_0)} times")
predictions_decoded = [int(pred[1] > pred[0]) for pred in predictions]
# Create an output directory if it doesn't exist
output_dir_path = Path(output_dir)
if not output_dir_path.exists():
# Make one
try:
os.mkdir(output_dir_path)
logging.info(f"Created new directory in {output_dir_path}")
except Exception as e:
logging.error(
f"Could not create directory at {output_dir_path}.\n"
f"Please check permissions and location."
)
logging.error(e)
raise
# Save raw predictions
raw_dataframe = pandas.DataFrame(
{
"File": testing_dataframe["Files"],
"0": predictions[:, 0],
"1": predictions[:, 1],
"True Score": test_labels,
}
)
raw_dataframe.set_index("File", inplace=True)
raw_dataframe.to_csv(output_dir_path / "raw_predictions.csv")
logging.info("Per image analysis:")
per_image_class = classification_report(
predictions_decoded, testing_dataframe["Labels"], output_dict=True
)
per_image_class_frame = pandas.DataFrame(per_image_class).transpose()
per_image_conf = confusion_matrix(predictions_decoded, testing_dataframe["Labels"])
per_image_conf_frame = | pandas.DataFrame(per_image_conf) | pandas.DataFrame |
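# A small, self-contained illustration (made-up values) of labelling a confusion-matrix frame
# like the one above; rows follow the first argument passed to confusion_matrix, columns the second.
import pandas
from sklearn.metrics import confusion_matrix

demo_pred = [1, 0, 1, 1]
demo_true = [1, 0, 0, 1]
demo_conf = confusion_matrix(demo_pred, demo_true)
demo_conf_frame = pandas.DataFrame(demo_conf, index=["pred 0", "pred 1"], columns=["true 0", "true 1"])
print(demo_conf_frame)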
#!/usr/bin/env python3
import argparse
import datetime
import concurrent
import concurrent.futures
import itertools
import logging
import os
import warnings
import rows.forecast.visit
import rows.forecast.cluster
import rows.forecast.forecast
import numpy
import pandas
import fbprophet
import fbprophet.plot
import tqdm
import matplotlib.pyplot
import matplotlib.dates
import matplotlib.ticker
def parse_args():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command')
prepare_cluster = subparsers.add_parser(name='prepare')
prepare_cluster.add_argument('data_set_file')
prepare_cluster.add_argument('--output')
cluster_parser = subparsers.add_parser(name='cluster')
cluster_parser.add_argument('data_set_file')
cluster_parser.add_argument('--output')
cluster_parser.add_argument('--table', required=True)
forecast_parser = subparsers.add_parser(name='forecast')
forecast_parser.add_argument('data_set_file')
residuals_parser = subparsers.add_parser(name='compute-residuals')
residuals_parser.add_argument('data_set_file')
investigate_parser = subparsers.add_parser(name='investigate')
investigate_parser.add_argument('data_set_file')
investigate_parser.add_argument('--client', required=True)
investigate_parser.add_argument('--cluster', required=True)
plot_residuals_parser = subparsers.add_parser(name='plot-residuals')
plot_residuals_parser.add_argument('data_set_file')
subparsers.add_parser(name='test')
return parser.parse_args()
def prepare_dataset_command(args):
data_set_file_path = getattr(args, 'data_set_file')
output_path = getattr(args, 'output')
frame = pandas.read_csv(data_set_file_path, header=None, names=['visit_id',
'client_id',
'tasks',
'area',
'carer',
'planned_start_time',
'planned_end_time',
'check_in',
'check_out',
'check_in_processed'],
index_col=False)
def parse_datetime(value):
return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S.%f')
frame['planned_end_time'] = frame['planned_end_time'].apply(parse_datetime)
frame['planned_start_time'] = frame['planned_start_time'].apply(parse_datetime)
frame['check_in'] = frame['check_in'].apply(parse_datetime)
frame['check_out'] = frame['check_out'].apply(parse_datetime)
frame['planned_duration'] = frame['planned_end_time'] - frame['planned_start_time']
frame['real_duration'] = frame['check_out'] - frame['check_in']
frame['check_in_processed'] = frame['check_in_processed'].astype('bool')
frame['tasks'] = frame['tasks'].apply(rows.forecast.visit.Tasks)
frame.to_hdf(output_path, key='a')
class PandasTimeDeltaConverter:
def __init__(self, unit):
self.__unit = unit
def __call__(self, x, pos=None):
try:
timedelta = pandas.Timedelta(value=x, unit=self.__unit)
py_timedelta = timedelta.to_pytimedelta()
total_seconds = py_timedelta.total_seconds()
hours, minutes, seconds = PandasTimeDeltaConverter.split_total_seconds(total_seconds)
return '{0}{1:02d}:{2:02d}:{3:02d}'.format('' if total_seconds > 0.0 else '-', hours, minutes, seconds)
except:
logging.exception('Failure to convert {0} to time delta'.format(x))
return x
@staticmethod
def split_total_seconds(value):
abs_value = abs(value)
hours = int(abs_value // matplotlib.dates.SEC_PER_HOUR)
minutes = int((abs_value - hours * matplotlib.dates.SEC_PER_HOUR) // matplotlib.dates.SEC_PER_MIN)
seconds = int(abs_value - 3600 * hours - 60 * minutes)
return hours, minutes, seconds
def save_figure(figure, file_name):
matplotlib.pyplot.savefig(file_name + '.png', transparent=True)
matplotlib.pyplot.close(figure)
def visualize_cluster(data_frame, client_id):
selected_visits = data_frame[data_frame['client_id'] == client_id].copy()
selected_visits['planned_start_date'] \
= selected_visits.apply(lambda row: row['planned_start_time'].date(), axis=1)
selected_visits['real_start_time'] \
= selected_visits.apply(lambda row: row['planned_start_time'].time(), axis=1)
color_map = matplotlib.pyplot.get_cmap('tab20')
clusters = selected_visits['cluster'].unique()
fig, ax = matplotlib.pyplot.subplots(figsize=(5, 8))
for cluster in clusters:
cluster_frame = selected_visits[selected_visits['cluster'] == cluster]
ax.plot_date(cluster_frame['planned_start_date'],
cluster_frame['real_start_time'],
c=color_map.colors[cluster], fmt='s', markersize=3)
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(PandasTimeDeltaConverter('ns')))
ax.set_yticks(numpy.arange(0, 24 * matplotlib.dates.SEC_PER_HOUR + 1, 2 * matplotlib.dates.SEC_PER_HOUR))
ax.set_ylim(bottom=0, top=24 * matplotlib.dates.SEC_PER_HOUR)
matplotlib.pyplot.xticks(rotation=70)
ax.set_xlabel('Date')
ax.set_ylabel('Check In')
matplotlib.pyplot.tight_layout()
return fig, ax
def cluster_command(args):
data_set_file = getattr(args, 'data_set_file')
output_file = getattr(args, 'output')
table = getattr(args, 'table', None)
frame = pandas.read_hdf(data_set_file)
if not output_file:
file_name = os.path.basename(data_set_file)
data_set_file_no_ext, __ = os.path.splitext(file_name)
output_file_name = 'clusters_' + data_set_file_no_ext + '.hdf'
output_file = os.path.join(os.path.dirname(data_set_file), output_file_name)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', '', tqdm.TqdmSynchronisationWarning)
with tqdm.tqdm(frame.itertuples(), desc='Loading the data set', leave=False) as reader:
visits = rows.forecast.visit.Visit.load_from_tuples(reader)
visits_to_use = rows.forecast.visit.filter_incorrect_visits(visits)
visits_to_use.sort(key=lambda v: v.client_id)
visit_groups = {client_id: list(visit_group)
for client_id, visit_group in itertools.groupby(visits_to_use, lambda v: v.client_id)}
cluster_frame = frame.copy()
cluster_frame['cluster'] = 0
def cluster(visit_group):
model = rows.forecast.cluster.AgglomerativeModel(rows.forecast.cluster.NoSameDayPlannedStarDurationDistanceMatrix())
return model.cluster(visit_group)
records = []
with warnings.catch_warnings():
warnings.filterwarnings('ignore', '', tqdm.TqdmSynchronisationWarning)
with tqdm.tqdm(desc='Computing clusters', total=len(visit_groups), leave=False) as cluster_progress_bar:
with concurrent.futures.ThreadPoolExecutor() as executor:
futures_list = [executor.submit(cluster, visit_groups[visit_group]) for visit_group in visit_groups]
for f in concurrent.futures.as_completed(futures_list):
try:
customer_clusters = f.result()
if customer_clusters:
for label, clustered_visits in customer_clusters.items():
for visit in clustered_visits:
record = visit.to_list()
record.append(label)
records.append(record)
cluster_progress_bar.update(1)
except:
logging.exception('Exception in processing results')
record_columns = list(rows.forecast.visit.Visit.columns())
record_columns.append('cluster')
data_frame = | pandas.DataFrame.from_records(records, columns=record_columns) | pandas.DataFrame.from_records |
import pandas as pd
import numpy as np
import h5py
import geopandas as gp
import os
import datetime
import dask
import dask.dataframe as dd
from tqdm import tqdm
def latlon_iter(latdf, londf, valdf, date):
out_df = pd.concat([latdf, londf, valdf], axis = 1, keys = ['lat', 'lon', 'sst']).stack().reset_index().drop(columns = ['level_0', 'level_1'])
out_df['date'] = date
return out_df
def latlon_iter_chlorpar(latdf, londf, valdf1, valdf2, date):
out_df = pd.concat([latdf, londf, valdf1, valdf2], axis = 1, keys = ['lat', 'lon', 'chlor', 'par']).stack().reset_index().drop(columns = ['level_0', 'level_1'])
out_df['date'] = date
return out_df
# @dask.delayed
def get_outdf(h5file):
ds = h5py.File(h5file, 'r')
date = datetime.datetime.strptime(ds.attrs['time_coverage_start'].decode("utf-8"), "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%Y-%m-%d")
if 'L2_LAC_OC' in h5file:
#get chlor and par
lat_df = pd.DataFrame(ds['navigation_data']['latitude'])
lon_df = pd.DataFrame(ds['navigation_data']['longitude'])
chlor_df = pd.DataFrame(ds['geophysical_data']['chlor_a'])
par_df = | pd.DataFrame(ds['geophysical_data']['par']) | pandas.DataFrame |
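# A small, assumed usage sketch of latlon_iter above: three equally-shaped grids are flattened
# into one long (lat, lon, sst, date) table. The numbers are made up.
demo_lat = pd.DataFrame([[10.0, 10.5]])
demo_lon = pd.DataFrame([[20.0, 20.5]])
demo_sst = pd.DataFrame([[291.1, 290.8]])
print(latlon_iter(demo_lat, demo_lon, demo_sst, '2020-01-01'))  # columns: lat, lon, sst, date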
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk import tokenize
import nltk
import pandas as pd
import json
from nltk.stem.snowball import SnowballStemmer
import itertools
from scipy.cluster.hierarchy import ward, dendrogram
import matplotlib.pyplot as plt
import random
from wordcloud import WordCloud
import nltk
import numpy as np
class sentanceanalyser:
"""docstring for ClassName"""
def __init__(self):
pass
def sentance_sentiment(self,text):
nltk.download('vader_lexicon')
cl_text=tokenize.sent_tokenize(text)
sid = SentimentIntensityAnalyzer()
counter = 0
sentiment =pd.DataFrame(columns=('Sentance','Positive', 'Negative', 'Neutral', 'Compound'))
sentiment12=[]
for sentence1 in range(len(cl_text)):
sentence=cl_text[sentence1]
article_compound = 0
article_neg = 0
article_pos = 0
article_neu = 0
counter = counter + 1
ss = sid.polarity_scores(sentence)
article_compound = article_compound + ss['compound']
article_compound=round(article_compound,2)
article_neg = article_neg + ss['neg']
article_neg=round(article_neg,2)
article_pos = article_pos + ss['pos']
article_pos=round(article_pos,2)
article_neu = article_neu + ss['neu']
article_neu=round(article_neu,2)
article_sentiment = | pd.DataFrame([[sentence,article_pos, article_neg, article_neu, article_compound]], columns=('Sentance','Positive', 'Negative', 'Neutral', 'Compound')) | pandas.DataFrame |
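# A hedged usage sketch of the class above (the text is illustrative only):
#
#     sa = sentanceanalyser()
#     sa.sentance_sentiment("The demo went well. The follow-up call was a disaster.")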
import streamlit as st
import numpy as np
import pandas as pd
import altair as alt
from cohort import CohortTable
# Common functions
color = '#800000ff'
def bar_chart(df_melted, y_axis, title):
chart = alt.Chart(df_melted).mark_bar(color=color, size=40).encode(
x = alt.X('Year:Q', axis=alt.Axis(tickCount=forecast_period), sort=list(df_melted.index)),
y = alt.Y(y_axis),
tooltip = [alt.Tooltip(y_axis, format=',.0f')]
).properties(title=title, width=alt.Step(60), height=400).interactive()
return chart
def line_chart(df_melted, y_axis, title):
nearest = alt.selection(type='single', nearest=True, on='mouseover', fields=['Year'], empty='none')
selectors = alt.Chart(df_melted).mark_point().encode(
x='Year:Q',
opacity=alt.value(0),
).add_selection(nearest)
line = alt.Chart(df_melted).mark_line(color=color).encode(
x = alt.X('Year:Q', axis=alt.Axis(tickCount=forecast_period), sort=list(df_melted.index)),
y = alt.Y(y_axis),
tooltip = [alt.Tooltip(y_axis, format=',.0%')]
)
points = line.mark_point(color=color, size=40).encode(
opacity=alt.condition(nearest, alt.value(1), alt.value(0))
)
chart = alt.layer(line, selectors, points).properties(title=title, width=alt.Step(60), height=400).interactive()
return chart
# Site
st.title('Cohort Tables with Productivity Ramp Up')
st.sidebar.title('Variables')
forecast_period = st.sidebar.slider('Forecast Period', 3, 15, 5)
n_years = st.sidebar.slider('Productivity Ramp Up Period', 1, 10, 3)
ramp_type = st.sidebar.selectbox('Ramp Up Type', ['Linear', 'Sigmoid']).lower()
if ramp_type == 'sigmoid':
beta = st.sidebar.slider('Beta for S Curve', .1, 1.0, value=.3, step=.1)
shift = st.sidebar.slider('Shift for S Curve', -10, 10, value=3, step=1)
x = np.linspace(-10,10,50)
source = pd.DataFrame({
'Time Passed to Full Productivity' : ((x+10)*5)/100,
'% of Productivity' : 1 / (1 + np.exp(beta*(-x-shift)))
})
s_chart = alt.Chart(source).mark_line().encode(
alt.X('Time Passed to Full Productivity', axis=alt.Axis(format='.0%')),
alt.Y('% of Productivity', axis=alt.Axis(format='.0%')),
tooltip=[alt.Tooltip('Time Passed to Full Productivity', format='.0%'), alt.Tooltip('% of Productivity', format='.0%')]
)
st.sidebar.altair_chart(s_chart, use_container_width=True)
else:
beta=.3
shift=3
hires_per_year_string = st.sidebar.text_input('Number of hires per year, separated by commas', value='10, 12, 15, 18, 20')
revenue_goal = st.sidebar.number_input('Revenue Goal per Individual', min_value=0, format='%i')
annual_attrition=st.sidebar.number_input('Annual Attrition Rate', min_value=0.00, max_value=1.00, value=.10, step=.01, format='%f')
first_year_full_hire = st.sidebar.checkbox('First Year Full Hire?', value=True)
attrition_y0 = st.sidebar.checkbox('Attrition in First Year?', value=False)
try:
hires_per_year = hires_per_year_string.split(",")
hires_per_year = [float(i) for i in hires_per_year]
except ValueError:
st.error('The hires per year variable has been entered incorrectly. Please enter a series of numbers separated by commas')
st.stop()
# Model
T = CohortTable(forecast_period=forecast_period, n_years=n_years, ramp_type=ramp_type, beta=beta, shift=shift, hires_per_year=hires_per_year, \
revenue_goal=revenue_goal, annual_attrition=annual_attrition, \
first_year_full_hire=first_year_full_hire, attrition_y0=attrition_y0)
# Sitewide variables
columns_years = [f'Year {i+1}' for i in range(forecast_period)]
columns_Q = [i+1 for i in range(forecast_period)]
rows_cohorts = [f'Cohort {i+1}' for i in range(forecast_period)]
# Main Page
st.write('Click to expand each of the sections below')
with st.beta_expander("Variables and Assumptions"):
st.write('##### General')
st.write('Forecast Period = ', '{:.0f} years'.format(forecast_period))
st.write('Productivity Ramp = ', '{:.0f} years'.format(n_years))
st.write('Revenue Goal per Employee = ', '${:,.0f}'.format(revenue_goal))
st.write('Annual Attrition Rate = ', '{:.0%}'.format(annual_attrition))
if attrition_y0:
st.write('This model assumes that there **is** attrition in the first year.')
else:
st.write('This model **does not** assume attrition in the first year')
if first_year_full_hire:
st.write('This model assumes that employees in the first year are hired at the **beginning** of the year.')
else:
st.write('This model assumes that employees in the first year are hired **throughout** the year, not at the beginning.')
st.write('##### Hires per Year')
st.write( | pd.DataFrame([hires_per_year], columns=columns_years, index=['No. Hires']) | pandas.DataFrame |
import argparse
import os
import seaborn as sns
import pandas as pd
import glob
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import numpy as np
def plot_primary(args):
"""
Plots the jointplot of model performance (predicted v. true) on held out
test or validation sets from the primary Stokes Cell 2020 dataset.
"""
for method in args.methods:
files = glob.glob(os.path.join(args.preds_path,f"{method}/*.csv"))
for fname in files:
df = pd.read_csv(fname)
smiles = df['smiles']
preds = df['mean_inhibition']
targets = df['true_mean_inhibition']
# rmse = np.sqrt(np.mean((df['true_target'] - df['mean_inhibition'])**2))
g = sns.jointplot(x='mean_inhibition', y='true_mean_inhibition', data=df, kind='reg')
g.ax_joint.set_xlim(0,1.4)
g.ax_joint.set_ylim(0,1.4)
g.ax_joint.plot([0, 1.4], [0, 1.4], 'k--')
save_name = fname.split('.csv')[0]
save_path = save_name + str(args.use_stds) + '_performance' + args.ext
plt.savefig(save_path)
plt.show()
plt.close()
def plot_broad(args):
"""
Plots the jointplot of model performance (predicted v. true);
jointplot of predictions v. uncertainty; as well as uncertainty cutoff v.
experimentally validated hit rate, on Broad repurposing hub data.
"""
for method in args.methods:
files = glob.glob(os.path.join(args.preds_path,f"{method}/broad/*.csv"))
df_percentiles = | pd.DataFrame() | pandas.DataFrame |
import os
import sys
import glob
import numpy as np
import pandas as pd
import shutil
import itertools
import random
import multiprocessing
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
scaler = StandardScaler()
fil = sys.argv[1]
num_splits = int(sys.argv[2])
num_repeats = int(sys.argv[3])
features = pd.read_csv(fil)
features = features.drop([features.columns[0]], axis=1)
labels = features[features.columns[-1]]
features = features.drop([features.columns[-1]], axis=1)
features = scaler.fit_transform(features)
model = KNeighborsClassifier()
params = {
'n_neighbors': [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],
'leaf_size': [2,3,4,5,6,7,8,9,10,20,30,40,50],
'p': [1,2,3,4,5],
'weights':['uniform', 'distance'],
'algorithm':['auto']}
cv = RepeatedStratifiedKFold(n_splits=num_splits, n_repeats=num_repeats, random_state=1)
grid_search = GridSearchCV(estimator=model, param_grid=params, n_jobs=-1, cv=cv, scoring='accuracy', error_score=0, return_train_score=True)
grid_result = grid_search.fit(features, labels)
df = | pd.DataFrame(grid_search.cv_results_) | pandas.DataFrame |
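# A hypothetical follow-on once the cv_results_ frame above exists; the column names are the
# standard scikit-learn ones and the output path is an assumption.
#
#     print(grid_result.best_params_, grid_result.best_score_)
#     df.sort_values('rank_test_score')[
#         ['params', 'mean_test_score', 'std_test_score', 'mean_train_score']
#     ].to_csv('knn_grid_results.csv', index=False)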
import pandas as pd
import datetime as dt
from typing import Dict
from typing import List
from src.typeDefs.pxiDamRecord import IPxiDamDataRecord
def getPxiDamData(targetFilePath: str) -> List[IPxiDamDataRecord]:
dataSheetDf = | pd.read_csv(targetFilePath) | pandas.read_csv |
__author__ = 'saeedamen' # <NAME>
#
# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
Shows how to use finmarketpy to create total return indices for FX forwards with appropriate roll rules
"""
import pandas as pd
# For plotting
from chartpy import Chart, Style
# For loading market data
from findatapy.market import Market, MarketDataGenerator, MarketDataRequest
from findatapy.timeseries import Calculations
from findatapy.util.loggermanager import LoggerManager
logger = LoggerManager().getLogger(__name__)
chart = Chart(engine='plotly')
market = Market(market_data_generator=MarketDataGenerator())
calculations = Calculations()
# Choose run_example = 0 for everything
# run_example = 1 - creating USDBRL total return index rolling forwards and compare with BBG indices
# run_example = 2 - creating AUDJPY (via AUDUSD and JPYUSD) total return index rolling forwards & compare with BBG indices
run_example = 0
from finmarketpy.curve.fxforwardscurve import FXForwardsCurve
###### Create total return indices plot for USDBRL using forwards
# We shall be using USDBRL 1M forward contracts and rolling them 5 business days before month end
if run_example == 1 or run_example == 0:
cross = 'USDBRL'
# Download more tenors
fx_forwards_tenors = ['1W', '1M', '2M', '3M']
# Get AUDUSD data for spot, forwards + depos
md_request = MarketDataRequest(start_date='02 Jan 2007', finish_date='01 Jun 2007',
data_source='bloomberg', cut='NYC', category='fx-forwards-market',
tickers=cross,
fx_forwards_tenor=fx_forwards_tenors,
base_depos_currencies=[cross[0:3], cross[3:6]],
cache_algo='cache_algo_return')
# In case any missing values fill down (particularly can get this for NDFs)
df_market = market.fetch_market(md_request=md_request).fillna(method='ffill')
fx_forwards_curve = FXForwardsCurve()
# Let's trade a 1M forward, and we roll 5 business days (based on both base + terms currency holidays)
# before month end
df_cuemacro_tot_1M = fx_forwards_curve.construct_total_return_index(cross, df_market,
fx_forwards_trading_tenor='1M',
roll_days_before=5,
roll_event='month-end',
roll_months=1,
fx_forwards_tenor_for_interpolation=fx_forwards_tenors,
output_calculation_fields=True)
df_cuemacro_tot_1M.columns = [x.replace('forward-tot', 'forward-tot-1M-cuemacro') for x in df_cuemacro_tot_1M.columns]
# Now do a 3M forward, and we roll 5 business days before end of quarter(based on both base + terms currency holidays)
# before month end
df_cuemacro_tot_3M = fx_forwards_curve.construct_total_return_index(cross, df_market,
fx_forwards_trading_tenor='3M',
roll_days_before=5,
roll_event='month-end',
roll_months=3,
fx_forwards_tenor_for_interpolation=fx_forwards_tenors,
output_calculation_fields=True)
df_cuemacro_tot_3M.columns = [x.replace('forward-tot', 'forward-tot-3M-cuemacro') for x in df_cuemacro_tot_3M.columns]
# Get spot data
md_request.abstract_curve = None
md_request.category = 'fx'
df_spot = market.fetch_market(md_request=md_request)
df_spot.columns = [x + '-spot' for x in df_spot.columns]
# Get Bloomberg calculated total return indices (for spot)
md_request.category = 'fx-tot'
df_bbg_tot = market.fetch_market(md_request)
df_bbg_tot.columns = [x + '-bbg' for x in df_bbg_tot.columns]
# Get Bloomberg calculated total return indices (for 1M forwards rolled)
md_request.category = 'fx-tot-forwards'
df_bbg_tot_forwards = market.fetch_market(md_request)
df_bbg_tot_forwards.columns = [x + '-bbg' for x in df_bbg_tot_forwards.columns]
# Combine into a single data frame and plot, we note that the Cuemacro constructed indices track the Bloomberg
# indices relatively well (both from spot and forwards). Also note the large difference with spot indices
# CAREFUL to fill down, before reindexing because forwards indices are likely to have different publishing dates
df = calculations.pandas_outer_join([pd.DataFrame(df_cuemacro_tot_1M[cross + '-forward-tot-1M-cuemacro.close']),
pd.DataFrame(df_cuemacro_tot_3M[cross + '-forward-tot-3M-cuemacro.close']),
df_bbg_tot, df_spot, df_bbg_tot_forwards]).fillna(method='ffill')
df = calculations.create_mult_index_from_prices(df)
chart.plot(df)
###### Create total return indices plot for AUDJPY using the underlying USD legs (ie. AUDUSD & JPYUSD)
if run_example == 2 or run_example == 0:
cross = 'AUDJPY'
# Download more tenors
fx_forwards_tenors = ['1W', '1M', '2M', '3M']
# Parameters for how to construct total return indices, and the rolling rule
# 1M forward contract, and roll it 5 working days before month end
# We'll be constructing our total return index from AUDUSD and JPYUSD
fx_forwards_curve = FXForwardsCurve(fx_forwards_trading_tenor='1M',
roll_days_before=5,
roll_event='month-end', construct_via_currency='USD',
fx_forwards_tenor_for_interpolation=fx_forwards_tenors,
roll_months=1,
output_calculation_fields=True)
# Get AUDUSD data for spot, forwards + depos and also construct the total returns forward index
md_request = MarketDataRequest(start_date='02 Jan 2007', finish_date='01 Jun 2007',
data_source='bloomberg', cut='NYC', category='fx',
tickers=cross,
fx_forwards_tenor=fx_forwards_tenors,
base_depos_currencies=[cross[0:3], cross[3:6]],
cache_algo='cache_algo_return',
abstract_curve=fx_forwards_curve)
# In case any missing values fill down (particularly can get this for NDFs)
df_cuemacro_tot_1M = market.fetch_market(md_request=md_request).fillna(method='ffill')
fx_forwards_curve = FXForwardsCurve()
df_cuemacro_tot_1M.columns = [x.replace('forward-tot', 'forward-tot-1M-cuemacro') for x in df_cuemacro_tot_1M.columns]
# Get spot data
md_request.abstract_curve = None
md_request.category = 'fx'
df_spot = market.fetch_market(md_request=md_request)
df_spot.columns = [x + '-spot' for x in df_spot.columns]
# Get Bloomberg calculated total return indices (for spot)
md_request.category = 'fx-tot'
df_bbg_tot = market.fetch_market(md_request)
df_bbg_tot.columns = [x + '-bbg' for x in df_bbg_tot.columns]
# Get Bloomberg calculated total return indices (for 1M forwards rolled)
md_request.category = 'fx-tot-forwards'
df_bbg_tot_forwards = market.fetch_market(md_request)
df_bbg_tot_forwards.columns = [x + '-bbg' for x in df_bbg_tot_forwards.columns]
# Combine into a single data frame and plot, we note that the Cuemacro constructed indices track the Bloomberg
# indices relatively well (both from spot and forwards). Also note the large difference with spot indices
# CAREFUL to fill down, before reindexing because forwards indices are likely to have different publishing dates
df = calculations.pandas_outer_join([ | pd.DataFrame(df_cuemacro_tot_1M[cross + '-forward-tot-1M-cuemacro.close']) | pandas.DataFrame |
import re
from copy import deepcopy
from contextlib import suppress
from collections.abc import Iterable
import numpy as np
import pandas as pd
__all__ = ['aes']
all_aesthetics = {
'alpha', 'angle', 'color', 'colour', 'fill', 'group', 'intercept',
'label', 'lineheight', 'linetype', 'lower', 'middle', 'radius',
'sample', 'shape', 'size', 'slope', 'stroke', 'upper', 'weight', 'x',
'xend', 'xintercept', 'xmax', 'xmin', 'y', 'yend', 'yintercept',
'ymax', 'ymin'}
scaled_aesthetics = {
'x', 'y', 'alpha', 'color', 'colour', 'fill',
'linetype', 'shape', 'size', 'stroke'
}
NO_GROUP = -1
# Calculated aesthetics searchers
STAT_RE = re.compile(r'\bstat\(')
DOTS_RE = re.compile(r'\.\.([a-zA-Z0-9_]+)\.\.')
class aes(dict):
"""
Create aesthetic mappings
Parameters
----------
x : expression | array_like | scalar
x aesthetic mapping
y : expression | array_like | scalar
y aesthetic mapping
**kwargs : dict
Other aesthetic mappings
Notes
-----
Only the **x** and **y** aesthetic mappings can be specified as
positional arguments. All the rest must be keyword arguments.
The value of each mapping must be one of:
- **string**::
import pandas as pd
import numpy as np
arr = [11, 12, 13]
df = pd.DataFrame({'alpha': [1, 2, 3],
'beta': [1, 2, 3],
'gam ma': [1, 2, 3]})
# Refer to a column in a dataframe
ggplot(df, aes(x='alpha', y='beta'))
- **array_like**::
# A variable
ggplot(df, aes(x='alpha', y=arr))
# or an inplace list
ggplot(df, aes(x='alpha', y=[4, 5, 6]))
- **scalar**::
# A scalar value/variable
ggplot(df, aes(x='alpha', y=4))
# The above statement is equivalent to
ggplot(df, aes(x='alpha', y=[4, 4, 4]))
- **String expression**::
ggplot(df, aes(x='alpha', y='2*beta'))
ggplot(df, aes(x='alpha', y='np.sin(beta)'))
ggplot(df, aes(x='df.index', y='beta'))
# If `count` is an aesthetic calculated by a stat
ggplot(df, aes(x='alpha', y='stat(count)'))
ggplot(df, aes(x='alpha', y='stat(count/np.max(count))'))
The strings in the expression can refer to;
1. columns in the dataframe
2. variables in the namespace
3. aesthetic values (columns) calculated by the ``stat``
with the column names having precedence over the variables.
For expressions, columns in the dataframe that are mapped to
must have names that would be valid python variable names.
This is okay::
# 'gam ma' is a column in the dataframe
ggplot(df, aes(x='df.index', y='gam ma'))
While this is not::
# 'gam ma' is a column in the dataframe, but not
# valid python variable name
ggplot(df, aes(x='df.index', y='np.sin(gam ma)'))
``aes`` has 2 internal methods you can use to transform variables being
mapped.
1. ``factor`` - This function turns the variable into a factor.
It is just an alias to ``pd.Categorical``::
ggplot(mtcars, aes(x='factor(cyl)')) + geom_bar()
2. ``reorder`` - This function changes the order of first variable
based on values of the second variable::
df = pd.DataFrame({
'x': ['b', 'd', 'c', 'a'],
'y': [1, 2, 3, 4]
})
ggplot(df, aes('reorder(x, y)', 'y')) + geom_col()
.. rubric:: The group aesthetic
``group`` is a special aesthetic that the user can *map* to.
It is used to group the plotted items. If not specified, it
is automatically computed and in most cases the computed
groups are sufficient. However, there may be cases were it is
handy to map to it.
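    A hypothetical illustration (the column names are made up)::

        # one line per subject, coloured by treatment
        ggplot(df, aes('week', 'score', color='treatment', group='subject')) + geom_line()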
"""
def __init__(self, *args, **kwargs):
kwargs = rename_aesthetics(kwargs)
kwargs.update(zip(('x', 'y'), args))
self.update(kwargs)
def __deepcopy__(self, memo):
"""
Deep copy without copying the environment
"""
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
# Just copy the keys and point to the env
for key, item in self.items():
result[key] = deepcopy(self[key], memo)
return result
def __radd__(self, gg, inplace=False):
gg = gg if inplace else deepcopy(gg)
self = deepcopy(self)
gg.mapping.update(self)
gg.labels.update(make_labels(self))
return gg
def rename_aesthetics(obj):
"""
Rename aesthetics in obj
Parameters
----------
obj : dict or list
Object that contains aesthetics names
Returns
-------
obj : dict or list
Object that contains aesthetics names
"""
if isinstance(obj, dict):
for name in tuple(obj.keys()):
new_name = name.replace('colour', 'color')
if name != new_name:
obj[new_name] = obj.pop(name)
else:
obj = [name.replace('colour', 'color') for name in obj]
return obj
def get_calculated_aes(aesthetics):
"""
Return a list of the aesthetics that are calculated
"""
calculated_aesthetics = []
for name, value in aesthetics.items():
if is_calculated_aes(value):
calculated_aesthetics.append(name)
return calculated_aesthetics
def is_calculated_aes(ae):
"""
Return a True if of the aesthetics that are calculated
Parameters
----------
ae : object
Aesthetic mapping
>>> is_calculated_aes('density')
False
>>> is_calculated_aes(4)
False
>>> is_calculated_aes('..density..')
True
>>> is_calculated_aes('stat(density)')
True
>>> is_calculated_aes('stat(100*density)')
True
>>> is_calculated_aes('100*stat(density)')
True
"""
if not isinstance(ae, str):
return False
for pattern in (STAT_RE, DOTS_RE):
if pattern.search(ae):
return True
return False
def stat(x):
"""
Return calculated aesthetic
Aesthetics wrapped around the stat function are evaluated
*after* the statistics have been calculated. This gives
the user a chance to use any aesthetic columns created
by the statistic.
Parameters
----------
x : object
An expression
"""
return x
def strip_stat(value):
"""
Remove stat function that mark calculated aesthetics
Parameters
----------
value : object
Aesthetic value. In most cases this will be a string
but other types will pass through unmodified.
Return
------
out : object
Aesthetic value with the dots removed.
>>> strip_stat('stat(density + stat(count))')
density + count
>>> strip_stat('stat(density) + 5')
density + 5
>>> strip_stat('5 + stat(func(density))')
5 + func(density)
>>> strip_stat('stat(func(density) + var1)')
func(density) + var1
>>> strip_stat('stat + var1')
stat + var1
>>> strip_stat(4)
4
"""
def strip_hanging_closing_parens(s):
"""
Remove leftover parens
"""
# Use and integer stack to track parens
# and ignore leftover closing parens
stack = 0
idx = []
for i, c in enumerate(s):
if c == '(':
stack += 1
elif c == ')':
stack -= 1
if stack < 0:
idx.append(i)
stack = 0
continue
yield c
with suppress(TypeError):
if STAT_RE.search(value):
value = re.sub(r'\bstat\(', '', value)
value = ''.join(strip_hanging_closing_parens(value))
return value
def strip_dots(value):
"""
Remove dots(if any) that mark calculated aesthetics
Parameters
----------
value : object
Aesthetic value. In most cases this will be a string
but other types will pass through unmodified.
Return
------
out : object
Aesthetic value with the dots removed.
"""
with suppress(TypeError):
value = DOTS_RE.sub(r'\1', value)
return value
def strip_calculated_markers(value):
"""
Remove markers for calculated aesthetics
Parameters
----------
value : object
Aesthetic value. In most cases this will be a string
but other types will pass through unmodified.
Return
------
out : object
Aesthetic value with the dots removed.
"""
return strip_stat(strip_dots(value))
def aes_to_scale(var):
"""
Look up the scale that should be used for a given aesthetic
"""
if var in {'x', 'xmin', 'xmax', 'xend', 'xintercept'}:
var = 'x'
elif var in {'y', 'ymin', 'ymax', 'yend', 'yintercept'}:
var = 'y'
return var
def is_position_aes(vars_):
"""
Figure out if an aesthetic is a position aesthetic or not
"""
try:
return all([aes_to_scale(v) in {'x', 'y'} for v in vars_])
except TypeError:
return aes_to_scale(vars_) in {'x', 'y'}
def make_labels(mapping):
"""
Convert aesthetic mapping into text labels
"""
def _make_label(ae, label):
if isinstance(label, pd.Series):
return label.name
# if label is a scalar
elif not isinstance(label, Iterable) or isinstance(label, str):
return strip_calculated_markers(str(label))
else:
return None
return {
ae: _make_label(ae, label)
for ae, label in mapping.items()
}
def is_valid_aesthetic(value, ae):
"""
Return True if `value` looks valid.
Parameters
----------
value : object
Value to check
ae : str
Aesthetic name
Returns
-------
out : bool
Whether the value is of a valid looking form.
Notes
-----
There are no guarantees that the value is spot on
valid.
"""
if ae == 'linetype':
named = {'solid', 'dashed', 'dashdot', 'dotted',
'_', '--', '-.', ':', 'None', ' ', ''}
if value in named:
return True
# tuple of the form (offset, (on, off, on, off, ...))
# e.g (0, (1, 2))
conditions = [isinstance(value, tuple),
isinstance(value[0], int),
isinstance(value[1], tuple),
len(value[1]) % 2 == 0,
all(isinstance(x, int) for x in value[1])]
if all(conditions):
return True
return False
elif ae == 'shape':
if isinstance(value, str):
return True
# tuple of the form (numsides, style, angle)
# where style is in the range [0, 3]
# e.g (4, 1, 45)
conditions = [isinstance(value, tuple),
all(isinstance(x, int) for x in value),
0 <= value[1] < 3]
if all(conditions):
return True
return False
elif ae in {'color', 'fill'}:
if isinstance(value, str):
return True
with suppress(TypeError):
if (isinstance(value, (tuple, list)) and
all(0 <= x <= 1 for x in value)):
return True
return False
# For any other aesthetics we return False to allow
# for special cases to be discovered and then coded
# for appropriately.
return False
def has_groups(data):
"""
Check if data is grouped
Parameters
----------
data : dataframe
Data
Returns
-------
out : bool
If True, the data has groups.
"""
# If any row in the group column is equal to NO_GROUP, then
# all of them are, and the data has no groups
return data.loc[0, 'group'] != NO_GROUP
def reorder(x, y, fun=np.median, ascending=True):
"""
Reorder categorical by sorting along another variable
It is the order of the categories that changes. Values in ``y``
are grouped by the categories in ``x`` and summarised to
determine the new order.
Credit: Copied from plydata
Parameters
----------
x : list-like
Values that will make up the categorical.
y : list-like
Values by which ``x`` will be ordered.
fun : callable
Summarising function applied to ``y`` for each category in ``x``.
Default is the *median*.
ascending : bool
If ``True``, ``x`` is ordered in ascending order of the summarised ``y``.
Examples
--------
>>> x = list('abbccc')
>>> y = [11, 2, 2, 3, 33, 3]
>>> reorder(x, y)
[a, b, b, c, c, c]
Categories (3, object): [b, c, a]
>>> reorder(x, y, fun=max)
[a, b, b, c, c, c]
Categories (3, object): [b, a, c]
>>> reorder(x, y, fun=max, ascending=False)
[a, b, b, c, c, c]
Categories (3, object): [c, a, b]
>>> x_ordered = pd.Categorical(x, ordered=True)
>>> reorder(x_ordered, y)
[a, b, b, c, c, c]
Categories (3, object): [b < c < a]
>>> reorder(x + ['d'], y)
Traceback (most recent call last):
...
ValueError: Lengths are not equal. len(x) is 7 and len(y) is 6.
"""
if len(x) != len(y):
raise ValueError(
"Lengths are not equal. len(x) is {} and len(x) is {}.".format(
len(x), len(y)
)
)
summary = ( | pd.Series(y) | pandas.Series |
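# For context, a reorder like the one above is usually completed by summarising y per category
# of x and rebuilding the categorical. Sketch in the spirit of plydata's cat_reorder, not
# necessarily the original continuation:
#
#     summary = (pd.Series(y)
#                .groupby(x)
#                .apply(fun)
#                .sort_values(ascending=ascending))
#     return pd.Categorical(x, categories=summary.index.to_list())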
import datetime
import matplotlib
import numpy as np
import pandas as pd
import pytz
from finrl.config import config
from finrl.marketdata.utils import fetch_and_store, load
from finrl.preprocessing.preprocessors import FeatureEngineer
from finrl.preprocessing.data import calculate_split, data_split
from finrl.env.env_stocktrading import StockTradingEnv
from finrl.model.models import DRLAgent
from finrl.trade.backtest import backtest_stats, backtest_plot
matplotlib.use("Agg")
def train_one(fetch=False):
"""
train an agent
"""
if fetch:
df = fetch_and_store()
else:
df = load()
counts = df[['date', 'tic']].groupby(['date']).count().tic
assert counts.min() == counts.max()
print("==============Start Feature Engineering===========")
fe = FeatureEngineer(
use_technical_indicator=True,
tech_indicator_list=config.TECHNICAL_INDICATORS_LIST,
use_turbulence=True,
# use_turbulence=False,
user_defined_feature=False,
)
processed = fe.preprocess_data(df)
# Training & Trading data split
start_date, trade_date, end_date = calculate_split(df, start=config.START_DATE)
print(start_date, trade_date, end_date)
train = data_split(processed, start_date, trade_date)
trade = data_split(processed, trade_date, end_date)
print(f'\n******\nRunning from {start_date} to {end_date} for:\n{", ".join(config.CRYPTO_TICKER)}\n******\n')
# calculate state action space
stock_dimension = len(train.tic.unique())
state_space = (1 + (2 * stock_dimension) + (len(config.TECHNICAL_INDICATORS_LIST) * stock_dimension))
env_kwargs = {
"hmax": 100,
"initial_amount": 100000,
"buy_cost_pct": 0.0026,
"sell_cost_pct": 0.0026,
"state_space": state_space,
"stock_dim": stock_dimension,
"tech_indicator_list": config.TECHNICAL_INDICATORS_LIST,
"action_space": stock_dimension,
"reward_scaling": 1e-4
}
e_train_gym = StockTradingEnv(df=train, **env_kwargs)
e_trade_gym = StockTradingEnv(df=trade, turbulence_threshold=250, make_plots=True, **env_kwargs)
env_train, _ = e_train_gym.get_sb_env()
env_trade, obs_trade = e_trade_gym.get_sb_env()
agent = DRLAgent(env=env_train)
print("==============Model Training===========")
now = datetime.datetime.now().strftime(config.DATETIME_FMT)
model_sac = agent.get_model("sac")
trained_sac = agent.train_model(
model=model_sac,
tb_log_name="sac",
# total_timesteps=100
total_timesteps=80000
)
print("==============Start Trading===========")
df_account_value, df_actions = DRLAgent.DRL_prediction(
# model=trained_sac, test_data=trade, test_env=env_trade, test_obs=obs_trade
trained_sac,
e_trade_gym)
df_account_value.to_csv(f"./{config.RESULTS_DIR}/df_account_value_{now}.csv")
df_actions.to_csv(f"./{config.RESULTS_DIR}/df_actions_{now}.csv")
df_txns = pd.DataFrame(e_trade_gym.transactions, columns=['date', 'amount', 'price', 'symbol'])
df_txns = df_txns.set_index(pd.DatetimeIndex(df_txns['date'], tz=pytz.utc))
df_txns.to_csv(f'./{config.RESULTS_DIR}/df_txns_{now}.csv')
df_positions = | pd.DataFrame(e_trade_gym.positions, columns=['date', 'cash'] + config.CRYPTO_TICKER) | pandas.DataFrame |
import os
import pandas as pd
import csv
from sklearn.model_selection import train_test_split
import numpy as np
import random
import tensorflow as tf
import torch
#directory of tasks dataset
os.chdir("original_data")
#destination path to create tsv files, depends on data cutting
path_0 = "mttransformer/data/0"
path_100_no_gan = "mttransformer/data/100/no_gan"
path_200_no_gan = "mttransformer/data/200/no_gan"
path_500_no_gan = "mttransformer/data/500/no_gan"
path_100_gan = "mttransformer/data/100/gan"
path_200_gan = "mttransformer/data/200/gan"
path_500_gan = "mttransformer/data/500/gan"
#if you use a model with gan the flag "apply_gan" is True, else False
apply_gan=False
#data cutting
number_labeled_examples=0 #0-100-200-500
#if you want to activate balancing, which is used only in the multi-task models, MT-DNN and MT-GANBERT
balancing=False
#path train and test dataset of the task
tsv_haspeede_train = 'haspeede_TW-train.tsv'
tsv_haspeede_test = 'haspeede_TW-reference.tsv'
tsv_AMI2018_train = 'AMI2018_it_training.tsv'
tsv_AMI2018_test = 'AMI2018_it_testing.tsv'
tsv_AMI2018_train = 'AMI2018_it_training.tsv'
tsv_AMI2018_test = 'AMI2018_it_testing.tsv'
tsv_DANKMEMES2020_train = 'dankmemes_task2_train.csv'
tsv_DANKMEMES2020_test = 'hate_test.csv'
tsv_SENTIPOLC2016_train = 'training_set_sentipolc16.csv'
tsv_SENTIPOLC2016_test = 'test_set_sentipolc16_gold2000.csv'
tsv_SENTIPOLC2016_train = 'training_set_sentipolc16.csv'
tsv_SENTIPOLC2016_test = 'test_set_sentipolc16_gold2000.csv'
#Upload the dataset of all task as dataframes
#haspeede_TW
df_train = pd.read_csv(tsv_haspeede_train, delimiter='\t', names=('id','sentence','label'))
df_train = df_train[['id']+['label']+['sentence']]
df_test = pd.read_csv(tsv_haspeede_test, delimiter='\t', names=('id','sentence','label'))
df_test = df_test[['id']+['label']+['sentence']]
#AMI2018A
df_train2 = pd.read_csv(tsv_AMI2018_train, delimiter='\t')
df_train2 = df_train2[['id']+['misogynous']+['text']]
df_test2 = pd.read_csv(tsv_AMI2018_test, delimiter='\t')
df_test2 = df_test2[['id']+['misogynous']+['text']]
#AMI2018B
df_train3 = pd.read_csv(tsv_AMI2018_train, delimiter='\t')
df = pd.DataFrame(columns=['id', 'misogyny_category', 'text'])
for ind in df_train3.index:
if df_train3.misogynous[ind]==1:
if df_train3.misogyny_category[ind] == 'stereotype':
df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 0, 'text' : df_train3['text'][ind] }, ignore_index=True)
#elif df_train3.misogyny_category[ind] == 'dominance':
#df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 1, 'text' : df_train3['text'][ind] }, ignore_index=True)
#elif df_train3.misogyny_category[ind] == 'derailing':
#df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 2, 'text' : df_train3['text'][ind] }, ignore_index=True)
elif df_train3.misogyny_category[ind] == 'sexual_harassment':
df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 1, 'text' : df_train3['text'][ind] }, ignore_index=True)
elif df_train3.misogyny_category[ind] == 'discredit':
df = df.append({'id' : df_train3['id'][ind], 'misogyny_category' : 2, 'text' : df_train3['text'][ind] }, ignore_index=True)
df_train3 = df
df_test3 = pd.read_csv(tsv_AMI2018_test, delimiter='\t')
df = pd.DataFrame(columns=['id', 'misogyny_category', 'text'])
for ind in df_test3.index:
if df_test3.misogynous[ind]==1:
if df_test3.misogyny_category[ind] == 'stereotype':
df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 0, 'text' : df_test3['text'][ind] }, ignore_index=True)
#elif df_test3.misogyny_category[ind] == 'dominance':
#df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 1, 'text' : df_test3['text'][ind] }, ignore_index=True)
#elif df_test3.misogyny_category[ind] == 'derailing':
#df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 2, 'text' : df_test3['text'][ind] }, ignore_index=True)
elif df_test3.misogyny_category[ind] == 'sexual_harassment':
df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 1, 'text' : df_test3['text'][ind] }, ignore_index=True)
elif df_test3.misogyny_category[ind] == 'discredit':
df = df.append({'id' : df_test3['id'][ind], 'misogyny_category' : 2, 'text' : df_test3['text'][ind] }, ignore_index=True)
df_test3 = df
#DANKMEMES2020
df_train4 = pd.read_csv(tsv_DANKMEMES2020_train, delimiter=',')
df_train4 = df_train4[['File']+['Hate Speech']+['Text']]
df_test4 = pd.read_csv(tsv_DANKMEMES2020_test, delimiter=',')
df_test4 = df_test4[['File']+['Hate Speech']+['Text']]
#SENTIPOLC20161
df_train5 = pd.read_csv(tsv_SENTIPOLC2016_train, delimiter=',')
df_train5 = df_train5[['idtwitter']+['subj']+['text']]
df_test5 = pd.read_csv(tsv_SENTIPOLC2016_test, delimiter=',')
df_test5 = df_test5[['idtwitter']+['subj']+['text']]
df_train5['text'] = df_train5['text'].str.replace('\t', '', regex=False)
#SENTIPOLC20162
df_train6 = pd.read_csv(tsv_SENTIPOLC2016_train, delimiter=',')
df = pd.DataFrame(columns=['idtwitter', 'polarity', 'text'])
for ind in df_train6.index:
if df_train6['subj'][ind] == 1:
if df_train6['opos'][ind] == 1 and df_train6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_train6['idtwitter'][ind], 'polarity' : 0, 'text' : df_train6['text'][ind] }, ignore_index=True)
elif df_train6['opos'][ind] == 0 and df_train6['oneg'][ind] == 1:
df = df.append({'idtwitter' : df_train6['idtwitter'][ind], 'polarity' : 1, 'text' : df_train6['text'][ind] }, ignore_index=True)
elif df_train6['opos'][ind] == 0 and df_train6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_train6['idtwitter'][ind], 'polarity' : 2, 'text' : df_train6['text'][ind] }, ignore_index=True)
else:
if df_train6['opos'][ind] == 0 and df_train6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_train6['idtwitter'][ind], 'polarity' : 2, 'text' : df_train6['text'][ind] }, ignore_index=True)
df_train6 = df
df_train6['text'] = df_train6['text'].str.replace('\t', '', regex=False)
df_test6 = pd.read_csv(tsv_SENTIPOLC2016_test, delimiter=',')
df = pd.DataFrame(columns=['idtwitter', 'polarity', 'text'])
for ind in df_test6.index:
if df_test6['subj'][ind] == 1:
if df_test6['opos'][ind] == 1 and df_test6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_test6['idtwitter'][ind], 'polarity' : 0, 'text' : df_test6['text'][ind] }, ignore_index=True)
elif df_test6['opos'][ind] == 0 and df_test6['oneg'][ind] == 1:
df = df.append({'idtwitter' : df_test6['idtwitter'][ind], 'polarity' : 1, 'text' : df_test6['text'][ind] }, ignore_index=True)
elif df_test6['opos'][ind] == 0 and df_test6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_test6['idtwitter'][ind], 'polarity' : 2, 'text' : df_test6['text'][ind] }, ignore_index=True)
else:
if df_test6['opos'][ind] == 0 and df_test6['oneg'][ind] == 0:
df = df.append({'idtwitter' : df_test6['idtwitter'][ind], 'polarity' : 2, 'text' : df_test6['text'][ind] }, ignore_index=True)
df_test6 = df
#split train dev, in all tasks
train_dataset, dev_dataset = train_test_split(df_train, test_size=0.2, shuffle = True)
train_dataset2, dev_dataset2 = train_test_split(df_train2, test_size=0.2, shuffle = True)
train_dataset3, dev_dataset3 = train_test_split(df_train3, test_size=0.2, shuffle = True)
train_dataset4, dev_dataset4 = train_test_split(df_train4, test_size=0.2, shuffle = True)
train_dataset5, dev_dataset5 = train_test_split(df_train5, test_size=0.2, shuffle = True)
train_dataset6, dev_dataset6 = train_test_split(df_train6, test_size=0.2, shuffle = True)
#reduction of datasets in case of data cutting 100, 200, 500
if number_labeled_examples!=0:
if number_labeled_examples==100:
labeled = train_dataset.sample(n=100)
unlabeled = train_dataset
labeled2 = train_dataset2.sample(n=100)
unlabeled2 = train_dataset2
labeled3 = train_dataset3.sample(n=100)
unlabeled3 = train_dataset3
labeled4 = train_dataset4.sample(n=100)
unlabeled4 = train_dataset4
labeled5 = train_dataset5.sample(n=100)
unlabeled5 = train_dataset5
labeled6 = train_dataset6.sample(n=100)
unlabeled6 = train_dataset6
cond = unlabeled['id'].isin(labeled['id'])
cond2 = unlabeled2['id'].isin(labeled2['id'])
cond3 = unlabeled3['id'].isin(labeled3['id'])
cond4 = unlabeled4['File'].isin(labeled4['File'])
cond5 = unlabeled5['idtwitter'].isin(labeled5['idtwitter'])
cond6 = unlabeled6['idtwitter'].isin(labeled6['idtwitter'])
unlabeled.drop(unlabeled[cond].index, inplace = True)
unlabeled2.drop(unlabeled2[cond2].index, inplace = True)
unlabeled3.drop(unlabeled3[cond3].index, inplace = True)
unlabeled4.drop(unlabeled4[cond4].index, inplace = True)
unlabeled5.drop(unlabeled5[cond5].index, inplace = True)
unlabeled6.drop(unlabeled6[cond6].index, inplace = True)
elif number_labeled_examples==200:
labeled = train_dataset.sample(n=200)
unlabeled = train_dataset
labeled2 = train_dataset2.sample(n=200)
unlabeled2 = train_dataset2
labeled3 = train_dataset3.sample(n=200)
unlabeled3 = train_dataset3
labeled4 = train_dataset4.sample(n=200)
unlabeled4 = train_dataset4
labeled5 = train_dataset5.sample(n=200)
unlabeled5 = train_dataset5
labeled6 = train_dataset6.sample(n=200)
unlabeled6 = train_dataset6
cond = unlabeled['id'].isin(labeled['id'])
cond2 = unlabeled2['id'].isin(labeled2['id'])
cond3 = unlabeled3['id'].isin(labeled3['id'])
cond4 = unlabeled4['File'].isin(labeled4['File'])
cond5 = unlabeled5['idtwitter'].isin(labeled5['idtwitter'])
cond6 = unlabeled6['idtwitter'].isin(labeled6['idtwitter'])
unlabeled.drop(unlabeled[cond].index, inplace = True)
unlabeled2.drop(unlabeled2[cond2].index, inplace = True)
unlabeled3.drop(unlabeled3[cond3].index, inplace = True)
unlabeled4.drop(unlabeled4[cond4].index, inplace = True)
unlabeled5.drop(unlabeled5[cond5].index, inplace = True)
unlabeled6.drop(unlabeled6[cond6].index, inplace = True)
elif number_labeled_examples==500:
labeled = train_dataset.sample(n=500)
unlabeled = train_dataset
labeled2 = train_dataset2.sample(n=500)
unlabeled2 = train_dataset2
labeled3 = train_dataset3.sample(n=500)
unlabeled3 = train_dataset3
labeled4 = train_dataset4.sample(n=500)
unlabeled4 = train_dataset4
labeled5 = train_dataset5.sample(n=500)
unlabeled5 = train_dataset5
labeled6 = train_dataset6.sample(n=500)
unlabeled6 = train_dataset6
cond = unlabeled['id'].isin(labeled['id'])
cond2 = unlabeled2['id'].isin(labeled2['id'])
cond3 = unlabeled3['id'].isin(labeled3['id'])
cond4 = unlabeled4['File'].isin(labeled4['File'])
cond5 = unlabeled5['idtwitter'].isin(labeled5['idtwitter'])
cond6 = unlabeled6['idtwitter'].isin(labeled6['idtwitter'])
unlabeled.drop(unlabeled[cond].index, inplace = True)
unlabeled2.drop(unlabeled2[cond2].index, inplace = True)
unlabeled3.drop(unlabeled3[cond3].index, inplace = True)
unlabeled4.drop(unlabeled4[cond4].index, inplace = True)
unlabeled5.drop(unlabeled5[cond5].index, inplace = True)
unlabeled6.drop(unlabeled6[cond6].index, inplace = True)
#model with or without gan
if apply_gan == True:
print("MT-GANBERT")
#dataset unlabeled with label -1
unlabeled['label'] = unlabeled['label'].replace(0,-1)
unlabeled['label'] = unlabeled['label'].replace(1,-1)
unlabeled2['misogynous'] = unlabeled2['misogynous'].replace(0,-1)
unlabeled2['misogynous'] = unlabeled2['misogynous'].replace(1,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(0,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(1,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(2,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(3,-1)
unlabeled3['misogyny_category'] = unlabeled3['misogyny_category'].replace(4,-1)
unlabeled4['Hate Speech'] = unlabeled4['Hate Speech'].replace(0,-1)
unlabeled4['Hate Speech'] = unlabeled4['Hate Speech'].replace(1,-1)
unlabeled5['subj'] = unlabeled5['subj'].replace(0,-1)
unlabeled5['subj'] = unlabeled5['subj'].replace(1,-1)
unlabeled6['polarity'] = unlabeled6['polarity'].replace(0,-1)
unlabeled6['polarity'] = unlabeled6['polarity'].replace(1,-1)
unlabeled6['polarity'] = unlabeled6['polarity'].replace(2,-1)
train = pd.concat([labeled, unlabeled])
train2 = pd.concat([labeled2, unlabeled2])
train3 = pd.concat([labeled3, unlabeled3])
train4 = pd.concat([labeled4, unlabeled4])
train5 = pd.concat([labeled5, unlabeled5])
train6 = pd.concat([labeled6, unlabeled6])
dev = dev_dataset
dev2 = dev_dataset2
dev3 = dev_dataset3
dev4 = dev_dataset4
dev5 = dev_dataset5
dev6 = dev_dataset6
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train),len(labeled), len(unlabeled)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train2),len(labeled2), len(unlabeled2)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train3),len(labeled3), len(unlabeled3)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train4),len(labeled4), len(unlabeled4)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train5),len(labeled5), len(unlabeled5)))
print("Size of Train dataset is {}, with {} labeled and {} not labeled ".format(len(train6),len(labeled6), len(unlabeled6)))
print("Size of Dev dataset is {} ".format(len(dev)))
print("Size of Dev dataset is {} ".format(len(dev2)))
print("Size of Dev dataset is {} ".format(len(dev3)))
print("Size of Dev dataset is {} ".format(len(dev4)))
print("Size of Dev dataset is {} ".format(len(dev5)))
print("Size of Dev dataset is {} ".format(len(dev6)))
else:
print("MT-DNN, with reduction dataset")
train = labeled
train2 = labeled2
train3 = labeled3
train4 = labeled4
train5 = labeled5
train6 = labeled6
dev = dev_dataset
dev2 = dev_dataset2
dev3 = dev_dataset3
dev4 = dev_dataset4
dev5 = dev_dataset5
dev6 = dev_dataset6
print("Size of Train dataset is {} ".format(len(labeled)))
print("Size of Train dataset is {} ".format(len(labeled2)))
print("Size of Train dataset is {} ".format(len(labeled3)))
print("Size of Train dataset is {} ".format(len(labeled4)))
print("Size of Train dataset is {} ".format(len(labeled5)))
print("Size of Train dataset is {} ".format(len(labeled6)))
print("Size of Dev dataset is {} ".format(len(dev)))
print("Size of Dev dataset is {} ".format(len(dev2)))
print("Size of Dev dataset is {} ".format(len(dev3)))
print("Size of Dev dataset is {} ".format(len(dev4)))
print("Size of Dev dataset is {} ".format(len(dev5)))
print("Size of Dev dataset is {} ".format(len(dev6)))
else:
print("MT-DNN")
train = train_dataset
train2 = train_dataset2
train3 = train_dataset3
train4 = train_dataset4
train5 = train_dataset5
train6 = train_dataset6
dev = dev_dataset
dev2 = dev_dataset2
dev3=dev_dataset3
dev4=dev_dataset4
dev5=dev_dataset5
dev6=dev_dataset6
print("Size of Train dataset is {} ".format(len(train)))
print("Size of Train dataset is {} ".format(len(train2)))
print("Size of Train dataset is {} ".format(len(train3)))
print("Size of Train dataset is {} ".format(len(train4)))
print("Size of Train dataset is {} ".format(len(train5)))
print("Size of Train dataset is {} ".format(len(train6)))
print("Size of Dev dataset is {} ".format(len(dev)))
print("Size of Dev dataset is {} ".format(len(dev2)))
print("Size of Dev dataset is {} ".format(len(dev3)))
print("Size of Dev dataset is {} ".format(len(dev4)))
print("Size of Dev dataset is {} ".format(len(dev5)))
print("Size of Dev dataset is {} ".format(len(dev6)))
#Balancing for:
#- MT-DNN, trained on the total dataset of each task
#- MT-GAN, trained on the chosen data cutting of each task
if balancing==True:
if apply_gan== True:
print("MT-GAN")
max_train_un = max(len(unlabeled), len(unlabeled2), len(unlabeled3), len(unlabeled4), len(unlabeled5), len(unlabeled6))
print(max_train_un)
else:
print("MT-DNN")
unlabeled=train
unlabeled2=train2
unlabeled3=train3
unlabeled4=train4
unlabeled5=train5
unlabeled6=train6
max_train_un = max(len(unlabeled), len(unlabeled2), len(unlabeled3), len(unlabeled4), len(unlabeled5), len(unlabeled6))
print(max_train_un)
        # Oversample the smaller task by cycling through its rows until it reaches the
        # size of the largest task (max_train_un). Note: DataFrame.append is deprecated
        # in recent pandas; pd.concat is the modern equivalent.
df = pd.DataFrame(columns=['id', 'label', 'sentence'])
count=0
if len(unlabeled)<max_train_un:
for i in range(max_train_un):
if i < len(unlabeled):
df = df.append({'id' : unlabeled.iloc[i, 0], 'label' : unlabeled.iloc[i, 1], 'sentence' : unlabeled.iloc[i, 2] }, ignore_index=True)
else:
if count < len(unlabeled):
df = df.append({'id' : unlabeled.iloc[count, 0], 'label' : unlabeled.iloc[count, 1], 'sentence' : unlabeled.iloc[count, 2] }, ignore_index=True)
count = count+1
else:
count = 0
df = df.append({'id' : unlabeled.iloc[count, 0], 'label' : unlabeled.iloc[count, 1], 'sentence' : unlabeled.iloc[count, 2] }, ignore_index=True)
count = count+1
unlabeled = df
if apply_gan== True:
train = pd.concat([labeled, unlabeled])
else:
train=unlabeled
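        # Illustrative, vectorized alternative to the row-by-row duplication above
        # (sketch only; assumes numpy is available as np and the same column layout):
        #   reps = int(np.ceil(max_train_un / len(unlabeled)))
        #   unlabeled = pd.concat([unlabeled] * reps, ignore_index=True).iloc[:max_train_un]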
df = | pd.DataFrame(columns=['id', 'misogynous', 'text']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# Measurements, COrrelation and sanitY of data
import argparse
import os
import sys
import csv
from typing import Callable, Any
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import keras
import tensorflow
from sklearn.model_selection import train_test_split
import ERIK
import XAVIER
import RAVEN
def cleanlogfile(filename, filename_out=None):
with open(filename, 'r') as f:
lines = f.readlines()
lines = [line.strip() for line in lines if "] - ETA:" not in line]
for line in lines:
print(line)
lines2 = [i for i in lines if "=] - " in i]
lines2 = [i.split("] - ")[1] for i in lines2]
lines2 = [int(i.split("s ", 1)[0]) for i in lines2]
print("times per epoch:")
print(lines2)
print("sum:", sum(lines2))
print("min:", sum(lines2) / 60)
print("hours:", sum(lines2) / 3600)
lines.append(f"s_total: {sum(lines2)}")
lines.append(f"m_total: {sum(lines2) / 60}")
lines.append(f"h_total: {sum(lines2) / 3600}")
lines = [line + "\n" for line in lines]
with open(filename_out, "w+") as f:
f.writelines(lines)
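# Example usage (hypothetical file names, for illustration only):
# cleanlogfile("training_run.log", filename_out="training_run_clean.log")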
def verify_model(model_paths, data_path, name_models=None, loss=None,
method_standardize_label_sfh=0,
method_standardize_label_z=0,
method_standardize_spectra=0,
method_standardize_magnitudes=0,
which_data_is_going_to_be_used=2,
first_id_plot=0, n_plot=3,
train_size=0.8, test_size=0.2, traintestrandomstate=42, traintestshuffle=True):
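    """Load a dataset, rebuild the train/test split, and plot each saved model's
    SFH and metallicity predictions against the labels, together with residuals.

    Parameter semantics are inferred from the code below; `loss` defaults to mean
    squared error when not given, and per-case results are also dumped to CSV.
    """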
# Load Data
print("[INFO] Loading data...")
label_path = data_path.replace("Input_", "Label_")
metadata_path = data_path.replace("Input_", "MetaD_").replace(".fits", ".rda")
input_spectra, input_magnitudes, label_sfh, label_z, spectra_lambda, agevec, _ = \
ERIK.loadfiles(input_path=data_path, labels_path=label_path,
method_standardize_label_sfh=method_standardize_label_sfh,
method_standardize_label_z=method_standardize_label_z,
method_standardize_spectra=method_standardize_spectra,
method_standardize_magnitudes=method_standardize_magnitudes)
_, _, label_sfh_real, label_z_real, _, _, _ = \
ERIK.loadfiles(input_path=data_path, labels_path=label_path,
method_standardize_label_sfh=0,
method_standardize_label_z=0,
method_standardize_spectra=0,
method_standardize_magnitudes=0)
# Verified: If indices are added, they will STILL respect the output as if there were no indices
split_train_test = train_test_split(input_spectra, input_magnitudes, label_sfh, label_z, label_sfh_real, label_z_real,
range(input_spectra.shape[0]), # Indices
test_size=test_size,
train_size=train_size,
random_state=traintestrandomstate,
shuffle=traintestshuffle)
(trainSpect, testSpect,
trainMag, testMag,
trainLabSfh, testLabSfh,
trainLabZ, testLabZ,
trainLabSfh_real, testLabSfh_real,
trainLabZ_real, testLabZ_real,
trainIndices, testIndices) = split_train_test
if name_models is None:
name_models = model_paths
saved_models = []
for model_p in model_paths:
saved_models.append(keras.models.load_model(model_p,
custom_objects={"smape_loss": XAVIER.Cerebro.smape_loss}))
# saved_models[0].summary()
#################
# Decide which data is going to be used
if which_data_is_going_to_be_used == 0:
# All the data
in_data = np.concatenate([input_spectra, input_magnitudes], axis=1)
idx = list(range(input_spectra.shape[0]))
elif which_data_is_going_to_be_used == 1:
# Only Training data
in_data = np.concatenate([trainSpect, trainMag], axis=1)
label_sfh = trainLabSfh
label_z = trainLabZ
label_sfh_real = trainLabSfh_real
label_z_real = trainLabZ_real
idx = trainIndices
elif which_data_is_going_to_be_used == 2:
# Only Test data
in_data = np.concatenate([testSpect, testMag], axis=1)
label_sfh = testLabSfh
label_z = testLabZ
label_sfh_real = testLabSfh_real
label_z_real = testLabZ_real
idx = testIndices
else:
raise ValueError(f"which_data_is_going_to_be_used should be [0, 1, 2] and is {which_data_is_going_to_be_used}")
if loss is None:
loss = tensorflow.keras.losses.MeanSquaredError()
outputs = []
for model in saved_models:
outputs.append(model.predict(in_data[first_id_plot:(first_id_plot + n_plot)]))
legend_name = ["Label"] + name_models
for i in range(first_id_plot, first_id_plot + n_plot):
fig, ax = plt.subplots(2, 2, figsize=(25, 15))
plt.suptitle(f"i:{i + 1} - ID:{idx[i]}")
ax[0, 0].set_title("SFH")
ax[0, 0].plot(agevec, label_sfh[i, :], 'k')
for q, k in enumerate(outputs):
ax[0, 0].plot(agevec, k[0][(i - first_id_plot), :])
ax[0, 0].set_xscale('log')
ax[0, 0].legend(legend_name)
ax[1, 0].set_title("SFH - residuals")
ax[1, 0].plot(agevec, np.zeros(agevec.shape), 'k')
current_legend = ["Label"]
for q, k in enumerate(outputs):
ax[1, 0].scatter(agevec, np.subtract(k[0][(i - first_id_plot), :], label_sfh[i, :]))
current_legend.append(name_models[q] + "_" +
f"{loss(label_sfh[i, :], k[0][(i - first_id_plot), :]).numpy():3f}")
ax[1, 0].set_xscale('log')
ax[1, 0].legend(current_legend)
ax[0, 1].set_title("Metallicity")
ax[0, 1].plot(agevec, label_z[i, :], 'k')
for k in outputs:
ax[0, 1].plot(agevec, k[1][(i - first_id_plot), :])
ax[0, 1].set_xscale('log')
ax[0, 1].legend(legend_name)
ax[1, 1].set_title("Metallicity - residuals")
ax[1, 1].plot(agevec, np.zeros(agevec.shape), 'k')
current_legend = ["Label"]
for q, k in enumerate(outputs):
ax[1, 1].scatter(agevec, np.subtract(k[1][(i - first_id_plot), :], label_z[i, :]))
current_legend.append(name_models[q] + "_" +
f"{loss(label_z[i, :], k[1][(i - first_id_plot), :]).numpy():4f}")
ax[1, 1].set_xscale('log')
ax[1, 1].legend(current_legend)
plt.tight_layout()
plt.show()
tmp_dic = {"agevec": agevec,
"sfh_true": label_sfh[i, :],
"z_true": label_z[i, :],
"sfh_no_stand": label_sfh_real[i, :],
"z_no_stand": label_z_real[i, :]}
for q, k in enumerate(outputs):
tmp_dic[f"sfh{q}"] = k[0][(i - first_id_plot), :]
tmp_dic[f"z{q}"] = k[1][(i - first_id_plot), :]
tmp_sp = {"spectr_in": input_spectra[i, :],
"waveout": spectra_lambda}
tmp_mag = {"magnitudes_in": input_magnitudes[i, :],
"ID": [idx[i]]*5}
tmp_id_names = {"ID": [idx[i]]}
for q, k in enumerate(legend_name):
if q == 0:
continue
else:
tmp_id_names[f"name{q-1}"] = [k]
tmp_df = pd.DataFrame(tmp_dic)
tmp_df.to_csv("/Users/enrique/Documents/GitHub/LOGAN-SFH/tmp_file.pd", index=False)
tmp_df2 = pd.DataFrame(tmp_sp)
tmp_df3 = | pd.DataFrame(tmp_mag) | pandas.DataFrame |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Module contains class PandasDataframe.
PandasDataframe is a parent abstract class for any dataframe class
for pandas storage format.
"""
from collections import OrderedDict
import numpy as np
import pandas
import datetime
from pandas.core.indexes.api import ensure_index, Index, RangeIndex
from pandas.core.dtypes.common import is_numeric_dtype, is_list_like
from pandas._libs.lib import no_default
from typing import List, Hashable, Optional, Callable, Union, Dict
from modin.core.storage_formats.pandas.query_compiler import PandasQueryCompiler
from modin.error_message import ErrorMessage
from modin.core.storage_formats.pandas.parsers import (
find_common_type_cat as find_common_type,
)
from modin.core.dataframe.base.dataframe.dataframe import ModinDataframe
from modin.core.dataframe.base.dataframe.utils import (
Axis,
JoinType,
)
from modin.pandas.indexing import is_range_like
from modin.pandas.utils import is_full_grab_slice, check_both_not_none
from modin.logging import LoggerMetaClass
def lazy_metadata_decorator(apply_axis=None, axis_arg=-1, transpose=False):
"""
Lazily propagate metadata for the ``PandasDataframe``.
This decorator first adds the minimum required reindexing operations
to each partition's queue of functions to be lazily applied for
each PandasDataframe in the arguments by applying the function
run_f_on_minimally_updated_metadata. The decorator also sets the
flags for deferred metadata synchronization on the function result
if necessary.
Parameters
----------
apply_axis : str, default: None
The axes on which to apply the reindexing operations to the `self._partitions` lazily.
Case None: No lazy metadata propagation.
Case "both": Add reindexing operations on both axes to partition queue.
Case "opposite": Add reindexing operations complementary to given axis.
Case "rows": Add reindexing operations on row axis to partition queue.
axis_arg : int, default: -1
The index or column axis.
transpose : bool, default: False
Boolean for if a transpose operation is being used.
Returns
-------
Wrapped Function.
"""
def decorator(f):
from functools import wraps
@wraps(f)
def run_f_on_minimally_updated_metadata(self, *args, **kwargs):
for obj in (
[self]
+ [o for o in args if isinstance(o, PandasDataframe)]
+ [v for v in kwargs.values() if isinstance(v, PandasDataframe)]
+ [
d
for o in args
if isinstance(o, list)
for d in o
if isinstance(d, PandasDataframe)
]
+ [
d
for _, o in kwargs.items()
if isinstance(o, list)
for d in o
if isinstance(d, PandasDataframe)
]
):
if apply_axis == "both":
if obj._deferred_index and obj._deferred_column:
obj._propagate_index_objs(axis=None)
elif obj._deferred_index:
obj._propagate_index_objs(axis=0)
elif obj._deferred_column:
obj._propagate_index_objs(axis=1)
elif apply_axis == "opposite":
if "axis" not in kwargs:
axis = args[axis_arg]
else:
axis = kwargs["axis"]
if axis == 0 and obj._deferred_column:
obj._propagate_index_objs(axis=1)
elif axis == 1 and obj._deferred_index:
obj._propagate_index_objs(axis=0)
elif apply_axis == "rows":
obj._propagate_index_objs(axis=0)
result = f(self, *args, **kwargs)
if apply_axis is None and not transpose:
result._deferred_index = self._deferred_index
result._deferred_column = self._deferred_column
elif apply_axis is None and transpose:
result._deferred_index = self._deferred_column
result._deferred_column = self._deferred_index
elif apply_axis == "opposite":
if axis == 0:
result._deferred_index = self._deferred_index
else:
result._deferred_column = self._deferred_column
elif apply_axis == "rows":
result._deferred_column = self._deferred_column
return result
return run_f_on_minimally_updated_metadata
return decorator
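# Example (illustrative only, not part of the original module): a method that must see
# fully synchronized metadata on both axes before running could be declared as
#
#     @lazy_metadata_decorator(apply_axis="both")
#     def some_method(self, ...):
#         ...
#
# while transpose-like methods pass ``transpose=True`` so the deferred index/column
# flags are swapped on the result.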
class PandasDataframe(object, metaclass=LoggerMetaClass):
"""
An abstract class that represents the parent class for any pandas storage format dataframe class.
This class provides interfaces to run operations on dataframe partitions.
Parameters
----------
partitions : np.ndarray
A 2D NumPy array of partitions.
index : sequence
The index for the dataframe. Converted to a ``pandas.Index``.
columns : sequence
The columns object for the dataframe. Converted to a ``pandas.Index``.
row_lengths : list, optional
The length of each partition in the rows. The "height" of
each of the block partitions. Is computed if not provided.
column_widths : list, optional
The width of each partition in the columns. The "width" of
each of the block partitions. Is computed if not provided.
dtypes : pandas.Series, optional
The data types for the dataframe columns.
"""
_partition_mgr_cls = None
_query_compiler_cls = PandasQueryCompiler
# These properties flag whether or not we are deferring the metadata synchronization
_deferred_index = False
_deferred_column = False
@property
def __constructor__(self):
"""
Create a new instance of this object.
Returns
-------
PandasDataframe
"""
return type(self)
def __init__(
self,
partitions,
index,
columns,
row_lengths=None,
column_widths=None,
dtypes=None,
):
self._partitions = partitions
self._index_cache = | ensure_index(index) | pandas.core.indexes.api.ensure_index |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import seaborn as sns
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
house_df = pd.read_csv(filename)
house_df = house_df.drop(labels=["id", "lat", "long"], axis=1).dropna().drop_duplicates()
house_df.zipcode = house_df.zipcode.astype(int)
    house_df = house_df[
        (house_df.bedrooms > 0) & (house_df.bathrooms > 0) & (house_df.sqft_above > 0) &
        (house_df.floors >= 0) & (house_df.sqft_basement >= 0)
    ]
house_df["is_renovated_lately"] = np.where(house_df["yr_renovated"] >= 1995, 1, 0)
house_df = house_df.drop(labels="yr_renovated", axis=1)
house_df['date'] = pd.to_datetime(house_df['date'])
house_df['month'] = house_df['date'].dt.month
house_df['date_day'] = house_df['date'].dt.day
house_df['weekday'] = house_df['date'].dt.weekday
house_df['year'] = house_df['date'].dt.year
house_df['quarter'] = house_df['date'].dt.quarter
house_df = house_df.drop(labels="date", axis=1)
house_df["built_decade"] = (house_df["yr_built"] / 10).astype(int)
house_df = house_df.drop(labels="yr_built", axis=1)
house_df = pd.get_dummies(house_df, prefix="zipcode", columns=["zipcode"])
house_df = pd.get_dummies(house_df, prefix="month", columns=["month"])
house_df = | pd.get_dummies(house_df, prefix="built_decade", columns=["built_decade"]) | pandas.get_dummies |
#%%[markdown]
## Load the database into SQLite
#Define the location of the database
#%%
path_to_database='data/raw/elo7_recruitment_dataset.csv'
#%%[markdown]
#Define the location where the SQLite file will be stored; it is recommended to use a
#partition mapped to RAM (e.g. /dev/shm) to improve performance
#%%
path_to_sqlite='/dev/shm/database.sqlite3' #Store the database in ram partition (as /dev/shm) to increase the performance
#%%[markdown]
#Load the CSV data into SQLite. This makes some of the analyses easier because
#SQLite exposes a SQL query interface that allows the data to be manipulated
#conveniently.
#%%
import pandas as pd
import sqlite3
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
import numpy as np
from nltk import word_tokenize
from nltk.corpus import stopwords
import sklearn
from sklearn import tree
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from IPython.display import display
from sklearn import linear_model
import pickle
#########nltk.download('punkt')
#########nltk.download('stopwords')
#%%
def xorshift32(x):
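    # 16 rounds of a 32-bit xorshift mix, reduced modulo 20: the result is used as a
    # deterministic pseudo-random bucket (0-19) for splitting rows into train/test/validation folds.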
for i in range(16):
x ^= (x << 13) &4294967295
x ^= (x >> 17) &4294967295
x ^= (x << 5)&4294967295
return x%20
def pass_csv_database_to_sqlite3(path_to_sqlite3,path_raw_data_in_csv):
conn = sqlite3.connect(path_to_sqlite3)
df=pd.read_csv(path_raw_data_in_csv)
df=df.reset_index()
df=df.rename(columns = {"index": "query_elo7_id"})
df['hash_for_query_elo7_id']=df.apply((lambda row: xorshift32(row.query_elo7_id)), axis = 1)
df['hash_for_product_id']=df.apply((lambda row: xorshift32(row.product_id)), axis = 1)
df['price_group']=df.apply((lambda row: -1), axis = 1)
df.to_sql("query_elo7", conn, if_exists="replace",index=False)
conn.commit()
cur=conn.cursor()
cur.execute("""
CREATE INDEX index_query_elo7_querys_elo7_id ON query_elo7 (
query_elo7_id
);
""")
cur.execute("""
CREATE INDEX index_query_elo7_hash_for_query_elo7_id ON query_elo7 (
hash_for_query_elo7_id
);
""")
conn.commit()
conn.close()
#########pass_csv_database_to_sqlite3(path_to_sqlite,path_to_database)
#%%[markdown]
#Associate each individual word typed in the search queries with the queries in
#which it was typed; this is useful for later statistical analyses.
#%%
def create_schema_for_tables_that_associate_words_in_querys_with_querys_typed(path_to_sqlite3):
conn = sqlite3.connect(path_to_sqlite3)
cur=conn.cursor()
cur.execute("DROP TABLE IF EXISTS word_typed_in_query;")
cur.execute("VACUUM;")
cur.execute("""
CREATE TABLE word_typed_in_query (
word VARCHAR (256),
word_typed_in_query_id INTEGER PRIMARY KEY AUTOINCREMENT
);
""")
conn.commit()
conn.close()
def create_schema_for_table___word_typed_in_query___query_elo7(path_to_sqlite3):
conn = sqlite3.connect(path_to_sqlite3)
cur=conn.cursor()
cur.execute("DROP TABLE IF EXISTS word_typed_in_query___query_elo7;")
cur.execute("VACUUM;")
cur.execute("""
CREATE TABLE word_typed_in_query___query_elo7 (
word_typed_in_query_id INTEGER REFERENCES word_typed_in_query (word_typed_in_query_id) ON DELETE CASCADE
ON UPDATE CASCADE,
query_elo7_id INTEGER REFERENCES query_elo7 (query_elo7_id) ON DELETE CASCADE
ON UPDATE CASCADE,
word_typed_in_query___query_elo7_id INTEGER PRIMARY KEY AUTOINCREMENT
);
""")
conn.commit()
conn.close()
def create_schema_for_table___vector_element(path_to_sqlite3):
conn = sqlite3.connect(path_to_sqlite3)
cur=conn.cursor()
cur.execute("DROP TABLE IF EXISTS vector_element;")
cur.execute("VACUUM;")
sql="""
CREATE TABLE vector_element (
vector_element_id INTEGER PRIMARY KEY AUTOINCREMENT,
query_elo7_id INTEGER REFERENCES query_elo7 (query_elo7_id) ON DELETE CASCADE
ON UPDATE CASCADE,
position_in_vector INT,
word VARCHAR (256),
value DOUBLE,
hash_for_query_elo7_id INTEGER
);
"""
cur.execute(sql)
cur.execute("""
CREATE INDEX index_vector_element___query_elo7_id ON vector_element (
query_elo7_id
);
""")
cur.execute("""
CREATE INDEX index_vector_element___hash_for_query_elo7_id ON vector_element (
hash_for_query_elo7_id
);
""")
conn.commit()
conn.close()
def populate_table__word_typed_in_query(path_to_sqlite3):
conn = sqlite3.connect(path_to_sqlite3)
cur=conn.cursor()
cur.execute(""" SELECT query
FROM query_elo7;
""")
words={}
for line in cur:
words_line= word_tokenize(line[0])
for word_line in words_line:
if(not(word_line in words)):
words[word_line]=None
for word in words.keys():
cur.execute("""
INSERT INTO word_typed_in_query (
word
)
VALUES (
'{word}'
);
""".format(word=word))
conn.commit()
conn.close()
def word_typed_in_query___query_elo7(path_to_sqlite3):
conn = sqlite3.connect(path_to_sqlite3)
cur=conn.cursor()
converter_word_to_word_id_in_table={}
cur.execute('''
SELECT word,
word_typed_in_query_id
FROM word_typed_in_query;
''')
for line in cur:
converter_word_to_word_id_in_table[line[0]]=line[1]
cur.execute(""" SELECT query_elo7_id,
query
FROM query_elo7;
""")
cur2=conn.cursor()
for line in cur:
query_words= word_tokenize(line[1])
for word in query_words:
word_id=converter_word_to_word_id_in_table[word]
sql='''
INSERT INTO word_typed_in_query___query_elo7 (
word_typed_in_query_id,
query_elo7_id
)
VALUES (
{word_typed_in_query_id},
{query_elo7_id}
);
'''.format(word_typed_in_query_id=word_id, query_elo7_id=line[0])
cur2.execute(sql)
conn.commit()
conn.close()
#########create_schema_for_tables_that_associate_words_in_querys_with_querys_typed(path_to_sqlite)
#########populate_table__word_typed_in_query(path_to_sqlite)
#########create_schema_for_table___word_typed_in_query___query_elo7(path_to_sqlite)
#########word_typed_in_query___query_elo7(path_to_sqlite)
#########create_schema_for_table___vector_element(path_to_sqlite)
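#%%
# Example query over the association tables built above (illustrative sketch only):
# counting, for every word, how many distinct queries it appears in. The same idea is
# wrapped in a helper function further below.
# SELECT w.word, COUNT(DISTINCT a.query_elo7_id) AS n_queries
# FROM word_typed_in_query___query_elo7 a
# JOIN word_typed_in_query w ON w.word_typed_in_query_id = a.word_typed_in_query_id
# GROUP BY w.word
# ORDER BY n_queries DESC;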
#%%[markdown]
## Exploratory analysis
### Analysis of the words typed in the search queries
###### For these analyses the words known in *NLP* (natural language processing) as *stopwords* were removed; *stopwords* are very common words that add no meaning to the text (prepositions, for example)
#Count of the number of distinct words ever typed by users in queries. Since the
#words will be the input variables of the models to be developed, they are the
#first variables to be analysed.
#%%
#########conn=sqlite3.connect(path_to_sqlite)
#########sql="""SELECT COUNT(DISTINCT word)
######### FROM word_typed_in_query
######### WHERE word NOT IN ({stopwords})
######### """.format(stopwords=str(list(stopwords.words('portuguese')))[1:-1])
#########print("Número de palavras distintas já digitadas: "+str(conn.execute(sql).fetchall()[0][0]))
#########conn.close()
#%%[markdown]
# Because of the sheer number of distinct words, a word-frequency count over the
# queries is needed. This can be useful to reduce the number of words the models
# have to handle: with fewer words to analyse, the models can be more accurate,
# which is a common tactic for improving the quality of AI models.
def count_number_of_times_that_word_appear_in_query(path_to_sqlite3):
sql="""
WITH word_typed_in_query___query_elo7_distinct AS (
SELECT DISTINCT word_typed_in_query_id, query_elo7_id
FROM word_typed_in_query___query_elo7
)
SELECT COUNT(query_elo7_id) AS numbero_de_consultas_onde_a_palavra_foi_digitada,
word_typed_in_query.word AS palavra
FROM word_typed_in_query___query_elo7_distinct
INNER JOIN word_typed_in_query ON word_typed_in_query.word_typed_in_query_id=word_typed_in_query___query_elo7_distinct.word_typed_in_query_id
WHERE word_typed_in_query.word NOT IN ({stopwords})
GROUP BY word_typed_in_query___query_elo7_distinct.word_typed_in_query_id
ORDER BY COUNT(query_elo7_id) DESC
""".format(stopwords=str(list(stopwords.words('portuguese')))[1:-1])
conn = sqlite3.connect(path_to_sqlite3)
df=pd.read_sql_query(sql,conn)
conn.close()
return df
#########print("Análise de frequência das vinte palavras mais digitadas nas consultas:")
#########df_number_of_times_for_words_in_querys=count_number_of_times_that_word_appear_in_query(path_to_sqlite)
#########df_number_of_times_for_words_in_querys.head(20)
# %%[markdown]
# A (very fast) exponential-like decay can be seen in the frequency from the most
# typed word down to the twentieth most typed word. <br>
# A better analysis is to plot a chart to confirm this decay, where the X axis is
# the word's *ranking* among the most frequent words and the Y axis is the number
# of times it appears.
# %%
#########df_number_of_times_for_words_in_querys=count_number_of_times_that_word_appear_in_query(path_to_sqlite)
#########df_number_of_times_for_words_in_querys=df_number_of_times_for_words_in_querys.reset_index()
#########df_number_of_times_for_words_in_querys.rename(columns = {'index':'ranking da palavra', 'numbero_de_consultas_onde_a_palavra_foi_digitada':'número de vezes que aparece'}, inplace = True)
#########sns.lineplot(data=df_number_of_times_for_words_in_querys.reset_index(), x="ranking da palavra", y="número de vezes que aparece")
# %% [markdown]
# With the analyses presented so far it can be said that a few hundred words are
# enough to appear in most of the queries. To confirm this, groups of words are
# built where the first group contains only the most frequent word, the second
# group the two most frequent words, the third the three most frequent, and so on;
# then the number of queries containing at least one word of a given group is
# counted. To simplify the writing, the term *coverage* is used in this report with
# the following definition: "a group of words covers a query if and only if at
# least one word of the group appears in the query".<br> <br>
# Below, a plot showing how many queries are covered by groups with
# the N most frequent words:
#%%
def words_are_in_query(listOfWordsToSearch, query_string):
for word in listOfWordsToSearch:
if(word in query_string):
return 1
return 0
def number_of_queries_coverage_by_groups_with_the_N_most_frequent_words(path_to_sqlite3,path_to_csv):
ranking_for_occurrence_words_in_querys=(count_number_of_times_that_word_appear_in_query(path_to_sqlite3)['palavra']).values
prototype_for_dataframe_with_result={'grupo com as N palavras mais frequentes':[],'número de consultas cobertas pelo grupo':[]}
for i in range(384):
wordGroup=ranking_for_occurrence_words_in_querys[:(i+1)]
queryBelongToWordGroup=lambda query: words_are_in_query(wordGroup,query)
df=pd.read_csv(path_to_csv)
numberOfQuerysCoverage=np.sum(df['query'].apply(queryBelongToWordGroup).values)
prototype_for_dataframe_with_result['grupo com as N palavras mais frequentes'].append(i+1)
prototype_for_dataframe_with_result['número de consultas cobertas pelo grupo'].append(numberOfQuerysCoverage)
return pd.DataFrame.from_dict(prototype_for_dataframe_with_result)
#########df_with_number_of_queries_coverage_by_groups_with_the_N_most_frequent_words=number_of_queries_coverage_by_groups_with_the_N_most_frequent_words(path_to_sqlite,path_to_database)
#########sns.lineplot(data=df_with_number_of_queries_coverage_by_groups_with_the_N_most_frequent_words, x="grupo com as N palavras mais frequentes", y="número de consultas cobertas pelo grupo")
# %%
#########last_row_for_infomation_about_group_of_words=(df_with_number_of_queries_coverage_by_groups_with_the_N_most_frequent_words.values)[-1]
#########print ("Quantidade consultas cobertas pelo grupo com as {num_of_words} palavras mais frequentes: {num_of_querys}".format(num_of_words=last_row_for_infomation_about_group_of_words[0],num_of_querys=last_row_for_infomation_about_group_of_words[1]))
# %%[markdown]
# Looking at the previous chart, the 384 most frequent words appear in 35591 of the
# 38507 queries available in the provided dataset, roughly 92% coverage. Therefore,
# whenever the AI models developed here receive a query as one of their inputs, they
# will analyse only these 384 words.
# %%[markdown]
### Analysis of the attributes of the existing products (weight, price and express delivery).
#### Below, histograms for the different weight ranges of the products in each category:
# %%[markdown]
# Histograms of the weight distribution per category.
#%%
#########conn=sqlite3.connect(path_to_sqlite)
#########df=pd.read_sql_query("""SELECT DISTINCT product_id, category, weight FROM query_elo7 WHERE weight""",conn)
#########sns.histplot(hue="category", x="weight", data=df,bins=10)
#########conn.close()
# %%
# %%[markdown]
# Histograms of the weight distribution per category limited to weights up to 40; this is equivalent to zooming into the beginning of the X axis of the previous chart's histograms.
#%%
#########conn=sqlite3.connect(path_to_sqlite)
#########df=pd.read_sql_query("""SELECT DISTINCT product_id, category, weight FROM query_elo7 WHERE weight<40""",conn)
#########sns.histplot(hue="category", x="weight", data=df,bins=10)
#########conn.close()
# %%[markdown]
#### Below, histograms for the different price ranges of the products in each category:
# %%[markdown]
# Histograms of the price distribution per category.
#%%
#########conn=sqlite3.connect(path_to_sqlite)
#########df=pd.read_sql_query("""SELECT DISTINCT product_id, category, price FROM query_elo7 WHERE price""",conn)
#########sns.histplot(hue="category", x="price", data=df,bins=10)
#########conn.close()
# %%[markdown]
# Histograms of the price distribution per category limited to prices up to 200; this is equivalent to zooming into the beginning of the X axis of the previous chart's histograms.
#%%
#########conn=sqlite3.connect(path_to_sqlite)
#########df=pd.read_sql_query("""SELECT DISTINCT product_id, category, price FROM query_elo7 WHERE price<200 """,conn)
#ax = sns.boxplot(x="category", y="weight", data=df)
#########sns.histplot(hue="category", x="price", data=df,bins=10)
#########conn.close()
# %%[markdown]
# In the previous chart's histograms the "Lembrancinhas" category gets in the way of
# visualising the price distribution of the other categories, so it is removed from the
# next chart. The chart below is a replot of the previous histograms without the
# "Lembrancinhas" category:
#%%
#########conn=sqlite3.connect(path_to_sqlite)
#########df=pd.read_sql_query("""SELECT DISTINCT product_id, category, price FROM query_elo7 WHERE price<200 AND category!='Lembrancinhas' """,conn)
#ax = sns.boxplot(x="category", y="weight", data=df)
#########sns.histplot(hue="category", x="price", data=df,bins=10)
#########conn.close()
# %%[markdown]
#### Analysis of the weight and price distributions
# As can be seen, neither the price nor the weight of the products follows a normal
# distribution; therefore, the best strategies to discretize these values would be:
# <ul>
# <li>Discretize the values after a logarithmic transformation (a minimal sketch of this option follows below).</li>
# <li>Cluster them with an unsupervised learning algorithm such as K-means.</li>
# </ul>
# It is important to think about techniques for discretizing continuous values because
# this can improve the quality of the AI models developed.
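#%%
# Illustrative sketch only (not part of the original analysis): the first discretization
# option mentioned above — binning log-transformed prices into equal-width groups with
# pandas. The function and column names are assumptions made for this example, and
# prices are assumed to be strictly positive.
def discretize_price_by_log_bins(prices, n_bins=8):
    """Return integer bin labels for a price Series using equal-width bins in log space."""
    log_price = np.log(prices.astype(float))
    return pd.cut(log_price, bins=n_bins, labels=False)
# Example usage (hypothetical):
# df['price_group_log'] = discretize_price_by_log_bins(df['price'])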
# %%[markdown]
#### Analysis of the distribution of the express delivery attribute
#%%
#########conn=sqlite3.connect(path_to_sqlite)
#########df=pd.read_sql_query("""SELECT DISTINCT product_id, category, express_delivery FROM query_elo7 WHERE price<200 AND category!='Lembrancinhas' """,conn)
#########df['express_delivery']=df['express_delivery'].apply(lambda x: 'yes' if x>0.0 else 'no')
#########categories=df['category'].unique()
#########for category in categories:
######### print("Distribuição para a categoria:"+str(category)+"\n")
######### dfT=df[df['category']==category]
######### sns.histplot( x="express_delivery", data=dfT.sort_values(by=['express_delivery']), stat='probability',discrete=True, shrink=.8)
######### plt.show()
######### print("\n\n\n\n")
#########conn.close()
# %%[markdown]
# As can be seen, each category has a different express-delivery distribution,
# which makes this attribute interesting to use in a category classifier.
#%%[markdown]
## Product Classification System
#### Word histogram of the products associated with the words typed in the searches
# This histogram is one of the metrics created to serve as input to the models described
# in the sections **Finding the best category prediction model with the K-Fold methodology**, **Search term system** and **Collaboration between the systems**.
# To build this metric, the word histogram of each product in the database is computed
# from the queries associated with that product, and then the mean of the histograms of
# the products associated with a category is taken.
# The histograms can be built with the following steps: <br>
# <ul>
# <li>
# Based on the 384 most frequent words, each word is mapped to a position of a
# 384-element vector, and every product in the database gets one such vector.
# These are the vectors that store the product histograms.
# </li>
# <li>
# Go through every query that exists for each product registered in the database and,
# whenever a word mapped to the product's histogram vector is found, increment the
# vector element associated with that word.
# </li>
# </ul> <br> <br>
# Below, the code that builds the histograms for each query (an in-memory sketch of the
# same idea appears right after it):
#%%
def populate_table____vector_element(path_to_sqlite3):
ranking_for_occurrence_words_in_querys=list((count_number_of_times_that_word_appear_in_query(path_to_sqlite3)['palavra']).values)[:384]
ranking_for_occurrence_words_in_querys.sort()
conn = sqlite3.connect(path_to_sqlite3)
cur=conn.cursor()
cur2=conn.cursor()
cur.execute("SELECT query_elo7_id,query FROM query_elo7")
for line in cur:
query_words= word_tokenize(line[1])
elementsToInsert=""
for i in range(len(ranking_for_occurrence_words_in_querys)):
number_of_times=query_words.count(ranking_for_occurrence_words_in_querys[i])
elementsToInsert=elementsToInsert+"({query_elo7_id},{position_in_vector},'{word}',{number_of_times},{hash_for_query_elo7_id}),".format(query_elo7_id=line[0],position_in_vector=i,word=ranking_for_occurrence_words_in_querys[i],number_of_times=number_of_times,hash_for_query_elo7_id=xorshift32(line[0]))
cur2.execute("INSERT INTO vector_element (query_elo7_id,position_in_vector,word,value,hash_for_query_elo7_id) VALUES "+elementsToInsert[:-1])
conn.commit()
conn.close()
#########populate_table____vector_element(path_to_sqlite)
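#%%
# Illustrative in-memory sketch of the same idea (assumption: `vocabulary` is the list of
# the 384 most frequent words, in a fixed order): each query is turned into a 384-element
# count vector, and a product/category histogram is the sum (or mean) of the vectors of
# its queries.
def query_to_count_vector(query, vocabulary):
    """Return a word-count vector for `query`, aligned with `vocabulary` (a fixed-order word list)."""
    tokens = word_tokenize(query)
    return np.array([tokens.count(word) for word in vocabulary], dtype=float)
# Example usage (hypothetical):
# vocabulary = list(count_number_of_times_that_word_appear_in_query(path_to_sqlite)['palavra'])[:384]
# vec = query_to_count_vector("lembrancinha de casamento", vocabulary)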
#%%[markdown]
#### Variables used to build the system
# To build the system, the price, weight and express_delivery fields of the queries were
# used, together with the word histogram of the products associated with the words typed in the searches.
#### Finding the best category prediction model with the K-Fold methodology
# Most of the following steps are repeated several times in order to find the best model;
# this repetition uses the K-Fold methodology and consists of:
# <ul>
# <li>
# Randomly split the dataset into a validation part and a non-validation part.
# </li>
# <li>
# Split the non-validation part into training and test sets, with 86.67% of it as training and 13.33% as test.
# </li>
# <li>
# Train a model on the training set and evaluate it on the test set and on the validation set.
# </li>
# <li>
# Re-partition the non-validation part so that 13.33% of the previous training set becomes the current test set and the elements of the previous test set join the current training set.
# </li>
# <li>
# Go back to step 3 until every element of the non-validation set has been used to train at least one model.
# </li>
# <li>
# Use the model that achieved the best result on the validation set.
# </li>
# </ul>
#
# To make it easier to choose which data goes into the training, test and validation sets,
# the vector_element and query_elo7 tables have a column called hash_for_query_elo7_id
# whose hash value ranges from 0 to 19; the split can therefore be made by choosing which
# hash values send the rows of these tables to the training, test and validation sets
# (a small sketch of this fold-to-bucket mapping is given further below). For selecting
# the best model, the metric used was the mean of the accuracies achieved for classifying each category. <br><br>
# Below, the confusion matrix and the mean accuracy of the best prediction model found while building the system:
#%%
def transform_products_load_as_dict_to_dataframeX_variable_to_fit_Y(database,column_to_be_y='category',fields_to_drop=[]):
fields=list(database[list(database.keys())[0]].keys())
dataFrameInDict={}
for field in fields:
if(field !="vector"):
dataFrameInDict[field]=[]
dataFrameInDict['Y']=[]
for i in range(384):
dataFrameInDict['vector_e'+str(i)]=[]
for data_id in database:
data=database[data_id]
for field in fields:
if(field!="vector"):
dataFrameInDict[field].append(data[field])
for i in range(384):
dataFrameInDict['vector_e'+str(i)].append(data['vector'][i])
dataFrameInDict['Y'].append(data[column_to_be_y])
df=pd.DataFrame.from_dict(dataFrameInDict)
df=df.dropna()
y=df['Y'].values
df=df.drop(['Y'],axis=1)
#df['weight'].fillna(value=df['weight'].mean(), inplace=True)
#df['price'].fillna(value=df['price'].mean(), inplace=True)
df=df.drop([column_to_be_y],axis=1)
if(len(fields_to_drop)>0):
df=df.drop(fields_to_drop,axis=1)
return {'dataframeX':df,'Y':y}
def merge_query_to_get_words_histograms_and_query_to_get_varied_information_into_dictonaries(path_to_sqlite3,
query_to_get_words_histograms,
query_to_get_varied_information):
#Important Note=========================================
# The first field of query_to_get_words_histograms
# and query_to_get_varied_information have to be
# equals and they are the for the dictonaries
#End====================================================
conn = sqlite3.connect(path_to_sqlite3)
cur=conn.cursor()
information_about_products={}
cur.execute(query_to_get_varied_information)
fields=[]
for field in cur.description:
fields.append(field[0])
for row in cur:
dictTemp={}
keyForDict=fields[0]
for i in range(len(fields)-1):
dictTemp[fields[i+1]]=row[i+1]
information_about_products[row[0]]=dictTemp
cur.execute(query_to_get_words_histograms)
productsDict={}
for row in cur:
if(not (row[0] in productsDict)):
dictTemp={'vector':np.zeros(384)}
for i in range(len(fields)-1):
dictTemp[fields[i+1]]=information_about_products[row[0]][fields[i+1]]
productsDict[row[0]]=dictTemp
productsDict[row[0]]['vector'][row[1]]=row[2]
conn.close()
return productsDict
def get_data_to_validation_to_use_in_the_model_to_predict_categories(path_to_sqlite3):
query_to_get_words_histograms="""
WITH vector_products AS (
SELECT query_elo7.product_id,
vector_element.position_in_vector,
SUM(vector_element.value) AS value
FROM vector_element
INNER JOIN query_elo7 ON query_elo7.query_elo7_id=vector_element.query_elo7_id
WHERE query_elo7.hash_for_product_id>=15
GROUP BY query_elo7.product_id,vector_element.position_in_vector
ORDER BY query_elo7.product_id,vector_element.position_in_vector
)
SELECT product_id,
position_in_vector,
value
FROM vector_products
WHERE value>0
"""
query_to_get_varied_information="""
SELECT DISTINCT product_id,
category,
weight,
price,
express_delivery
FROM query_elo7;
"""
products_dict_validation=merge_query_to_get_words_histograms_and_query_to_get_varied_information_into_dictonaries(path_to_sqlite3,query_to_get_words_histograms,query_to_get_varied_information)
return transform_products_load_as_dict_to_dataframeX_variable_to_fit_Y(products_dict_validation)
def get_data_to_train_and_test_to_use_in_the_model_to_predict_categories(path_to_sqlite3,folder):
query_to_get_varied_information="""
SELECT DISTINCT product_id,
category,
weight,
price,
express_delivery
FROM query_elo7;
"""
query_to_get_words_histograms_to_train="""
WITH vector_products AS (
SELECT query_elo7.product_id,
vector_element.position_in_vector,
SUM(vector_element.value) AS value
FROM vector_element
INNER JOIN query_elo7 ON query_elo7.query_elo7_id=vector_element.query_elo7_id
WHERE query_elo7.hash_for_product_id<15 AND query_elo7.hash_for_product_id NOT IN ({folder1},{folder2})
GROUP BY query_elo7.product_id,vector_element.position_in_vector
ORDER BY query_elo7.product_id,vector_element.position_in_vector
)
SELECT product_id,
position_in_vector,
value
FROM vector_products
WHERE value>0
""".format(folder1=folder*2,folder2=(folder*2)+1)
products_dict_train=merge_query_to_get_words_histograms_and_query_to_get_varied_information_into_dictonaries(path_to_sqlite3,query_to_get_words_histograms_to_train,query_to_get_varied_information)
query_to_get_words_histograms_to_test="""
WITH vector_products AS (
SELECT query_elo7.product_id,
vector_element.position_in_vector,
SUM(vector_element.value) AS value
FROM vector_element
INNER JOIN query_elo7 ON query_elo7.query_elo7_id=vector_element.query_elo7_id
WHERE query_elo7.hash_for_product_id<15 AND query_elo7.hash_for_product_id IN ({folder1},{folder2})
GROUP BY query_elo7.product_id,vector_element.position_in_vector
ORDER BY query_elo7.product_id,vector_element.position_in_vector
)
SELECT product_id,
position_in_vector,
value
FROM vector_products
WHERE value>0
""".format(folder1=folder*2,folder2=(folder*2)+1)
products_dict_test=merge_query_to_get_words_histograms_and_query_to_get_varied_information_into_dictonaries(path_to_sqlite3,query_to_get_words_histograms_to_test,query_to_get_varied_information)
return {
"train":transform_products_load_as_dict_to_dataframeX_variable_to_fit_Y(products_dict_train),
"test":transform_products_load_as_dict_to_dataframeX_variable_to_fit_Y(products_dict_test)
}
def define_better_model_to_predict_categories(path_to_sqlite,data_validation,dropColumnsFromDataTrainAndTest=[]):
bestModel=None
averageAcuracyForBestModel=None
for i in range(7):
data_train_test=get_data_to_train_and_test_to_use_in_the_model_to_predict_categories(path_to_sqlite,i)
if(len(dropColumnsFromDataTrainAndTest)>0):
data_train_test['train']['dataframeX']=data_train_test['train']['dataframeX'].drop(dropColumnsFromDataTrainAndTest, axis=1)
clf = tree.DecisionTreeClassifier()
clf = clf.fit(data_train_test['train']['dataframeX'], data_train_test['train']['Y'])
ypred=clf.predict(data_validation['dataframeX'])
confusion_mat=confusion_matrix(data_validation['Y'], ypred, labels=pd.Series(list(ypred)+list(data_validation['Y'])).unique())
avg_acc=0.0
for i in range(len(confusion_mat)):
avg_acc=avg_acc+(float(confusion_mat[i,i])/float(sum((confusion_mat[i,:]))))
avg_acc=avg_acc/float(len(confusion_mat))
if(averageAcuracyForBestModel== None or avg_acc>averageAcuracyForBestModel):
averageAcuracyForBestModel=avg_acc
bestModel=clf
return {'bestModel':bestModel,'averageAcuracy':averageAcuracyForBestModel}
def create_dataFrame_with_confusion_matrix(real,pred, labels_arg=None):
    if labels_arg is None:
labels=list(pd.Series(list(real)+list(pred)).unique())
else:
labels=labels_arg
confusion_mat=confusion_matrix(real, pred, labels=labels)
df=pd.DataFrame(confusion_mat,columns=[['predito']*len(labels),labels], index=[['real']*len(labels),labels])
return df
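# Illustrative sketch (not used by the pipeline above): the K-fold rotation described in
# the methodology section maps fold i to hash buckets {2*i, 2*i+1} as the test split,
# keeps the remaining buckets below 15 for training, and reserves buckets 15-19 for
# validation. A hypothetical helper making that layout explicit:
def hash_buckets_for_fold(fold, n_validation_buckets=5, n_total_buckets=20):
    """Return (train_buckets, test_buckets, validation_buckets) for one fold (assumed layout)."""
    validation = set(range(n_total_buckets - n_validation_buckets, n_total_buckets))
    test = {2 * fold, 2 * fold + 1}
    train = set(range(n_total_buckets - n_validation_buckets)) - test
    return train, test, validation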
#########columns_to_drop=[]
#########data_validation=get_data_to_validation_to_use_in_the_model_to_predict_categories(path_to_sqlite)
#########if(len(columns_to_drop)>0):
######### data_validation['dataframeX']=data_validation['dataframeX'].drop(columns_to_drop,axis=1)
#########return_from_defineBetterModel=define_better_model_to_predict_categories(path_to_sqlite,data_validation,dropColumnsFromDataTrainAndTest=columns_to_drop)
#########model=return_from_defineBetterModel['bestModel']
#########ypred=model.predict(data_validation['dataframeX'])
#########confusion_mat_0=create_dataFrame_with_confusion_matrix(data_validation['Y'],ypred)
#########print("Matriz de confusão para classificar produtos:")
#########display(confusion_mat_0)
#########print("Acurácia media: "+str(return_from_defineBetterModel['averageAcuracy']))
#%%[markdown]
# An interesting fact is that the price, weight and express_delivery variables do not seem to
# contribute to the model and may even hurt it. This claim can be confirmed by the results of
# the following model, which was built with these input variables removed:
#%%
#########columns_to_drop=['price', 'express_delivery','weight']
#########data_validation=get_data_to_validation_to_use_in_the_model_to_predict_categories(path_to_sqlite)
#########if(len(columns_to_drop)>0):
######### data_validation['dataframeX']=data_validation['dataframeX'].drop(columns_to_drop,axis=1)
#########return_from_defineBetterModel=define_better_model_to_predict_categories(path_to_sqlite,data_validation,dropColumnsFromDataTrainAndTest=columns_to_drop)
#########model=return_from_defineBetterModel['bestModel']
#########ypred=model.predict(data_validation['dataframeX'])
#########confusion_mat_1=create_dataFrame_with_confusion_matrix(data_validation['Y'],ypred)
#########print("Matriz de confusão para classificar produtos apenas com os histogramas:")
#########display(confusion_mat_1)
#########print("Acurácia media: "+str(return_from_defineBetterModel['averageAcuracy']))
#########model_predict_category_fp=open( "data/models/predict_category/mode.pickle", "wb" )
#########pickle.dump( model, model_predict_category_fp)
#########model_predict_category_fp.close()
#%%[markdown]
## Search term system
# To create this system, the idea was first to build price groups and, from these groups,
# use a machine learning algorithm that finds a relation between the word histograms of
# the queries and a given price group, thus discovering the user's intention of buying
# products in a certain price range based on what they type in their queries.
# The K-means algorithm was used to create the price groups. Below, the code that splits
# the prices into groups:
#%%
def create_discretization_model_from_k_means_for_product_price(path_to_sqlite3,n_clusters):
conn = sqlite3.connect(path_to_sqlite3)
sql="""
SELECT query_elo7_id, price, minimum_quantity
FROM query_elo7;
"""
df=pd.read_sql_query(sql,conn)
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
df['price_log']=df['price'].apply(np.log)
#df['price_log']=df['price']
cluster_labels = clusterer.fit_predict(df['price_log'].values.reshape(-1, 1) )
silhouette_avg = silhouette_score(df['price_log'].values.reshape(-1, 1), cluster_labels)
conn.close()
return {"model":clusterer,'score':silhouette_avg}
#%% [markdown]
# To compute the ideal number of discrete values for the price variable, the mean silhouette
# score is used (the closer to 1, the better the clustering). Below, the silhouette values
# obtained when price is discretized into 2, 3, 5, 8, 13 and 21 distinct values.
#%%
#########for i in [2,3,5,8,13,21]:
######### score=create_discretization_model_from_k_means_for_product_price(path_to_sqlite,i)['score']
######### print('Discretizando price para {i} valores distintos o valor da silueta média é {resu}.'.format(i=i,resu=score))
#Use Kmeans with 13 clusters to discretize price==============================
def update_column_price_group_with_groups_found_in_kmeans_for_price(path_to_sqlite):
conn = sqlite3.connect(path_to_sqlite)
model=create_discretization_model_from_k_means_for_product_price(path_to_sqlite,8)['model']
df=pd.read_sql_query("SELECT query_elo7_id,price, minimum_quantity FROM query_elo7;",conn)
df['price_log']=df['price'].apply(np.log)
df['price_group']=df['price_log'].apply(lambda x: model.predict([[x]])[0])
cur=conn.cursor()
progress=0
for index, row in df.iterrows():
cur.execute("UPDATE query_elo7 SET price_group={group} WHERE query_elo7_id={query_elo7_id}".format(group=row['price_group'],query_elo7_id=row['query_elo7_id']))
progress=progress+1
conn.commit()
conn.close()
#update_column_price_group_with_groups_found_in_kmeans_for_price(path_to_sqlite)
#end===========================================================================
#%%[markdown]
#As can be seen, splitting the price variable into eight groups gives a good silhouette
#result. Below, a dataframe with the minimum and maximum values of each of the eight
#price groups:
#########conn=sqlite3.connect(path_to_sqlite)
#########df=pd.read_sql_query("""SELECT price_group,
######### MIN(price),
######### MAX(price)
######### FROM query_elo7
######### GROUP BY price_group
######### ORDER BY MIN(price),MAX(price)
######### """,conn)
#########conn.close()
#########display(df)
#%%[markdown]
### Model for predicting the user's price intention with a decision tree
# Below, a model that infers the user's price intention; like the product
# categorization model, it was built with a decision tree and the K-fold
# methodology:
#%%
def get_data_to_validation_to_use_in_the_model_to_predict_price(path_to_sqlite3):
query_to_get_words_histograms="""
WITH vector_query AS (
SELECT query_elo7.query_elo7_id,
vector_element.position_in_vector,
SUM(vector_element.value) AS value
FROM vector_element
INNER JOIN query_elo7 ON query_elo7.query_elo7_id=vector_element.query_elo7_id
WHERE query_elo7.hash_for_query_elo7_id>=15
GROUP BY query_elo7.query_elo7_id,vector_element.position_in_vector
ORDER BY query_elo7.query_elo7_id,vector_element.position_in_vector
)
SELECT query_elo7_id,
position_in_vector,
value
FROM vector_query
WHERE value>0
"""
query_to_get_varied_information="""
SELECT query_elo7_id,
price_group
FROM query_elo7;
"""
products_dict_validation=merge_query_to_get_words_histograms_and_query_to_get_varied_information_into_dictonaries(path_to_sqlite3,query_to_get_words_histograms,query_to_get_varied_information)
return transform_products_load_as_dict_to_dataframeX_variable_to_fit_Y(products_dict_validation, column_to_be_y='price_group')
def get_data_to_validation_to_use_in_the_model_to_regress_price(path_to_sqlite3):
query_to_get_words_histograms="""
WITH vector_query AS (
SELECT query_elo7.query_elo7_id,
vector_element.position_in_vector,
SUM(vector_element.value) AS value
FROM vector_element
INNER JOIN query_elo7 ON query_elo7.query_elo7_id=vector_element.query_elo7_id
WHERE query_elo7.hash_for_query_elo7_id>=15
GROUP BY query_elo7.query_elo7_id,vector_element.position_in_vector
ORDER BY query_elo7.query_elo7_id,vector_element.position_in_vector
)
SELECT query_elo7_id,
position_in_vector,
value
FROM vector_query
WHERE value>0
"""
query_to_get_varied_information="""
SELECT query_elo7_id,
price
FROM query_elo7;
"""
products_dict_validation=merge_query_to_get_words_histograms_and_query_to_get_varied_information_into_dictonaries(path_to_sqlite3,query_to_get_words_histograms,query_to_get_varied_information)
return transform_products_load_as_dict_to_dataframeX_variable_to_fit_Y(products_dict_validation, column_to_be_y='price')
def get_data_to_train_and_test_to_use_in_the_model_to_predict_prices(path_to_sqlite3,folder):
query_to_get_varied_information="""
SELECT query_elo7_id,
price_group
FROM query_elo7;
"""
query_to_get_words_histograms_to_train="""
WITH vector_query AS (
SELECT query_elo7.query_elo7_id,
vector_element.position_in_vector,
SUM(vector_element.value) AS value
FROM vector_element
INNER JOIN query_elo7 ON query_elo7.query_elo7_id=vector_element.query_elo7_id
WHERE query_elo7.hash_for_query_elo7_id<15 AND query_elo7.hash_for_query_elo7_id NOT IN ({folder1},{folder2})
GROUP BY query_elo7.query_elo7_id,vector_element.position_in_vector
ORDER BY query_elo7.query_elo7_id,vector_element.position_in_vector
)
SELECT query_elo7_id,
position_in_vector,
value
FROM vector_query
WHERE value>0
""".format(folder1=folder*2,folder2=(folder*2)+1)
products_dict_train=merge_query_to_get_words_histograms_and_query_to_get_varied_information_into_dictonaries(path_to_sqlite3,query_to_get_words_histograms_to_train,query_to_get_varied_information)
query_to_get_words_histograms_to_test="""
WITH vector_query AS (
SELECT query_elo7.query_elo7_id,
vector_element.position_in_vector,
SUM(vector_element.value) AS value
FROM vector_element
INNER JOIN query_elo7 ON query_elo7.query_elo7_id=vector_element.query_elo7_id
WHERE query_elo7.hash_for_query_elo7_id<15 AND query_elo7.hash_for_query_elo7_id IN ({folder1},{folder2})
GROUP BY query_elo7.query_elo7_id,vector_element.position_in_vector
ORDER BY query_elo7.query_elo7_id,vector_element.position_in_vector
)
SELECT query_elo7_id,
position_in_vector,
value
FROM vector_query
WHERE value>0
""".format(folder1=folder*2,folder2=(folder*2)+1)
products_dict_test=merge_query_to_get_words_histograms_and_query_to_get_varied_information_into_dictonaries(path_to_sqlite3,query_to_get_words_histograms_to_test,query_to_get_varied_information)
return {
"train":transform_products_load_as_dict_to_dataframeX_variable_to_fit_Y(products_dict_train, column_to_be_y='price_group'),
"test":transform_products_load_as_dict_to_dataframeX_variable_to_fit_Y(products_dict_test, column_to_be_y='price_group')
}
def get_data_to_train_and_test_to_use_in_the_model_to_regress_prices(path_to_sqlite3,folder):
query_to_get_varied_information="""
SELECT query_elo7_id,
price
FROM query_elo7;
"""
query_to_get_words_histograms_to_train="""
WITH vector_query AS (
SELECT query_elo7.query_elo7_id,
vector_element.position_in_vector,
SUM(vector_element.value) AS value
FROM vector_element
INNER JOIN query_elo7 ON query_elo7.query_elo7_id=vector_element.query_elo7_id
WHERE query_elo7.hash_for_query_elo7_id<15 AND query_elo7.hash_for_query_elo7_id NOT IN ({folder1},{folder2})
GROUP BY query_elo7.query_elo7_id,vector_element.position_in_vector
ORDER BY query_elo7.query_elo7_id,vector_element.position_in_vector
)
SELECT query_elo7_id,
position_in_vector,
value
FROM vector_query
WHERE value>0
""".format(folder1=folder*2,folder2=(folder*2)+1)
products_dict_train=merge_query_to_get_words_histograms_and_query_to_get_varied_information_into_dictonaries(path_to_sqlite3,query_to_get_words_histograms_to_train,query_to_get_varied_information)
query_to_get_words_histograms_to_test="""
WITH vector_query AS (
SELECT query_elo7.query_elo7_id,
vector_element.position_in_vector,
SUM(vector_element.value) AS value
FROM vector_element
INNER JOIN query_elo7 ON query_elo7.query_elo7_id=vector_element.query_elo7_id
WHERE query_elo7.hash_for_query_elo7_id<15 AND query_elo7.hash_for_query_elo7_id IN ({folder1},{folder2})
GROUP BY query_elo7.query_elo7_id,vector_element.position_in_vector
ORDER BY query_elo7.query_elo7_id,vector_element.position_in_vector
)
SELECT query_elo7_id,
position_in_vector,
value
FROM vector_query
WHERE value>0
""".format(folder1=folder*2,folder2=(folder*2)+1)
products_dict_test=merge_query_to_get_words_histograms_and_query_to_get_varied_information_into_dictonaries(path_to_sqlite3,query_to_get_words_histograms_to_test,query_to_get_varied_information)
return {
"train":transform_products_load_as_dict_to_dataframeX_variable_to_fit_Y(products_dict_train, column_to_be_y='price'),
"test":transform_products_load_as_dict_to_dataframeX_variable_to_fit_Y(products_dict_test, column_to_be_y='price')
}
def define_better_model_to_predict_price_from_text_in_query(path_to_sqlite,data_validation,dropColumnsFromDataTrainAndTest=[]):
bestModel=None
averageAcuracyForBestModel=None
for i in range(7):
data_train_test=get_data_to_train_and_test_to_use_in_the_model_to_predict_prices(path_to_sqlite,i)
if(len(dropColumnsFromDataTrainAndTest)>0):
data_train_test['train']['dataframeX']=data_train_test['train']['dataframeX'].drop(dropColumnsFromDataTrainAndTest, axis=1)
clf = tree.DecisionTreeClassifier()
clf = clf.fit(data_train_test['train']['dataframeX'], data_train_test['train']['Y'])
ypred=clf.predict(data_validation['dataframeX'])
        confusion_mat=confusion_matrix(data_validation['Y'], ypred, labels=pd.Series(list(ypred)+list(data_validation['Y'])).unique())
avg_acc=0.0
for i in range(len(confusion_mat)):
try:
avg_acc=avg_acc+(float(confusion_mat[i,i])/float(sum((confusion_mat[i,:]))))
except:
pass
avg_acc=avg_acc/float(len(confusion_mat))
if(averageAcuracyForBestModel== None or avg_acc>averageAcuracyForBestModel):
averageAcuracyForBestModel=avg_acc
bestModel=clf
return {'bestModel':bestModel,'averageAcuracy':averageAcuracyForBestModel}
def create_a_list_with_range_of_values_from_list_with_price_group(path_to_sqlite,list_with_price_group):
conn=sqlite3.connect(path_to_sqlite)
df= | pd.read_sql_query("""SELECT price_group,
MIN(price),
MAX(price)
FROM query_elo7
GROUP BY price_group
ORDER BY MIN(price),MAX(price)
""",conn) | pandas.read_sql_query |
import sys
import argparse
import pandas as pd
import numpy as np
import pyfsdb
import dnssplitter
splitter = dnssplitter.DNSSplitter()
splitter.init_tree()
def get_psl(x):
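    """Split a hostname with dnssplitter; returns a 3-element list (assumed order:
    prefix, registered domain, public suffix, matching the _pslpfx/_psldom/_pslpub
    columns below), or three NaNs when the lookup fails."""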
noval = [np.NaN, np.NaN, np.NaN]
try:
ret = splitter.search_tree(x)
if not ret or len(ret) != 3:
return noval
return ret
    except Exception:
        return noval
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--infile", help="Input file",
type=argparse.FileType('r'), action="append", default=None)
parser.add_argument("-o", "--outfile", help="Output file", type=argparse.FileType('w'), default="-")
parser.add_argument("-s", "--sortkey", help="Sort by key", type=str, default=None)
parser.add_argument("-H", "--addheader", help="Add FSDB header", action="store_true")
parser.add_argument("-p", "--pslkey", help="Split PSL key", type=str, action="append", default=None)
parser.add_argument("-M", "--pslmerged", help="Make merged psl columns", action="store_true")
p = parser.parse_args(argv)
# create empty dataframe
df = pd.DataFrame()
# Process each file and add to dataframe
for fp in p.infile:
db = pyfsdb.Fsdb(file_handle=fp)
rows = [r for r in db]
data = np.array(rows)
dfp = pd.DataFrame(data=data, columns=db.column_names)
df = pd.concat([df,dfp], ignore_index=True)
cols = list(df.columns)
# Sort dataframe
if p.sortkey:
df.sort_values(by=[p.sortkey], inplace=True)
# Move sortkey to first col
if p.sortkey in cols:
cols.remove(p.sortkey)
cols.insert(0, p.sortkey)
df = df[cols]
# Add psl where needed
if p.pslkey:
pslcols = ["_pslpfx", "_psldom", "_pslpub"]
dfncons = pd.DataFrame(columns=pslcols)
for k in p.pslkey:
vals = df[k].apply(lambda x: get_psl(x))
dfn = vals.to_frame()
listvals = dfn[k].values.tolist()
dfn_split = | pd.DataFrame(listvals, index=dfn.index, columns=pslcols) | pandas.DataFrame |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 18:21:32 2020
@author: dhbubu18
"""
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import os
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['font.size'] = 16
climate = 'LA'
file1 = '{0}/BICBase.txt'.format(climate)
#file2 = 'BICBase_{0}_hLoad_lHours/BICBase_ts.csv'.format(climate)
file3 = '{0}/BICMid.txt'.format(climate)
#file4 = 'BICMid_{0}_hLoad_lHours/BICMid_ts.csv'.format(climate)
file5 = '{0}/Guideline36.txt'.format(climate)
file6 = '{0}/Guideline36_small.txt'.format(climate)
#def load_results(result_file1, result_file2, resample=False):
def load_results(result_file1, result_file2=None, resample=False):
result_folder = '/mnt/hgfs/VMShare18/202010'
result_path1 = os.path.join(result_folder,result_file1)
df_ts = pd.read_csv(result_path1, index_col='Time')
df_ts.index = pd.to_datetime(df_ts.index,unit='s')
df_ts = df_ts[~df_ts.index.duplicated(keep='first')]
if result_file2:
result_path2 = os.path.join(result_folder,result_file2)
df_2 = pd.read_csv(result_path2, index_col='Time')
df_2.index = pd.to_datetime(df_2.index,unit='s')
df_2 = df_2[~df_2.index.duplicated(keep='first')]
print(df_ts)
print(df_2)
df = | pd.concat([df_ts,df_2],axis=1) | pandas.concat |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import unittest.mock as mock
from datetime import datetime, timedelta
from unittest import TestCase
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
from kats.detectors.stat_sig_detector import (
MultiStatSigDetectorModel,
StatSigDetectorModel,
SeasonalityHandler,
)
from kats.utils.simulator import Simulator
from parameterized.parameterized import parameterized
from operator import attrgetter
_SERIALIZED = b'{"n_control": 20, "n_test": 7, "time_unit": "s"}'
_SERIALIZED2 = b'{"n_control": 20, "n_test": 7, "time_unit": "s", "rem_season": false, "seasonal_period": "weekly", "use_corrected_scores": true, "max_split_ts_length": 500}'
class TestStatSigDetector(TestCase):
def setUp(self) -> None:
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
previous_seq = [date_start + timedelta(days=x) for x in range(60)]
values = np.random.randn(len(previous_seq))
self.ts_init = TimeSeriesData(
pd.DataFrame({"time": previous_seq[0:30], "value": values[0:30]})
)
self.ts_later = TimeSeriesData(
pd.DataFrame({"time": previous_seq[30:35], "value": values[30:35]})
)
self.ss_detect = StatSigDetectorModel(n_control=20, n_test=7)
def test_detector(self) -> None:
np.random.seed(100)
pred_later = self.ss_detect.fit_predict(
historical_data=self.ts_init, data=self.ts_later
)
self.ss_detect.visualize()
# prediction returns scores of same length
self.assertEqual(len(pred_later.scores), len(self.ts_later))
def test_logging(self) -> None:
np.random.seed(100)
date_start_str = "2020-03-01"
date_start = datetime.strptime(date_start_str, "%Y-%m-%d")
num_seq = 3
previous_seq = [date_start + timedelta(days=x) for x in range(60)]
values = [np.random.randn(len(previous_seq)) for _ in range(num_seq)]
ts_init = TimeSeriesData(
pd.DataFrame(
{
**{"time": previous_seq[0:30]},
**{f"value_{i}": values[i][0:30] for i in range(num_seq)},
}
)
)
ts_later = TimeSeriesData(
pd.DataFrame(
{
**{"time": previous_seq[30:35]},
**{f"value_{i}": values[i][30:35] for i in range(num_seq)},
}
)
)
self.assertEqual(self.ss_detect.n_test, 7)
with self.assertRaises(ValueError):
self.ss_detect.fit_predict(historical_data=ts_init, data=ts_later)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
@parameterized.expand(
[
["n_control", 20],
["n_test", 7],
["time_unit", "s"],
["rem_season", False],
["seasonal_period", "weekly"],
]
)
def test_load_from_serialized(self, attribute:str, expected:object) -> None:
detector = StatSigDetectorModel(serialized_model=_SERIALIZED)
self.assertEqual(attrgetter(attribute)(detector), expected)
def test_serialize(self) -> None:
detector = StatSigDetectorModel(n_control=20, n_test=7, time_unit="s")
self.assertEqual(_SERIALIZED2, detector.serialize())
def test_missing_values(self) -> None:
with self.assertRaises(ValueError):
_ = StatSigDetectorModel()
def test_visualize_unpredicted(self) -> None:
detector = StatSigDetectorModel(n_control=20, n_test=7)
with self.assertRaises(ValueError):
detector.visualize()
def test_missing_time_unit(self) -> None:
detector = StatSigDetectorModel(n_control=20, n_test=7)
with mock.patch.object(detector, "_set_time_unit"):
with self.assertRaises(ValueError):
detector.fit_predict(data=self.ts_later, historical_data=self.ts_init)
def test_no_update(self) -> None:
detector = StatSigDetectorModel(n_control=20, n_test=7)
with mock.patch.object(detector, "_should_update") as su:
su.return_value = False
result = detector.fit_predict(
data=self.ts_later, historical_data=self.ts_init
)
self.assertEqual(detector.response, result)
def test_fallback_on_historical_time_unit(self) -> None:
data = TimeSeriesData(
pd.DataFrame(
{
"time": [
datetime(2021, 1, 1),
datetime(2021, 1, 2),
datetime(2021, 2, 1),
],
"values": [0, 1, 2],
}
)
)
detector = StatSigDetectorModel(n_control=20, n_test=7)
detector.fit_predict(data=data, historical_data=self.ts_init)
self.assertEqual("D", detector.time_unit)
def test_remove_season(self) -> None:
sim3 = Simulator(n=120, start="2018-01-01")
ts3 = sim3.level_shift_sim(
cp_arr=[60],
level_arr=[1.35, 1.05],
noise=0.05,
seasonal_period=7,
seasonal_magnitude=0.575,
)
n_control = 14 * 86400
n_test = 14 * 86400
ss_detect5 = StatSigDetectorModel(
n_control=n_control,
n_test=n_test,
time_unit="sec",
rem_season=True,
seasonal_period="biweekly",
)
anom3 = ss_detect5.fit_predict(data=ts3)
self.assertEqual(np.min(anom3.scores.value.values) < -5, True)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
@parameterized.expand(
[
["weekly", 0.1],
["daily"],
]
)
def test_season_handler(self, period:str, lpj_factor:float=0.1) -> None:
sim3 = Simulator(n=120, start="2018-01-01")
ts3 = sim3.level_shift_sim(
cp_arr=[60],
level_arr=[1.35, 1.05],
noise=0.05,
seasonal_period=7,
seasonal_magnitude=0.575,
)
with self.assertRaises(ValueError):
if period == "weekly":
SeasonalityHandler(data=ts3, seasonal_period=period, lpj_factor=lpj_factor)
else:
SeasonalityHandler(data=ts3, seasonal_period=period)
class TestStatSigDetectorPMM(TestCase):
def setUp(self) -> None:
random.seed(100)
time_unit = 86400
hist_data_time = [x * time_unit for x in range(0, 28)]
data_time = [x * time_unit for x in range(28, 35)]
hist_data_value = [random.normalvariate(100, 10) for _ in range(0, 28)]
data_value = [random.normalvariate(130, 10) for _ in range(28, 35)]
self.hist_ts = TimeSeriesData(
time=pd.Series(hist_data_time),
value=pd.Series(hist_data_value),
use_unix_time=True,
unix_time_units="s",
)
self.data_ts = TimeSeriesData(
time=pd.Series(data_time),
value=pd.Series(data_value),
use_unix_time=True,
unix_time_units="s",
)
# default
pmm_model = StatSigDetectorModel(n_control=20 * 86400, n_test=7 * 86400, time_unit="S")
self.pred_default = pmm_model.fit_predict(historical_data=self.hist_ts, data=self.data_ts)
# remove seasonality
pmm_no_seasonality_model = StatSigDetectorModel(
n_control=20 * 86400,
n_test=7 * 86400,
time_unit="S",
rem_season=True,
seasonal_period="weekly",
)
self.pred_no_seasonality = pmm_no_seasonality_model.fit_predict(historical_data=self.hist_ts, data=self.data_ts)
# no history
pmm_no_history_model = StatSigDetectorModel(
n_control=10 * 86400, n_test=10 * 86400, time_unit="S"
)
self.pred_no_history = pmm_no_history_model.fit_predict(data=self.hist_ts)
# no history, remove seasonality
pmm_no_history_no_seasonality_model = StatSigDetectorModel(
n_control=10 * 86400,
n_test=10 * 86400,
time_unit="S",
rem_season=True,
seasonal_period="weekly",
)
self.pred_no_history_no_seasonality = pmm_no_history_no_seasonality_model.fit_predict(data=self.hist_ts)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
@parameterized.expand(
[
["pred_default", "data_ts"],
["pred_no_seasonality", "data_ts"],
["pred_no_history", "hist_ts"],
["pred_no_history_no_seasonality", "hist_ts"],
]
)
def test_pmm_length(self, attr_pred: str, attr_actual: str) -> None:
self.assertEqual(len(attrgetter(attr_pred)(self).scores), len(attrgetter(attr_actual)(self)))
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
@parameterized.expand(
[
["pred_default"],
["pred_no_seasonality"],
]
)
def test_pmm_max(self, attr_pred: str) -> None:
self.assertTrue(attrgetter(attr_pred)(self).scores.value.values.max() > 2.0)
class TestStatSigDetectorBigData(TestCase):
def setUp(self) -> None:
n_control = 28
n_test = 7
random.seed(0)
control_time = pd.date_range(
start="2018-01-06", freq="D", periods=(n_control + n_test - 5)
)
test_time = | pd.date_range(start="2018-02-05", freq="D", periods=500) | pandas.date_range |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib as mpl
import netCDF4 as nc
import datetime as dt
from salishsea_tools import evaltools as et, places, viz_tools, visualisations, geo_tools
import xarray as xr
import pandas as pd
import pickle
import os
import gsw
# Extracting winds from the correct path
def getWindVarsYear(year,loc):
''' Given a year, returns the correct directory and nam_fmt for wind forcing as well as the
location of S3 on the corresponding grid.
Parameters:
year: a year value in integer form
loc: the location name as a string. Eg. loc='S3'
Returns:
jW: y-coordinate for the location
iW: x-coordinate for the location
opsdir: path to directory where wind forcing file is stored
nam_fmt: naming convention of the appropriate files
'''
if year>2014:
opsdir='/results/forcing/atmospheric/GEM2.5/operational/'
nam_fmt='ops'
jW,iW=places.PLACES[loc]['GEM2.5 grid ji']
else:
opsdir='/data/eolson/results/MEOPAR/GEMLAM/'
nam_fmt='gemlam'
with xr.open_dataset('/results/forcing/atmospheric/GEM2.5/gemlam/gemlam_y2012m03d01.nc') as gridrefWind:
# always use a post-2011 file here to identify station grid location
lon,lat=places.PLACES[loc]['lon lat']
jW,iW=geo_tools.find_closest_model_point(lon,lat,
gridrefWind.variables['nav_lon'][:,:]-360,gridrefWind.variables['nav_lat'][:,:],
grid='GEM2.5')
# the -360 is needed because longitudes in this case are reported in postive degrees East
return jW,iW,opsdir,nam_fmt
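# --- Added illustration (not part of the original module) ---
# A minimal usage sketch for getWindVarsYear: the year and station below are arbitrary
# examples, and the file-name pattern assembled here is only an assumed illustration of how
# opsdir and nam_fmt might be combined; it is not a documented convention.
def _example_getWindVarsYear_usage():
    jW, iW, opsdir, nam_fmt = getWindVarsYear(2016, 'S3')
    # jW, iW index the station on the chosen wind grid; opsdir/nam_fmt select the forcing files
    example_file = os.path.join(opsdir, '{0}_y2016m03d01.nc'.format(nam_fmt))
    return jW, iW, example_file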
# Metric 1:
def metric1_bloomtime(phyto_alld,no3_alld,bio_time):
''' Given datetime array and two 2D arrays of phytoplankton and nitrate concentrations, over time
and depth, returns a datetime value of the spring phytoplankton bloom date according to the
following definition (now called 'metric 1'):
'The spring bloom date is the peak phytoplankton concentration (averaged from the surface to
3 m depth) within four days of the average upper 3 m nitrate concentration going below 0.5 uM
(the half-saturation concentration) for two consecutive days'
EDIT: 0.5 uM was changed to 2.0 uM to yield more accurate results
Parameters:
phyto_alld: 2D array of phytoplankton concentrations (in uM N) over all depths and time
range of 'bio_time'
no3_alld: 2D array of nitrate concentrations (in uM N) over all depths and time
range of 'bio_time'
bio_time: 1D datetime array of the same time frame as phyto_alld and no3_alld
Returns:
bloomtime1: the spring bloom date as a single datetime value
'''
# a) get avg phytplankton in upper 3m
phyto_alld_df=pd.DataFrame(phyto_alld)
upper_3m_phyto=pd.DataFrame(phyto_alld_df[[0,1,2,3]].mean(axis=1))
upper_3m_phyto.columns=['upper_3m_phyto']
#upper_3m_phyto
# b) get average no3 in upper 3m
no3_alld_df=pd.DataFrame(no3_alld)
upper_3m_no3=pd.DataFrame(no3_alld_df[[0,1,2,3]].mean(axis=1))
upper_3m_no3.columns=['upper_3m_no3']
#upper_3m_no3
# make bio_time into a dataframe
bio_time_df=pd.DataFrame(bio_time)
bio_time_df.columns=['bio_time']
metric1_df=pd.concat((bio_time_df,upper_3m_phyto,upper_3m_no3), axis=1)
# c) Find first location where nitrate crosses below 0.5 micromolar and
# stays there for 2 days
# NOTE: changed the value to 2 micromolar
location1=np.nan
for i, row in metric1_df.iterrows():
try:
if metric1_df['upper_3m_no3'].iloc[i]<2 and metric1_df['upper_3m_no3'].iloc[i+1]<2:
location1=i
break
except IndexError:
location1=np.nan
print('bloom not found')
# d) Find date with maximum phytoplankton concentration within four days (say 9 day window) of date in c)
if np.isnan(location1):
bloomrange=np.nan
bloomtime1=np.nan
else:
bloomrange=metric1_df[location1-4:location1+5]
bloomtime1=bloomrange.loc[bloomrange.upper_3m_phyto.idxmax(), 'bio_time']
return bloomtime1
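# --- Added illustration (not part of the original module) ---
# A minimal usage sketch for metric1_bloomtime, assuming time along axis 0 and depth along
# axis 1 (indices 0-3 covering the upper 3 m). The synthetic arrays below are hypothetical and
# only demonstrate the expected shapes and types.
def _example_metric1_usage():
    bio_time = np.array([dt.datetime(2015, 2, 1) + dt.timedelta(days=d) for d in range(120)])
    ndays, ndepths = len(bio_time), 40
    rng = np.random.default_rng(42)
    phyto_alld = rng.random((ndays, ndepths))                                   # uM N
    no3_alld = np.linspace(20.0, 0.0, ndays)[:, None] * np.ones((1, ndepths))   # nitrate drawdown
    return metric1_bloomtime(phyto_alld, no3_alld, bio_time)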
# Metric 2:
def metric2_bloomtime(phyto_alld,no3_alld,bio_time):
''' Given datetime array and two 2D arrays of phytoplankton and nitrate concentrations, over time
and depth, returns a datetime value of the spring phytoplankton bloom date according to the
following definition (now called 'metric 2'):
'The first peak in which chlorophyll concentrations in upper 3m are above 5 ug/L for more than two days'
Parameters:
phyto_alld: 2D array of phytoplankton concentrations (in uM N) over all depths and time
range of 'bio_time'
no3_alld: 2D array of nitrate concentrations (in uM N) over all depths and time
range of 'bio_time'
bio_time: 1D datetime array of the same time frame as phyto_alld and no3_alld
Returns:
bloomtime2: the spring bloom date as a single datetime value
'''
# a) get avg phytplankton in upper 3m
phyto_alld_df=pd.DataFrame(phyto_alld)
upper_3m_phyto=pd.DataFrame(phyto_alld_df[[0,1,2,3]].mean(axis=1))
upper_3m_phyto.columns=['sphyto']
#upper_3m_phyto
# b) get average no3 in upper 3m
no3_alld_df=pd.DataFrame(no3_alld)
upper_3m_no3=pd.DataFrame(no3_alld_df[[0,1,2,3]].mean(axis=1))
upper_3m_no3.columns=['sno3']
#upper_3m_no3
# make bio_time into a dataframe
bio_time_df=pd.DataFrame(bio_time)
bio_time_df.columns=['bio_time']
df=pd.concat((bio_time_df,upper_3m_phyto,upper_3m_no3), axis=1)
# to find all the peaks:
df['phytopeaks'] = df.sphyto[(df.sphyto.shift(1) < df.sphyto) & (df.sphyto.shift(-1) < df.sphyto)]
# need to convert the value of interest from ug/L to uM N (conversion factor: 1.8 ug Chl per umol N)
chlvalue=5/1.8
# extract the bloom time date
for i, row in df.iterrows():
try:
if df['sphyto'].iloc[i-1]>chlvalue and df['sphyto'].iloc[i-2]>chlvalue and pd.notna(df['phytopeaks'].iloc[i]):
bloomtime2=df.bio_time[i]
break
elif df['sphyto'].iloc[i+1]>chlvalue and df['sphyto'].iloc[i+2]>chlvalue and pd.notna(df['phytopeaks'].iloc[i]):
bloomtime2=df.bio_time[i]
break
except IndexError:
bloomtime2=np.nan
print('bloom not found')
return bloomtime2
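# --- Added note (not part of the original module) ---
# The 5 ug/L chlorophyll threshold used above is converted to uM N with the stated ratio of
# 1.8 ug Chl per umol N, i.e. 5 / 1.8 ~= 2.78 uM N. The helper below is only an illustrative
# sketch of that conversion; it is hypothetical and not used elsewhere in this module.
def _chl_ugL_to_uM_N(chl_ugl, ug_chl_per_umol_n=1.8):
    return chl_ugl / ug_chl_per_umol_n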
# Metric 3:
def metric3_bloomtime(sphyto,sno3,bio_time):
''' Given datetime array and two 1D arrays of surface phytplankton and nitrate concentrations
over time, returns a datetime value of the spring phytoplankton bloom date according to the
following definition (now called 'metric 3'):
'The median + 5% of the annual Chl concentration is deemed “threshold value” for each year.
For a given year, bloom initiation is determined to be the week that first reaches the
threshold value (by looking at weekly averages) as long as one of the two following weeks
was >70% of the threshold value'
Parameters:
sphyto: 1D array of phytoplankton concentrations (in uM N) over time
range of 'bio_time'
sno3: 1D array of nitrate concentrations (in uM N) over time
range of 'bio_time'
bio_time: 1D datetime array of the same time frame as sphyto and sno3
Returns:
bloomtime3: the spring bloom date as a single datetime value
'''
# 1) determine threshold value
df = pd.DataFrame({'bio_time':bio_time, 'sphyto':sphyto, 'sno3':sno3})
# a) find median chl value of that year, add 5% (this is only feb-june, should we do the whole year?)
threshold=df['sphyto'].median()*1.05
# b) secondthresh = find 70% of threshold value
secondthresh=threshold*0.7
# 2) Take the average of each week and make a dataframe with start date of week and weekly average
weeklychl = pd.DataFrame(df.resample('W', on='bio_time').sphyto.mean())
weeklychl.reset_index(inplace=True)
# 3) Loop through the weeks and find the first week that reaches the threshold.
# Is one of the two week values after this week > secondthresh?
for i, row in weeklychl.iterrows():
try:
if weeklychl['sphyto'].iloc[i]>threshold and weeklychl['sphyto'].iloc[i+1]>secondthresh:
bloomtime3=weeklychl.bio_time[i]
break
elif weeklychl['sphyto'].iloc[i]>threshold and weeklychl['sphyto'].iloc[i+2]>secondthresh:
bloomtime3=weeklychl.bio_time[i]
break
except IndexError:
bloomtime3=np.nan
print('bloom not found')
return bloomtime3
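# --- Added illustration (not part of the original module) ---
# A small sketch of the metric 3 threshold logic on a toy weekly series: the threshold is the
# median plus 5%, and the follow-up check uses 70% of that threshold. Values are hypothetical.
def _example_metric3_thresholds():
    weekly_sphyto = pd.Series([1.0, 1.2, 1.1, 4.0, 5.0, 6.0, 2.0])
    threshold = weekly_sphyto.median() * 1.05      # median + 5%
    secondthresh = threshold * 0.7                 # 70% of the threshold
    first_week_over = (weekly_sphyto > threshold).idxmax()
    return threshold, secondthresh, first_week_over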
# Surface monthly average calculation given 2D array with depth and time:
def D2_3monthly_avg(time,x):
''' Given datetime array of 3 months and a 2D array of variable x, over time
and depth, returns an array containing the 3 monthly averages of the
surface values of variable x
Parameters:
time: datetime array of each day starting from the 1st day
of the first month, ending on the last day of the third month
x: 2-dimensional numpy array containing daily averages of the
same length and time frame as 'time', and depth profile
Returns:
jan_x, feb_x, mar_x: monthly averages of variable x at surface
'''
depthx= | pd.DataFrame(x) | pandas.DataFrame |
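# --- Added sketch (the original function body is truncated above) ---
# A minimal completion of the computation described in the docstring, assuming the surface is
# depth index 0 of x and that 'time' spans exactly three calendar months. This is an assumed
# illustration, not the original implementation.
def _sketch_D2_3monthly_avg(time, x):
    surface = pd.Series(pd.DataFrame(x)[0].values, index=pd.DatetimeIndex(time))
    monthly = surface.resample('MS').mean()        # one average per calendar month
    jan_x, feb_x, mar_x = monthly.iloc[0], monthly.iloc[1], monthly.iloc[2]
    return jan_x, feb_x, mar_x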
from datetime import datetime
import os
import re
import numpy as np
import pandas as pd
from fetcher.extras.common import MaRawData, zipContextManager
from fetcher.utils import Fields, extract_arcgis_attributes
NULL_DATE = datetime(2020, 1, 1)
DATE = Fields.DATE.name
TS = Fields.TIMESTAMP.name
DATE_USED = Fields.DATE_USED.name
def add_query_constants(df, query):
for k, v in query.constants.items():
df[k] = v
return df
def build_leveled_mapping(mapping):
tab_mapping = {x.split(":")[0]: {} for x in mapping.keys() if x.find(':') > 0}
for k, v in mapping.items():
if k.find(':') < 0:
continue
tab, field = k.split(":")
tab_mapping[tab][field] = v
return tab_mapping
def prep_df(values, mapping):
df = pd.DataFrame(values).rename(columns=mapping).set_index(DATE)
for c in df.columns:
if c.find('status') >= 0:
continue
# convert to numeric
df[c] = pd.to_numeric(df[c])
df.index = pd.to_datetime(df.index, errors='coerce')
return df
def make_cumsum_df(data, timestamp_field=Fields.TIMESTAMP.name):
df = pd.DataFrame(data)
df.set_index(timestamp_field, inplace=True)
df.sort_index(inplace=True)
df = df.select_dtypes(exclude=['string', 'object'])
# .groupby(level=0).last() # can do it here, but not mandatory
cumsum_df = df.cumsum()
cumsum_df[Fields.TIMESTAMP.name] = cumsum_df.index
return cumsum_df
def handle_ak(res, mapping):
tests = res[0]
collected = [x['attributes'] for x in tests['features']]
df = pd.DataFrame(collected)
df = df.pivot(columns='Test_Result', index='Date_Collected')
df.columns = df.columns.droplevel()
df['tests_total'] = df.sum(axis=1)
df = df.rename(columns=mapping).cumsum()
df[TS] = df.index
df[DATE_USED] = 'Specimen Collection'
tagged = df.to_dict(orient='records')
return tagged
def handle_ar(res, mapping):
# simply a cumsum table
data = extract_arcgis_attributes(res[0], mapping)
cumsum_df = make_cumsum_df(data)
return cumsum_df.to_dict(orient='records')
def handle_ct(res, mapping, queries):
tests = res[0]
df = pd.DataFrame(tests).rename(columns=mapping).set_index(DATE)
for c in df.columns:
# convert to numeric
df[c] = pd.to_numeric(df[c])
df.index = df.index.fillna(NULL_DATE.strftime(mapping.get('__strptime')))
df = df.sort_index().cumsum()
df[TS] = pd.to_datetime(df.index)
df[TS] = df[TS].values.astype(np.int64) // 10 ** 9
add_query_constants(df, queries[0])
tagged = df.to_dict(orient='records')
# by report
df = res[1].rename(columns=mapping).sort_values('DATE')
add_query_constants(df, queries[1])
df[TS] = df['DATE']
tagged.extend(df.to_dict(orient='records'))
# death + cases
for i, df in enumerate(res[2:]):
df = res[2+i].rename(columns=mapping).set_index('DATE').sort_index().cumsum()
add_query_constants(df, queries[2+i])
df[TS] = df.index
tagged.extend(df.to_dict(orient='records'))
return tagged
def handle_dc(res, mapping, queries):
df = res[0]
# make it pretty
df = df[df['Unnamed: 0'] == 'Testing'].T
df.columns = df.loc['Unnamed: 1']
df = df.iloc[2:]
df.index = | pd.to_datetime(df.index, errors='coerce') | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Importing modules
import os
import sys
import numpy as np
import pandas as pd
import tqdm
import gc
import csv
import argparse
import scipy
import scipy.stats
# This function is to read the file and to get the DataFrame with unweighted counts
# Input:
# - input_file_path: Path to the file with reads and their corresponding transcripts (total_summary.txt file)
# Output: DataFrame with unweighted counts without 'Total' and 'N/A' rows and columns
def FileRead(input_file_path):
# Reading the file to DataFrame
to_return = pd.read_csv(input_file_path, sep="\t", header=0)
return to_return
# This function is used to normalize the counts using TMM
# Input:
# - input_downweighted_df: DataFrame with downweighted counts
# Output: DataFrame with normalized read counts
def TMMNormalization(input_downweighted_df):
# Adding small number to prevent doing log(0)
final_df = input_downweighted_df.copy()
# Output DataFrames
first_df = pd.DataFrame()
geom_mean_df = pd.DataFrame()
# Iterate over the transcripts (rows) to obtain a DataFrame of downweighted read counts divided by the geometric mean of each transcript
for i in range(final_df.shape[0]):
# Obtain list of counts for a particular transcript
transcript_counts = list(final_df.iloc[i,2:])
# Obtain geometric mean
transcript_geom_mean = scipy.stats.mstats.gmean(transcript_counts)
# If the geometric mean of the transcript is equal 0, omit the transcript
if transcript_geom_mean == 0:
del transcript_counts, transcript_geom_mean
continue
# Creating output DataFrames
transcript_geom_mean_df = pd.DataFrame({0: [final_df.iloc[i,0]], 1: [final_df.iloc[i,1]] , 2: [transcript_geom_mean]})
geom_mean_df = geom_mean_df.append(transcript_geom_mean_df)
del transcript_geom_mean_df
# Obtain DataFrame with downweighted read counts divided by the geometric mean of the transcript
temp_df = pd.DataFrame([final_df.iloc[i,:]])
temp_df.iloc[:,2:] = temp_df.iloc[:,2:]/transcript_geom_mean
# Append the row to the final DataFrame
first_df = first_df.append(temp_df)
del transcript_counts, transcript_geom_mean, temp_df
del i
geom_mean_df.columns = ['#Transcript_splicing_pattern', 'Transcript_ID', 'geometric_mean']
# Read sample names from column names of DataFrame
iteration_list = list(final_df.columns)[2:]
# Iterate over the sample name columns of the DataFrame with normalization factors and normalize the read counts by multiplying each read count by the median of the normalization factors per sample
sample_factor_df = | pd.DataFrame() | pandas.DataFrame |
# coding=utf-8
import unittest
import numpy as np
import pandas as pd
from clustermatch.utils.data import merge_sources
from .utils import get_data_file
class ReadTomateTest(unittest.TestCase):
def test_merge_sources_using_ps(self):
## Prepare
data_file = get_data_file('ps_2011_2012.csv')
## Run
ps_pro = merge_sources(data_file)[0]
## Validate
assert ps_pro is not None
assert hasattr(ps_pro, 'shape')
assert ps_pro.shape[0] == 10
assert ps_pro.shape[1] == 13
assert ps_pro.notnull().all().all()
# top left
assert ps_pro.round(3).loc['Arom-1', '552'] == 0.000
assert ps_pro.round(3).loc['Arom-1', '553'] == 0.000
assert ps_pro.round(3).loc['Arom-5', '552'] == 0.533
# top right
assert ps_pro.round(3).loc['Arom-1', 'Bigua'] == 0.111
assert ps_pro.round(3).loc['Arom-1', 'Elpida'] == 0.037
assert ps_pro.round(3).loc['Arom-5', 'Elpida'] == 0.296
# bottom right
assert ps_pro.round(3).loc['Jug-4', 'Bigua'] == 0.172
assert ps_pro.round(3).loc['Jug-4', 'Elpida'] == 0.586
assert ps_pro.round(3).loc['Jug-1', 'Elpida'] == 0.000
# bottom left
assert ps_pro.round(3).loc['Jug-4', '553'] == 0.158
assert ps_pro.round(3).loc['Jug-4', '552'] == 0.533
assert ps_pro.round(3).loc['Jug-1', '552'] == 0.000
def test_merge_sources_using_vo(self):
## Prepare
data_file = get_data_file('vo_2011_2012.csv')
## Run
vo_pro = merge_sources(data_file)[0]
## Validate
assert vo_pro is not None
assert hasattr(vo_pro, 'shape')
assert vo_pro.shape[0] == 42
assert vo_pro.shape[1] == 11
assert vo_pro.notnull().all().all()
# top left
assert vo_pro.round(3).loc['UNK 43', '552'] == 5.12
assert vo_pro.round(3).loc['UNK 43', '553'] == 4.77
assert vo_pro.round(3).loc['3mBUTANAL', '552'] == 0.000
# top right
assert vo_pro.round(3).loc['UNK 43', 'Bigua'] == 2.43
assert vo_pro.round(3).loc['UNK 43', 'Elpida'] == 3.40
assert vo_pro.round(3).loc['3mBUTANAL', 'Elpida'] == 1.34
# bottom right
assert vo_pro.round(3).loc['TRANS2HEXENAL', 'Bigua'] == 0.00
assert vo_pro.round(3).loc['TRANS2HEXENAL', 'Elpida'] == 7.11
assert vo_pro.round(3).loc['CIS2HEXENAL', 'Elpida'] == 0.00
# bottom left
assert vo_pro.round(3).loc['TRANS2HEXENAL', '553'] == 6.90
assert vo_pro.round(3).loc['TRANS2HEXENAL', '552'] == 5.40
assert vo_pro.round(3).loc['CIS2HEXENAL', '552'] == 0.000
def test_merge_sources_using_me_with_rep_merge_mean(self):
## Prepare
data_file = get_data_file('me_2011_2012.csv')
## Run
me_pro = merge_sources(data_file, rep_merge=np.mean)[0]
## Validate
assert me_pro is not None
assert hasattr(me_pro, 'shape')
assert me_pro.shape[0] == 89
assert me_pro.shape[1] == 44
# check all null values
assert pd.isnull(me_pro.loc['NA_2106.37', '3806'])
assert pd.isnull(me_pro.loc['NA_1608.87', '3815'])
assert pd.isnull(me_pro.loc['NA_2106.37', '4748'])
assert pd.isnull(me_pro.loc['Glucoheptonic acid-1.4-lactone', '4748'])
assert pd.isnull(me_pro.loc['NA_2106.37', '560'])
assert pd.isnull(me_pro.loc['Glucoheptonic acid-1.4-lactone', '560'])
# top left
assert me_pro.round(3).loc['serine', '549'] == 19.905
assert me_pro.round(3).loc['serine', '551'] == 13.735
# top right
assert me_pro.round(3).loc['serine', '4751'] == 38.439
assert me_pro.round(3).loc['Ethanolamine', '4751'] == 1.619
# bottom left
assert me_pro.round(3).loc['Sucrose', '549'] == 171.211
assert me_pro.round(3).loc['NA_2627.66', '549'] == 3.853
# bottom right
assert me_pro.round(3).loc['NA_2627.66', '4751'] == 5.018
assert me_pro.round(3).loc['NA_2627.66', '4750'] == 13.353
def test_merge_sources_using_ag(self):
## Prepare
data_file = get_data_file('ag_2011_2012.csv')
## Run
ag_pro = merge_sources(data_file)[0]
## Validate
assert ag_pro is not None
assert hasattr(ag_pro, 'shape')
assert ag_pro.shape[0] == 16
assert ag_pro.shape[1] == 19
# check all null values
# assert pd.isnull(ag_pro.loc['perim', '549'])
# top left
assert ag_pro.round(3).loc['peso', '549'] == 287.247
assert ag_pro.round(3).loc['peso', '550'] == 189.247
assert ag_pro.round(3).loc['perim', '549'] == 280.336
# top right
assert ag_pro.round(3).loc['peso', '572'] == 10.31
assert ag_pro.round(3).loc['firmeza', '572'] == 1.383
# bottom left
assert ag_pro.round(3).loc['a_cielab', '549'] == 44.870
assert ag_pro.round(3).loc['b_cielab', '549'] == 61.691
# bottom right
assert ag_pro.round(3).loc['b_cielab', '572'] == 57.386
assert ag_pro.round(3).loc['b_cielab', '571'] == 61.842
# Special cases
# all zeros
assert ag_pro.round(3).loc['area_indent', '572'] == 0.000
# values close to zero
assert ag_pro.round(3).loc['area_indent', '571'] == 0.038
def test_merge_sources_using_ap(self):
## Prepare
data_file = get_data_file('ap_2011_2012.csv')
## Run
ap_pro = merge_sources(data_file)[0]
## Validate
assert ap_pro is not None
assert hasattr(ap_pro, 'shape')
assert ap_pro.shape[0] == 7
assert ap_pro.shape[1] == 42
# check all null values
# assert pd.isnull(ag_pro.loc['perim', '549'])
# top left
assert ap_pro.round(3).loc['Peso', '549'] == 0.532
assert ap_pro.round(3).loc['Peso', '550'] == 0.620
# top right
assert ap_pro.round(3).loc['Peso', 'elpida'] == 0.540
assert ap_pro.round(3).loc['TEAC HID (meq. Trolox %)', 'elpida'] == 0.351
# bottom left
assert ap_pro.round(3).loc['carotenos (mg%)', '549'] == 0.260
assert ap_pro.round(3).loc['LICOP (mg%)', '549'] == 3.969
# bottom right
assert ap_pro.round(3).loc['carotenos (mg%)', 'elpida'] == 0.511
assert ap_pro.round(3).loc['carotenos (mg%)', 'bigua'] == 0.319
# Special cases
# a NaN in the middle
assert ap_pro.round(3).loc['TEAC LIP (meq. Trolox %)', '558'] == 0.029
def test_merge_sources_index_name(self):
## Prepare
data_file = get_data_file('ap_2011_2012.csv')
## Run
ap_pro = merge_sources(data_file)[0]
## Validate
assert ap_pro is not None
assert hasattr(ap_pro, 'index')
assert ap_pro.index.name == 'features'
def test_merge_source_returning_names_using_ag(self):
## Prepare
data_file = get_data_file('ag_2011_2012.csv')
## Run
ag_pro, ag_nom, _ = merge_sources(data_file)
## Validate
assert ag_pro is not None
assert ag_nom is not None
assert len(ag_nom) == 16
assert ag_nom[0] == 'peso'
assert ag_nom[1] == 'firmeza'
assert ag_nom[7] == 'area_indent'
assert ag_nom[14] == 'a_cielab'
assert ag_nom[15] == 'b_cielab'
def test_merge_source_returning_names_using_ap(self):
## Prepare
data_file = get_data_file('ap_2011_2012.csv')
## Run
ap_pro, ap_nom, _ = merge_sources(data_file)
## Validate
assert ap_pro is not None
assert ap_nom is not None
assert len(ap_nom) == 7
assert ap_nom[0] == 'Peso'
assert ap_nom[1] == 'TEAC HID (meq. Trolox %)'
assert ap_nom[2] == 'TEAC LIP (meq. Trolox %)'
assert ap_nom[3] == 'FRAP (meq. Trolox %)'
assert ap_nom[4] == 'FOLIN (mg Ac Galico/100g)'
assert ap_nom[5] == 'LICOP (mg%)'
assert ap_nom[6] == 'carotenos (mg%)'
def test_merge_source_returning_names_using_ap_ps(self):
## Prepare
data_files = [get_data_file('ap_2011_2012.csv'),
get_data_file('ps_2011_2012.csv')]
## Run
pro, nom, _ = merge_sources(data_files)
## Validate
assert pro is not None
assert nom is not None
assert len(nom) == 7 + 10
ap_var_names = ['Peso', 'TEAC HID (meq. Trolox %)', 'TEAC LIP (meq. Trolox %)',
'FRAP (meq. Trolox %)', 'FOLIN (mg Ac Galico/100g)', 'LICOP (mg%)',
'carotenos (mg%)']
if not (ap_var_names == nom[:7] or ap_var_names == nom[-7:]):
self.fail('ap variables not found')
ps_var_names = ['Arom-1', 'Arom-5', 'Sab-1', 'Sab-5', 'Dulz-1', 'Dulz-5', 'Acid-1',
'Acid-5', 'Jug-1', 'Jug-4']
if not (ps_var_names == nom[:10] or ps_var_names == nom[-10:]):
self.fail('ap variables not found')
def test_merge_source_returning_sources_using_ap_ps(self):
## Prepare
data_files = [get_data_file('ap_2011_2012.csv'),
get_data_file('ps_2011_2012.csv')]
## Run
pro, nom, sources = merge_sources(data_files)
## Validate
assert pro is not None
assert nom is not None
assert sources is not None
assert len(sources) == 7 + 10
assert len(set(sources)) == 2 # unique source names
assert 'ps_2011_2012' in sources
assert 'ap_2011_2012' in sources
if sources[0] == 'ps_2011_2012':
assert len(set(sources[:10])) == 1
assert 'ps_2011_2012' in set(sources[:10])
assert len(set(sources[-7:])) == 1
assert 'ap_2011_2012' in set(sources[-7:])
else:
assert len(set(sources[:7])) == 1
assert 'ap_2011_2012' in set(sources[:7])
assert len(set(sources[-10:])) == 1
assert 'ps_2011_2012' in set(sources[-10:])
def test_merge_sources_multiple_using_ps_vo(self):
## Prepare
ps_data_file = get_data_file('ps_2011_2012.csv')
vo_data_file = get_data_file('vo_2011_2012.csv')
fuentes = [ps_data_file, vo_data_file]
## Run
procesado, nombres, _ = merge_sources(fuentes)
## Validate
assert procesado is not None
assert hasattr(procesado, 'shape')
assert procesado.shape[0] == 10 + 42
assert procesado.shape[1] == 13 # total columns; shared columns are counted only once
# ps
assert procesado.round(3).loc['Arom-1', '552'] == 0.00
assert procesado.round(3).loc['Arom-1', '3837'] == 0.00
assert procesado.round(3).loc['Arom-1', '4735'] == 0.063
assert procesado.round(3).loc['Arom-1', '1589'] == 0.231
assert procesado.round(3).loc['Arom-1', 'Bigua'] == 0.111
assert procesado.round(3).loc['Arom-1', 'Elpida'] == 0.037
assert procesado.round(3).loc['Jug-4', '552'] == 0.533 # bottom left
assert procesado.round(3).loc['Jug-4', 'Elpida'] == 0.586 # bottom right
# vo
assert procesado.round(3).loc['UNK 43', '552'] == 5.12
assert procesado.round(3).loc['UNK 43', '3837'] == 3.98
assert pd.isnull(procesado.round(3).loc['UNK 43', '4735'])
assert pd.isnull(procesado.round(3).loc['UNK 43', '1589'])
assert procesado.round(3).loc['UNK 43', 'Bigua'] == 2.430
assert procesado.round(3).loc['UNK 43', 'Elpida'] == 3.400
assert procesado.round(3).loc['TRANS2HEXENAL', '552'] == 5.400 # bottom left
assert procesado.round(3).loc['TRANS2HEXENAL', 'Elpida'] == 7.110 # bottom right
def test_merge_sources_multiple_using_me_ag(self):
## Prepare
me_data_file = get_data_file('me_2011_2012.csv')
ag_data_file = get_data_file('ag_2011_2012.csv')
fuentes = [me_data_file, ag_data_file]
## Run
procesado, nombres, _ = merge_sources(fuentes)
## Validate
assert procesado is not None
assert hasattr(procesado, 'shape')
assert procesado.shape[0] == 89 + 16
assert procesado.shape[1] == 47 # total columns; shared columns are counted only once
# me
## null values
assert pd.isnull(procesado.loc['NA_2106.37', '3806'])
assert pd.isnull(procesado.loc['NA_1608.87', '3815'])
assert pd.isnull(procesado.loc['NA_2106.37', '4748'])
assert pd.isnull(procesado.loc['Glucoheptonic acid-1.4-lactone', '4748'])
assert pd.isnull(procesado.loc['NA_2106.37', '560'])
assert | pd.isnull(procesado.loc['Glucoheptonic acid-1.4-lactone', '560']) | pandas.isnull |
'''Assignment 4 - Understanding and Predicting Property Maintenance Fines
This assignment is based on a data challenge from the Michigan Data Science Team (MDST).
The Michigan Data Science Team (MDST) and the Michigan Student Symposium for Interdisciplinary Statistical
Sciences (MSSISS) have partnered with the City of Detroit to help solve one of the most pressing problems
facing Detroit - blight. Blight violations are issued by the city to individuals who allow their properties
to remain in a deteriorated condition. Every year, the city of Detroit issues millions of dollars in fines
to residents and every year, many of these fines remain unpaid. Enforcing unpaid blight fines is a costly
and tedious process, so the city wants to know: how can we increase blight ticket compliance?
The first step in answering this question is understanding when and why a resident might fail to comply
with a blight ticket. This is where predictive modeling comes in. For this assignment, your task is to
predict whether a given blight ticket will be paid on time.
All data for this assignment has been provided to us through the Detroit Open Data Portal. Only the data
already included in your Coursera directory can be used for training the model for this assignment.
Nonetheless, we encourage you to look into data from other Detroit datasets to help inform feature creation
and model selection. We recommend taking a look at the following related datasets:
Building Permits
Trades Permits
Improve Detroit: Submitted Issues
DPD: Citizen Complaints
Parcel Map
We provide you with two data files for use in training and validating your models: train.csv and test.csv.
Each row in these two files corresponds to a single blight ticket, and includes information about when, why,
and to whom each ticket was issued. The target variable is compliance, which is True if the ticket was paid
early, on time, or within one month of the hearing date, False if the ticket was paid after the hearing date or not at all, and Null if the violator was found not responsible. Compliance, as well as a handful of other variables that will not be available at test-time, are only included in train.csv.
Note: All tickets where the violators were found not responsible are not considered during evaluation.
They are included in the training set as an additional source of data for visualization, and to enable
unsupervised and semi-supervised approaches. However, they are not included in the test set.
File descriptions (Use only this data for training your model!)
readonly/train.csv - the training set (all tickets issued 2004-2011)
readonly/test.csv - the test set (all tickets issued 2012-2016)
readonly/addresses.csv & readonly/latlons.csv - mapping from ticket id to addresses, and from addresses to
lat/lon coordinates.
Note: misspelled addresses may be incorrectly geolocated.
Data fields
train.csv & test.csv
ticket_id - unique identifier for tickets
agency_name - Agency that issued the ticket
inspector_name - Name of inspector that issued the ticket
violator_name - Name of the person/organization that the ticket was issued to
violation_street_number, violation_street_name, violation_zip_code - Address where the violation occurred
mailing_address_str_number, mailing_address_str_name, city, state, zip_code, non_us_str_code, country - Mailing address of the violator
ticket_issued_date - Date and time the ticket was issued
hearing_date - Date and time the violator's hearing was scheduled
violation_code, violation_description - Type of violation
disposition - Judgment and judgement type
fine_amount - Violation fine amount, excluding fees
admin_fee - $20 fee assigned to responsible judgments
state_fee - $10 fee assigned to responsible judgments
late_fee - 10% fee assigned to responsible judgments
discount_amount - discount applied, if any
clean_up_cost - DPW clean-up or graffiti removal cost
judgment_amount - Sum of all fines and fees
grafitti_status - Flag for graffiti violations
train.csv only
payment_amount - Amount paid, if any
payment_date - Date payment was made, if it was received
payment_status - Current payment status as of Feb 1 2017
balance_due - Fines and fees still owed
collection_status - Flag for payments in collections
compliance [target variable for prediction]
Null = Not responsible
0 = Responsible, non-compliant
1 = Responsible, compliant
compliance_detail - More information on why each ticket was marked compliant or non-compliant
Evaluation
Your predictions will be given as the probability that the corresponding blight ticket will be paid on time.
The evaluation metric for this assignment is the Area Under the ROC Curve (AUC).
Your grade will be based on the AUC score computed for your classifier. A model with an AUROC of 0.7
passes this assignment; over 0.75 will receive full points.
For this assignment, create a function that trains a model to predict blight ticket compliance in Detroit
using readonly/train.csv. Using this model, return a series of length 61001 with the data being the
probability that each corresponding ticket from readonly/test.csv will be paid, and the index being the
ticket_id.
Example:
ticket_id
284932 0.531842
285362 0.401958
285361 0.105928
285338 0.018572
...
376499 0.208567
376500 0.818759
369851 0.018528
Name: compliance, dtype: float32
Hints
Make sure your code is working before submitting it to the autograder.
Print out your result to see whether there is anything weird (e.g., all probabilities are the same).
Generally the total runtime should be less than 10 mins. You should NOT use Neural Network related
classifiers (e.g., MLPClassifier) in this question.
Try to avoid global variables. If you have other functions besides blight_model, you should move those
functions inside the scope of blight_model.
Refer to the pinned threads in Week 4's discussion forum when there is something you cannot figure
out.'''
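# --- Added illustration (not part of the assignment statement or the answer code below) ---
# The deliverable described above is a float probability Series indexed by ticket_id and is
# scored with ROC AUC. The sketch below shows one way such a submission could be sanity-checked
# locally against held-out labels; the variable names are hypothetical.
def _sketch_check_submission(probs, heldout_labels):
    import pandas as pd
    from sklearn.metrics import roc_auc_score
    probs = pd.Series(probs, dtype='float32')
    assert probs.between(0, 1).all()               # probabilities, not hard labels
    return roc_auc_score(heldout_labels.loc[probs.index], probs)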
#---------- ANSWER CODE ----------
import pandas as pd
import numpy as np
def blight_model():
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.metrics import roc_auc_score
train = pd.read_csv('train.csv',encoding='latin1',engine='python')
train.set_index('ticket_id',inplace=True)
test = pd.read_csv('test.csv')
test.set_index('ticket_id',inplace=True)
#Cleaning Data
train = train[train.compliance.notnull()]
train = train.loc[train.city.str.lower() == 'detroit']
y_train = train.compliance
train = train.loc[:,train.columns.isin(test.columns)]
drop_columns = [
'admin_fee','state_fee','late_fee','clean_up_cost',
'mailing_address_str_number','mailing_address_str_name',
'grafitti_status','non_us_str_code', 'inspector_name',
'violation_zip_code','violation_street_name','violation_description',
'violation_code','violation_street_number','violator_name',
'country','city','zip_code','state',
'ticket_issued_date', 'hearing_date',
]
train.drop(drop_columns, axis=1, inplace=True)
test.drop(drop_columns, axis=1, inplace=True)
X_train = | pd.get_dummies(train) | pandas.get_dummies |
'''
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
ABOUT:
This handles how the static and live figures are plotted in the DataAnalysis
and Telemetry pages
'''
import threading
import lib.app_settings as settings
import pandas as pd
import numpy as np
from matplotlib.figure import Figure
from matplotlib import style
import time
import lib.app_settings as settings
live_plot = Figure(figsize=(5,10), dpi=100)
live_plot_subplot1 = live_plot.add_subplot(221)
live_plot_subplot2 = live_plot.add_subplot(222)
live_plot_subplot3 = live_plot.add_subplot(223)
live_plot_subplot4 = live_plot.add_subplot(224)
live_table = Figure(figsize=(5,2), dpi=100)
live_table_subplot = live_table.add_subplot(111)
static_plot = Figure(figsize=(5,10), dpi=100)
static_plot_subplot1 = static_plot.add_subplot(221)
static_plot_subplot2 = static_plot.add_subplot(222)
static_plot_subplot3 = static_plot.add_subplot(223)
static_plot_subplot4 = static_plot.add_subplot(224)
static_table = Figure(figsize=(5,2), dpi=100)
static_table_subplot = static_table.add_subplot(111)
def plotting_init():
'''Initialize and configure subplots'''
style.use("ggplot")
global live_plot, live_plot_subplot1, live_plot_subplot2, live_plot_subplot3, live_plot_subplot4
global live_table, live_table_subplot
global static_plot, static_plot_subplot1, static_plot_subplot2, static_plot_subplot3, static_plot_subplot4
global static_table, static_table_subplot
live_plot = Figure(figsize=(5,10), dpi=100)
live_plot_subplot1 = live_plot.add_subplot(221)
live_plot_subplot2 = live_plot.add_subplot(222)
live_plot_subplot3 = live_plot.add_subplot(223)
live_plot_subplot4 = live_plot.add_subplot(224)
live_table = Figure(figsize=(5,2), dpi=100)
live_table_subplot = live_table.add_subplot(111)
static_plot = Figure(figsize=(5,10), dpi=100)
static_plot_subplot1 = static_plot.add_subplot(221)
static_plot_subplot2 = static_plot.add_subplot(222)
static_plot_subplot3 = static_plot.add_subplot(223)
static_plot_subplot4 = static_plot.add_subplot(224)
static_table = Figure(figsize=(5,2), dpi=100)
static_table_subplot = static_table.add_subplot(111)
def animate_live_plot(i):
'''Used to create an animated matplotlib plot'''
if (settings.CURRENT_PAGE == "Telemetry"): # To do: add additional statement to require new data to update plot
if (settings.DEBUG.status == True):
start = time.time()
print("\nTelemetry plot performance:")
data = pd.read_csv(settings.PATH_LIVEDATA)
try:
data.drop(["Events"], axis=1)
except:
if (settings.DEBUG.status == True):
print("WARNING: No 'Events' in data file")
if (settings.DEBUG.status == True):
data_time_stop = time.time()
print("Data Read Time: %f sec" %(data_time_stop-start))
start = time.time()
live_plot.subplots_adjust(hspace = 0.3)
# Multi-threading start
def plot_altitude():
live_plot_subplot1.clear()
live_plot_subplot1.plot(data['Time'], data['Altitude'], color='k')
live_plot_subplot1.set_xlabel("Time (sec)")
live_plot_subplot1.set_ylabel("AGL Altitude (ft)")
def plot_velocity():
live_plot_subplot2.clear()
live_plot_subplot2.plot(data['Time'], data['Velocity'], color='k')
live_plot_subplot2.set_xlabel("Time (sec)")
live_plot_subplot2.set_ylabel("Velocity (ft/s)")
def plot_acceleration():
live_plot_subplot3.clear()
live_plot_subplot3.plot(data['Time'], data['Acceleration'], color='k')
live_plot_subplot3.set_xlabel("Time (sec)")
live_plot_subplot3.set_ylabel("Acceleration (G)")
def plot_coordinates():
live_plot_subplot4.clear()
live_plot_subplot4.plot(data['Longitude'], data['Latitude'], color='k')
live_plot_subplot4.set_xlabel("Longitude (deg)")
live_plot_subplot4.set_ylabel("Latitude (deg)")
t1 = threading.Thread(target=plot_altitude)
t2 = threading.Thread(target=plot_velocity)
t3 = threading.Thread(target=plot_acceleration)
t4 = threading.Thread(target=plot_coordinates)
t1.start()
t2.start()
t3.start()
t4.start()
if (settings.DEBUG.status == True):
data_plot_stop = time.time()
print("Plot Time: %f sec\n" %(data_plot_stop-start))
def animate_live_table(i):
'''Used to create an animated matplotlib table'''
if (settings.CURRENT_PAGE == "Telemetry"): # To Do: add additional statement to require new data flag
data = | pd.read_csv(settings.PATH_LIVEDATA) | pandas.read_csv |
import pandas as pd
from dateutil.relativedelta import relativedelta
from datacode.typing import StrList
def expand_entity_date_selections(full_df: pd.DataFrame, selections_df: pd.DataFrame, cols: StrList = None,
num_firms: int = 3, expand_months: int = 3,
entity_id_col: str = 'TICKER',
date_col: str = 'Date',
begin_datevar: str = 'Begin Date', end_datevar: str = 'End Date',
) -> pd.DataFrame:
entity_date_df = _firm_date_range_df_from_df(
selections_df,
num_firms=num_firms,
firm_id_col=entity_id_col,
date_col=date_col,
begin_datevar=begin_datevar,
end_datevar=end_datevar
)
_expand_date_df(
entity_date_df,
expand_months=expand_months,
begin_datevar=begin_datevar,
end_datevar=end_datevar
)
entity_df = _select_orig_df_from_date_df(
full_df,
entity_date_df,
firm_id_col=entity_id_col,
date_col=date_col,
begin_datevar=begin_datevar,
end_datevar=end_datevar,
associated_cols=cols
)
return entity_df
def _firm_date_range_df_from_df(df: pd.DataFrame, num_firms: int = 3, firm_id_col: str = 'TICKER',
date_col: str = 'Date',
begin_datevar: str = 'Begin Date', end_datevar: str = 'End Date') -> pd.DataFrame:
firm_date_vars = [firm_id_col, date_col]
firms = df[firm_id_col].unique()[:num_firms]
firm_dates = df[df[firm_id_col].isin(firms)][firm_date_vars].sort_values(firm_date_vars)
earliest_dates = firm_dates.groupby(firm_id_col).min()
earliest_dates.rename(columns={date_col: begin_datevar}, inplace=True)
latest_dates = firm_dates.groupby(firm_id_col).max()
latest_dates.rename(columns={date_col: end_datevar}, inplace=True)
return earliest_dates.join(latest_dates)
def _expand_date_df(date_df: pd.DataFrame, expand_months: int = 3,
begin_datevar: str = 'Begin Date', end_datevar: str = 'End Date'):
"""
Note: inplace
"""
date_df[begin_datevar] = date_df[begin_datevar].apply(lambda x: x - relativedelta(months=expand_months))
date_df[end_datevar] = date_df[end_datevar].apply(lambda x: x + relativedelta(months=expand_months))
def _select_orig_df_from_date_df(df: pd.DataFrame, date_df: pd.DataFrame,
firm_id_col: str = 'TICKER',
date_col: str = 'Date',
begin_datevar: str = 'Begin Date', end_datevar: str = 'End Date',
associated_cols: StrList = None) -> pd.DataFrame:
if associated_cols == None:
associated_cols = [firm_id_col, date_col]
out_df = | pd.DataFrame() | pandas.DataFrame |
import copy
import os
import warnings
from collections import OrderedDict
import numpy as np
import pandas as pd
import woodwork as ww
from sklearn.exceptions import NotFittedError
from sklearn.inspection import partial_dependence as sk_partial_dependence
from sklearn.inspection._partial_dependence import (
_grid_from_X,
_partial_dependence_brute,
)
from sklearn.manifold import TSNE
from sklearn.metrics import auc as sklearn_auc
from sklearn.metrics import confusion_matrix as sklearn_confusion_matrix
from sklearn.metrics import (
precision_recall_curve as sklearn_precision_recall_curve,
)
from sklearn.metrics import roc_curve as sklearn_roc_curve
from sklearn.preprocessing import LabelBinarizer
from sklearn.tree import export_graphviz
from sklearn.utils.multiclass import unique_labels
import evalml
from evalml.exceptions import NoPositiveLabelException, NullsInColumnWarning
from evalml.model_family import ModelFamily
from evalml.model_understanding.permutation_importance import (
calculate_permutation_importance,
)
from evalml.objectives.utils import get_objective
from evalml.problem_types import ProblemTypes
from evalml.utils import import_or_raise, infer_feature_types, jupyter_check
def confusion_matrix(y_true, y_predicted, normalize_method="true"):
"""Confusion matrix for binary and multiclass classification.
Arguments:
y_true (pd.Series or np.ndarray): True binary labels.
y_pred (pd.Series or np.ndarray): Predictions from a binary classifier.
normalize_method ({'true', 'pred', 'all', None}): Normalization method to use, if not None. Supported options are: 'true' to normalize by row, 'pred' to normalize by column, or 'all' to normalize by all values. Defaults to 'true'.
Returns:
pd.DataFrame: Confusion matrix. The column header represents the predicted labels while row header represents the actual labels.
"""
y_true = infer_feature_types(y_true)
y_predicted = infer_feature_types(y_predicted)
y_true = y_true.to_numpy()
y_predicted = y_predicted.to_numpy()
labels = unique_labels(y_true, y_predicted)
conf_mat = sklearn_confusion_matrix(y_true, y_predicted)
conf_mat = pd.DataFrame(conf_mat, index=labels, columns=labels)
if normalize_method is not None:
return normalize_confusion_matrix(conf_mat, normalize_method=normalize_method)
return conf_mat
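# --- Added usage sketch (illustrative only, not part of evalml) ---
# With normalize_method="true" each row of the returned frame sums to 1, i.e. counts are
# normalized by the number of actual instances of each label; None returns raw counts.
# The labels below are arbitrary example data.
def _example_confusion_matrix_usage():
    y_true = pd.Series(["cat", "dog", "dog", "cat", "dog"])
    y_pred = pd.Series(["cat", "dog", "cat", "cat", "dog"])
    raw_counts = confusion_matrix(y_true, y_pred, normalize_method=None)
    by_row = confusion_matrix(y_true, y_pred, normalize_method="true")
    return raw_counts, by_row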
def normalize_confusion_matrix(conf_mat, normalize_method="true"):
"""Normalizes a confusion matrix.
Arguments:
conf_mat (pd.DataFrame or np.ndarray): Confusion matrix to normalize.
normalize_method ({'true', 'pred', 'all'}): Normalization method. Supported options are: 'true' to normalize by row, 'pred' to normalize by column, or 'all' to normalize by all values. Defaults to 'true'.
Returns:
pd.DataFrame: normalized version of the input confusion matrix. The column header represents the predicted labels while row header represents the actual labels.
"""
conf_mat = infer_feature_types(conf_mat)
col_names = conf_mat.columns
conf_mat = conf_mat.to_numpy()
with warnings.catch_warnings(record=True) as w:
if normalize_method == "true":
conf_mat = conf_mat.astype("float") / conf_mat.sum(axis=1)[:, np.newaxis]
elif normalize_method == "pred":
conf_mat = conf_mat.astype("float") / conf_mat.sum(axis=0)
elif normalize_method == "all":
conf_mat = conf_mat.astype("float") / conf_mat.sum().sum()
else:
raise ValueError(
'Invalid value provided for "normalize_method": {}'.format(
normalize_method
)
)
if w and "invalid value encountered in" in str(w[0].message):
raise ValueError(
"Sum of given axis is 0 and normalization is not possible. Please select another option."
)
conf_mat = pd.DataFrame(conf_mat, index=col_names, columns=col_names)
return conf_mat
def graph_confusion_matrix(
y_true, y_pred, normalize_method="true", title_addition=None
):
"""Generate and display a confusion matrix plot.
If `normalize_method` is set, hover text will show raw count, otherwise hover text will show count normalized with method 'true'.
Arguments:
y_true (pd.Series or np.ndarray): True binary labels.
y_pred (pd.Series or np.ndarray): Predictions from a binary classifier.
normalize_method ({'true', 'pred', 'all', None}): Normalization method to use, if not None. Supported options are: 'true' to normalize by row, 'pred' to normalize by column, or 'all' to normalize by all values. Defaults to 'true'.
title_addition (str or None): if not None, append to plot title. Defaults to None.
Returns:
plotly.Figure representing the confusion matrix plot generated
"""
_go = import_or_raise(
"plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects"
)
_ff = import_or_raise(
"plotly.figure_factory",
error_msg="Cannot find dependency plotly.figure_factory",
)
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
conf_mat = confusion_matrix(y_true, y_pred, normalize_method=None)
conf_mat_normalized = confusion_matrix(
y_true, y_pred, normalize_method=normalize_method or "true"
)
labels = conf_mat.columns.tolist()
title = "Confusion matrix{}{}".format(
"" if title_addition is None else (" " + title_addition),
""
if normalize_method is None
else (', normalized using method "' + normalize_method + '"'),
)
z_data, custom_data = (
(conf_mat, conf_mat_normalized)
if normalize_method is None
else (conf_mat_normalized, conf_mat)
)
z_data = z_data.to_numpy()
z_text = [["{:.3f}".format(y) for y in x] for x in z_data]
primary_heading, secondary_heading = (
("Raw", "Normalized") if normalize_method is None else ("Normalized", "Raw")
)
hover_text = (
"<br><b>"
+ primary_heading
+ " Count</b>: %{z}<br><b>"
+ secondary_heading
+ " Count</b>: %{customdata} <br>"
)
# the "<extra> tags at the end are necessary to remove unwanted trace info
hover_template = (
"<b>True</b>: %{y}<br><b>Predicted</b>: %{x}" + hover_text + "<extra></extra>"
)
layout = _go.Layout(
title={"text": title},
xaxis={"title": "Predicted Label", "type": "category", "tickvals": labels},
yaxis={"title": "True Label", "type": "category", "tickvals": labels},
)
fig = _ff.create_annotated_heatmap(
z_data,
x=labels,
y=labels,
annotation_text=z_text,
customdata=custom_data,
hovertemplate=hover_template,
colorscale="Blues",
showscale=True,
)
fig.update_layout(layout)
# put xaxis text on bottom to not overlap with title
fig["layout"]["xaxis"].update(side="bottom")
# plotly Heatmap y axis defaults to the reverse of what we want: https://community.plotly.com/t/heatmap-y-axis-is-reversed-by-default-going-against-standard-convention-for-matrices/32180
fig.update_yaxes(autorange="reversed")
return fig
def precision_recall_curve(y_true, y_pred_proba, pos_label_idx=-1):
"""
Given labels and binary classifier predicted probabilities, compute and return the data representing a precision-recall curve.
Arguments:
y_true (pd.Series or np.ndarray): True binary labels.
y_pred_proba (pd.Series or np.ndarray): Predictions from a binary classifier, before thresholding has been applied. Note this should be the predicted probability for the "true" label.
pos_label_idx (int): the column index corresponding to the positive class. If predicted probabilities are two-dimensional, this will be used to access the probabilities for the positive class.
Returns:
list: Dictionary containing metrics used to generate a precision-recall plot, with the following keys:
* `precision`: Precision values.
* `recall`: Recall values.
* `thresholds`: Threshold values used to produce the precision and recall.
* `auc_score`: The area under the ROC curve.
"""
y_true = infer_feature_types(y_true)
y_pred_proba = infer_feature_types(y_pred_proba)
if isinstance(y_pred_proba, pd.DataFrame):
y_pred_proba_shape = y_pred_proba.shape
try:
y_pred_proba = y_pred_proba.iloc[:, pos_label_idx]
except IndexError:
raise NoPositiveLabelException(
f"Predicted probabilities of shape {y_pred_proba_shape} don't contain a column at index {pos_label_idx}"
)
precision, recall, thresholds = sklearn_precision_recall_curve(y_true, y_pred_proba)
auc_score = sklearn_auc(recall, precision)
return {
"precision": precision,
"recall": recall,
"thresholds": thresholds,
"auc_score": auc_score,
}
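# --- Added usage sketch (illustrative only, not part of evalml) ---
# precision_recall_curve expects true binary labels and the predicted probability of the
# positive class, and returns the curve points together with the area under the curve.
# The example values below are arbitrary.
def _example_precision_recall_curve_usage():
    y_true = pd.Series([0, 0, 1, 1])
    y_proba = pd.Series([0.1, 0.4, 0.35, 0.8])
    curve = precision_recall_curve(y_true, y_proba)
    return curve["precision"], curve["recall"], curve["auc_score"]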
def graph_precision_recall_curve(y_true, y_pred_proba, title_addition=None):
"""Generate and display a precision-recall plot.
Arguments:
y_true (pd.Series or np.ndarray): True binary labels.
y_pred_proba (pd.Series or np.ndarray): Predictions from a binary classifier, before thresholding has been applied. Note this should be the predicted probability for the "true" label.
title_addition (str or None): If not None, append to plot title. Default None.
Returns:
plotly.Figure representing the precision-recall plot generated
"""
_go = import_or_raise(
"plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects"
)
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
precision_recall_curve_data = precision_recall_curve(y_true, y_pred_proba)
title = "Precision-Recall{}".format(
"" if title_addition is None else (" " + title_addition)
)
layout = _go.Layout(
title={"text": title},
xaxis={"title": "Recall", "range": [-0.05, 1.05]},
yaxis={"title": "Precision", "range": [-0.05, 1.05]},
)
data = []
data.append(
_go.Scatter(
x=precision_recall_curve_data["recall"],
y=precision_recall_curve_data["precision"],
name="Precision-Recall (AUC {:06f})".format(
precision_recall_curve_data["auc_score"]
),
line=dict(width=3),
)
)
return _go.Figure(layout=layout, data=data)
def roc_curve(y_true, y_pred_proba):
"""
Given labels and classifier predicted probabilities, compute and return the data representing a Receiver Operating Characteristic (ROC) curve. Works with binary or multiclass problems.
Arguments:
y_true (pd.Series or np.ndarray): True labels.
y_pred_proba (pd.Series or np.ndarray): Predictions from a classifier, before thresholding has been applied.
Returns:
list(dict): A list of dictionaries (with one for each class) is returned. Binary classification problems return a list with one dictionary.
Each dictionary contains metrics used to generate an ROC plot with the following keys:
                  * `fpr_rates`: False positive rates.
                  * `tpr_rates`: True positive rates.
                  * `thresholds`: Threshold values used to produce each pair of true/false positive rates.
* `auc_score`: The area under the ROC curve.
"""
y_true = infer_feature_types(y_true).to_numpy()
y_pred_proba = infer_feature_types(y_pred_proba).to_numpy()
if len(y_pred_proba.shape) == 1:
y_pred_proba = y_pred_proba.reshape(-1, 1)
if y_pred_proba.shape[1] == 2:
y_pred_proba = y_pred_proba[:, 1].reshape(-1, 1)
nan_indices = np.logical_or(pd.isna(y_true), np.isnan(y_pred_proba).any(axis=1))
y_true = y_true[~nan_indices]
y_pred_proba = y_pred_proba[~nan_indices]
lb = LabelBinarizer()
lb.fit(np.unique(y_true))
y_one_hot_true = lb.transform(y_true)
n_classes = y_one_hot_true.shape[1]
curve_data = []
for i in range(n_classes):
fpr_rates, tpr_rates, thresholds = sklearn_roc_curve(
y_one_hot_true[:, i], y_pred_proba[:, i]
)
auc_score = sklearn_auc(fpr_rates, tpr_rates)
curve_data.append(
{
"fpr_rates": fpr_rates,
"tpr_rates": tpr_rates,
"thresholds": thresholds,
"auc_score": auc_score,
}
)
return curve_data
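# Hedged usage sketch (illustration only; not in the original source). roc_curve() returns one
# dictionary of fpr_rates/tpr_rates/thresholds/auc_score per class; the toy multiclass
# probabilities below are assumptions.
def _example_roc_curve_usage():
    import numpy as np

    y_true = np.array([0, 1, 2, 1, 0, 2])
    y_pred_proba = np.array(
        [
            [0.7, 0.2, 0.1],
            [0.1, 0.8, 0.1],
            [0.2, 0.2, 0.6],
            [0.3, 0.5, 0.2],
            [0.6, 0.3, 0.1],
            [0.1, 0.3, 0.6],
        ]
    )
    per_class_curves = roc_curve(y_true, y_pred_proba)
    return [curve["auc_score"] for curve in per_class_curves]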
def graph_roc_curve(y_true, y_pred_proba, custom_class_names=None, title_addition=None):
"""Generate and display a Receiver Operating Characteristic (ROC) plot for binary and multiclass classification problems.
Arguments:
y_true (pd.Series or np.ndarray): True labels.
        y_pred_proba (pd.Series or np.ndarray): Predictions from a classifier, before thresholding has been applied. Note this should be a one-dimensional array with the predicted probability for the "true" label in the binary case.
        custom_class_names (list or None): If not None, custom labels for classes. Default None.
title_addition (str or None): if not None, append to plot title. Default None.
Returns:
plotly.Figure representing the ROC plot generated
"""
_go = import_or_raise(
"plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects"
)
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
title = "Receiver Operating Characteristic{}".format(
"" if title_addition is None else (" " + title_addition)
)
layout = _go.Layout(
title={"text": title},
xaxis={"title": "False Positive Rate", "range": [-0.05, 1.05]},
yaxis={"title": "True Positive Rate", "range": [-0.05, 1.05]},
)
all_curve_data = roc_curve(y_true, y_pred_proba)
graph_data = []
n_classes = len(all_curve_data)
if custom_class_names and len(custom_class_names) != n_classes:
raise ValueError(
"Number of custom class names does not match number of classes"
)
for i in range(n_classes):
roc_curve_data = all_curve_data[i]
name = i + 1 if custom_class_names is None else custom_class_names[i]
graph_data.append(
_go.Scatter(
x=roc_curve_data["fpr_rates"],
y=roc_curve_data["tpr_rates"],
hovertemplate="(False Postive Rate: %{x}, True Positive Rate: %{y})<br>"
+ "Threshold: %{text}",
name=f"Class {name} (AUC {roc_curve_data['auc_score']:.06f})",
text=roc_curve_data["thresholds"],
line=dict(width=3),
)
)
graph_data.append(
_go.Scatter(
x=[0, 1], y=[0, 1], name="Trivial Model (AUC 0.5)", line=dict(dash="dash")
)
)
return _go.Figure(layout=layout, data=graph_data)
def graph_permutation_importance(pipeline, X, y, objective, importance_threshold=0):
"""Generate a bar graph of the pipeline's permutation importance.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (pd.DataFrame): The input data used to score and compute permutation importance
y (pd.Series): The target data
objective (str, ObjectiveBase): Objective to score on
importance_threshold (float, optional): If provided, graph features with a permutation importance whose absolute value is larger than importance_threshold. Defaults to zero.
Returns:
plotly.Figure, a bar graph showing features and their respective permutation importance.
"""
go = import_or_raise(
"plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects"
)
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
perm_importance = calculate_permutation_importance(pipeline, X, y, objective)
perm_importance["importance"] = perm_importance["importance"]
if importance_threshold < 0:
raise ValueError(
f"Provided importance threshold of {importance_threshold} must be greater than or equal to 0"
)
# Remove features with close to zero importance
perm_importance = perm_importance[
abs(perm_importance["importance"]) >= importance_threshold
]
# List is reversed to go from ascending order to descending order
perm_importance = perm_importance.iloc[::-1]
title = "Permutation Importance"
subtitle = (
"The relative importance of each input feature's "
"overall influence on the pipelines' predictions, computed using "
"the permutation importance algorithm."
)
data = [
go.Bar(
x=perm_importance["importance"],
y=perm_importance["feature"],
orientation="h",
)
]
layout = {
"title": "{0}<br><sub>{1}</sub>".format(title, subtitle),
"height": 800,
"xaxis_title": "Permutation Importance",
"yaxis_title": "Feature",
"yaxis": {"type": "category"},
}
fig = go.Figure(data=data, layout=layout)
return fig
def binary_objective_vs_threshold(pipeline, X, y, objective, steps=100):
"""Computes objective score as a function of potential binary classification
decision thresholds for a fitted binary classification pipeline.
Arguments:
pipeline (BinaryClassificationPipeline obj): Fitted binary classification pipeline
X (pd.DataFrame): The input data used to compute objective score
y (pd.Series): The target labels
objective (ObjectiveBase obj, str): Objective used to score
steps (int): Number of intervals to divide and calculate objective score at
Returns:
pd.DataFrame: DataFrame with thresholds and the corresponding objective score calculated at each threshold
"""
objective = get_objective(objective, return_instance=True)
if not objective.is_defined_for_problem_type(ProblemTypes.BINARY):
raise ValueError(
"`binary_objective_vs_threshold` can only be calculated for binary classification objectives"
)
if objective.score_needs_proba:
raise ValueError("Objective `score_needs_proba` must be False")
pipeline_tmp = copy.copy(pipeline)
thresholds = np.linspace(0, 1, steps + 1)
costs = []
for threshold in thresholds:
pipeline_tmp.threshold = threshold
scores = pipeline_tmp.score(X, y, [objective])
costs.append(scores[objective.name])
df = pd.DataFrame({"threshold": thresholds, "score": costs})
return df
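# Hedged usage sketch (illustration only; not in the original source). It assumes the caller
# already has a fitted evalml BinaryClassificationPipeline and holdout data; those inputs are
# placeholders, not something this module provides.
def _example_objective_vs_threshold(fitted_binary_pipeline, X_holdout, y_holdout):
    scores_df = binary_objective_vs_threshold(
        fitted_binary_pipeline, X_holdout, y_holdout, "F1", steps=20
    )
    # Pick the decision threshold that maximizes the objective score
    best_row = scores_df.loc[scores_df["score"].idxmax()]
    return best_row["threshold"], best_row["score"]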
def graph_binary_objective_vs_threshold(pipeline, X, y, objective, steps=100):
"""Generates a plot graphing objective score vs. decision thresholds for a fitted binary classification pipeline.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (pd.DataFrame): The input data used to score and compute scores
y (pd.Series): The target labels
objective (ObjectiveBase obj, str): Objective used to score, shown on the y-axis of the graph
steps (int): Number of intervals to divide and calculate objective score at
Returns:
plotly.Figure representing the objective score vs. threshold graph generated
"""
_go = import_or_raise(
"plotly.graph_objects", error_msg="Cannot find dependency plotly.graph_objects"
)
if jupyter_check():
import_or_raise("ipywidgets", warning=True)
objective = get_objective(objective, return_instance=True)
df = binary_objective_vs_threshold(pipeline, X, y, objective, steps)
title = f"{objective.name} Scores vs. Thresholds"
layout = _go.Layout(
title={"text": title},
xaxis={"title": "Threshold", "range": _calculate_axis_range(df["threshold"])},
yaxis={
"title": f"{objective.name} Scores vs. Binary Classification Decision Threshold",
"range": _calculate_axis_range(df["score"]),
},
)
data = []
data.append(_go.Scatter(x=df["threshold"], y=df["score"], line=dict(width=3)))
return _go.Figure(layout=layout, data=data)
def _is_feature_of_type(feature, X, ltype):
"""Determine whether the feature the user passed in to partial dependence is a Woodwork logical type."""
if isinstance(feature, int):
is_type = isinstance(X.ww.logical_types[X.columns[feature]], ltype)
else:
is_type = isinstance(X.ww.logical_types[feature], ltype)
return is_type
def _put_categorical_feature_first(features, first_feature_categorical):
"""If the user is doing a two-way partial dependence plot and one of the features is categorical,
we need to make sure the categorical feature is the first element in the tuple that's passed to sklearn.
This is because in the two-way grid calculation, sklearn will try to coerce every element of the grid to the
type of the first feature in the tuple. If we put the categorical feature first, the grid will be of type 'object'
which can accommodate both categorical and numeric data. If we put the numeric feature first, the grid will be of
type float64 and we can't coerce categoricals to float64 dtype.
"""
new_features = features if first_feature_categorical else (features[1], features[0])
return new_features
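# Hedged illustration (not in the original source) of the reordering rule above: when the first
# feature is not categorical, the tuple is flipped so the categorical feature leads and sklearn
# builds an object-dtype grid.
def _example_put_categorical_first():
    assert _put_categorical_feature_first(("cat_col", "num_col"), True) == ("cat_col", "num_col")
    assert _put_categorical_feature_first(("num_col", "cat_col"), False) == ("cat_col", "num_col")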
def _get_feature_names_from_str_or_col_index(X, names_or_col_indices):
"""Helper function to map the user-input features param to column names."""
feature_list = []
for name_or_index in names_or_col_indices:
if isinstance(name_or_index, int):
feature_list.append(X.columns[name_or_index])
else:
feature_list.append(name_or_index)
return feature_list
def _raise_value_error_if_any_features_all_nan(df):
"""Helper for partial dependence data validation."""
nan_pct = df.isna().mean()
all_nan = nan_pct[nan_pct == 1].index.tolist()
all_nan = [f"'{name}'" for name in all_nan]
if all_nan:
raise ValueError(
"The following features have all NaN values and so the "
f"partial dependence cannot be computed: {', '.join(all_nan)}"
)
def _raise_value_error_if_mostly_one_value(df, percentile):
"""Helper for partial dependence data validation."""
one_value = []
values = []
for col in df.columns:
normalized_counts = df[col].value_counts(normalize=True) + 0.01
normalized_counts = normalized_counts[normalized_counts > percentile]
if not normalized_counts.empty:
one_value.append(f"'{col}'")
values.append(str(normalized_counts.index[0]))
if one_value:
raise ValueError(
f"Features ({', '.join(one_value)}) are mostly one value, ({', '.join(values)}), "
f"and cannot be used to compute partial dependence. Try raising the upper percentage value."
)
def partial_dependence(
pipeline, X, features, percentiles=(0.05, 0.95), grid_resolution=100, kind="average"
):
"""Calculates one or two-way partial dependence. If a single integer or
string is given for features, one-way partial dependence is calculated. If
a tuple of two integers or strings is given, two-way partial dependence
is calculated with the first feature in the y-axis and second feature in the
x-axis.
Arguments:
pipeline (PipelineBase or subclass): Fitted pipeline
X (pd.DataFrame, np.ndarray): The input data used to generate a grid of values
for feature where partial dependence will be calculated at
features (int, string, tuple[int or string]): The target feature for which to create the partial dependence plot for.
If features is an int, it must be the index of the feature to use.
If features is a string, it must be a valid column name in X.
If features is a tuple of int/strings, it must contain valid column integers/names in X.
percentiles (tuple[float]): The lower and upper percentile used to create the extreme values for the grid.
Must be in [0, 1]. Defaults to (0.05, 0.95).
grid_resolution (int): Number of samples of feature(s) for partial dependence plot. If this value
is less than the maximum number of categories present in categorical data within X, it will be
set to the max number of categories + 1. Defaults to 100.
kind {'average', 'individual', 'both'}: The type of predictions to return. 'individual' will return the predictions for
all of the points in the grid for each sample in X. 'average' will return the predictions for all of the points in
the grid but averaged over all of the samples in X.
Returns:
pd.DataFrame, list(pd.DataFrame), or tuple(pd.DataFrame, list(pd.DataFrame)):
When `kind='average'`: DataFrame with averaged predictions for all points in the grid averaged
over all samples of X and the values used to calculate those predictions.
When `kind='individual'`: DataFrame with individual predictions for all points in the grid for each sample
of X and the values used to calculate those predictions. If a two-way partial dependence is calculated, then
the result is a list of DataFrames with each DataFrame representing one sample's predictions.
When `kind='both'`: A tuple consisting of the averaged predictions (in a DataFrame) over all samples of X and the individual
predictions (in a list of DataFrames) for each sample of X.
In the one-way case: The dataframe will contain two columns, "feature_values" (grid points at which the
partial dependence was calculated) and "partial_dependence" (the partial dependence at that feature value).
For classification problems, there will be a third column called "class_label" (the class label for which
the partial dependence was calculated). For binary classification, the partial dependence is only calculated
for the "positive" class.
In the two-way case: The data frame will contain grid_resolution number of columns and rows where the
index and column headers are the sampled values of the first and second features, respectively, used to make
the partial dependence contour. The values of the data frame contain the partial dependence data for each
feature value pair.
Raises:
ValueError: if the user provides a tuple of not exactly two features.
ValueError: if the provided pipeline isn't fitted.
ValueError: if the provided pipeline is a Baseline pipeline.
ValueError: if any of the features passed in are completely NaN
ValueError: if any of the features are low-variance. Defined as having one value occurring more than the upper
percentile passed by the user. By default 95%.
"""
# Dynamically set the grid resolution to the maximum number of values
# in the categorical/datetime variables if there are more categories/datetime values than resolution cells
X = infer_feature_types(X)
if isinstance(features, (list, tuple)):
is_categorical = [
_is_feature_of_type(f, X, ww.logical_types.Categorical) for f in features
]
is_datetime = [
_is_feature_of_type(f, X, ww.logical_types.Datetime) for f in features
]
else:
is_categorical = [
_is_feature_of_type(features, X, ww.logical_types.Categorical)
]
is_datetime = [_is_feature_of_type(features, X, ww.logical_types.Datetime)]
if isinstance(features, (list, tuple)):
if len(features) != 2:
raise ValueError(
"Too many features given to graph_partial_dependence. Only one or two-way partial "
"dependence is supported."
)
if not (
all([isinstance(x, str) for x in features])
or all([isinstance(x, int) for x in features])
):
raise ValueError(
"Features provided must be a tuple entirely of integers or strings, not a mixture of both."
)
X_features = (
X.ww.iloc[:, list(features)]
if isinstance(features[0], int)
else X.ww[list(features)]
)
else:
X_features = (
X.ww.iloc[:, [features]] if isinstance(features, int) else X.ww[[features]]
)
X_unknown = X_features.ww.select("unknown")
if len(X_unknown.columns):
# We drop the unknown columns in the pipelines, so we cannot calculate partial dependence for these
raise ValueError(
f"Columns {X_unknown.columns.values} are of type 'Unknown', which cannot be used for partial dependence"
)
X_cats = X_features.ww.select("categorical")
if any(is_categorical):
max_num_cats = max(X_cats.ww.describe().loc["nunique"])
grid_resolution = max([max_num_cats + 1, grid_resolution])
X_dt = X_features.ww.select("datetime")
if isinstance(features, (list, tuple)):
feature_names = _get_feature_names_from_str_or_col_index(X, features)
if any(is_datetime):
raise ValueError(
"Two-way partial dependence is not supported for datetime columns."
)
if any(is_categorical):
features = _put_categorical_feature_first(features, is_categorical[0])
else:
feature_names = _get_feature_names_from_str_or_col_index(X, [features])
if not pipeline._is_fitted:
raise ValueError("Pipeline to calculate partial dependence for must be fitted")
if pipeline.model_family == ModelFamily.BASELINE:
raise ValueError(
"Partial dependence plots are not supported for Baseline pipelines"
)
feature_list = X[feature_names]
_raise_value_error_if_any_features_all_nan(feature_list)
if feature_list.isnull().sum().any():
warnings.warn(
"There are null values in the features, which will cause NaN values in the partial dependence output. "
"Fill in these values to remove the NaN values.",
NullsInColumnWarning,
)
_raise_value_error_if_mostly_one_value(feature_list, percentiles[1])
wrapped = evalml.pipelines.components.utils.scikit_learn_wrapped_estimator(pipeline)
try:
if any(is_datetime):
timestamps = np.array(
[X_dt - pd.Timestamp("1970-01-01")] // np.timedelta64(1, "s")
).reshape(-1, 1)
grid, values = _grid_from_X(
timestamps, percentiles=percentiles, grid_resolution=grid_resolution
)
grid_dates = pd.to_datetime(
pd.Series(grid.squeeze()), unit="s"
).values.reshape(-1, 1)
# convert values to dates for the output
value_dates = pd.to_datetime(pd.Series(values[0]), unit="s")
# need to pass in the feature as an int index rather than string
feature_index = (
X.columns.tolist().index(features)
if isinstance(features, str)
else features
)
averaged_predictions, predictions = _partial_dependence_brute(
wrapped, grid_dates, [feature_index], X, response_method="auto"
)
# reshape based on the way scikit-learn reshapes the data
predictions = predictions.reshape(
-1, X.shape[0], *[val.shape[0] for val in values]
)
averaged_predictions = averaged_predictions.reshape(
-1, *[val.shape[0] for val in values]
)
preds = {
"average": averaged_predictions,
"individual": predictions,
"values": [value_dates],
}
else:
preds = sk_partial_dependence(
wrapped,
X=X,
features=features,
percentiles=percentiles,
grid_resolution=grid_resolution,
kind=kind,
)
except ValueError as e:
if "percentiles are too close to each other" in str(e):
raise ValueError(
"The scale of these features is too small and results in"
"percentiles that are too close together. Partial dependence"
"cannot be computed for these types of features. Consider"
"scaling the features so that they differ by > 10E-7"
)
else:
raise e
classes = None
if isinstance(pipeline, evalml.pipelines.BinaryClassificationPipeline):
classes = [pipeline.classes_[1]]
elif isinstance(pipeline, evalml.pipelines.MulticlassClassificationPipeline):
classes = pipeline.classes_
values = preds["values"]
if kind in ["average", "both"]:
avg_pred = preds["average"]
if isinstance(features, (int, str)):
avg_data = pd.DataFrame(
{
"feature_values": np.tile(values[0], avg_pred.shape[0]),
"partial_dependence": np.concatenate([pred for pred in avg_pred]),
}
)
elif isinstance(features, (list, tuple)):
avg_data = pd.DataFrame(avg_pred.reshape((-1, avg_pred.shape[-1])))
avg_data.columns = values[1]
avg_data.index = np.tile(values[0], avg_pred.shape[0])
if classes is not None:
avg_data["class_label"] = np.repeat(classes, len(values[0]))
if kind in ["individual", "both"]:
ind_preds = preds["individual"]
if isinstance(features, (int, str)):
ind_data = list()
for label in ind_preds:
                ind_data.append(pd.DataFrame(label))
import pandas as pd
### Desktop application exercise
def kimetsu_search(path, word):
    # Load the search source
    df = pd.read_csv(path)
    source = list(df["name"])
    # Search for the word
    if word in source:
        return True
    else:
        return False
def add_to_kimetsu(path, word):
    # Load the search source
    df = pd.read_csv("./source.csv")
# Import Statements
import matplotlib.pyplot as plt
import pandas as pd
import torch
import json
from PIL import Image
import numpy as np
from torch import nn
from torch import optim
from torchvision import datasets, transforms, models
from collections import OrderedDict
# Load Data Function
def LoadData(data_dir, batch_size):
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
# Define transforms for the training and validation/test sets
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
valid_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Load the train, valid, and test datasets and apply tranformations to each
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
valid_data = datasets.ImageFolder(valid_dir, transform=valid_transforms)
# Define a loader for each dataset
trainloader = torch.utils.data.DataLoader(
train_data, batch_size=batch_size, shuffle=True)
validloader = torch.utils.data.DataLoader(
valid_data, batch_size=batch_size)
return train_data, trainloader, validloader
# Define the Model
def CreateModel(model_arch, hidden_units, drp_out=0.3):
model = models.__dict__[model_arch](pretrained=True)
for param in model.parameters():
param.requires_grad = False
input_size = None
if type(model.classifier) is torch.nn.modules.container.Sequential:
got_input_size = False
i = 0
while got_input_size is False:
if type(model.classifier[i]) is torch.nn.modules.linear.Linear:
got_input_size = True
input_size = int(model.classifier[i].in_features)
elif type(model.classifier[i]) is torch.nn.modules.conv.Conv2d:
got_input_size = True
input_size = int(model.classifier[i].in_channels)
else:
i += 1
else:
input_size = int(model.classifier.in_features)
hidden_layer = hidden_units
output_size = 102
dropout = drp_out
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_size, hidden_layer)),
('relu', nn.ReLU()),
('drop', nn.Dropout(p=dropout)),
('fc2', nn.Linear(hidden_layer, output_size)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
return model, input_size, dropout
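# Hedged usage sketch (illustration only; not part of the original script). The architecture
# name and hyperparameters are assumptions; CreateModel() returns the model together with the
# classifier input size and the dropout probability it used.
def _example_create_model():
    model, input_size, dropout = CreateModel("vgg16", hidden_units=512, drp_out=0.3)
    # The replacement classifier maps `input_size` features -> 512 hidden units -> 102 classes
    return model, input_size, dropout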
# Train the Network
def TrainNetwork(model, trainloader, train_data, validloader, criterion, optimizer, epochs, device):
print_every = 20
steps = 0
model.to(device)
for e in range(epochs):
model.train()
running_loss = 0
for ii, (inputs, labels) in enumerate(trainloader):
steps += 1
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
model.eval()
valid_loss = 0
accuracy = 0
with torch.no_grad():
for inputs, labels in validloader:
inputs = inputs.to(device)
labels = labels.to(device)
output = model.forward(inputs)
valid_loss += criterion(output, labels).item()
ps = torch.exp(output)
equality = (labels.data == ps.max(dim=1)[1])
accuracy += equality.type(torch.FloatTensor).mean()
print("Epoch: {}/{}".format(e+1, epochs),
"Loss: {:.4f}".format(running_loss/len(train_data)),
"Validation Loss: {:.3f}".format(
valid_loss/len(validloader)),
"Validation Accuracy: {:.3f}".format(accuracy/len(validloader)))
# Save a Checkpoint
def SaveCheckpoint(model, model_arch, learning_rate, epochs, classifier_input_size, dropout, hidden_units, class_to_idx, save_dir):
# Define Features for Checkpoint
model.to('cpu')
checkpoint = {
'input_size': classifier_input_size,
'hidden_layer': hidden_units,
'output_size': 102,
'dropout': dropout,
'class_to_idx': class_to_idx,
'state_dict': model.state_dict(),
'arch': model_arch,
'learning_rate': learning_rate,
'epochs': epochs
}
# Save the Checkpoint to Specified Directory
torch.save(checkpoint, './' + save_dir + 'checkpoint.pth')
# Loading The Checkpoint
def LoadCheckpoint(checkpoint_path):
# Get Checkpoint from Specified Path
checkpoint = torch.load(checkpoint_path)
# Create a Model from the Information Provided in the Checkpoint
new_model = models.__dict__[checkpoint['arch']](pretrained=True)
new_classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(checkpoint['input_size'],
checkpoint['hidden_layer'])),
('relu', nn.ReLU()),
('drop', nn.Dropout(p=checkpoint['dropout'])),
('fc2', nn.Linear(
checkpoint['hidden_layer'], checkpoint['output_size'])),
('output', nn.LogSoftmax(dim=1))
]))
new_model.classifier = new_classifier
new_model.load_state_dict(checkpoint['state_dict'])
new_model.class_to_idx = checkpoint['class_to_idx']
return new_model
# Process Image to be Used for Pytorch Model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
im = Image.open(image)
    im = im.resize((256, 256))
im = im.crop((16, 16, 240, 240))
np_image = np.array(im)
np_image = np_image / 255
means = np.array([0.485, 0.456, 0.406])
stds = np.array([0.229, 0.224, 0.225])
im = (np_image - means) / stds
    transposed_im = im.transpose(2, 0, 1)
    return torch.from_numpy(transposed_im)
# Predict the Class of an Image
def Predict(image_path, model, device, topk):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
img = process_image(image_path).unsqueeze(0).float()
model, img = model.to(device), img.to(device)
model.eval()
output = torch.exp(model.forward(img))
output = output.topk(topk)
probs = output[0].data.cpu().numpy()[0]
classes = output[1].data.cpu().numpy()[0]
idx_to_class = {key: value for value, key in model.class_to_idx.items()}
classes = [idx_to_class[classes[i]] for i in range(classes.size)]
return probs, classes
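# Hedged usage sketch (illustration only; not in the original script). The checkpoint and image
# paths are hypothetical placeholders; the flow is: restore the model with LoadCheckpoint(),
# then rank the top classes for a single image with Predict().
def _example_predict_flow(checkpoint_path="checkpoint.pth",
                          image_path="flowers/test/1/image_06743.jpg"):
    model = LoadCheckpoint(checkpoint_path)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    probs, classes = Predict(image_path, model, device, topk=5)
    return probs, classes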
def ViewPredictionResults(probs, classes):
dataframe = pd.DataFrame({
'classes': pd.Series(data=classes),
        'probabilities': pd.Series(data=probs, dtype='float64')
    })
    return dataframe
def censor_diagnosis(path,genotype_file,phenotype_file,final_pfile, final_gfile, field ='na',type='ICD',ad=1,start_time=float('nan'),end_time=float('nan')):
import pandas as pd
import numpy as np
    genotypes = pd.read_csv(path+genotype_file)
import matplotlib
import pandas as pd
import numpy as np
import cvxpy as cp
from cvxopt import matrix, solvers
import pickle
import matplotlib.pyplot as plt
import os
from tqdm import tqdm
from colorama import Fore
from config import RISK_FREE_RATE, DATAPATH, EXPECTED_RETURN, STOCKS_NUMBER, MONTO_CARLO_TIMES
class InvestmentStrategy:
@staticmethod
def process_data_x_matrix(datapath):
df_raw = pd.read_excel(datapath)
df_raw = df_raw.T
df_raw = df_raw.drop(index=['code', 'name'], columns=[0])
df_raw = df_raw.fillna(method='ffill')
        # The 32nd stock is missing on its very first day, so back-fill that gap as well
df = df_raw.fillna(method='backfill')
return df
@staticmethod
def process_data_contain_hs300(datapath):
df_raw = pd.read_excel(datapath)
df_raw = df_raw.T
df_raw = df_raw.drop(index=['code', 'name'])
# df_raw.to_excel("./test1.xlsx")
# print(df_raw.isnull().any())
df_raw = df_raw.fillna(method='ffill')
        # The 32nd stock is missing on its very first day, so back-fill that gap as well
df = df_raw.fillna(method='backfill')
return df
@staticmethod
def day_yield_compute(x_matrix):
day_yield = (x_matrix.shift(-1) - x_matrix) / x_matrix
return day_yield.iloc[:-1, :]
@staticmethod
def ex_vector_compute(x_matrix):
day_yield = (x_matrix.shift(-1) - x_matrix) / x_matrix
day_avg_yield = day_yield.mean().to_numpy()
return day_yield.iloc[:-1, :], day_avg_yield
@staticmethod
def ex_matrix_compute(x_matrix, ex_numpy_vector):
ex_np = np.repeat(np.expand_dims(ex_numpy_vector, axis=0), x_matrix.shape[0], axis=0)
ex_matrix = pd.DataFrame(ex_np, index=x_matrix.index, columns=x_matrix.columns)
return ex_matrix
@staticmethod
def cov_matrix_compute(x_ex_matrix):
return np.matmul(x_ex_matrix.T.to_numpy(), x_ex_matrix.to_numpy()) / (x_ex_matrix.shape[0] - 1)
def compute_weight(self, x_matrix, total_days=252, method="Markowitz", starttime=0, endtime=0):
        # ex_numpy_vector is r-bar, a (50,) numpy vector of mean daily returns
        # x_matrix is the matrix X [days-in-window rows x 50 columns], e.g. (1212, 50) on the first weight calculation
        # ex_matrix is the EX matrix [days-in-window rows x 50 columns]
        # x_ex_matrix is the matrix X - EX
        # covariance matrix: cov (50, 50)
total_days_every_year = total_days / 5
day_yield_matrix, ex_numpy_vector = self.ex_vector_compute(x_matrix)
ex_matrix = self.ex_matrix_compute(day_yield_matrix, ex_numpy_vector)
x_ex_matrix = day_yield_matrix - ex_matrix
cov_matrix_numpy = self.cov_matrix_compute(x_ex_matrix)
# stocks_number = 50
n = STOCKS_NUMBER
one_matrix = np.ones((1, n))
'''
        # The cvxopt package can solve this QP as well
P = matrix(cov_matrix_numpy.tolist())
# print(P)
# print('*' * 50)
q = matrix([0.0] * 50)
# G = matrix([[-1.0, 0.0], [0.0, -1.0]])
# h = matrix([0.0, 0.0])
        A = matrix(np.vstack((ex_numpy_vector, one_matrix)))  # prototype is cvxopt.matrix(array, dims), equivalent to A = matrix([[1.0],[1.0]])
# print(A)
b = matrix([0.1, 1.0])
result = solvers.qp(P=P, q=q, A=A, b=b)
print(result)
print(result['x'])
'''
if method == "Markowitz":
print("\033[0;36;m 开始计算组合权重,采用策略:\033[0m \033[0;34;m Markowitz投资组合 \033[0m")
# print("\033[0;36;m 开始求解二次规划:\033[0m")
annual_yield_vector = ex_numpy_vector * total_days_every_year
w = cp.Variable(n)
prob = cp.Problem(cp.Minimize((1 / 2) * cp.quad_form(w, cov_matrix_numpy)),
[annual_yield_vector.T @ w == EXPECTED_RETURN,
one_matrix @ w == 1])
prob.solve()
# print("\nThe optimal value is", prob.value)
# # print("A solution w is")
# # print(w.value)
print("\033[0;36;m 完成Markowitz投资组合最优权重二次规划求解,方差最优值为:\033[0m \033[0;34;m {} \033[0m".format(prob.value))
return w.value
r_p_list = []
sigma_p_list = []
sharpe_ratio_list = []
weight_list = []
if method == "MontoCarlo":
print("\033[0;36;m 开始计算组合权重,采用策略:\033[0m \033[0;34;m Monto Carlo 求解最大夏普比率 \033[0m")
# 正态分布均值设置为 1 / 50 更符合
np.random.seed(1)
risk_free_rate_day = RISK_FREE_RATE / total_days_every_year
bar = tqdm(list(range(int(MONTO_CARLO_TIMES))),
bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET))
for _ in bar:
# bar.set_description(f"现在到Monto Carlo第{_}次")
weights = np.random.normal(1 / n, 1.0, n - 1)
weights_last = 1 - np.sum(weights)
weights = np.append(weights, weights_last)
weights_row_vector = np.expand_dims(weights, axis=0)
yield_avg_vector = np.expand_dims(ex_numpy_vector, axis=0)
sigma_p = np.sqrt(np.matmul(np.matmul(weights_row_vector, cov_matrix_numpy), weights_row_vector.T))[0][
0]
r_p = np.matmul(weights_row_vector, yield_avg_vector.T)[0][0]
sharpe_ratio = (r_p - risk_free_rate_day) / sigma_p
r_p_list.append(r_p)
sigma_p_list.append(sigma_p)
sharpe_ratio_list.append(sharpe_ratio)
weight_list.append(weights)
r_p_list_numpy = np.array(r_p_list)
sigma_p_list_numpy = np.array(sigma_p_list)
sharpe_ratio_list_numpy = np.array(sharpe_ratio_list)
weight_list_numpy = np.array(weight_list)
            # Maximum Sharpe ratio
max_sharpe_ratio = np.max(sharpe_ratio_list_numpy)
max_sharpe_ratio_index = np.argmax(sharpe_ratio_list_numpy)
            # Corresponding standard deviation and mean return
sigma_rp = [sigma_p_list_numpy[max_sharpe_ratio_index], r_p_list_numpy[max_sharpe_ratio_index]]
            # Combine r_p with the risk-free asset to hit the 10% return target: alpha is the weight placed in the risk-free asset. In practice alpha ends up close to 97%, because the max-Sharpe market portfolio's daily return already exceeds 10%, far above what the 10% annual target return and the 3% annual risk-free rate imply.
alpha = (EXPECTED_RETURN / total_days_every_year - sigma_rp[1]) / (risk_free_rate_day - sigma_rp[1])
weight_list_numpy_opt_alpha = np.append(weight_list_numpy[max_sharpe_ratio_index], alpha)
print("\033[0;36;m 完成 Monto Carlo 策略权重求解 \033[0m")
# 作图
filename = os.path.join(os.getcwd(), 'images')
if not os.path.exists(filename):
os.makedirs(filename)
plt.figure(figsize=(8, 6))
plt.style.use('seaborn-dark')
            plt.rcParams['savefig.dpi'] = 300  # saved-figure resolution
            plt.rcParams['figure.dpi'] = 300  # display resolution
            plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels correctly
            plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
plt.scatter(sigma_p_list_numpy, r_p_list_numpy, c=r_p_list_numpy / sigma_p_list_numpy,
marker='o', cmap='coolwarm')
plt.plot([0, sigma_rp[0]], [risk_free_rate_day, sigma_rp[1]], 'r')
# plt.annotate('max Sharpe ratio:'.format(max_sharpe_ratio), xy=rp_sigma, xytext=(3, 1.5),
# arrowprops=dict(facecolor='black', shrink=0.05),
# )
plt.annotate('max Sharpe ratio:{}'.format(max_sharpe_ratio), xy=sigma_rp)
            plt.xlabel('Daily standard deviation')
            plt.ylabel('Daily return')
            plt.colorbar(label='Sharpe ratio')
            plt.title("CAL and efficient frontier from {} Monte Carlo samples".format(MONTO_CARLO_TIMES))
plt.savefig("./images/Montacarlo_CAL_{}_{}_{}".format(MONTO_CARLO_TIMES, starttime, endtime), dpi=300)
print("\033[0;36;m 完成资本市场线作图 \033[0m")
return weight_list_numpy_opt_alpha
@staticmethod
def get_six_month_map(x_matrix):
        dfx = pd.DataFrame(x_matrix.index, columns=['time'])
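# Hedged usage sketch (illustration only; not part of the original file). It assumes an Excel
# price sheet in the layout expected by process_data_x_matrix(); the path below is a placeholder.
def _example_markowitz_weights(datapath="./data/stock_prices.xlsx"):
    strategy = InvestmentStrategy()
    x_matrix = strategy.process_data_x_matrix(datapath)
    # Returns the 50 portfolio weights that minimize variance for the target annual return
    return strategy.compute_weight(x_matrix, total_days=252, method="Markowitz")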
##############################################################
# #
# <NAME> (2021) #
# Textmining medical notes for cognition #
# ParseDataset #
# #
##############################################################
# Import the relevant packages
import re
import pandas as pd
class ParseDataset:
def __init__(self, filename, file, i):
self.file = file
self.i = i
self.filename = filename
def create_dataset(self):
ID, re = self.get_ID()
self.data_table = pd.DataFrame({'ID':ID, 're':re}, index=[self.i])
def get_ID(self):
id = str(self.filename.split('/')[-1].split('.')[0])
return self.check_id(id)
def check_id(self, id):
ids = id.split('-')
if ids[-1] == 're':
re = True
else:
re = False
id = ids[0].split('_')[-1]
return id, re
def pre_parser(self):
"""
parsing basic variables of files in to dict
"""
# make dict from items in file
items = {}
# parse each line in the file
for line in self.file.splitlines():
# remove whitespace at the end of the line
line = line.strip()
line = re.sub(r"\s+", " ", line)
if line.startswith('Verhage:'):
line = line.split(':')
items['verhage'] = line[-1]
elif line.startswith('Opleiding:'):
line = line.split(':')
items['opleiding'] = line[-1]
elif line.startswith('Aantal jaren:'):
line = line.split(':')
items['jaren'] = line[-1]
elif line.startswith('handvoorkeur'):
line = line.split(':')
items['hand'] = line[-1]
elif line.startswith('Partner'):
line = line.split(':')
items['partner'] = line[-1]
elif line.startswith('Woonsituatie'):
line = line.split(':')
items['woon'] = line[-1]
elif line.startswith('Roker'):
line = line.split(':')
items['roker'] = line[-1]
elif line.startswith('Alcohol'):
line = line.split(':')
items['alcohol'] = line[-1]
elif line.startswith('Kleurenblind'):
line = line.split(':')
items['kleur'] = line[-1]
elif line.startswith('Epilepsie:'):
line = line.split(':')
items['epil'] = line[-1]
elif line.startswith('Computergebruik:'):
line = line.split(':')
items['com'] = line[-1]
break
else:
continue
data = pd.DataFrame(items, index=[self.i])
self.data_table = self.data_table.join(data)
def T0_parser(self):
"""
parsing basic pre measurements and questions variables of files in to dict format for T0
"""
try:
file = self.file.split('PRE-meting')[1:]
file = "\n".join(file).split('POST-meting')[0].replace('3 maanden', '').replace('12 maanden', '')
except:
file = ''
lines = file.splitlines()
items = self.test_parser(lines)
items = {'T0_' + str(key): val for key, val in items.items()}
        data = pd.DataFrame(items, index=[self.i])
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import object
from past.utils import old_div
import os
import numpy as np
import pandas as pd
from threeML.io.rich_display import display
from threeML.io.file_utils import sanitize_filename
from ..serialize import Serialization
from .from_root_file import from_root_file
from .from_hdf5_file import from_hdf5_file
import astropy.units as u
def map_tree_factory(map_tree_file, roi):
# Sanitize files in input (expand variables and so on)
map_tree_file = sanitize_filename(map_tree_file)
if os.path.splitext(map_tree_file)[-1] == '.root':
return MapTree.from_root_file(map_tree_file, roi)
else:
return MapTree.from_hdf5(map_tree_file, roi)
class MapTree(object):
def __init__(self, analysis_bins, roi):
self._analysis_bins = analysis_bins
self._roi = roi
@classmethod
def from_hdf5(cls, map_tree_file, roi):
data_analysis_bins = from_hdf5_file(map_tree_file, roi)
return cls(data_analysis_bins, roi)
@classmethod
def from_root_file(cls, map_tree_file, roi):
"""
Create a MapTree object from a ROOT file and a ROI. Do not use this directly, use map_tree_factory instead.
:param map_tree_file:
:param roi:
:return:
"""
data_analysis_bins = from_root_file(map_tree_file, roi)
return cls(data_analysis_bins, roi)
def __iter__(self):
"""
This allows to loop over the analysis bins as in:
for analysis_bin in maptree:
... do something ...
:return: analysis bin_name iterator
"""
for analysis_bin in self._analysis_bins:
yield analysis_bin
def __getitem__(self, item):
"""
This allows to access the analysis bins by name:
first_analysis_bin = maptree["bin_name 0"]
:param item: string for access by name
:return: the analysis bin_name
"""
try:
return self._analysis_bins[item]
except IndexError:
raise IndexError("Analysis bin_name with index %i does not exist" % (item))
def __len__(self):
return len(self._analysis_bins)
@property
def analysis_bins_labels(self):
return list(self._analysis_bins.keys())
def display(self):
df = pd.DataFrame()
df['Bin'] = list(self._analysis_bins.keys())
df['Nside'] = [self._analysis_bins[bin_id].nside for bin_id in self._analysis_bins]
df['Scheme'] = [self._analysis_bins[bin_id].scheme for bin_id in self._analysis_bins]
# Compute observed counts, background counts, how many pixels we have in the ROI and
# the sky area they cover
n_bins = len(self._analysis_bins)
obs_counts = np.zeros(n_bins)
bkg_counts = np.zeros_like(obs_counts)
n_pixels = np.zeros(n_bins, dtype=int)
sky_area = np.zeros_like(obs_counts)
size = 0
for i, bin_id in enumerate(self._analysis_bins):
analysis_bin = self._analysis_bins[bin_id]
sparse_obs = analysis_bin.observation_map.as_partial()
sparse_bkg = analysis_bin.background_map.as_partial()
size += sparse_obs.nbytes
size += sparse_bkg.nbytes
obs_counts[i] = sparse_obs.sum()
bkg_counts[i] = sparse_bkg.sum()
n_pixels[i] = sparse_obs.shape[0]
sky_area[i] = n_pixels[i] * analysis_bin.observation_map.pixel_area
df['Obs counts'] = obs_counts
df['Bkg counts'] = bkg_counts
df['obs/bkg'] = old_div(obs_counts, bkg_counts)
df['Pixels in ROI'] = n_pixels
df['Area (deg^2)'] = sky_area
display(df)
first_bin_id = list(self._analysis_bins.keys())[0]
print("This Map Tree contains %.3f transits in the first bin" \
% self._analysis_bins[first_bin_id].n_transits)
print("Total data size: %.2f Mb" % (size * u.byte).to(u.megabyte).value)
def write(self, filename):
"""
Export the tree to a HDF5 file.
NOTE: if an ROI has been applied, only the data within the ROI will be saved.
:param filename: output filename. Use an extension .hd5 or .hdf5 to ensure proper handling by downstream
software
:return: None
"""
# Make a dataframe with the ordered list of bin names
# bin_names = map(lambda x:x.name, self._analysis_bins)
# Create a dataframe with a multi-index, with the energy bin name as first level and the HEALPIX pixel ID
# as the second level
multi_index_keys = []
dfs = []
all_metas = []
for bin_id in self._analysis_bins:
analysis_bin = self._analysis_bins[bin_id]
assert bin_id == analysis_bin.name, \
'Bin name inconsistency: {} != {}'.format(bin_id, analysis_bin.name)
multi_index_keys.append(analysis_bin.name)
this_df, this_meta = analysis_bin.to_pandas()
dfs.append(this_df)
all_metas.append(pd.Series(this_meta))
analysis_bins_df = pd.concat(dfs, axis=0, keys=multi_index_keys)
meta_df = pd.concat(all_metas, axis=1, keys=multi_index_keys).T
with Serialization(filename, mode='w') as serializer:
serializer.store_pandas_object('/analysis_bins', analysis_bins_df)
serializer.store_pandas_object('/analysis_bins_meta', meta_df)
# Write the ROI
if self._roi is not None:
            serializer.store_pandas_object('/ROI', pd.Series())
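# Hedged usage sketch (illustration only; not part of the original module). The map-tree path and
# ROI object are placeholders; map_tree_factory() dispatches on the file extension, and
# MapTree.write() re-exports the (possibly ROI-trimmed) data to HDF5.
def _example_roundtrip_map_tree(map_tree_path, roi):
    map_tree = map_tree_factory(map_tree_path, roi)
    map_tree.display()
    map_tree.write("trimmed_maptree.hd5")
    return len(map_tree)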
from functools import reduce
import numpy as np
import pandas as pd
import pyprind
from .enums import *
class Backtest:
"""Backtest runner class."""
def __init__(self, allocation, initial_capital=1_000_000, shares_per_contract=100):
assets = ('stocks', 'options', 'cash')
total_allocation = sum(allocation.get(a, 0.0) for a in assets)
self.allocation = {}
for asset in assets:
self.allocation[asset] = allocation.get(asset, 0.0) / total_allocation
self.initial_capital = initial_capital
self.stop_if_broke = True
self.shares_per_contract = shares_per_contract
self._stocks = []
self._options_strategy = None
self._stocks_data = None
self._options_data = None
@property
def stocks(self):
return self._stocks
@stocks.setter
def stocks(self, stocks):
assert np.isclose(sum(stock.percentage for stock in stocks), 1.0,
atol=0.000001), 'Stock percentages must sum to 1.0'
self._stocks = list(stocks)
return self
@property
def options_strategy(self):
return self._options_strategy
@options_strategy.setter
def options_strategy(self, strat):
self._options_strategy = strat
@property
def stocks_data(self):
return self._stocks_data
@stocks_data.setter
def stocks_data(self, data):
self._stocks_schema = data.schema
self._stocks_data = data
@property
def options_data(self):
return self._options_data
@options_data.setter
def options_data(self, data):
self._options_schema = data.schema
self._options_data = data
def run(self, rebalance_freq=0, monthly=False, sma_days=None):
"""Runs the backtest and returns a `pd.DataFrame` of the orders executed (`self.trade_log`)
Args:
rebalance_freq (int, optional): Determines the frequency of portfolio rebalances. Defaults to 0.
monthly (bool, optional): Iterates through data monthly rather than daily. Defaults to False.
Returns:
pd.DataFrame: Log of the trades executed.
"""
assert self._stocks_data, 'Stock data not set'
assert all(stock.symbol in self._stocks_data['symbol'].values
for stock in self._stocks), 'Ensure all stocks in portfolio are present in the data'
assert self._options_data, 'Options data not set'
assert self._options_strategy, 'Options Strategy not set'
assert self._options_data.schema == self._options_strategy.schema
option_dates = self._options_data['date'].unique()
stock_dates = self.stocks_data['date'].unique()
assert np.array_equal(stock_dates,
option_dates), 'Stock and options dates do not match (check that TZ are equal)'
self._initialize_inventories()
self.current_cash = self.initial_capital
self.trade_log = pd.DataFrame()
self.balance = pd.DataFrame({
'total capital': self.current_cash,
'cash': self.current_cash
},
index=[self.stocks_data.start_date - pd.Timedelta(1, unit='day')])
if sma_days:
self.stocks_data.sma(sma_days)
dates = pd.DataFrame(self.options_data._data[['quotedate',
'volume']]).drop_duplicates('quotedate').set_index('quotedate')
rebalancing_days = pd.to_datetime(
dates.groupby(pd.Grouper(freq=str(rebalance_freq) +
'BMS')).apply(lambda x: x.index.min()).values) if rebalance_freq else []
data_iterator = self._data_iterator(monthly)
bar = pyprind.ProgBar(len(stock_dates), bar_char='█')
for date, stocks, options in data_iterator:
if (date in rebalancing_days):
previous_rb_date = rebalancing_days[rebalancing_days.get_loc(date) -
1] if rebalancing_days.get_loc(date) != 0 else date
self._update_balance(previous_rb_date, date)
self._rebalance_portfolio(date, stocks, options, sma_days)
bar.update()
# Update balance for the period between the last rebalancing day and the last day
self._update_balance(rebalancing_days[-1], self.stocks_data.end_date)
self.balance['options capital'] = self.balance['calls capital'] + self.balance['puts capital']
self.balance['stocks capital'] = sum(self.balance[stock.symbol] for stock in self._stocks)
self.balance['stocks capital'].iloc[0] = 0
self.balance['options capital'].iloc[0] = 0
self.balance[
'total capital'] = self.balance['cash'] + self.balance['stocks capital'] + self.balance['options capital']
self.balance['% change'] = self.balance['total capital'].pct_change()
self.balance['accumulated return'] = (1.0 + self.balance['% change']).cumprod()
return self.trade_log
def _initialize_inventories(self):
"""Initialize empty stocks and options inventories."""
columns = pd.MultiIndex.from_product(
[[l.name for l in self._options_strategy.legs],
['contract', 'underlying', 'expiration', 'type', 'strike', 'cost', 'order']])
totals = pd.MultiIndex.from_product([['totals'], ['cost', 'qty', 'date']])
self._options_inventory = pd.DataFrame(columns=columns.append(totals))
self._stocks_inventory = pd.DataFrame(columns=['symbol', 'price', 'qty'])
def _data_iterator(self, monthly):
"""Returns combined iterator for stock and options data.
Each step, it produces a tuple like the following:
(date, stocks, options)
Returns:
generator: Daily/monthly iterator over `self._stocks_data` and `self.options_data`.
"""
if monthly:
it = zip(self._stocks_data.iter_months(), self._options_data.iter_months())
else:
it = zip(self._stocks_data.iter_dates(), self._options_data.iter_dates())
return ((date, stocks, options) for (date, stocks), (_, options) in it)
def _rebalance_portfolio(self, date, stocks, options, sma_days):
"""Reabalances the portfolio according to `self.allocation` weights.
Args:
date (pd.Timestamp): Current date.
stocks (pd.DataFrame): Stocks data for the current date.
options (pd.DataFrame): Options data for the current date.
sma_days (int): SMA window size
"""
self._execute_option_exits(date, options)
stock_capital = self._current_stock_capital(stocks)
options_capital = self._current_options_capital(options)
total_capital = self.current_cash + stock_capital + options_capital
# buy stocks
stocks_allocation = self.allocation['stocks'] * total_capital
self._stocks_inventory = pd.DataFrame(columns=['symbol', 'price', 'qty'])
# We simulate a sell of the stock positions and then a rebuy.
# This would **not** work if we added transaction fees.
self.current_cash = stocks_allocation + total_capital * self.allocation['cash']
self._buy_stocks(stocks, stocks_allocation, sma_days)
# exit/enter contracts
options_allocation = self.allocation['options'] * total_capital
if options_allocation >= options_capital:
self._execute_option_entries(date, options, options_allocation - options_capital)
else:
to_sell = options_capital - options_allocation
current_options = self._get_current_option_quotes(options)
self._sell_some_options(date, to_sell, current_options)
def _sell_some_options(self, date, to_sell, current_options):
sold = 0
total_costs = sum([current_options[i]['cost'] for i in range(len(current_options))])
for (exit_cost, (row_index, inventory_row)) in zip(total_costs, self._options_inventory.iterrows()):
if (to_sell - sold > -exit_cost) and (to_sell - sold) > 0:
qty_to_sell = (to_sell - sold) // exit_cost
if -qty_to_sell <= inventory_row['totals']['qty']:
qty_to_sell = (to_sell - sold) // exit_cost
else:
if qty_to_sell != 0:
qty_to_sell = -inventory_row['totals']['qty']
if qty_to_sell != 0:
trade_log_append = self._options_inventory.loc[row_index].copy()
trade_log_append['totals', 'qty'] = -qty_to_sell
trade_log_append['totals', 'date'] = date
trade_log_append['totals', 'cost'] = exit_cost
for i, leg in enumerate(self._options_strategy.legs):
trade_log_append[leg.name, 'order'] = ~trade_log_append[leg.name, 'order']
trade_log_append[leg.name, 'cost'] = current_options[i].loc[row_index]['cost']
self.trade_log = self.trade_log.append(trade_log_append, ignore_index=True)
self._options_inventory.at[row_index, ('totals', 'date')] = date
self._options_inventory.at[row_index, ('totals', 'qty')] += qty_to_sell
sold += (qty_to_sell * exit_cost)
self.current_cash += sold - to_sell
def _current_stock_capital(self, stocks):
"""Return the current value of the stocks inventory.
Args:
stocks (pd.DataFrame): Stocks data for the current time step.
Returns:
float: Total capital in stocks.
"""
current_stocks = self._stocks_inventory.merge(stocks,
how='left',
left_on='symbol',
right_on=self._stocks_schema['symbol'])
return (current_stocks[self._stocks_schema['adjClose']] * current_stocks['qty']).sum()
def _current_options_capital(self, options):
options_value = self._get_current_option_quotes(options)
values_by_row = [0] * len(options_value[0])
if len(options_value[0]) != 0:
for i in range(len(self._options_strategy.legs)):
values_by_row += options_value[i]['cost'].values
total = -sum(values_by_row * self._options_inventory['totals']['qty'].values)
else:
total = 0
return total
def _buy_stocks(self, stocks, allocation, sma_days):
"""Buys stocks according to their given weight, optionally using an SMA entry filter.
Updates `self._stocks_inventory` and `self.current_cash`.
Args:
stocks (pd.DataFrame): Stocks data for the current time step.
allocation (float): Total capital allocation for stocks.
sma_days (int): SMA window.
"""
stock_symbols = [stock.symbol for stock in self.stocks]
query = '{} in {}'.format(self._stocks_schema['symbol'], stock_symbols)
inventory_stocks = stocks.query(query)
stock_percentages = np.array([stock.percentage for stock in self.stocks])
stock_prices = inventory_stocks[self._stocks_schema['adjClose']]
if sma_days:
qty = np.where(inventory_stocks['sma'] < stock_prices, (allocation * stock_percentages) // stock_prices, 0)
else:
qty = (allocation * stock_percentages) // stock_prices
self.current_cash -= np.sum(stock_prices * qty)
self._stocks_inventory = pd.DataFrame({'symbol': stock_symbols, 'price': stock_prices, 'qty': qty})
def _update_balance(self, start_date, end_date):
"""Updates self.balance in batch in a certain period between rebalancing days"""
stocks_date_col = self._stocks_schema['date']
stocks_data = self._stocks_data.query('({date_col} >= "{start_date}") & ({date_col} < "{end_date}")'.format(
date_col=stocks_date_col, start_date=start_date, end_date=end_date))
options_date_col = self._options_schema['date']
options_data = self._options_data.query('({date_col} >= "{start_date}") & ({date_col} < "{end_date}")'.format(
date_col=options_date_col, start_date=start_date, end_date=end_date))
calls_value = pd.Series(0, index=options_data[options_date_col].unique())
puts_value = pd.Series(0, index=options_data[options_date_col].unique())
for leg in self._options_strategy.legs:
leg_inventory = self._options_inventory[leg.name]
cost_field = (~leg.direction).value
for contract in leg_inventory['contract']:
leg_inventory_contract = leg_inventory.query('contract == "{}"'.format(contract))
qty = self._options_inventory.loc[leg_inventory_contract.index]['totals']['qty'].values[0]
options_contract_col = self._options_schema['contract']
current = leg_inventory_contract[['contract']].merge(options_data,
how='left',
left_on='contract',
right_on=options_contract_col)
current.set_index(options_date_col, inplace=True)
if cost_field == Direction.BUY.value:
current[cost_field] = -current[cost_field]
if (leg_inventory_contract['type'] == Type.CALL.value).any():
calls_value = calls_value.add(current[cost_field] * qty * self.shares_per_contract, fill_value=0)
else:
puts_value = puts_value.add(current[cost_field] * qty * self.shares_per_contract, fill_value=0)
stocks_current = self._stocks_inventory[['symbol', 'qty']].merge(stocks_data[['date', 'symbol', 'adjClose']],
on='symbol')
stocks_current['cost'] = stocks_current['qty'] * stocks_current['adjClose']
columns = [
stocks_current[stocks_current['symbol'] == stock.symbol].set_index(stocks_date_col)[[
'cost'
]].rename(columns={'cost': stock.symbol}) for stock in self._stocks
]
add = pd.concat(columns, axis=1)
add['cash'] = self.current_cash
add['options qty'] = self._options_inventory['totals']['qty'].sum()
add['calls capital'] = calls_value
add['puts capital'] = puts_value
add['stocks qty'] = self._stocks_inventory['qty'].sum()
for _index, row in self._stocks_inventory.iterrows():
symbol = row['symbol']
add[symbol + ' qty'] = row['qty']
# sort=False means we're assuming the updates are done in chronological order, i.e,
# the dates in add are the immediate successors to the ones at the end of self.balance.
# Pass sort=True to ensure self.balance is always sorted chronologically if needed.
self.balance = self.balance.append(add, sort=False)
def _execute_option_entries(self, date, options, options_allocation):
"""Enters option positions according to `self._options_strategy`.
Calls `self._pick_entry_signals` to select from the entry signals given by the strategy.
Updates `self._options_inventory` and `self.current_cash`.
Args:
date (pd.Timestamp): Current date.
options (pd.DataFrame): Options data for the current time step.
options_allocation (float): Capital amount allocated to options.
"""
self.current_cash += options_allocation
# Remove contracts already in inventory
inventory_contracts = pd.concat(
[self._options_inventory[leg.name]['contract'] for leg in self._options_strategy.legs])
subset_options = options[~options[self._options_schema['contract']].isin(inventory_contracts)]
entry_signals = []
for leg in self._options_strategy.legs:
flt = leg.entry_filter
cost_field = leg.direction.value
leg_entries = subset_options[flt(subset_options)]
# Exit if no entry signals for the current leg
if leg_entries.empty:
return
fields = self._signal_fields(cost_field)
leg_entries = leg_entries.reindex(columns=fields.keys())
leg_entries.rename(columns=fields, inplace=True)
order = get_order(leg.direction, Signal.ENTRY)
leg_entries['order'] = order
# Change sign of cost for SELL orders
if leg.direction == Direction.SELL:
leg_entries['cost'] = -leg_entries['cost']
leg_entries['cost'] *= self.shares_per_contract
leg_entries.columns = pd.MultiIndex.from_product([[leg.name], leg_entries.columns])
entry_signals.append(leg_entries.reset_index(drop=True))
# Append the 'totals' column to entry_signals
total_costs = sum([leg_entry.droplevel(0, axis=1)['cost'] for leg_entry in entry_signals])
qty = options_allocation // abs(total_costs)
        totals = pd.DataFrame.from_dict({'cost': total_costs, 'qty': qty, 'date': date})
#TODO:
import tensorflow as tf
import os
import argparse
import sys
import random
import math
import logging
import operator
import itertools
import datetime
import numpy as np
import pandas as pd
from csv import reader
from random import randrange
FLAGS = None
#FORMAT = '%(asctime)s %(levelname)s %(message)s'
#logging.basicConfig(format=FORMAT)
#logger = logging.getLogger('tensorflow')
logger = logging.getLogger('tensorflow')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.removeHandler(logger.handlers[0])
logger.propagate = False
def sales_example(sales):
record = {
'sales': tf.train.Feature(float_list=tf.train.FloatList(value=sales))
}
return tf.train.Example(features=tf.train.Features(feature=record))
def capacity_example(capacity):
record = {
'capacity': tf.train.Feature(float_list=tf.train.FloatList(value=capacity))
}
return tf.train.Example(features=tf.train.Features(feature=record))
def stock_example(stock):
record = {
'stock': tf.train.Feature(float_list=tf.train.FloatList(value=stock))
}
return tf.train.Example(features=tf.train.Features(feature=record))
#https://stackoverflow.com/questions/553303/generate-a-random-date-between-two-other-dates
def random_date(start, end):
return start + datetime.timedelta(
seconds=random.randint(0, int((end - start).total_seconds())),
)
def create_records(number_of_products, start_date, end_date, start_time_period, middle_time_period, end_time_period, orders_file, products_file, departments_file, order_products_prior_file, order_products_train_file, train_tfrecords_file, test_tfrecords_file, capacity_tfrecords_file, stock_tfrecords_file):
stock = np.random.uniform(low=0.0, high=1.0, size=(FLAGS.number_of_products))
with tf.io.TFRecordWriter(stock_tfrecords_file) as writer:
logger.debug ("stock: {}".format(stock))
tf_example = stock_example(stock)
writer.write(tf_example.SerializeToString())
with open(orders_file, 'r') as f:
csv_reader = reader(f)
next(csv_reader)
orders_list = list(map(tuple, csv_reader))
sorted_orders = sorted(orders_list, key = lambda x: (int(x[1]), int(x[3])))
dated_orders = []
i = 0
for k, g in itertools.groupby(sorted_orders, lambda x : int(x[1])):
item = next(g)
order_date = random_date(start_date, end_date)
while order_date.weekday() != int(item[4]):
order_date = order_date + datetime.timedelta(days=1)
start_date = datetime.datetime.combine(start_date, datetime.datetime.min.time())
end_date = datetime.datetime.combine(end_date, datetime.datetime.min.time())
order_date = datetime.datetime(order_date.year, order_date.month, order_date.day, int(item[5]), 0, 0)
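# time_period indexes the 6-hour bucket (60*60*6 seconds) elapsed since start_date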
time_period = int((order_date - start_date).total_seconds() / (60*60*6))
dated_orders.append((int(item[0]), int(item[1]), int(item[4]), order_date, time_period))
for item in g:
order_date = order_date + datetime.timedelta(days=int(float(item[6])))
order_date = datetime.datetime(order_date.year, order_date.month, order_date.day, int(item[5]), 0, 0)
time_period = int((order_date - start_date).total_seconds() / (60*60*6))
dated_orders.append((int(item[0]), int(item[1]), int(item[4]), order_date, time_period))
orders = pd.DataFrame(dated_orders, columns =['order_id', 'user_id', 'order_dow', 'order_date', 'time_period'])
products = pd.read_csv(products_file)
departments = pd.read_csv(departments_file)
prior_order = pd.read_csv("data/order_products__prior.csv")
train_order = pd.read_csv("data/order_products__train.csv")
#aisles = pd.read_csv("data/aisles.csv")
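# ntop: keep only the top FLAGS.top_products fraction of the product catalogue, ranked below by how often each product was ordered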
ntop = int(FLAGS.top_products*products['product_id'].count())
all_ordered_products = pd.concat([prior_order, train_order], axis=0)[["order_id", "product_id"]]
largest = all_ordered_products[['product_id']].groupby(['product_id']).size().nlargest(ntop).to_frame()
largest.reset_index(inplace=True)
products_largest = pd.merge(largest, products, how="left", on="product_id")
# coding: utf-8
# We are going to try to predict whether a loan will be late or default using the data below. Then we do the preprocessing and explore the data.
# ### Import Libraries
# In[ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set(style='white', font_scale=0.9)
# ### Import DataSet
# In[ ]:
dataset = pd.read_csv('../input/loan.csv')
# In[ ]:
pd.set_option('display.max_columns', len(dataset.columns))
dataset.head(3)
# In[ ]:
pd.reset_option('display.max_columns')
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
from evalml.pipelines.components import LabelEncoder
def test_label_encoder_init():
encoder = LabelEncoder()
assert encoder.parameters == {"positive_label": None}
assert encoder.random_seed == 0
def test_label_encoder_fit_transform_y_is_None():
X = pd.DataFrame({})
y = pd.Series(["a", "b"])
encoder = LabelEncoder()
with pytest.raises(ValueError, match="y cannot be None"):
encoder.fit(X)
encoder.fit(X, y)
with pytest.raises(ValueError, match="y cannot be None"):
encoder.inverse_transform(None)
def test_label_encoder_transform_y_is_None():
X = pd.DataFrame({})
y = pd.Series(["a", "b"])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X)
assert_frame_equal(X, X_t)
assert y_t is None
def test_label_encoder_fit_transform_with_numeric_values_does_not_encode():
X = pd.DataFrame({})
# binary
y = pd.Series([0, 1, 1, 1, 0])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y, y_t)
# multiclass
X = pd.DataFrame({})
y = pd.Series([0, 1, 1, 2, 0, 2])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y, y_t)
def test_label_encoder_fit_transform_with_numeric_values_needs_encoding():
X = pd.DataFrame({})
# binary
y = pd.Series([2, 1, 2, 1])
y_expected = pd.Series([1, 0, 1, 0])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
# multiclass
y = pd.Series([0, 1, 1, 3, 0, 3])
y_expected = pd.Series([0, 1, 1, 2, 0, 2])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
def test_label_encoder_fit_transform_with_categorical_values():
X = pd.DataFrame({})
# binary
y = pd.Series(["b", "a", "b", "b"])
y_expected = pd.Series([1, 0, 1, 1])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
# multiclass
y = pd.Series(["c", "a", "b", "c", "d"])
y_expected = pd.Series([2, 0, 1, 2, 3])
encoder = LabelEncoder()
encoder.fit(X, y)
X_t, y_t = encoder.transform(X, y)
assert_frame_equal(X, X_t)
assert_series_equal(y_expected, y_t)
def test_label_encoder_fit_transform_equals_fit_and_transform():
X = pd.DataFrame({})
y = pd.Series(["a", "b", "c", "a"])
encoder = LabelEncoder()
X_fit_transformed, y_fit_transformed = encoder.fit_transform(X, y)
encoder_duplicate = LabelEncoder()
encoder_duplicate.fit(X, y)
X_transformed, y_transformed = encoder_duplicate.transform(X, y)
assert_frame_equal(X_fit_transformed, X_transformed)
assert_series_equal(y_fit_transformed, y_transformed)
def test_label_encoder_inverse_transform():
X = pd.DataFrame({})
y = pd.Series(["a", "b", "c", "a"])
y_expected = ww.init_series(y)
encoder = LabelEncoder()
_, y_fit_transformed = encoder.fit_transform(X, y)
y_inverse_transformed = encoder.inverse_transform(y_fit_transformed)
assert_series_equal(y_expected, y_inverse_transformed)
y_encoded = pd.Series([1, 0, 2, 1])
y_expected = ww.init_series(pd.Series(["b", "a", "c", "b"]))
y_inverse_transformed = encoder.inverse_transform(y_encoded)
assert_series_equal(y_expected, y_inverse_transformed)
def test_label_encoder_with_positive_label_multiclass_error():
y = pd.Series(["a", "b", "c", "a"])
encoder = LabelEncoder(positive_label="a")
with pytest.raises(
ValueError,
match="positive_label should only be set for binary classification targets",
):
encoder.fit(None, y)
def test_label_encoder_with_positive_label_missing_from_input():
y = pd.Series(["a", "b", "a"])
encoder = LabelEncoder(positive_label="z")
with pytest.raises(
ValueError,
match="positive_label was set to `z` but was not found in the input target data.",
):
encoder.fit(None, y)
@pytest.mark.parametrize(
"y, positive_label, y_encoded_expected",
[
(
pd.Series([True, False, False, True]),
False,
pd.Series([0, 1, 1, 0]),
), # boolean
(
pd.Series([True, False, False, True]),
True,
pd.Series([1, 0, 0, 1]),
), # boolean
(
pd.Series([0, 1, 1, 0]),
0,
pd.Series([1, 0, 0, 1]),
), # int, 0 / 1, encoding should flip
(
pd.Series([0, 1, 1, 0]),
1,
pd.Series([0, 1, 1, 0]),
"""
Fields
------
In this module the Fields are defined, which are the main containers for
the elements.
"""
from dataclasses import dataclass
from typing import Sequence, Tuple, Optional
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt, ticker
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from scipy.integrate import ode
from plofeld.utils.classes import Vector
from plofeld.utils.constants import INTEGRATION_TIME_STEP, ZORDER, ELECTRIC, MAGNETIC, RealNumber
from plofeld.elements import PointCharge
from plofeld.utils.plotting import (
charge_to_marker, charge_to_circle, add_arrow_to_line,
went_straight_through_origin, is_point_visible
)
@dataclass
class StaticField:
charges: Sequence[PointCharge]
field_type: str = ELECTRIC
def __post_init__(self):
if self.field_type == MAGNETIC:
assert all(abs(c.q) == 1 for c in self.charges)
def field_at(self, location: Vector) -> Vector:
"""Calculates the field Vector at a given location from the charges."""
return Vector(*np.sum([c.field_at(location) for c in self.charges], axis=0))
def plot(self,
xlim: Tuple[RealNumber, RealNumber] = (None, None),
ylim: Tuple[RealNumber, RealNumber] = (None, None),
no_ticks: bool = True, **kwargs) -> Figure:
"""Plot the given static field with all the lines and charges.
Args:
xlim (Tuple[RealNumber, RealNumber]): xlimits to set
ylim (Tuple[RealNumber, RealNumber]): ylimits to set
no_ticks (bool): No ticks on the axes. Default: True
Keyword Args:
other: passed on to :func:`plot_field_lines`
"""
fig, ax = plt.subplots(1, 1)
ax.axis("image")
ax.set_xlim(xlim)
ax.set_ylim(ylim)
self.plot_field_lines(ax, **kwargs)
self.plot_charges(ax)
if no_ticks:
ax.xaxis.set_major_locator(ticker.NullLocator())
ax.yaxis.set_major_locator(ticker.NullLocator())
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
fig.tight_layout()
return fig
# Line Plotting ----------------------------------------------------------------
def plot_field_lines(self, ax: Axes, **kwargs):
"""Plot all the field lines around the charges in given static field.
The field lines are on-the-fly evaluated via integration.
So this can take a while.
Inspired by: https://www.numbercrunch.de/blog/2013/05/visualizing-streamlines/
Args:
ax (Axes): Matplotlib Axes to plot on
Keyword Args:
n_lines (int): Number of field lines per charge, distributed in a circle.
Default: ``20``
no_origin_lines (bool): Tries to remove straight lines through the
origin (0, 0). Default: ``False``
arrows (bool): Plot arrows on the lines. Default: ``True``
color (str): Desired color of the lines. Default: ``'k'``
time_step (float): Time step for the integration. Default: see ``INTEGRATION_TIME_STEP``
"""
n_lines: int = kwargs.get('n_lines', 20)
no_origin_lines: bool = kwargs.get('no_origin_lines', False)
arrows: bool = kwargs.get('arrows', True)
color: str = kwargs.get('color', 'k')
time_step: float = kwargs.get('time_step', INTEGRATION_TIME_STEP)
charges_strings = [str(c) for c in self.charges]
connected_charges = pd.DataFrame(False, index=charges_strings, columns=charges_strings, dtype=bool)
from collections import deque
from datetime import datetime
import operator
import numpy as np
import pytest
import pytz
import pandas as pd
import pandas._testing as tm
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
with pytest.raises(TypeError):
x >= y
with pytest.raises(TypeError):
x > y
with pytest.raises(TypeError):
x < y
with pytest.raises(TypeError):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(TypeError):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.slow
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(10 ** 6).reshape(100, -1)
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
import pandas as pd
import numpy as np
# from pandas.core.tools.datetimes import normalize_date
from pandas._libs import tslib
from backend.robinhood_api import RobinhoodAPI
class RobinhoodData:
"""
Wrapper to download orders and dividends from Robinhood accounts
Downloads two dataframes and saves to datafile
----------
Parameters:
datafile : location of h5 datafile
"""
def __init__(self, datafile):
self.datafile = datafile
def _login(self, user, password):
self.client = RobinhoodAPI()
# try import the module with passwords
try:
_temp = __import__('auth')
self.client.login(_temp.local_user, _temp.local_password)
except:
self.client.login(username=user, password=password)
return self
# private method for getting all orders
def _fetch_json_by_url(self, url):
return self.client.session.get(url).json()
# deleting sensitive or redundant fields
def _delete_sensitive_fields(self, df):
for col in ['account', 'url', 'id', 'instrument']:
if col in df:
del df[col]
return df
# download orders and fields requiring RB client
def _download_orders(self):
print("Downloading orders from Robinhood")
orders = []
past_orders = self.client.order_history()
orders.extend(past_orders['results'])
while past_orders['next']:
next_url = past_orders['next']
past_orders = self._fetch_json_by_url(next_url)
orders.extend(past_orders['results'])
df = pd.DataFrame(orders)
df['symbol'] = df['instrument'].apply(
self.client.get_symbol_by_instrument)
df.sort_values(by='created_at', inplace=True)
df.reset_index(inplace=True, drop=True)
df_ord = self._delete_sensitive_fields(df)
return df_ord
# download dividends and fields requiring RB client
def _download_dividends(self):
print("Downloading dividends from Robinhood")
dividends = self.client.dividends()
dividends = [x for x in dividends['results']]
df = pd.DataFrame(dividends)
if df.shape[0] > 0:
df['symbol'] = df['instrument'].apply(
self.client.get_symbol_by_instrument)
df.sort_values(by='paid_at', inplace=True)
df.reset_index(inplace=True, drop=True)
df_div = self._delete_sensitive_fields(df)
else:
df_div = pd.DataFrame(columns=['symbol', 'amount', 'position',
'rate', 'paid_at', 'payable_date'])
return df_div
# process orders
def _process_orders(self, df_ord):
# assign to df and reduce the number of fields
df = df_ord.copy()
fields = [
'created_at',
'average_price', 'cumulative_quantity', 'fees',
'symbol', 'side']
df = df[fields]
# convert types
for field in ['average_price', 'cumulative_quantity', 'fees']:
df[field] = pd.to_numeric(df[field])
for field in ['created_at']:
df[field] = pd.to_datetime(df[field])
# add days
df['date'] = df['created_at'].apply(
lambda x: tslib.normalize_date(x))
# rename columns for consistency
df.rename(columns={
'cumulative_quantity': 'current_size'
}, inplace=True)
# quantity accounting for side of transaction for cumsum later
df['signed_size'] = np.where(
df.side == 'buy',
df['current_size'],
-df['current_size'])
df['signed_size'] = df['signed_size'].astype(np.int64)
return df
# process_orders
def _process_dividends(self, df_div):
df = df_div.copy()
# convert types
for field in ['amount', 'position', 'rate']:
df[field] = pd.to_numeric(df[field])
for field in ['paid_at', 'payable_date']:
df[field] = pd.to_datetime(df[field])
# add days
df['date'] = df['paid_at'].apply(
lambda x: tslib.normalize_date(x))
return df
def _generate_positions(self, df_ord):
"""
Process orders dataframe and generate open and closed positions.
For all open positions close those which were later sold, so that
the cost_basis for open can be calculated correctly. For closed
positions calculate the cost_basis based on the closed open positions.
Note: the oldest open positions are the first to be closed. The logic here
is to reduce the tax exposure.
-----
Parameters:
- Pre-processed df_ord
Return:
- Two dataframes with open and closed positions correspondingly
"""
# prepare dataframe for open and closed positions
df_open = df_ord[df_ord.side == 'buy'].copy()
df_closed = df_ord[df_ord.side == 'sell'].copy()
# create a new column for today's position size
# TODO: may be redundant - review later
df_open['final_size'] = df_open['current_size']
df_closed['final_size'] = df_closed['current_size']
# main loop
for i_closed, row_closed in df_closed.iterrows():
sell_size = row_closed.final_size
sell_cost_basis = 0
for i_open, row_open in df_open[
(df_open.symbol == row_closed.symbol) &
(df_open.date < row_closed.date)].iterrows():
new_sell_size = sell_size - df_open.loc[i_open, 'final_size']
new_sell_size = 0 if new_sell_size < 0 else new_sell_size
new_open_size = df_open.loc[i_open, 'final_size'] - sell_size
new_open_size = new_open_size if new_open_size > 0 else 0
# updating open positions
df_open.loc[i_open, 'final_size'] = new_open_size
# updating closed positions
df_closed.loc[i_closed, 'final_size'] = new_sell_size
sold_size = sell_size - new_sell_size
sell_cost_basis +=\
df_open.loc[i_open, 'average_price'] * sold_size
sell_size = new_sell_size
# assign a cost_basis to the closed position
df_closed.loc[i_closed, 'current_cost_basis'] = -sell_cost_basis
# calculate cost_basis for open positions
df_open['current_cost_basis'] =\
df_open['current_size'] * df_open['average_price']
df_open['final_cost_basis'] =\
df_open['final_size'] * df_open['average_price']
# calculate capital gains for closed positions
df_closed['realized_gains'] =\
df_closed['current_size'] * df_closed['average_price'] +\
df_closed['current_cost_basis']
df_closed['final_cost_basis'] = 0
return df_open, df_closed
def download_robinhood_data(self, user, password):
self._login(user, password)
df_div = self._process_dividends(self._download_dividends())
df_div.to_hdf(self.datafile, 'dividends')
df_ord = self._process_orders(self._download_orders())
df_ord.to_hdf(self.datafile, 'orders')
df_open, df_closed = self._generate_positions(df_ord)
df_open.to_hdf(self.datafile, 'open')
df_closed.to_hdf(self.datafile, 'closed')
return df_div, df_ord, df_open, df_closed
if __name__ == "__main__":
rd = RobinhoodData('../data/data.h5')
if False:
df_div, df_ord, df_open, df_closed =\
rd.download_robinhood_data(None, None)
df_div = pd.read_hdf('../data/data.h5', 'dividends')
df_ord = pd.read_hdf('../data/data.h5', 'orders')
from datetime import date, datetime, timedelta
from dateutil import tz
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndex:
def test_setitem_with_datetime_tz(self):
# 16889
# support .loc with alignment and tz-aware DatetimeIndex
mask = np.array([True, False, True, False])
idx = date_range("20010101", periods=4, tz="UTC")
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
idx = date_range("20010101", periods=4)
df = DataFrame({"a": np.arange(4)}, index=idx).astype("float64")
result = df.copy()
result.loc[mask, :] = df.loc[mask, :]
tm.assert_frame_equal(result, df)
result = df.copy()
result.loc[mask] = df.loc[mask]
tm.assert_frame_equal(result, df)
def test_indexing_with_datetime_tz(self):
# GH#8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
df = DataFrame({"A": idx, "B": dr})
df["C"] = idx
df.iloc[1, 1] = pd.NaT
df.iloc[1, 2] = pd.NaT
# indexing
result = df.iloc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
result = df.loc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
# indexing - fast_xs
df = DataFrame({"a": date_range("2014-01-01", periods=10, tz="UTC")})
result = df.iloc[5]
expected = Series(
[Timestamp("2014-01-06 00:00:00+0000", tz="UTC")], index=["a"], name=5
)
tm.assert_series_equal(result, expected)
result = df.loc[5]
tm.assert_series_equal(result, expected)
# indexing - boolean
result = df[df.a > df.a[3]]
expected = df.iloc[4:]
tm.assert_frame_equal(result, expected)
# indexing - setting an element
df = DataFrame(
data=pd.to_datetime(["2015-03-30 20:12:32", "2015-03-12 00:11:11"]),
columns=["time"],
)
df["new_col"] = ["new", "old"]
df.time = df.set_index("time").index.tz_localize("UTC")
v = df[df.new_col == "new"].set_index("time").index.tz_convert("US/Pacific")
# trying to set a single element on a part of a different timezone
# this converts to object
df2 = df.copy()
df2.loc[df2.new_col == "new", "time"] = v
expected = Series([v[0], df.loc[1, "time"]], name="time")
tm.assert_series_equal(df2.time, expected)
v = df.loc[df.new_col == "new", "time"] + pd.Timedelta("1s")
df.loc[df.new_col == "new", "time"] = v
tm.assert_series_equal(df.loc[df.new_col == "new", "time"], v)
def test_consistency_with_tz_aware_scalar(self):
# xref gh-12938
# various ways of indexing the same tz-aware scalar
df = Series([Timestamp("2016-03-30 14:35:25", tz="Europe/Brussels")]).to_frame()
df = pd.concat([df, df])
import cPickle
from collections import defaultdict
from helpers import functions as helpers
from view import View
import pandas as pd
import copy
class Chain(defaultdict):
"""
Container class that holds ordered Link defintions and associated Views.
The Chain object is a subclassed dict of list where each list contains one
or more View aggregations of a Stack. It is an internal class included and
used inside the Stack object. Users can interact with the data directly
through the Chain or through the related Cluster object.
"""
def __init__(self, name=None):
super(Chain, self).__init__(Chain)
self.name = name
self.orientation = None
self.source_name = None
self.source_type = None
self.source_length = None
self.len_of_axis = None
self.content_of_axis = None
self.data_key = None
self.filter = None
self.views = None
# self.view_sizes = None
# self.view_lengths = None
self.has_weighted_views = False
self.x_hidden_codes = None
self.y_hidden_codes = None
self.x_new_order = None
self.y_new_order = None
self.props_tests = list()
self.props_tests_levels = list()
self.means_tests = list()
self.means_tests_levels = list()
self.has_props_tests = False
self.has_means_tests = False
self.is_banked = False
self.banked_spec = None
self.banked_view_key = None
self.banked_meta = None
self.base_text = None
self.annotations = None
def __repr__(self):
return ('%s:\norientation-axis: %s - %s,\ncontent-axis: %s, \nviews: %s'
%(Chain, self.orientation, self.source_name,
self.content_of_axis, len(self.views)))
def __setstate__(self, attr_dict):
self.__dict__.update(attr_dict)
def __reduce__(self):
return self.__class__, (self.name, ), self.__dict__, None, self.iteritems()
def save(self, path=None):
"""
This method saves the current chain instance (self) to file (.chain) using cPickle.
Parameters:
path (string)
Specifies the location of the saved file, NOTE: has to end with '/'
Example: './tests/'
"""
if path is None:
path_chain = "./{}.chain".format(self.name)
else:
path_chain = path
f = open(path_chain, 'wb')
cPickle.dump(self, f, cPickle.HIGHEST_PROTOCOL)
f.close()
def copy(self):
"""
Create a copy of self by serializing to/from a bytestring using
cPickle.
"""
new_chain = cPickle.loads(
cPickle.dumps(self, cPickle.HIGHEST_PROTOCOL))
return new_chain
def _lazy_name(self):
"""
Apply lazy-name logic to chains created without an explicit name.
- This method does not take any responsibilty for uniquley naming chains
"""
self.name = '%s.%s.%s.%s' % (self.orientation, self.source_name, '.'.join(self.content_of_axis), '.'.join(self.views).replace(' ', '_'))
def _derive_attributes(self, data_key, filter, x_def, y_def, views, source_type=None, orientation=None):
"""
A simple method that is deriving attributes of the chain from its specification:
(some attributes are only updated when chains get post-processed,
i.e. there is meta data available for the dataframe)
-- examples:
- orientation: directional alignment of the link
- source_name: name of the orientation defining variable
- source_type: dtype of the source variable
- len_of_axis: number of variables in the non-orientation axis
- views: the list of views specified in the chain
- view_sizes: a list of lists of dataframe index and column lenght tuples,
matched on x/view index (only when post-processed)
"""
if x_def is not None or y_def is not None:
self.orientation = orientation
if self.orientation=='x':
self.source_name = ''.join(x_def)
self.len_of_axis = len(y_def)
self.content_of_axis = y_def
else:
self.source_name = ''.join(y_def)
self.len_of_axis = len(x_def)
self.content_of_axis = x_def
self.views = views
self.data_key = data_key
self.filter = filter
self.source_type = source_type
def concat(self):
"""
Concatenates all Views found for the Chain definition along its
orientation axis.
"""
views_on_var = []
contents = []
full_chain = []
all_chains = []
chain_query = self[self.data_key][self.filter]
if self.orientation == 'y':
for var in self.content_of_axis:
contents = []
for view in self.views:
try:
res = (chain_query[var][self.source_name]
[view].dataframe.copy())
if self.source_name == '@':
res.columns = pd.MultiIndex.from_product(
['@', '-'], names=['Question', 'Values'])
views_on_var.append(res)
except:
pass
contents.append(views_on_var)
for c in contents:
full_chain.append(pd.concat(c, axis=0))
concat_chain = pd.concat(full_chain, axis=0)
else:
for var in self.content_of_axis:
views_on_var = []
for view in self.views:
try:
res = (chain_query[self.source_name][var]
[view].dataframe.copy())
if var == '@':
res.columns = pd.MultiIndex.from_product(
['@', '-'], names=['Question', 'Values'])
views_on_var.append(res)
except:
pass
contents.append(pd.concat(views_on_var, axis=0))
from decimal import *
from slugify import slugify # awesome-slugify, from requirements
import configuration # configuration.py, with user-defined variables.
from pandas import DataFrame, read_csv
from pandas import datetime as dt
import pandas as pd #this is how I usually import pandas
import sys #only needed to determine Python version number
import matplotlib #only needed to determine Matplotlib version number
import numpy as np
import csv
import glob
import time
import datetime
from collections import OrderedDict
import pprint
import os
import sys
from subprocess import Popen
import pickle
import json
import pandas
time = datetime.datetime.now().strftime("%Y-%m-%d %I:%M %p")
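# note: 'time' here becomes a formatted timestamp string, shadowing the 'time' module imported above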
def FloridaResults():
targetdir = configuration.snapshotsdir
sourcecsvs = sorted(list(glob.glob(targetdir + "*")))
for filename in sourcecsvs:
newestdirectories = sorted(list(glob.glob(filename +"/*")))
sorted_files = sorted(newestdirectories, key=os.path.getctime)
newestdirectory = sorted_files[-1]
csvs = sorted(list(glob.glob(newestdirectory +"/*.csv")))
for filename in csvs:
if '/snapshots/Florida/' in filename:
df = pd.read_csv(filename, dtype='object', sep=',', encoding="utf8")
df['seatname'] = df['seatname'].fillna(" ")
df['officename'] = df['officename'] + " " + df['seatname']
df['precinctsreporting'] = df['precinctsreporting'].astype('int64')
df['precinctstotal'] = df['precinctstotal'].astype('int64')
df['votecount'] = df['votecount'].astype('int64')
df['party'] = df['party'].fillna(" ")
df['first'] = df['first'].fillna(" ")
df['party'] = df['party'].replace(['Republican Party', 'Democratic Party', 'No Party Affiliation', 'Libertarian Party', 'Green Party', 'Reform Party', 'Non-Partisan'],['REP', 'DEM', 'NPA', 'LPF', 'GRE', 'REF', ''])
df['last'] = df['last'].replace(['Fried',],['"Nikki" Fried'])
df['lastupdated'] = time
votestotal = df.groupby(['officename', 'last', 'first', 'party', 'lastupdated'])['precinctsreporting', 'precinctstotal', 'votecount'].sum()
votestotal['totalvotesinrace'] = votestotal.groupby(['officename'])['votecount'].transform('sum')
votestotal['totalvotesinrace'] = votestotal['totalvotesinrace'].astype('int64')
votestotal['votepct'] = votestotal['votecount']/votestotal['totalvotesinrace']
votestotal['precinctsreportingpct'] = votestotal['precinctsreporting']/votestotal['precinctstotal']
# print(votestotal['party'])
votestotal.to_csv("stateagg.csv")
csvfile = open('stateagg.csv', 'r')
jsonfile = open('stateagg.json', 'w')
data = []
reader = csv.DictReader(csvfile)
for row in reader:
data.append(row)
jsonfile.write(json.dumps(data, jsonfile))
df = df.loc[(df['officename'] == 'Governor ') | (df['officename'] == 'United States Senator ')]
countyturnout = df.groupby(['officename', 'last', 'first', 'party', 'reportingunitname'])['precinctsreporting', 'precinctstotal', 'votecount'].sum()
countyturnout['totalvotesinrace'] = countyturnout.groupby(['officename'])['votecount'].transform('sum')
countyturnout['totalvotesinrace'] = countyturnout['totalvotesinrace'].astype('int64')
countyturnout['votepct'] = countyturnout['votecount']/countyturnout['totalvotesinrace']
countyturnout['precinctsreportingpct'] = countyturnout['precinctsreporting']/countyturnout['precinctstotal']
countyturnout.to_csv("countyturnout.csv")
FloridaResults()
def countyResults():
targetdir = configuration.snapshotsdir
files = []
dfs = []
sourcecsvs = sorted(list(glob.glob(targetdir + "*")))
for filename in sourcecsvs:
newestdirectories = sorted(list(glob.glob(filename +"/*")))
sorted_files = sorted(newestdirectories, key=os.path.getctime)
newestdirectory = sorted_files[-1]
csvs = sorted(list(glob.glob(newestdirectory +"/*.csv")))
for filename in csvs:
if '/snapshots/Miami-Dade/' in filename:
df = pd.read_csv(filename, dtype='object', sep=',', encoding="utf8")
import string
import pandas as pd
import sqlite3
import re
from urllib.request import urlopen
from datetime import datetime
from bs4 import BeautifulSoup
from fundamentus import get_data
from tqdm import tqdm
from exception_util import exception, create_logger, retry
# Create instances of loggers
cvm_logger = create_logger('cvm_logger')
result_logger = create_logger('result_logger')
cvm2symbol_logger = create_logger('cvm2symbol_logger')
price_logger = create_logger('price_logger')
@retry()
@exception(cvm_logger)
def cvm():
"""
Get registration of all companies listed in CVM.
This function is a crawler which get all registration information from companies on cvmweb page.cvmweb
Parameters
----------
None
Returns
-------
DataFrame
The dataframe with fields ['cnpj', 'name', 'type', 'cvm_code', 'situation']
"""
# Define url base
url = 'http://cvmweb.cvm.gov.br/SWB/Sistemas/SCW/CPublica/CiaAb/FormBuscaCiaAbOrdAlf.aspx?LetraInicial='
# Get alphanum uppercase set to use in page index
alphanum =string.ascii_lowercase.upper() + ''.join(list(map(str,range(10))))
# Attribute values to identify table lines of interest
colors = ['Cornsilk','#FAEFCA']
# Loop through index pages and append registration information to data list
data = list()
#for letra_inicial in tqdm(alphanum, desc='Reading companies', unit='tabs'):
for letra_inicial in alphanum:
# get html
with urlopen(url+f'{letra_inicial}') as html:
soup = BeautifulSoup(html, 'html.parser')
try:
# loop through table lines retrieving fields values
for row in soup.find_all('tr', bgcolor=True):
row_tup = tuple()
# check the attribute matching
if row['bgcolor'] in colors:
for field in row.find_all('td'):
row_tup += (field.get_text(),)
data.append(row_tup)
except:
continue
# Store data in dataframe
columns = ['cnpj', 'name', 'type', 'cvm_code', 'situation']
df = pd.DataFrame(data, columns=columns)
df['cvm_code'] = df['cvm_code'].apply(int)
return df
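# Example usage (hypothetical call site, not part of the original module):
#   registrations = cvm()
#   print(registrations[['name', 'cvm_code', 'situation']].head())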
@retry()
@exception(cvm2symbol_logger)
def cvm2symbol(cvm_codes, cvm_prices_and_liq):
"""
Get most relevant symbol price with cvm_code information
This function is a crawler which gets all symbols for companies listed in CVM and then, among symbols sharing the same cvm_code, retrieves the one with the best liquidity (liq).
Parameters
----------
cvm_codes : list or numpy.array
List of cvm_codes
cvm_prices_and_liq : DataFrame or numpy.ndarray
Table indexed by symbol name and with price and liq info
Returns
-------
DataFrame
The dataframe with fields ['cvm_code', 'symbol', 'price', 'date']
"""
# Define cvm symbols source url
url = 'http://bvmf.bmfbovespa.com.br/pt-br/mercados/acoes/empresas/ExecutaAcaoConsultaInfoEmp.asp?CodCVM='
# Get symbols at url entering adding cmv_code to query
cvm_symbol = []
for code in tqdm(cvm_codes, desc='Reading prices', unit='codes'):
#for code in cvm_codes:
with urlopen(url+f'{code}') as html:
soup = BeautifulSoup(html, 'html.parser')
liq = .0
symbol = None
# Take the symbol with best liq for this cvm_code
for row in soup.find_all('a', "LinkCodNeg"):
tmp_symbol = row.get_text().strip()
# Evaluate if symbol exists
if tmp_symbol in cvm_prices_and_liq.index:
tmp_liq = convertNum(cvm_prices_and_liq.loc[tmp_symbol].liq)
if tmp_liq < liq:
continue
liq = tmp_liq
symbol = tmp_symbol
# Skip when no symbol
if symbol:
cvm_symbol.append((code, symbol, pd.to_datetime(cvm_prices_and_liq.loc[symbol].date)))
return pd.DataFrame(cvm_symbol, columns=['cvm_code', 'symbol', 'date'])
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def plot_feature_diff(diff, xaxis_labels=None, title="", file_path_out=None):
data = {i+1: diff[:,i] for i in range(diff.shape[1])}
df = pd.DataFrame(data)
import re
import numpy as np
import pandas as pd
from typing import List, Tuple
def prepare_data(link: str) -> Tuple[pd.DataFrame, List[str]]:
""" Load and prepare/preprocess the data
Parameters:
-----------
link : str
Link to the dataset, which should be in excel and of the following format:
| Date | Players | Game | Scores | Winner | Version |
| 2018-11-18 | Peter+Mike | Qwixx | Peter77+Mike77 | Peter+Mike | Normal |
| 2018-11-18 | Chris+Mike | Qwixx | Chris42+Mike99 | Mike | Big Points |
| 2018-11-22 | Mike+Chris | Jaipur | Mike84+Chris91 | Chris | Normal |
| 2018-11-30 | Peter+Chris+Mike | Kingdomino | Chris43+Mike37+Peter35 | Chris | 5x5 |
Returns:
--------
df : pandas.core.frame.DataFrame
The preprocessed data to be used for the analyses of played board game matches.
player_list : list of str
List of players
"""
df = pd.read_excel(link)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
            374: pd.Timestamp("2013-05-11 00:00:00"),
# Importing default django methods:
from django.shortcuts import render, redirect
from django.db.models import Count
from django.core.paginator import Paginator
from django.db.models.functions import TruncDay
from django.http import HttpResponseRedirect
# Importing plotly methods:
import plotly.graph_objs as go
from plotly.subplots import make_subplots
# Importing data manipulation packages:
import numpy as np
import pandas as pd
# Importing date methods:
import datetime
# Importing database models:
from .models import Source, Topic, Author, Publisher
# TODO: Generate the array of Sources read per day to populate the heatmaps (add custom coloring).
# Function for generating calendar heatmaps (modified from https://gist.github.com/bendichter/d7dccacf55c7d95aec05c6e7bcf4e66e):
def display_year(z,
year: int = None,
month_lines: bool = True,
fig=None,
row: int = None,
color: str = "#76cf63"
):
"""The method that renders a calendar heatmap showing the number of sources per day given
an array of Source counts. This method can either be called on its own to generate a calendar
heatmap for a single year or it can be 'recursively' called by the display_years function to
generate a heatmap across multiple years.
Args:
z (np.array): A dataset containing the daily counts of all Sources read per day stored in a
1-D numpy array.
year (int): The year that the calendar heatmap will be rendered for. It creates a subplot of
the calendar heatmap with this specific year as a label.
        month_lines (bool): A boolean which determines if lines will be used to separate each month
            on the heatmap.
        fig (go.Figure): The figure object that the subplot the method generates will be appended to.
            This is necessary as this method is recursively called via the display_years() method.
        row (int): The row of the subplot that is used for labeling and transforming the dataset.
        color (str): The color of the individual plots. This sets the base color used in the
            colorscale.
"""
if year is None:
year = datetime.datetime.now().year
data = np.ones(365) * 0
data[:len(z)] = z
d1 = datetime.date(year, 1, 1)
d2 = datetime.date(year, 12, 31)
delta = d2 - d1
month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
month_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
month_positions = (np.cumsum(month_days) - 15)/7
    dates_in_year = [d1 + datetime.timedelta(i) for i in range(delta.days+1)]  # a datetime for every day of the year
    weekdays_in_year = [i.weekday() for i in dates_in_year]  # gives [0,1,2,3,4,5,6,0,1,...] (ticktext in the xaxis dict translates this to weekday names)
    weeknumber_of_dates = [int(i.strftime("%V")) if not (int(i.strftime("%V")) == 1 and i.month == 12) else 53
                           for i in dates_in_year]  # ISO week number of each date, e.g. [1,1,1,1,1,1,1,2,2,...]
text = [str(date.strftime("%d %b, %Y")) for date in dates_in_year]
#4cc417 green #347c17 dark green
colorscale=[[False, '#eeeeee'], [True, color]]
# handle end of year
data = [
go.Heatmap(
x=weeknumber_of_dates,
y=weekdays_in_year,
z=data,
text=text,
hovertemplate = "<b style='font-family: Helvetica Neue;'>%{z} sources read on %{text}</b>",
            xgap=3,  # this
            ygap=3,  # and this are used to make the grid-like appearance
showscale=False,
colorscale=colorscale,
hoverlabel=dict(align="left")
)
]
# TODO: Add onclick events in plotly to imbed links to each day's Sources page.
# https://plotly.com/python/click-events/
if month_lines:
kwargs = dict(
mode='lines',
line=dict(
color='#9e9e9e',
width=1
),
hoverinfo='skip'
)
for date, dow, wkn in zip(dates_in_year,
weekdays_in_year,
weeknumber_of_dates):
if date.day == 1:
data += [
go.Scatter(
x=[wkn-.5, wkn-.5],
y=[dow-.5, 6.5],
**kwargs
)
]
if dow:
data += [
go.Scatter(
x=[wkn-.5, wkn+.5],
y=[dow-.5, dow - .5],
**kwargs
),
go.Scatter(
x=[wkn+.5, wkn+.5],
y=[dow-.5, -.5],
**kwargs
)
]
layout = go.Layout(
title=f'Sources Read in {year}',
height=280,
yaxis=dict(
showline=False, showgrid=False, zeroline=False,
tickmode='array',
ticktext=['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'],
tickvals=[0, 1, 2, 3, 4, 5, 6],
autorange="reversed"
),
xaxis=dict(
showline=False, showgrid=False, zeroline=False,
tickmode='array',
ticktext=month_names,
tickvals=month_positions
),
font={'size':10, 'color':'#000000'},
plot_bgcolor=('#fff'),
margin = dict(t=40),
showlegend=False
)
if fig is None:
fig = go.Figure(data=data, layout=layout)
else:
fig.add_traces(data, rows=[(row+1)]*len(data), cols=[1]*len(data))
fig.update_layout(layout)
fig.update_xaxes(layout['xaxis'])
fig.update_yaxes(layout['yaxis'])
return fig
def display_years(z, years):
"""The method that makes use of the display_year() method to create and
modify the calendar heatmap of Sources read.
Args:
z (np.array): The dataset of Sources read per year stored as a 1-D array of integers.
years (tuple): The relevant years of the dataset stored as a tuple which determines
which subplots are generated eg: (2019, 2020).
Returns:
go.Figure: The fully rendered calendar heatmap ready to be passed onto the template.
"""
fig = make_subplots(rows=len(years), cols=1, subplot_titles=years)
for i, year in enumerate(years):
data = z[i*365 : (i+1)*365]
display_year(data, year=year, fig=fig, row=i)
fig.update_layout(height=250*len(years))
return fig
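# Hedged usage sketch (illustrative only, not part of the original views module).
# display_years expects a 1-D array of daily Source counts concatenated year by
# year; the constant counts and the years tuple below are placeholder assumptions:
#
#     daily_counts = np.ones(365 * 2, dtype=int)      # one Source read per day, two years
#     heatmap_fig = display_years(daily_counts, (2021, 2022))
#     heatmap_html = heatmap_fig.to_html(full_html=False)  # e.g. pass to the template context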
def refactor_annual_queryset(queryset, year):
"""This is a method that inqests a Source queryset and refactors it into
a 1-D array (list) of the number of sources read per day in a year.
This array is used by the 'display_year' function to create the calendar
heatmap of sources read per year.
Args:
queryset (Queryset): The queryset of Source objects for the year. Only
supports the local Source model
year (datetime.datetime.date): The current year of the queryset and
the calendar heatmap. It is used to build the datetime index.
Returns:
        list: The 1-D array of daily source counts for the year. Should always be 365
            elements.
"""
# Annotating the queryset to create datetime and count fields:
year_sources = queryset.annotate(date=TruncDay('date_read')).values(
"date").annotate(created_count=Count('id')).order_by("date")
# Creating a pandas datetime index of each day in the current year:
heatmap_datetime_index = pd.date_range(
start=datetime.date(year, 1, 1),
end=(datetime.date(year, 12, 31))
)
# Full series of only 0 for the year:
    heatmap_series = pd.Series(data=0, index=heatmap_datetime_index)
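    # Hedged completion sketch (the original function is truncated at this point):
    # overlay the queryset's per-day counts onto the zero-filled series and return
    # it as a plain list, as the docstring describes. Field handling is an assumption.
    for entry in year_sources:
        day = pd.Timestamp(entry["date"]).normalize()
        if day in heatmap_series.index:
            heatmap_series[day] = entry["created_count"]
    return heatmap_series.tolist()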
import numpy as np
import random
import pandas as pd
from itertools import combinations
items_set = ['beer','burger','milk','onion','potato']
max_trn = 20
data=np.random.randint(2, size=(random.randint(1,max_trn),len(items_set)))
df = pd.DataFrame(data)
df.columns = items_set
print(df)
def candidate_gen(df,items,level,global_freq):
candidate_set = []
if level <=1:
for i in combinations(items,level):
p_s = df[list(i)].all(axis=1).sum()
p_sp = df[list(i)].all(axis=1).sum()/len(df)
candidate_set.append((",".join(i),p_s,p_sp))
candidate_set = pd.DataFrame(candidate_set, columns=["Candidate", "Support","Support %"])
elif level == 2:
for i in combinations(items,level):
p_s = df[list(i)].all(axis=1).sum()
p_sp = df[list(i)].all(axis=1).sum()/len(df)
candidate_set.append((i,p_s,p_sp))
        candidate_set = pd.DataFrame(candidate_set, columns=["Candidate", "Support", "Support %"])
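    # Hedged completion (the original snippet is truncated here): assume the
    # function simply returns the candidate DataFrame it has built so far.
    return candidate_set

# Hedged usage sketch: generate level-1 candidates and keep those meeting a
# minimum support threshold. `min_support` and the empty global_freq dict are
# illustrative assumptions, not part of the original snippet.
min_support = 0.4
level1_candidates = candidate_gen(df, items_set, 1, global_freq={})
print(level1_candidates[level1_candidates["Support %"] >= min_support])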
import pandas as pd
import numpy as np
import requests
from termcolor import colored as cl
from math import floor
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (20, 10)
plt.style.use('fivethirtyeight')
# EXTRACTING STOCK DATA
def get_historical_data(symbol, start_date):
api_key = 'YOUR API KEY'
api_url = f'https://api.twelvedata.com/time_series?symbol={symbol}&interval=1day&outputsize=5000&apikey={api_key}'
raw_df = requests.get(api_url).json()
df = pd.DataFrame(raw_df['values']).iloc[::-1].set_index('datetime').astype(float)
df = df[df.index >= start_date]
df.index = pd.to_datetime(df.index)
return df
aapl = get_historical_data('AAPL', '2020-01-01')
aapl
def get_stoch(symbol, k_period, d_period, start_date):
    api_key = open(r'api_key.txt').read().strip()
url = f'https://www.alphavantage.co/query?function=STOCH&symbol={symbol}&interval=daily&fastkperiod={k_period}&slowdperiod={d_period}&apikey={api_key}'
raw = requests.get(url).json()
df = pd.DataFrame(raw['Technical Analysis: STOCH']).T.iloc[::-1]
df = df[df.index >= start_date]
df.index = pd.to_datetime(df.index)
df = df.astype(float)
return df['SlowK'], df['SlowD']
aapl['%k'], aapl['%d'] = get_stoch('aapl', 14, 3, '2020-01-01')
aapl = aapl.dropna()
aapl.head()
def plot_stoch(symbol, price, k, d):
ax1 = plt.subplot2grid((9, 1), (0,0), rowspan = 5, colspan = 1)
ax2 = plt.subplot2grid((9, 1), (6,0), rowspan = 3, colspan = 1)
ax1.plot(price)
ax1.set_title(f'{symbol} STOCK PRICE')
ax2.plot(k, color = 'deepskyblue', linewidth = 1.5, label = '%K')
ax2.plot(d, color = 'orange', linewidth = 1.5, label = '%D')
ax2.axhline(80, color = 'black', linewidth = 1, linestyle = '--')
ax2.axhline(20, color = 'black', linewidth = 1, linestyle = '--')
ax2.set_title(f'{symbol} STOCH')
ax2.legend()
plt.show()
plot_stoch('aapl', aapl['close'], aapl['%k'], aapl['%d'])
def implement_stoch_strategy(prices, k, d):
buy_price = []
sell_price = []
stoch_signal = []
signal = 0
for i in range(len(prices)):
if k[i] < 20 and d[i] < 20 and k[i] < d[i]:
if signal != 1:
buy_price.append(prices[i])
sell_price.append(np.nan)
signal = 1
stoch_signal.append(signal)
else:
buy_price.append(np.nan)
sell_price.append(np.nan)
stoch_signal.append(0)
elif k[i] > 80 and d[i] > 80 and k[i] > d[i]:
if signal != -1:
buy_price.append(np.nan)
sell_price.append(prices[i])
signal = -1
stoch_signal.append(signal)
else:
buy_price.append(np.nan)
sell_price.append(np.nan)
stoch_signal.append(0)
else:
buy_price.append(np.nan)
sell_price.append(np.nan)
stoch_signal.append(0)
return buy_price, sell_price, stoch_signal
buy_price, sell_price, stoch_signal = implement_stoch_strategy(aapl['close'], aapl['%k'], aapl['%d'])
ax1 = plt.subplot2grid((9, 1), (0,0), rowspan = 5, colspan = 1)
ax2 = plt.subplot2grid((9, 1), (6,0), rowspan = 3, colspan = 1)
ax1.plot(aapl['close'], color = 'skyblue', label = 'aapl')
ax1.plot(aapl.index, buy_price, marker = '^', color = 'green', markersize = 10, label = 'BUY SIGNAL', linewidth = 0)
ax1.plot(aapl.index, sell_price, marker = 'v', color = 'r', markersize = 10, label = 'SELL SIGNAL', linewidth = 0)
ax1.legend(loc = 'upper left')
ax1.set_title('aapl STOCK PRICE')
ax2.plot(aapl['%k'], color = 'deepskyblue', linewidth = 1.5, label = '%K')
ax2.plot(aapl['%d'], color = 'orange', linewidth = 1.5, label = '%D')
ax2.axhline(80, color = 'black', linewidth = 1, linestyle = '--')
ax2.axhline(20, color = 'black', linewidth = 1, linestyle = '--')
ax2.set_title('aapl STOCH')
ax2.legend()
plt.show()
position = []
for i in range(len(stoch_signal)):
if stoch_signal[i] > 1:
position.append(0)
else:
position.append(1)
for i in range(len(aapl['close'])):
if stoch_signal[i] == 1:
position[i] = 1
elif stoch_signal[i] == -1:
position[i] = 0
else:
position[i] = position[i-1]
k = aapl['%k']
d = aapl['%d']
close_price = aapl['close']
stoch_signal = pd.DataFrame(stoch_signal)
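# Hedged continuation sketch (the original snippet is truncated at the DataFrame
# creation above): give the signal/position series the price index and compute
# simple daily strategy returns. The column names here are assumptions.
stoch_signal = stoch_signal.rename(columns={0: 'stoch_signal'}).set_index(aapl.index)
position = pd.DataFrame(position, columns=['stoch_position'], index=aapl.index)
aapl_ret = close_price.pct_change().fillna(0)
strategy_ret = aapl_ret * position['stoch_position'].shift(1).fillna(0)
print('Cumulative strategy return:', round((1 + strategy_ret).prod() - 1, 4))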
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author:
<NAME>, PhD, MSB, BCBA-D
https://www.researchgate.net/profile/David_Cox26
twitter: @davidjcox_
LinkedIn: https://www.linkedin.com/in/coxdavidj/
Website: https://davidjcox.xyz
"""
#Set current working directory to the folder that contains your data.
import os
import pandas as pd
import numpy as np
import sys
import re, string, unicodedata
import matplotlib.pyplot as plt
sys.path.append('/Users/davidjcox/Dropbox/Coding/Local Python Modules/')
# Set path to data
os.chdir('/Users/davidjcox/Dropbox/Projects/CurrentProjectManuscripts/Empirical/PersonalFun/Org_Statements_Diversity/Org-Statements-Diversity-Inclusion/Data')
# Change settings to view all columns of data
pd.set_option('display.max_columns', None)
#%% Import data.
raw_data = pd.read_csv('all_data.csv').drop(['Unnamed: 0'], axis=1)
data = raw_data.copy()
data
#%% DATA PRE-PROCESSING
# Pre-process our data. Goal is to have:
# (1) Single list where each item in the list is the raw string of the narrative for that participant.
# (2) List of lists with one list per subject, and each item in list is a sentence from their narrative.
# (3) List of lists with one list per subject, and each item in the list is a clean* word from their narrative.
# (4) Single list with all of the cleaned vocab for the entire group.
# (5) Single list of the vocabulary used throughout all narratives (i.e., omitting all redundancies from (4)).
# (6) Single list where each item in the list is a string of the cleaned narrative for that participant.
# (7) Single list where each item in the list is a string of the participant narratives with only clean words.
# ----------------------
# List names for above:
# (1) narratives
# (2) narratives_sent_tokenized
# (3) clean_words_tokenized
# (4) narratives_word_list
# (5) narrative_vocab
# (6) narr_as_string
# (7) clean_ind_narr
# --------------------------------------------------
# * Clean = punctuation and stop words removed.
#%% Start with (1) narratives:
# Single list where each item in the list is the raw string of the narrative for that participant.
narratives = data['body_text'] # Create a list of the narratives.
#%% Next we'll get (2), narratives_sent_tokenized:
# List of lists with one list per subject, and each item in list is a sentence from their narrative.
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
#nltk.download() # Need to download the 'punkt' model from nltk if not already on computer.
# Make some empty lists we'll store our data in.
lower_narratives = []
narratives_sent_tokenized = []
count=0
# Make lowercase all words in the narratives. Store in lower_narratives list.
for each_narr in narratives:
lower = each_narr.lower()
lower = lower.replace("/", '')
lower = lower.replace("\\", '')
lower = lower.replace("_", ' ')
lower_narratives.append(lower)
len(lower_narratives) # Should still have 1141 narratives.
lower_narratives[:2] # Check out first few to make sure everything looks okay.
# Sentence tokenize the narratives. Store in narratives_sent_tokenized list.
for each_narr in lower_narratives:
sent_tokens = nltk.sent_tokenize(each_narr)
narratives_sent_tokenized.append(sent_tokens)
print(len(narratives_sent_tokenized)) # Should still have 1141 narratives.
narratives_sent_tokenized[::500] # Check out every 500th narrative to make sure it looks okay.
#%% Next, we'll get (3), clean_words_tokenized:
# List of lists with one list per subject, and each item in the list is a clean* word from their narrative.
# Some empty lists we'll need to store data.
stem_narratives = []
clean_words_tokenized = []
narratives_word_tokenized = []
# Word tokenize the narratives. Store in narratives_word_tokenized list.
for list in lower_narratives:
word_tokens = nltk.word_tokenize(list)
narratives_word_tokenized.append(word_tokens)
len(narratives_word_tokenized) # Should still have 1141 items.
narratives_word_tokenized[:1] # Check to make sure the last two look okay.
# Convert each word to its root (i.e., lemmatize).
from nltk.stem import WordNetLemmatizer
wnl = nltk.WordNetLemmatizer()
for list in narratives_word_tokenized:
temp_list = []
for word in list:
words_stemmed = wnl.lemmatize(word) # Noun
words_stemmed = wnl.lemmatize(word, pos='v') # Verb
temp_list.append(words_stemmed)
stem_narratives.append(temp_list)
len(stem_narratives) # Should still have 1141 items.
stem_narratives[:1] # Check last two and compare to narratives_word_tokenized
# Some additional punctuation characters.
punctuation = [",", ".", "''", "' '", "\"", "!", "?", '-', '``', ':', ';', \
"'s", "...", "'d", '(', ')', '=', "'", "#", "$", "%", "&", '_', \
"<", "=v=", ">", "@", "[", "]", "^_^", '{', '}', "\"", '/', "\\\\", \
"n't", "'ll", "'m", '*', '..', "\"links:\"", "[001]", "[002]", \
"[003]", "<b>", "\"buttons\"", "\\r", "\\n", "\\\"", "\""] # Define list of punctuation to remove.
# Remove all punctuation, any sticky contraction elements, and stopwords from stem_narratives list.
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
a_range = [0, 1, 2, 3, 4, 5]
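# Several passes are needed because list.remove() while iterating skips the
# element that follows each removal.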
for i in a_range:
for list in stem_narratives:
for word in list:
if word == "'ve":
list.remove(word)
if word in punctuation:
list.remove(word)
if word in stop_words:
list.remove(word)
# Put this cleaned list into it's own list so we don't mess it up.
clean_words_tokenized = stem_narratives
len(clean_words_tokenized) # Should still have 1141 items
clean_words_tokenized[::200] # Check they look the same.
#%% Next, we'll get (4) narratives_word_list:
# (4) Single list with all of the cleaned vocab for the entire group.
# ======================================================================================================================================
# Create empty list where we'll store our data.
narratives_word_list = []
# Iterate over each list and add the word to the list we just created.
for list in stem_narratives:
for word in list:
narratives_word_list.append(word)
narr_all_words = ' '.join(narratives_word_list)
len(narratives_word_list) # Should be 4038 total words.
#%% Next we'll get (5) narrative_vocab:
# Single list of the vocabulary used throughout all narratives (i.e., omitting all redundancies from (4)).
# Create empty list where we'll store our data.
narrative_vocab = []
# Iterate over narratives_word_list and only append to narrative_vocab if the word is not in there already.
for word in narratives_word_list:
if word not in narrative_vocab:
narrative_vocab.append(word)
print("Number of words in vocab:", len(narrative_vocab)) # Should be 1373 unique words in our vocab.
sorted_vocab = sorted(narrative_vocab)
unique_words = np.unique(sorted_vocab) # Look at every 100th word in the vocab set.
#%% Next, we'll get (6) narr_as_string:
# Single item of all narratives as a single string of the cleaned narratives.
# Create empty list where we'll store our data.
narr_as_string = []
# Join all of the words into single string.
narr_as_string = ' '.join(narratives_word_list)
print("Number of characters total:", len(narr_as_string)) # Should be 31,973 characters in this string.
narr_as_string[:198] # Look at the first 300 characters of this string.
#%% Finally, we'll get (7) clean_ind_narr:
# Single list where each item in the list is a string of the participant narratives with only clean words.
clean_ind_narr = []
for list in clean_words_tokenized:
sub_clean_narr = ' '.join(list)
clean_ind_narr.append(sub_clean_narr)
data['cleaned_sentences'] = clean_ind_narr
print("Number of total statements", len(clean_ind_narr))
print(clean_ind_narr[::500])
#%% ===========================================================================
############################## LIST CREATION COMPLETE #########################
# =============================================================================
narratives # Single list where each item in the list is the raw string of the narrative for that participant.
narratives_sent_tokenized # List of lists with one list per subject, and each item in list is a sentence from their narrative.
clean_words_tokenized # List of lists with one list per subject, and each item in the list is a clean word from their narrative.
narratives_word_list # Single list with all of the cleaned words for the entire group.
narr_all_words # Single item of all the cleaned words for entire group as single string
narrative_vocab # Single list of the vocabulary used throughout all narratives (i.e., omitting all redundancies from (4)).
narr_as_string # Single item of all narratives as a string of the cleaned narratives.
clean_ind_narr # Single list where each item in the list is a string of the participant narratives with only clean words.
#%%
from wordcloud import WordCloud
import matplotlib.pyplot as plt
#%% All words.
wordcloud = WordCloud(width=500, height=500, background_color='white').generate(narr_all_words)
plt.figure()
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.margins(x=0, y=0)
plt.show()
#%% Just top 50 words.
wordcloud = WordCloud(width=500, height=500, background_color='white', max_words=50).generate(narr_all_words)
plt.figure()
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.margins(x=0, y=0)
plt.show()
#%% Sentiment Analysis on the raw input
from nltk.sentiment.vader import SentimentIntensityAnalyzer
raw_sentiment_score = []
for sentence in data['body_text']:
ss = SentimentIntensityAnalyzer().polarity_scores(sentence)
raw_sentiment_score.append(ss)
raw_sent_df = pd.DataFrame(raw_sentiment_score)
raw_sent_df = raw_sent_df.rename(columns={'neg':'raw_neg', 'neu':'raw_neu', \
'pos':'raw_pos', 'compound':'raw_compound'})
raw_sent_df
#%% Sentiment Analysis on the lemmatized and cleaned sentences
lemmed_sentiment_score = []
for sentence in clean_ind_narr:
ss = SentimentIntensityAnalyzer().polarity_scores(sentence)
lemmed_sentiment_score.append(ss)
lemmed_sent_df = pd.DataFrame(lemmed_sentiment_score)
lemmed_sent_df = lemmed_sent_df.rename(columns={'neg':'cleaned_neg', 'neu':'cleaned_neu', \
'pos':'cleaned_pos', 'compound':'cleaned_compound'})
lemmed_sent_df
#%% Add the above to the data df
data = pd.concat([data, raw_sent_df, lemmed_sent_df], axis=1)
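# Hedged follow-up sketch (the original script is truncated here): compare sentiment
# between the raw statements and the cleaned/lemmatized text. Column names follow
# the renames applied above.
print(data[['raw_compound', 'cleaned_compound']].describe())
print("Mean shift in compound score after cleaning:",
      round((data['cleaned_compound'] - data['raw_compound']).mean(), 3))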
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, Panel, Slider, RangeSlider, Span
from bokeh.models import HoverTool
from bokeh.layouts import column, row
import pandas as pd
import numpy as np
class Residual_model:
def __init__(self, data):
#self.source = ColumnDataSource(data=dict(Time=[], residual_ols=[],residual_tls=[], Beta=[], Alfa=[], STD=[], Z=[], Un=[], Ln=[], Ux=[], Lx=[]))
self.source = ColumnDataSource()
self.data = data
self.LRperiod = 20
self.EntryT = 2.5
self.ExitT = 0.0
self.tools = ['reset', 'hover', 'zoom_in', 'zoom_out', 'pan']
self.t1 = self.data.keys()[0]
self.t2 = self.data.keys()[2]
self.isLRperiodChanged = True
linreg = np.polyfit(data.t1, data.t2, 1)
self.gradient = linreg[0]
self.intercept = linreg[1]
self.resids = self.calculate_resid(data)
self.data['resids']=self.resids
#self.data=self.data.dropna()
self.p1 = figure(plot_width=1100, plot_height=300, tools=self.tools, x_axis_type="datetime")
self.source.data = ColumnDataSource.from_df(data[['Time', 'resids']])
self.p1.title.text = 'Residual of prices: '+ self.t1 + ' and ' + self.t2
self.p1.line('date', 'resids', source=self.source, line_width=2, color='black', alpha=0.8)
self.p1.line('date', 'Un', source=self.source, line_width=1, color='green', alpha=0.8)
self.p1.line('date', 'Ln', source=self.source, line_width=1, color='blue', alpha=0.8)
self.p1.line('date', 'Ux', source=self.source, line_width=1, color='brown', alpha=0.8)
self.p1.line('date', 'Lx', source=self.source, line_width=1, color='pink', alpha=0.8)
self.hover = self.p1.select(dict(type=HoverTool))
self.hover.tooltips = [('date', '@date{%F}'), ('zscore', '@resids')]
self.hover.formatters = {'date' : 'datetime'}
self.p1.legend.location = "bottom_left"
self.p1.title.text = 'Residual of prices: ' + self.t1 + ' and ' + self.t2
        self.LR_slider = Slider(start=10, end=80, value=25, step=1, title='Experimental! Regression Period', callback_policy = "mouseup", callback_throttle=1000)
        self.Entry_slider = Slider(start=1.0, end=5.0, value=2.0, step=0.1, title='Entry Threshold')
        self.Exit_slider = Slider(start=0.0, end=1.5, value=0.0, step=0.1, title='Exit Threshold')
self.Entry_slider.on_change('value', self.Entry_slider_callback)
self.Exit_slider.on_change('value', self.Exit_slider_callback)
self.LR_slider.on_change('value', self.LR_slider_callback)
self.update(self.data)
layout = row(column(self.p1), column(self.LR_slider, self.Entry_slider, self.Exit_slider))
self.tab = Panel(child=layout, title='Residual Model')
def calculate_resid(self, data):
slopes=[]
intercepts=[]
resids=[]
res_indices=[]
data_lines=data.t1.count()
for r in range(0, (data_lines - self.LRperiod) + 1):
c0 = data.t1[r:r+self.LRperiod]
c1 = data.t2[r:r+self.LRperiod]
slope, intercept = np.polyfit(c1,c0, 1)
resid = (c0 - (slope * c1 + intercept))
# slopes.append(slope)
# intercepts.append(intercept)
resids.append(resid[-1])
res_indices.append(c0.index[-1])
# slopes = pd.Series(slopes, index=res_indices)
# intercepts = pd.Series(intercepts, index=res_indices)
        resids = pd.Series(resids, index=res_indices)
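        # Hedged completion (the original method is truncated here): return the
        # rolling residual series so the constructor's self.resids assignment works.
        return resids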
# Core imports
import os
import time
from datetime import datetime
import random
# Third party imports
import geopandas as gpd
import pandas as pd
import yaml
#import gptables as gpt
# Module imports
import geospatial_mods as gs
import data_ingest as di
import data_transform as dt
import ftp_get_files_logic as fpts
import data_output as do
start_time = time.time()
# get current working directory
CWD = os.getcwd()
# TODO: find out best practice on CWD
# Load config
with open(os.path.join(CWD, "config.yaml")) as yamlfile:
config = yaml.load(yamlfile, Loader=yaml.FullLoader)
module = os.path.basename(__file__)
print(f"Config loaded in {module}")
# Retrieve Missing Data Files via FTP
#fpts.execute_file_grab(CWD)
# Constants
DEFAULT_CRS = config["DEFAULT_CRS"]
DATA_DIR = config["DATA_DIR"]
EXT_ORDER = config['EXT_ORDER']
# Years
# Getting the year for population data
pop_year = str(config["calculation_year"])
# Getting the year for centroid data
centroid_year = str(config["centroid_year"])
# Get the pandas dataframe for the stops data
stops_df = di.get_stops_file(url=config["NAPTAN_API"],
dir=os.path.join(os.getcwd(),
"data",
"stops"))
# filter out on inactive stops
filtered_stops = dt.filter_stops(stops_df=stops_df)
# coverts from pandas df to geo df
stops_geo_df = (di.geo_df_from_pd_df(pd_df=filtered_stops,
geom_x='Easting',
geom_y='Northing',
crs=DEFAULT_CRS))
# adds in high/low capacity column
stops_geo_df=dt.add_stop_capacity_type(stops_df=stops_geo_df)
# define la col which is LADXXNM where XX is last 2 digits of year e.g 21 from 2021
lad_col = f'LAD{pop_year[-2:]}NM'
# getting path for .shp file for LA's
uk_la_path = di.get_shp_file_name(dir=os.path.join(os.getcwd(),
"data",
"LA_shp",
pop_year))
# getting the coordinates for all LA's
uk_la_file = di.geo_df_from_geospatialfile(path_to_file=uk_la_path)
# Get list of all pop_estimate files for target year
pop_files = os.listdir(os.path.join(os.getcwd(),
"data/population_estimates",
pop_year
)
)
# Get the population data for the whole nation for the specified year
whole_nation_pop_df = di.get_whole_nation_pop_df(pop_files, pop_year)
# Get population weighted centroids into a dataframe
uk_pop_wtd_centr_df = (di.geo_df_from_geospatialfile
(os.path.join
(DATA_DIR,
'pop_weighted_centroids',
centroid_year)))
# Get output area boundaries
# OA_df = pd.read_csv(config["OA_boundaries_csv"])
# Links were changed at the source site which made the script fail.
# Manually downloading the csv for now
OA_boundaries_df = pd.read_csv(
os.path.join("data",
"Output_Areas__December_2011__Boundaries_EW_BGC.csv"))
# Merge with uk population df
uk_pop_wtd_centr_df = uk_pop_wtd_centr_df.merge(
OA_boundaries_df, on="OA11CD", how='left')
# Clean after merge
uk_pop_wtd_centr_df.drop('OBJECTID_y', axis=1, inplace=True)
uk_pop_wtd_centr_df.rename({'OBJECTID_x': 'OBJECTID'}, inplace=True)
# Getting the urban-rural classification by OA for England and Wales
Urb_Rur_ZIP_LINK = config["Urb_Rur_ZIP_LINK"]
URB_RUR_TYPES = config["URB_RUR_TYPES"]
# Make a df of the urban-rural classification
urb_rur_df = (di.any_to_pd("RUC11_OA11_EW",
Urb_Rur_ZIP_LINK,
['csv'],
URB_RUR_TYPES))
# These are the codes (RUC11CD) mapping to rural and urban descriptions (RUC11)
# I could make this more succinct, but leaving here
# for clarity and maintainability
urban_dictionary = {'A1': 'Urban major conurbation',
'C1': 'Urban city and town',
'B1': 'Urban minor conurbation',
'C2': 'Urban city and town in a sparse setting'}
# mapping to a simple urban or rural classification
urb_rur_df["urb_rur_class"] = (urb_rur_df.RUC11CD.map
(lambda x: "urban"
if x in urban_dictionary.keys()
else "rural"))
# filter the df. We only want OA11CD and an urban/rurual classification
urb_rur_df = urb_rur_df[['OA11CD', 'urb_rur_class']]
# joining urban rural classification onto the pop df
uk_pop_wtd_centr_df = (uk_pop_wtd_centr_df.merge
(urb_rur_df,
on="OA11CD",
how='left'))
# Joining the population dataframe to the centroids dataframe,
whole_nation_pop_df = whole_nation_pop_df.join(
other=uk_pop_wtd_centr_df.set_index('OA11CD'), on='OA11CD', how='left')
# Map OA codes to Local Authority Names
oa_la_lookup_path = di.get_oa_la_file_name(os.path.join(os.getcwd(),
"data/oa_la_mapping",
pop_year))
LA_df = pd.read_csv(oa_la_lookup_path, usecols=["OA11CD", lad_col])
whole_nation_pop_df = pd.merge(
whole_nation_pop_df, LA_df, how="left", on="OA11CD")
# Unique list of LA's to iterate through
list_local_auth = uk_la_file[lad_col].unique()
# selecting random LA for dev purposes
# eventually will iterate through all LA's
random_la=random.choice(list_local_auth)
list_local_auth=[random_la]
# define output dicts to capture dfs
total_df_dict = {}
sex_df_dict = {}
urb_rur_df_dict={}
disab_df_dict = {}
age_df_dict = {}
for local_auth in list_local_auth:
print(f"Processing: {local_auth}")
# Get a polygon of la based on the Location Code
la_poly = (gs.get_polygons_of_loccode(
geo_df=uk_la_file,
dissolveby=lad_col,
search=local_auth))
# Creating a Geo Dataframe of only stops in la
la_stops_geo_df = (gs.find_points_in_poly
(geo_df=stops_geo_df,
polygon_obj=la_poly))
# Make LA LSOA just containing local auth
uk_la_file = uk_la_file[[lad_col, 'geometry']]
# merge the two dataframes limiting to just the la
la_pop_df = whole_nation_pop_df.merge(uk_la_file,
how='right',
left_on=lad_col,
right_on=lad_col,
suffixes=('_pop', '_LA'))
# subset by the local authority name needed
la_pop_df = la_pop_df.loc[la_pop_df[lad_col] == local_auth]
# rename the "All Ages" column to pop_count as it's the population count
la_pop_df.rename(columns={"All Ages": "pop_count"}, inplace=True)
# Get a list of ages from config
age_lst = config['age_lst']
# Get a datframe limited to the data ages columns only
age_df = dt.slice_age_df(la_pop_df, age_lst)
# Create a list of tuples of the start and finish indexes for the age bins
age_bins = dt.get_col_bins(age_lst)
# get the ages in the age_df binned, and drop the original columns
age_df = dt.bin_pop_ages(age_df, age_bins, age_lst)
# Ridding the la_pop df of the same cols
la_pop_df.drop(age_lst, axis=1, inplace=True)
# merging summed+grouped ages back in
la_pop_df = pd.merge(la_pop_df, age_df, left_index=True, right_index=True)
# converting into GeoDataFrame
la_pop_df = gpd.GeoDataFrame(la_pop_df)
# create a buffer around the stops, in column "geometry" #forthedemo
# the `buffer_points` function changes the df in situ
la_stops_geo_df = gs.buffer_points(la_stops_geo_df)
    # renaming the column to "geometry" so the point-in-polygon func gets the expected column name
la_pop_df.rename(columns={"geometry_pop": "geometry"}, inplace=True)
# import the disability data - this is the based on the 2011 census
# TODO: use new csv_to_df func to make disability_df
disability_df = pd.read_csv(os.path.join(CWD,
"data",
"nomis_QS303.csv"),
header=5)
# drop the column "mnemonic" as it seems to be a duplicate of the OA code
# also "All categories: Long-term health problem or disability" is not needed,
# nor is "Day-to-day activities not limited"
drop_lst = ["mnemonic",
"All categories: Long-term health problem or disability"]
disability_df.drop(drop_lst, axis=1, inplace=True)
# the col headers are database unfriendly. Defining their replacement names
replacements = {"2011 output area": 'OA11CD',
"Day-to-day activities limited a lot": "disab_ltd_lot",
"Day-to-day activities limited a little": "disab_ltd_little",
'Day-to-day activities not limited': "disab_not_ltd"}
# renaming the dodgy col names with their replacements
disability_df.rename(columns=replacements, inplace=True)
# Getting the disab total
disability_df["disb_total"] = (disability_df["disab_ltd_lot"]
+ disability_df["disab_ltd_little"])
    # Calculating the total "non-disabled"
la_pop_only = la_pop_df[['OA11CD','pop_count']]
disability_df = la_pop_only.merge(disability_df, on="OA11CD")
# Putting the result back into the disability df
disability_df["non-disabled"] = disability_df["pop_count"] - disability_df['disb_total']
# Calculating the proportion of disabled people in each OA
disability_df["proportion_disabled"] = (
disability_df['disb_total']
/
disability_df['pop_count']
)
    # Calculating the proportion of non-disabled people in each OA
disability_df["proportion_non-disabled"] = (
disability_df['non-disabled']
/
disability_df['pop_count']
)
# Slice disability df that only has the proportion disabled column and the OA11CD col
disab_prop_df = disability_df[['OA11CD', 'proportion_disabled', 'proportion_non-disabled']]
# Merge the proportion disability df into main the pop df with a left join
la_pop_df = la_pop_df.merge(disab_prop_df, on='OA11CD', how="left")
# Make the calculation of the number of people with disabilities in the year
# of the population estimates
la_pop_df["number_disabled"] = (
round
(la_pop_df["pop_count"]
*
la_pop_df["proportion_disabled"])
)
la_pop_df["number_disabled"] = la_pop_df["number_disabled"].astype(int)
# Make the calculation of the number of non-disabled people in the year
# of the population estimates
la_pop_df["number_non-disabled"] = (
round
(la_pop_df["pop_count"]
*
la_pop_df["proportion_non-disabled"])
)
la_pop_df["number_non-disabled"] = la_pop_df["number_non-disabled"].astype(int)
# import the sex data
# # TODO: use new csv_to_df func to make the sex_df
# sex_df = pd.read_csv(os.path.join(CWD, "data", "nomis_QS104EW.csv"),
# header=6,
# usecols=["2011 output area",
# "Males", "Females"])
# sex_df = bham_pop_df['OA11CD', 'males_pop', 'fem_pop']
# # # renaming the dodgy col names with their replacements
replacements = {"males_pop": "male",
"fem_pop": "female"}
la_pop_df.rename(columns=replacements, inplace=True)
# # merge the sex data with the rest of the population data
# bham_pop_df = bham_pop_df.merge(sex_df, on='OA11CD', how='left')
# Make a polygon object from the geometry column of the stops df
# all_stops_poly = gs.poly_from_polys(birmingham_stops_geo_df)
# # find all the pop centroids which are in the la_stops_geo_df
pop_in_poly_df = gs.find_points_in_poly(la_pop_df, la_stops_geo_df)
# Dedupe the df because many OAs are appearing multiple times (i.e. they are served by multiple stops)
pop_in_poly_df = pop_in_poly_df.drop_duplicates(subset="OA11CD")
# Count the population served by public transport
served = pop_in_poly_df.pop_count.sum()
full_pop = la_pop_df.pop_count.sum()
not_served = full_pop - served
pct_not_served = "{:.2f}".format(not_served/full_pop*100)
pct_served = "{:.2f}".format(served/full_pop*100)
print(f"""The number of people who are served by public transport is {served}.\n
The full population of {local_auth} is calculated as {full_pop}
While the number of people who are not served is {not_served}""")
la_results_df = pd.DataFrame({"All_pop":[full_pop],
"Served":[served],
"Unserved":[not_served],
"Percentage served":[pct_served],
"Percentage unserved":[pct_not_served]})
# Re-orienting the df to what's accepted by the reshaper and renaming col
la_results_df = la_results_df.T.rename(columns={0:"Total"})
# Feeding the la_results_df to the reshaper
la_results_df_out = do.reshape_for_output(la_results_df,
id_col="Total",
local_auth=local_auth)
# Finally for the local authority totals the id_col can be dropped
# That's because the disaggregations each have their own column,
# but "Total" is not a disaggregation so doesn't have a column.
# It will simply show up as blanks (i.e. Total) in all disagg columns
la_results_df_out.drop("Total", axis=1, inplace=True)
# Output this iteration's df to the dict
total_df_dict[local_auth] = la_results_df_out
# # Disaggregations!
pd.set_option("precision", 1)
# Calculating those served and not served by age
age_bins_ = ['0-4', '5-9', '10-14', '15-19', '20-24',
'25-29', '30-34', '35-39', '40-44', '45-49', '50-54',
'55-59', '60-64', '65-69', '70-74', '75-79',
'80-84', '85-89', '90+']
age_servd_df = dt.served_proportions_disagg(pop_df=la_pop_df,
pop_in_poly_df=pop_in_poly_df,
cols_lst=age_bins_)
# Feeding the results to the reshaper
age_servd_df_out = do.reshape_for_output(age_servd_df,
id_col="Age",
local_auth=local_auth)
# Output this local auth's age df to the dict
age_df_dict[local_auth] = age_servd_df_out
# print(age_servd_df)
# # Calculating those served and not served by sex
sex_cols = ['male', 'female']
sex_servd_df = dt.served_proportions_disagg(pop_df=la_pop_df,
pop_in_poly_df=pop_in_poly_df,
cols_lst=sex_cols)
# Feeding the results to the reshaper
sex_servd_df_out = do.reshape_for_output(sex_servd_df,
id_col="Sex",
local_auth=local_auth)
# Output this iteration's sex df to the dict
sex_df_dict[local_auth]=sex_servd_df_out
# Calculating those served and not served by disability
disab_cols = ["number_disabled"]
disab_servd_df = dt.served_proportions_disagg(pop_df=la_pop_df,
pop_in_poly_df=pop_in_poly_df,
cols_lst=disab_cols)
# Feeding the results to the reshaper
disab_servd_df_out = do.reshape_for_output(disab_servd_df,
id_col=disab_cols[0],
local_auth=local_auth,
id_rename="Disability Status")
# The disability df is unusual. I think all rows correspond to people with
# disabilities only. There is no "not-disabled" status here (I think)
disab_servd_df_out.replace(to_replace="number_disabled",
value="Disabled",
inplace=True)
# Output this iteration's sex df to the dict
sex_df_dict[local_auth]=sex_servd_df_out
# Calculating non-disabled people served and not served
non_disab_cols = ["number_non-disabled"]
non_disab_servd_df = dt.served_proportions_disagg(pop_df=la_pop_df,
pop_in_poly_df=pop_in_poly_df,
cols_lst=non_disab_cols)
# Feeding the results to the reshaper
non_disab_servd_df_out = do.reshape_for_output(non_disab_servd_df,
id_col=disab_cols[0],
local_auth=local_auth,
id_rename="Disability Status")
# The disability df is unusual. I think all rows correspond to people with
# disabilities only. There is no "not-disabled" status here (I think)
non_disab_servd_df_out.replace(to_replace="number_non-disabled",
value="Non-disabled",
inplace=True)
# Concatting non-disabled and disabled dataframes
    non_disab_disab_servd_df_out = pd.concat([non_disab_servd_df_out, disab_servd_df_out])
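    # Hedged continuation sketch (the original loop body is truncated here): store the
    # combined disability breakdown for this local authority, mirroring the pattern
    # used for the other disaggregations above.
    disab_df_dict[local_auth] = non_disab_disab_servd_df_out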
import os
from common.score import scorePredict
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from simpletransformers.classification.classification_model import ClassificationModel
def train_predict_model(df_train, df_test, is_predict, use_cuda):
labels_test = pd.Series(df_test['labels']).to_numpy()
labels = list(df_train['labels'].unique())
labels.sort()
model = ClassificationModel('bert', 'dccuchile/bert-base-spanish-wwm-cased',
num_labels=len(labels), use_cuda=use_cuda, args={
'learning_rate':2e-5,
'num_train_epochs': 3,
'reprocess_input_data': True,
'overwrite_output_dir': True,
'process_count': 10,
'train_batch_size': 4,
'eval_batch_size': 4,
'max_seq_length': 512,
'multiprocessing_chunksize': 500,
'fp16': True,
'fp16_opt_level': 'O1'})
model.train_model(df_train)
results = ''
if is_predict:
text_a = df_test['text_a']
text_b = df_test['text_b']
        df_result = pd.concat([text_a, text_b], axis=1)
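        # Hedged continuation sketch (the original function is truncated here): predict
        # on the concatenated sentence pairs and report plain accuracy. How the imported
        # scorePredict helper is meant to be used is unknown, so it is not called here.
        predictions, raw_outputs = model.predict(df_result.values.tolist())
        results = accuracy_score(labels_test, predictions)
    return results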
# tests.test_features.test_jointplot
# Test the JointPlot Visualizer
#
# Author: <NAME>
# Created: Mon Apr 10 21:00:54 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers.
# For license information, see LICENSE.txt
#
# ID: test_jointplot.py [9e008b0] <EMAIL> $
"""
Test joint plot visualization methods.
These tests work differently depending on what version of matplotlib is
installed. If version 2.0.2 or greater is installed, then most tests will
execute, otherwise the histogram tests will skip and only the warning will
be tested.
"""
##########################################################################
## Imports
##########################################################################
import sys
import pytest
import numpy as np
from functools import partial
from unittest.mock import patch, MagicMock
from sklearn.datasets import make_classification, make_regression
from tests.base import IS_WINDOWS_OR_CONDA, VisualTestCase
from yellowbrick.exceptions import YellowbrickValueError
from yellowbrick.features.jointplot import *
from ..fixtures import Dataset
try:
# Only available in Matplotlib >= 2.0.2
from mpl_toolkits.axes_grid1 import make_axes_locatable
except ImportError:
make_axes_locatable = None
try:
import pandas as pd
except ImportError:
pd = None
##########################################################################
## Fixtures
##########################################################################
# Random numpy array generators
rand1d = partial(np.random.rand, 120)
rand2col = partial(np.random.rand, 120, 2)
rand3col = partial(np.random.rand, 120, 3)
@pytest.fixture(scope="class")
def discrete(request):
"""
Creates a simple 2-column dataset with a discrete target.
"""
X, y = make_classification(
n_samples=120,
n_features=2,
n_informative=2,
n_redundant=0,
n_classes=3,
n_clusters_per_class=1,
random_state=2221,
)
request.cls.discrete = Dataset(X, y)
@pytest.fixture(scope="class")
def continuous(request):
"""
Creates a simple 2-column dataset with a continuous target.
"""
X, y = make_regression(n_samples=120, n_features=2, random_state=1112)
request.cls.continuous = Dataset(X, y)
##########################################################################
## JointPlot Tests
##########################################################################
@pytest.mark.usefixtures("discrete", "continuous")
class TestJointPlotNoHistogram(VisualTestCase):
"""
Test the JointPlot visualizer without histograms
"""
def test_invalid_columns_values(self):
"""
Assert invalid columns arguments raise exception
"""
with pytest.raises(YellowbrickValueError, match="invalid for joint plot"):
JointPlot(columns=["a", "b", "c"], hist=False)
def test_invalid_correlation_values(self):
"""
Assert invalid correlation arguments raise an exception
"""
with pytest.raises(YellowbrickValueError, match="invalid correlation method"):
JointPlot(correlation="foo", hist=False)
def test_invalid_kind_values(self):
"""
Assert invalid kind arguments raise exception
"""
for bad_kind in ("foo", None, 123):
with pytest.raises(YellowbrickValueError, match="invalid joint plot kind"):
JointPlot(kind=bad_kind, hist=False)
def test_invalid_hist_values(self):
"""
Assert invalid hist arguments raise exception
"""
for bad_hist in ("foo", 123):
with pytest.raises(
YellowbrickValueError, match="invalid argument for hist"
):
JointPlot(hist=bad_hist)
def test_no_haxes(self):
"""
Test that xhax and yhax are not available
"""
oz = JointPlot(hist=False)
with pytest.raises(AttributeError, match="histogram for the X axis"):
oz.xhax
with pytest.raises(AttributeError, match="histogram for the Y axis"):
oz.yhax
@patch("yellowbrick.features.jointplot.plt")
def test_correlation(self, mplt):
"""
Test correlation is correctly computed
"""
x = self.discrete.X[:, 0]
y = self.discrete.X[:, 1]
cases = (
("pearson", -0.3847799883805261),
("spearman", -0.37301201472324463),
("covariance", -0.5535440619953924),
("kendalltau", -0.2504201680672269),
)
for alg, expected in cases:
oz = JointPlot(hist=False, correlation=alg, columns=None)
oz.ax = MagicMock()
oz.fit(x, y)
assert hasattr(oz, "corr_")
assert oz.corr_ == pytest.approx(
expected
), "{} not computed correctly".format(alg)
def test_columns_none_invalid_x(self):
"""
When self.columns=None validate X and y
"""
bad_kws = (
{"X": rand1d(), "y": None},
{"X": rand3col(), "y": None},
{"X": rand2col(), "y": rand1d()},
{"X": rand3col(), "y": rand1d()},
{"X": rand1d(), "y": rand2col()},
)
for kws in bad_kws:
oz = JointPlot(columns=None, hist=False)
with pytest.raises(
YellowbrickValueError, match="when self.columns is None"
):
oz.fit(**kws)
def test_columns_none_x_y(self):
"""
When self.columns=None image similarity with valid X and y
"""
oz = JointPlot(hist=False, columns=None)
assert oz.fit(self.discrete.X[:, 0], self.discrete.y) is oz
assert hasattr(oz, "corr_")
oz.finalize()
# Appveyor and Linux conda fail due to non-text-based differences
self.assert_images_similar(oz, tol=2.5)
def test_columns_none_x(self):
"""
When self.columns=None image similarity with valid X, no y
"""
oz = JointPlot(hist=False, columns=None)
assert oz.fit(self.discrete.X) is oz
assert hasattr(oz, "corr_")
oz.finalize()
tol = (
4.0 if sys.platform == "win32" else 0.01
) # Fails on AppVeyor with RMS 3.941
self.assert_images_similar(oz, tol=tol)
def test_columns_single_index_no_y(self):
"""
When self.columns=int or str y must not be None
"""
oz = JointPlot(columns="foo", hist=False)
with pytest.raises(YellowbrickValueError, match="y must be specified"):
oz.fit(rand2col(), y=None)
def test_columns_single_invalid_index_numpy(self):
"""
When self.columns=int validate the index in X
"""
oz = JointPlot(columns=2, hist=False)
with pytest.raises(IndexError, match="could not index column '2' into type"):
oz.fit(self.continuous.X, self.continuous.y)
@pytest.mark.skipif(pd is None, reason="test requires pandas")
def test_columns_single_invalid_index_pandas(self):
"""
When self.columns=str validate the index in X
"""
oz = JointPlot(columns="foo", hist=False)
X = pd.DataFrame(self.continuous.X, columns=["a", "b"])
y = | pd.Series(self.continuous.y) | pandas.Series |
import pandas as pd
# The ndarrays must all be the same length. If an index is passed, it must clearly also be
# the same length as the arrays. If no index is passed, the result will be range(n), where
# n is the array length.
data = {'Username': ['foo', 'bar', 'buz'],
'Email': ['<EMAIL>', '<EMAIL>', '<EMAIL>']
}
df = | pd.DataFrame(data=data) | pandas.DataFrame |
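# Illustrative variation (row labels below are made up): passing an explicit index of matching
# length labels the rows instead of falling back to the default range(n) index described above.
df_labeled = pd.DataFrame(data=data, index=['user_a', 'user_b', 'user_c'])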
#!/usr/bin/env python
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
import os
import datetime
from secrets import USERNAME, PASSWORD
import ipaddress
import xlsxwriter
import pandas as pd
import constants
from eman import Eman
from logging_config import configure_logger
LOGPATH = os.path.abspath(os.curdir) + "/logs/viptela_onboarding.log"
LOGGER = configure_logger(__name__, LOGPATH)
class UserOnboard:
""" Pulls from CSV file to create subnets for a specific addressblock for
use in assigning for viptela CVO users. If username and password are not
passed via command line, they mus tbe present in a file called secrets.py
in the same directory as onboarding.py in the following format:
USERNAME = "username"
PASSWORD = "password"
Please provide absolute path to the csv file.
An xlsx called vedge_onboarding-<current date>.xls is created in the same
directory as onboarding.py with the results.
"""
    def __init__(self, username="", password="", csv_file=""):
self.username = username
self.password = password
self.csv_file = csv_file
def read_csv(self):
"""
        Reads the CSV file, row by row, and creates the subnets, scope and DHCP
interfaces accordingly.
:return:
"""
LOGGER.info(
"\n"
"+++++++++++++++++++++++++++++\n"
"Starting new on-boarding run.\n"
"+++++++++++++++++++++++++++++\n"
)
# get username/password for address management access
if self.username and self.password:
am = Eman(self.username, self.password)
else:
am = Eman(USERNAME, PASSWORD)
# Checking Authentication with EMAN
self.check_eman_auth(am)
# Open xlsx workbook for editing
workbook, worksheet = self.openxlsx()
# read dataset from csv file into data with pandas
data = pd.read_csv(self.csv_file)
outputrow = 2
# read and act upon each row in dataset
for index, row in data.head(n=1000).iterrows():
hostname = row["csv-host-name"]
LOGGER.info("\n\n" f"++++++++++++++{hostname}+++++++++++++++\n")
region = row["REGION"].upper()
if | pd.isnull(row["csv-deviceIP"]) | pandas.isnull |
# coding: utf-8
# In[4]:
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
import pandas as pd
from datetime import datetime
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import plotly.offline as py
import plotly.graph_objs as go
import numpy as np
import seaborn as sns
py.init_notebook_mode(connected=True)
get_ipython().magic('matplotlib inline')
# In[5]:
import matplotlib
import pylab
import matplotlib.pyplot as plt
import matplotlib.dates as md
import datetime
import seaborn as sns
from scipy.stats import pearsonr
from matplotlib import cm as cm
import calendar
import warnings
import itertools
from statsmodels.tsa.stattools import adfuller
import statsmodels.formula.api as smf
import statsmodels.api as sm
import statsmodels.tsa.api as smt
from sklearn.metrics import mean_squared_error
import pandas as pd
import seaborn as sb
import itertools
import numpy as np
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
from numpy import loadtxt
import os
import json
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.externals import joblib
import warnings
warnings.filterwarnings("ignore")
y = 2016
new_data = pd.DataFrame()
sample_times = []
for y in range(2014,2016,1):
print (y)
for m in range(1,13,1):
        no_of_days = calendar.monthrange(y, m)[1]
for d in range (1,no_of_days+1,1):
## for d in range (1,2,1):
# data = pd.read_csv("C:\\Users\\ahilan\\Dropbox\\Research\\Solar Forecast\\Solar Asia 2018\\Data\\Year %d\\D120318_%d%02d%02d_0000.csv"%(y,y,m, d));
data = pd.read_csv("F:\edit\Data\data\Predicting\\D120318_%d%02d%02d_0000.csv"%(y,m,d));
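            # Keep a day only when consecutive records are spaced 10 minutes (600 s) apart, i.e. the
            # file is on the expected sampling grid; the first 144 rows (24 h at 10-minute resolution)
            # are retained and timestamped from 06:00 onwards.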
if (pd.to_datetime(data['Date/time'][2]) -pd.to_datetime(data['Date/time'][1])).seconds ==600:
new_data_temp = data[['Date/time','Anemometer;wind_speed;Avg','Wind Vane;wind_direction;Avg','Hygro/Thermo;humidity;Avg', 'Hygro/Thermo;temperature;Avg','Barometer;air_pressure;Avg','Pyranometer-Diffused;solar_irradiance;Avg', 'Pyranometer-Global;solar_irradiance;Avg', 'Silicon;voltage;Avg']][0:144].copy()
new_data = new_data.append(new_data_temp)
for i in range(len(new_data_temp)):
sample_times.append(datetime.datetime(y, m, d, 6, 00, 0)+ i*datetime.timedelta(minutes=10))
elif (pd.to_datetime(data['Date/time'][2]) - | pd.to_datetime(data['Date/time'][1]) | pandas.to_datetime |
"""
This file contains methods to visualize EKG data, clean EKG data and run EKG analyses.
Classes
-------
EKG
Notes
-----
All R peak detections should be manually inspected with EKG.plotpeaks method and
false detections manually removed with rm_peak method. After rpeak examination,
NaN data can be accounted for by removing false IBIs with rm_ibi method.
"""
import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import scipy as sp
import statistics
import biosignalsnotebooks as bsnb
from scipy import interpolate
from numpy import linspace, diff, zeros_like, arange, array
from mne.time_frequency import psd_array_multitaper
from pandas.plotting import register_matplotlib_converters
from scipy.signal import welch
class EKG:
"""
Run EKG analyses including cleaning and visualizing data.
Attributes
----------
metadata : nested dict
File information and analysis information.
Format {str:{str:val}} with val being str, bool, float, int or pd.Timestamp.
data : pd.DataFrame
Raw data of the EKG signal (mV) and the threshold line (mV) at each sampled time point.
rpeak_artifacts : pd.Series
False R peak detections that have been removed.
rpeaks_added : pd.Series
R peak detections that have been added.
ibi_artifacts : pd.Series
Interbeat interval data that has been removed.
rpeaks : pd.Series
Cleaned R peaks data without removed peaks and with added peaks.
rr : np.ndarray
Time between R peaks (ms).
nn : np.ndarray
Cleaned time between R peaks (ms) without removed interbeat interval data.
rpeaks_df : pd.DataFrame
Raw EKG value (mV) and corresponding interbeat interval leading up to the data point (ms) at each sampled point.
"""
def __init__(self, fname, fpath, polarity='positive', min_dur=True, epoched=True, smooth=False, sm_wn=30, mw_size=100, upshift=3.5,
rms_align='right', detect_peaks=True, pan_tompkins=True):
"""
Initialize raw EKG object.
Parameters
----------
fname : str
Filename.
fpath : str
Path to file.
polarity: str, default 'positive'
polarity of the R-peak deflection. Options: 'positive', 'negative'
min_dur : bool, default True
Only load files that are >= 5 minutes long.
epoched : bool, default True
Whether file was epoched using ioeeg.
smooth : bool, default False
Whether raw signal should be smoothed before peak detections. Set True if raw data has consistent high frequency noise
preventing accurate peak detection.
sm_wn : float, default 30
Size of moving window for rms smoothing preprocessing (milliseconds).
mw_size : float, default 100
Moving window size for R peak detection (milliseconds).
upshift : float, default 3.5
Detection threshold upshift for R peak detection (% of signal).
rms_align: str, default 'right'
whether to align the mean to the right or left side of the moving window [options: 'right', 'left']
rm_artifacts : bool, default False
Apply IBI artifact removal algorithm.
detect_peaks : bool, default True
Option to detect R peaks and calculate interbeat intervals.
pan_tompkins : bool, default True
Option to detect R peaks using automatic pan tompkins detection method
Returns
-------
EKG object. Includes R peak detections and calculated inter-beat intervals if detect_peaks is set to True.
"""
# set metadata
filepath = os.path.join(fpath, fname)
if epoched == False:
in_num, start_date, slpstage, cycle = fname.split('_')[:4]
elif epoched == True:
in_num, start_date, slpstage, cycle, epoch = fname.split('_')[:5]
self.metadata = {'file_info':{'in_num': in_num,
'fname': fname,
'path': filepath,
'rpeak_polarity': polarity,
'start_date': start_date,
'sleep_stage': slpstage,
'cycle': cycle
}
}
if epoched == True:
self.metadata['file_info']['epoch'] = epoch
# load the ekg
self.load_ekg(min_dur)
# flip the polarity if R peaks deflections are negative
if polarity == 'negative':
self.data = self.data*-1
if smooth == True:
self.rms_smooth(sm_wn)
else:
self.metadata['analysis_info']['smooth'] = False
# create empty series for false detections removed and missed peaks added
self.rpeak_artifacts = pd.Series()
self.rpeaks_added = pd.Series()
self.ibi_artifacts = pd.Series()
# detect R peaks
if detect_peaks == True:
if pan_tompkins == True:
self.pan_tompkins_detector()
# detect R peaks & calculate inter-beat intevals
else:
self.calc_RR(smooth, mw_size, upshift, rms_align)
self.metadata['analysis_info']['pan_tompkins'] = False
# initialize the nn object
self.nn = self.rr
register_matplotlib_converters()
def load_ekg(self, min_dur):
"""
Load EKG data from csv file and extract metadata including sampling frequency, cycle length, start time and NaN data.
Parameters
----------
min_dur : bool, default True
If set to True, will not load files shorter than the minimum duration length of 5 minutes.
"""
data = pd.read_csv(self.metadata['file_info']['path'], header = [0, 1], index_col = 0, parse_dates=True)['EKG']
# Check cycle length against 5 minute duration minimum
cycle_len_secs = (data.index[-1] - data.index[0]).total_seconds()
if cycle_len_secs < 60*5-1:
if min_dur == True:
print('Data is shorter than minimum duration. Cycle will not be loaded.')
print('--> To load data, set min_dur to False')
return
else:
print('* WARNING: Data is shorter than 5 minutes.')
self.data = data
else:
self.data = data
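        # Infer the sampling frequency (Hz) from the spacing between the first two timestamps.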
diff = data.index.to_series().diff()[1:2]
s_freq = 1000000/diff[0].microseconds
nans = len(data) - data['Raw'].count()
# Set metadata
self.metadata['file_info']['start_time'] = data.index[0]
self.metadata['analysis_info'] = {'s_freq': s_freq, 'cycle_len_secs': cycle_len_secs,
'NaNs(samples)': nans, 'NaNs(secs)': nans/s_freq}
print('EKG successfully imported.')
def rms_smooth(self, sm_wn):
"""
Smooth raw data with root mean square (RMS) moving window.
Reduce noise leading to false R peak detections.
Parameters
----------
sm_wn : float, default 30
Size of moving window for RMS smoothing preprocessing (ms).
"""
self.metadata['analysis_info']['smooth'] = True
self.metadata['analysis_info']['rms_smooth_wn'] = sm_wn
mw = int((sm_wn/1000)*self.metadata['analysis_info']['s_freq'])
self.data['raw_smooth'] = self.data.Raw.rolling(mw, center=True).mean()
def set_Rthres(self, smooth, mw_size, upshift, rms_align):
"""
Set R peak detection threshold based on moving average shifted up by a percentage of the EKG signal.
Parameters
----------
smooth : bool, default False
If set to True, raw EKG data will be smoothed using RMS smoothing window.
mw_size : float, default 100
Time over which the moving average of the EKG signal will be taken to calculate the R peak detection threshold (ms).
upshift : float, default 3.5
Percentage of EKG signal that the moving average will be shifted up by to set the R peak detection threshold.
rms_align: str, default 'right'
whether to align the mean to the right or left side of the moving window [options: 'right', 'left']
See Also
--------
EKG.rms_smooth : Smooth raw EKG data with root mean square (RMS) moving window.
"""
print('Calculating moving average with {} ms window and a {}% upshift...'.format(mw_size, upshift))
# convert moving window to sample & calc moving average over window
mw = int((mw_size/1000)*self.metadata['analysis_info']['s_freq'])
#if smooth is true have the moving average calculated based off of smoothed data
if smooth == False:
mavg = self.data.Raw.rolling(mw).mean()
ekg_avg = np.mean(self.data['Raw'])
elif smooth == True:
mavg = self.data.raw_smooth.rolling(mw).mean()
ekg_avg = np.mean(self.data['raw_smooth'])
if rms_align == 'left':
# get the number of NaNs and shift the average left by that amount
mavg = mavg.shift(-mavg.isna().sum())
# replace edge nans with overall average
mavg = mavg.fillna(ekg_avg)
# set detection threshold as +upshift% of moving average
upshift_perc = upshift/100
det_thres = mavg + np.abs(mavg*upshift_perc)
# insert threshold column at consistent position in df to ensure same color for plotting regardless of smoothing
self.data.insert(1, 'EKG_thres', det_thres) # can remove this for speed, just keep as series
#set metadata
self.metadata['analysis_info']['mw_size'] = mw_size
self.metadata['analysis_info']['upshift'] = upshift
self.metadata['analysis_info']['rms_align'] = rms_align
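    # Illustrative numbers (not from the original data): with a 256 Hz recording and the defaults
    # above, the moving window is int((100 / 1000) * 256) = 25 samples and the threshold works out
    # to mavg + abs(mavg * 0.035), i.e. the rolling mean shifted up by 3.5% of its own magnitude.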
def detect_Rpeaks(self, smooth):
"""
Detect R peaks of raw or smoothed EKG signal based on detection threshold.
Parameters
----------
smooth : bool, default False
If set to True, raw EKG data is smoothed using a RMS smoothing window.
See Also
--------
EKG.rms_smooth : Smooth raw EKG data with root mean square (RMS) moving window
EKG.set_Rthres : Set R peak detection threshold based on moving average shifted up by a percentage of the EKG signal.
"""
print('Detecting R peaks...')
#Use the raw data or smoothed data depending on bool smooth
if smooth == False:
raw = pd.Series(self.data['Raw'])
elif smooth == True:
raw = pd.Series(self.data['raw_smooth'])
thres = pd.Series(self.data['EKG_thres'])
#create empty peaks list
peaks = []
x = 0
        # Walk through the signal: when the (possibly smoothed) value rises above the detection
        # threshold, open a region of interest and scan forward for the down-crossing
while x < len(raw):
if raw[x] > thres[x]:
roi_start = x
# count forwards to find down-crossing
for h in range(x, len(raw), 1):
# if value drops below threshold, end ROI
if raw[h] < thres[h]:
roi_end = h
break
# else if data ends before dropping below threshold, leave ROI open
# & advance h pointer to end loop
elif (raw[h] >= thres[h]) and (h == len(raw)-1):
roi_end = None
h += 1
break
# if ROI is closed, get maximum between roi_start and roi_end
if roi_end:
peak = raw[x:h].idxmax()
peaks.append(peak)
# advance the pointer
x = h
else:
x += 1
self.rpeaks = raw[peaks]
print('R peak detection complete')
# get time between peaks and convert to mseconds
self.rr = np.diff(self.rpeaks.index)/np.timedelta64(1, 'ms')
# create rpeaks dataframe and add ibi columm
rpeaks_df = | pd.DataFrame(self.rpeaks) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""Calculate the mobility demand.
SPDX-FileCopyrightText: 2016-2019 <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
__copyright__ = "<NAME> <<EMAIL>>"
__license__ = "MIT"
import os
import pandas as pd
from collections import namedtuple
from reegis import geometries, config as cfg, tools, energy_balance
def format_kba_table(filename, sheet):
"""
Clean the layout of the table.
The tables are made for human readability and not for automatic processing.
Lines with subtotals and format-strings of the column names are removed.
A valid MultiIndex is created to make it easier to filter the table by the
index.
Parameters
----------
filename : str
Path and name of the excel file.
sheet : str
Name of the sheet of the excel table.
Returns
-------
pandas.DataFrame
"""
# Read table
df = | pd.read_excel(filename, sheet, skiprows=7, header=[0, 1]) | pandas.read_excel |
import logging
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from hdrbp._util import (
basic_repr,
basic_str,
compute_correlation,
compute_diversification_ratio,
compute_drawdowns,
compute_gini,
compute_prices,
compute_risk_contributions,
compute_turnover,
compute_variance,
count_dates_per_year,
count_years,
)
logger = logging.getLogger(__name__)
@basic_str
@basic_repr
class MetricCalculator(ABC):
@property
def name(self):
return repr(self)
@abstractmethod
def calculate(self, result: pd.DataFrame) -> float:
pass
class GeometricMeanReturn(MetricCalculator):
def __init__(self, annualized: bool = False) -> None:
self._annualized = annualized
def calculate(self, result: pd.DataFrame) -> float:
logger.debug(f"{self}: Calculating metric")
result = _filter_valid_returns(result)
returns = result["return"].values
log_returns = np.log1p(returns)
mean_log_return = np.mean(log_returns)
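        # Standard relation used here: the geometric mean of (1 + r) is exp(mean(log(1 + r))), so the
        # mean log return is the key quantity; annualization would typically scale it by the number
        # of observations per year before converting back to a simple return.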
if self._annualized:
dates = | pd.to_datetime(result["date"].values) | pandas.to_datetime |
from __future__ import annotations
from io import (
BytesIO,
StringIO,
)
import os
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
NA,
DataFrame,
Index,
)
import pandas._testing as tm
import pandas.io.common as icom
from pandas.io.common import get_handle
from pandas.io.xml import read_xml
"""
CHECKLIST
[x] - ValueError: "Values for parser can only be lxml or etree."
etree
[x] - ImportError: "lxml not found, please install or use the etree parser."
[X] - TypeError: "...is not a valid type for attr_cols"
[X] - TypeError: "...is not a valid type for elem_cols"
[X] - LookupError: "unknown encoding"
[X] - KeyError: "...is not included in namespaces"
[X] - KeyError: "no valid column"
[X] - ValueError: "To use stylesheet, you need lxml installed..."
[] - OSError: (NEED PERMISSION ISSUE, DISK FULL, ETC.)
[X] - FileNotFoundError: "No such file or directory"
[X] - PermissionError: "Forbidden"
lxml
[X] - TypeError: "...is not a valid type for attr_cols"
[X] - TypeError: "...is not a valid type for elem_cols"
[X] - LookupError: "unknown encoding"
[] - OSError: (NEED PERMISSION ISSUE, DISK FULL, ETC.)
[X] - FileNotFoundError: "No such file or directory"
[X] - KeyError: "...is not included in namespaces"
[X] - KeyError: "no valid column"
[X] - ValueError: "stylesheet is not a url, file, or xml string."
[] - LookupError: (NEED WRONG ENCODING FOR FILE OUTPUT)
[] - URLError: (USUALLY DUE TO NETWORKING)
[] - HTTPError: (NEED AN ONLINE STYLESHEET)
[X] - OSError: "failed to load external entity"
[X] - XMLSyntaxError: "Opening and ending tag mismatch"
[X] - XSLTApplyError: "Cannot resolve URI"
[X] - XSLTParseError: "failed to compile"
[X] - PermissionError: "Forbidden"
"""
geom_df = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": [360, 360, 180],
"sides": [4, np.nan, 3],
}
)
planet_df = DataFrame(
{
"planet": [
"Mercury",
"Venus",
"Earth",
"Mars",
"Jupiter",
"Saturn",
"Uranus",
"Neptune",
],
"type": [
"terrestrial",
"terrestrial",
"terrestrial",
"terrestrial",
"gas giant",
"gas giant",
"ice giant",
"ice giant",
],
"location": [
"inner",
"inner",
"inner",
"inner",
"outer",
"outer",
"outer",
"outer",
],
"mass": [
0.330114,
4.86747,
5.97237,
0.641712,
1898.187,
568.3174,
86.8127,
102.4126,
],
}
)
from_file_expected = """\
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<category>cooking</category>
<title>Everyday Italian</title>
<author><NAME></author>
<year>2005</year>
<price>30.0</price>
</row>
<row>
<index>1</index>
<category>children</category>
<title>Harry Potter</title>
<author><NAME></author>
<year>2005</year>
<price>29.99</price>
</row>
<row>
<index>2</index>
<category>web</category>
<title>Learning XML</title>
<author><NAME></author>
<year>2003</year>
<price>39.95</price>
</row>
</data>"""
def equalize_decl(doc):
# etree and lxml differ on quotes and case in xml declaration
if doc is not None:
doc = doc.replace(
'<?xml version="1.0" encoding="utf-8"?',
"<?xml version='1.0' encoding='utf-8'?",
)
return doc
@pytest.fixture(params=["rb", "r"])
def mode(request):
return request.param
@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"])
def parser(request):
return request.param
# FILE OUTPUT
def test_file_output_str_read(datapath, parser):
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, parser=parser)
with tm.ensure_clean("test.xml") as path:
df_file.to_xml(path, parser=parser)
with open(path, "rb") as f:
output = f.read().decode("utf-8").strip()
output = equalize_decl(output)
assert output == from_file_expected
def test_file_output_bytes_read(datapath, parser):
filename = datapath("io", "data", "xml", "books.xml")
df_file = read_xml(filename, parser=parser)
with | tm.ensure_clean("test.xml") | pandas._testing.ensure_clean |
"""
Prepare training and testing datasets as CSV dictionaries 2.0
Created on 04/26/2019; modified on 11/06/2019
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
import re
# get all full paths of images
def image_ids_in(root_dir, ignore=['.DS_Store','dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
# pair tiles of 20x, 10x, 5x of the same area
def paired_tile_ids_in_old(slide, label, root_dir):
dira = os.path.isdir(root_dir + 'level0')
dirb = os.path.isdir(root_dir + 'level1')
dirc = os.path.isdir(root_dir + 'level2')
if dira and dirb and dirc:
if "TCGA" in root_dir:
fac = 1000
else:
fac = 500
ids = []
for level in range(3):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '.png' in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('_', id.split('y-', 1)[1])[0]) / fac)
try:
dup = re.split('.p', re.split('_', id.split('y-', 1)[1])[1])[0]
except IndexError:
dup = np.nan
ids.append([slide, label, level, dirr + '/' + id, x, y, dup])
else:
print('Skipping ID:', id)
ids = pd.DataFrame(ids, columns=['slide', 'label', 'level', 'path', 'x', 'y', 'dup'])
idsa = ids.loc[ids['level'] == 0]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L0path"})
idsb = ids.loc[ids['level'] == 1]
idsb = idsb.drop(columns=['slide', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L1path"})
idsc = ids.loc[ids['level'] == 2]
idsc = idsc.drop(columns=['slide', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L2path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
idsa = pd.merge(idsa, idsc, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y', 'dup'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
else:
idsa = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
return idsa
def tile_ids_in(inp):
ids = []
try:
for id in os.listdir(inp['path']):
if '_{}.png'.format(str(inp['sldnum'])) in id:
ids.append([inp['slide'], inp['level'], inp['path']+'/'+id, inp['BMI'], inp['age'], inp['label']])
except FileNotFoundError:
print('Ignore:', inp['path'])
return ids
# pair tiles of 10x, 5x, 2.5x of the same area
def paired_tile_ids_in(slide, label, root_dir, age=None, BMI=None):
dira = os.path.isdir(root_dir + 'level1')
dirb = os.path.isdir(root_dir + 'level2')
dirc = os.path.isdir(root_dir + 'level3')
if dira and dirb and dirc:
if "TCGA" in root_dir:
fac = 2000
else:
fac = 1000
ids = []
for level in range(1, 4):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '.png' in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('_', id.split('y-', 1)[1])[0]) / fac)
try:
dup = re.split('.p', re.split('_', id.split('y-', 1)[1])[1])[0]
except IndexError:
dup = np.nan
ids.append([slide, label, level, dirr + '/' + id, x, y, dup])
else:
print('Skipping ID:', id)
ids = pd.DataFrame(ids, columns=['slide', 'label', 'level', 'path', 'x', 'y', 'dup'])
idsa = ids.loc[ids['level'] == 1]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L0path"})
idsb = ids.loc[ids['level'] == 2]
idsb = idsb.drop(columns=['slide', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L1path"})
idsc = ids.loc[ids['level'] == 3]
idsc = idsc.drop(columns=['slide', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L2path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
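        # Flooring x and y to even values below maps pairs of finer-grid coordinates onto the coarser
        # level-3 grid so that the second merge lines up tiles covering the same slide area.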
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
idsa = pd.merge(idsa, idsc, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y', 'dup'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
idsa['age'] = age
idsa['BMI'] = BMI
else:
idsa = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
return idsa
# Balance CPTAC and TCGA tiles in each class
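# If one cohort holds more than five times as many tiles as the other for a class, the larger cohort
# is down-sampled without replacement so the CPTAC:TCGA ratio stays within 1:5.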
def balance(pdls, cls):
balanced = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
for i in range(cls):
ref = pdls.loc[pdls['label'] == i]
CPTAC = ref[~ref['slide'].str.contains("TCGA")]
TCGA = ref[ref['slide'].str.contains("TCGA")]
if CPTAC.shape[0] != 0 and TCGA.shape[0] != 0:
ratio = (CPTAC.shape[0])/(TCGA.shape[0])
if ratio < 0.2:
TCGA = TCGA.sample(int(5*CPTAC.shape[0]), replace=False)
ref = pd.concat([TCGA, CPTAC], sort=False)
elif ratio > 5:
CPTAC = CPTAC.sample(int(5*TCGA.shape[0]), replace=False)
ref = pd.concat([TCGA, CPTAC], sort=False)
balanced = pd.concat([balanced, ref], sort=False)
return balanced
# Prepare label at per patient level
def big_image_sum(pmd, path='../tiles/', ref_file='../Fusion_dummy_His_MUT_joined.csv'):
ref = pd.read_csv(ref_file, header=0)
big_images = []
if pmd == 'subtype':
ref = ref.loc[ref['subtype_0NA'] == 0]
for idx, row in ref.iterrows():
if row['subtype_POLE'] == 1:
big_images.append([row['name'], 0, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif row['subtype_MSI'] == 1:
big_images.append([row['name'], 1, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif row['subtype_Endometrioid'] == 1:
big_images.append([row['name'], 2, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif row['subtype_Serous-like'] == 1:
big_images.append([row['name'], 3, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif pmd == 'histology':
ref = ref.loc[ref['histology_Mixed'] == 0]
for idx, row in ref.iterrows():
if row['histology_Endometrioid'] == 1:
big_images.append([row['name'], 0, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
if row['histology_Serous'] == 1:
big_images.append([row['name'], 1, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif pmd in ['Endometrioid', 'MSI', 'Serous-like', 'POLE']:
# ref = ref.loc[ref['histology_Endometrioid'] == 1]
ref = ref.loc[ref['subtype_0NA'] == 0]
for idx, row in ref.iterrows():
big_images.append([row['name'], int(row['subtype_{}'.format(pmd)]), path + "{}/".format(str(row['name'])),
row['age'], row['BMI']])
elif pmd == 'MSIst':
ref = ref.loc[ref['MSIst_0NA'] == 0]
for idx, row in ref.iterrows():
big_images.append([row['name'], int(row['MSIst_MSI-H']), path + "{}/".format(str(row['name'])),
row['age'], row['BMI']])
else:
ref = ref.dropna(subset=[pmd])
for idx, row in ref.iterrows():
big_images.append([row['name'], int(row[pmd]), path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
datapd = pd.DataFrame(big_images, columns=['slide', 'label', 'path', 'age', 'BMI'])
return datapd
# TO KEEP SPLIT SAME AS BASELINES: separate into training and testing so each class uses the same
# separation ratio at the whole-slide level; the test and train csv files contain tiles' paths.
def set_sep_secondary(alll, path, cls, pmd, batchsize=24):
if pmd == 'subtype':
split = pd.read_csv('../split/ST.csv', header=0)
elif pmd == 'histology':
split = pd.read_csv('../split/his.csv', header=0)
elif pmd == 'Serous-like':
split = pd.read_csv('../split/CNVH.csv', header=0)
elif pmd == 'Endometrioid':
split = pd.read_csv('../split/CNVL.csv', header=0)
else:
split = pd.read_csv('../split/{}.csv'.format(pmd), header=0)
train = split.loc[split['set'] == 'train']['slide'].tolist()
validation = split.loc[split['set'] == 'validation']['slide'].tolist()
test = split.loc[split['set'] == 'test']['slide'].tolist()
trlist = []
telist = []
valist = []
subset = alll
valist.append(subset[subset['slide'].isin(validation)])
telist.append(subset[subset['slide'].isin(test)])
trlist.append(subset[subset['slide'].isin(train)])
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
train_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
validation_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
for idx, row in test.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
test_tiles = pd.concat([test_tiles, tile_ids])
for idx, row in train.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
train_tiles = pd.concat([train_tiles, tile_ids])
for idx, row in validation.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
validation_tiles = pd.concat([validation_tiles, tile_ids])
train_tiles = balance(train_tiles, cls=cls)
validation_tiles = balance(validation_tiles, cls=cls)
# No shuffle on test set
train_tiles = sku.shuffle(train_tiles)
validation_tiles = sku.shuffle(validation_tiles)
if train_tiles.shape[0] > int(batchsize * 80000 / 3):
train_tiles = train_tiles.sample(int(batchsize * 80000 / 3), replace=False)
print('Truncate training set!')
if validation_tiles.shape[0] > int(batchsize * 80000 / 30):
validation_tiles = validation_tiles.sample(int(batchsize * 80000 / 30), replace=False)
print('Truncate validation set!')
if test_tiles.shape[0] > int(batchsize * 80000 / 3):
test_tiles = test_tiles.sample(int(batchsize * 80000 / 3), replace=False)
print('Truncate test set!')
test_tiles.to_csv(path + '/te_sample.csv', header=True, index=False)
train_tiles.to_csv(path + '/tr_sample.csv', header=True, index=False)
validation_tiles.to_csv(path + '/va_sample.csv', header=True, index=False)
return train_tiles, test_tiles, validation_tiles
# Training and validation on TCGA; Testing on CPTAC
def set_sep_idp(alll, path, cls, cut=0.1, batchsize=64):
trlist = []
telist = []
valist = []
TCGA = alll[alll['slide'].str.contains("TCGA")]
CPTAC = alll[~alll['slide'].str.contains("TCGA")]
for i in range(cls):
subset = TCGA.loc[TCGA['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq) * cut)]
valist.append(subset[subset['slide'].isin(validation)])
train = unq[int(len(unq) * cut):]
trlist.append(subset[subset['slide'].isin(train)])
telist.append(CPTAC)
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
train_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
validation_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
for idx, row in test.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
test_tiles = pd.concat([test_tiles, tile_ids])
for idx, row in train.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
train_tiles = pd.concat([train_tiles, tile_ids])
for idx, row in validation.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
validation_tiles = pd.concat([validation_tiles, tile_ids])
train_tiles = balance(train_tiles, cls=cls)
validation_tiles = balance(validation_tiles, cls=cls)
# No shuffle on test set
train_tiles = sku.shuffle(train_tiles)
validation_tiles = sku.shuffle(validation_tiles)
if train_tiles.shape[0] > int(batchsize * 80000 / 3):
train_tiles = train_tiles.sample(int(batchsize * 80000 / 3), replace=False)
print('Truncate training set!')
if validation_tiles.shape[0] > int(batchsize * 80000 / 30):
validation_tiles = validation_tiles.sample(int(batchsize * 80000 / 30), replace=False)
print('Truncate validation set!')
if test_tiles.shape[0] > int(batchsize * 80000 / 3):
test_tiles = test_tiles.sample(int(batchsize * 80000 / 3), replace=False)
print('Truncate test set!')
test_tiles.to_csv(path + '/te_sample.csv', header=True, index=False)
train_tiles.to_csv(path + '/tr_sample.csv', header=True, index=False)
validation_tiles.to_csv(path + '/va_sample.csv', header=True, index=False)
return train_tiles, test_tiles, validation_tiles
# separate into training and testing so each class uses the same separation ratio at the
# whole-slide level; the test and train csv files contain tiles' paths.
def set_sep(alll, path, cls, cut=0.2, batchsize=24):
trlist = []
telist = []
valist = []
TCGA = alll[alll['slide'].str.contains("TCGA")]
CPTAC = alll[~alll['slide'].str.contains("TCGA")]
for i in range(cls):
subset = TCGA.loc[TCGA['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq)*cut/2)]
valist.append(subset[subset['slide'].isin(validation)])
test = unq[int(len(unq)*cut/2):int(len(unq)*cut)]
telist.append(subset[subset['slide'].isin(test)])
train = unq[int(len(unq)*cut):]
trlist.append(subset[subset['slide'].isin(train)])
subset = CPTAC.loc[CPTAC['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq) * cut / 2)]
valist.append(subset[subset['slide'].isin(validation)])
test = unq[int(len(unq) * cut / 2):int(len(unq) * cut)]
telist.append(subset[subset['slide'].isin(test)])
train = unq[int(len(unq) * cut):]
trlist.append(subset[subset['slide'].isin(train)])
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
train_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
validation_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
for idx, row in test.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
test_tiles = pd.concat([test_tiles, tile_ids])
for idx, row in train.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
train_tiles = pd.concat([train_tiles, tile_ids])
for idx, row in validation.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
validation_tiles = | pd.concat([validation_tiles, tile_ids]) | pandas.concat |
import keras.models
from keras.layers import Dense, Dropout, Activation, Input, Concatenate
import keras.backend as K
import numpy as np
import tensorflow as tf  # needed by Attention_Mapping_Model, which uses tf.keras layers directly
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.base import clone
from EmoMap.coling18.framework import util
import os
import math
class Model():
def __init__(self):
raise NotImplementedError
def fit(self):
raise NotImplementedError
def predict(self):
raise NotImplementedError
class Word_Model(Model):
def fit(self, words, labels):
raise NotImplementedError
def predict(self, words):
raise NotImplementedError
def pearson(y_true, y_pred):
    fsp = y_pred - K.mean(y_pred) # K.mean(y_pred) is a scalar here, so it is broadcast and subtracted from every element of y_pred
fst = y_true - K.mean(y_true)
devP = K.std(y_pred)
devT = K.std(y_true)
return K.mean(fsp*fst)/(devP*devT)
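# pearson() mirrors r = E[(y_pred - mean(y_pred)) * (y_true - mean(y_true))] / (std(y_pred) * std(y_true)),
# written with Keras backend ops so it can be tracked as a metric during training.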
class SKlearn_Mapping_Model(Word_Model):
def __init__(self, base_model, source_lexicon):
self.model=None
self.untrained=base_model
self.source_lex=source_lexicon
self.targets=None
self.initialize()
def initialize(self):
self.model=clone(self.untrained)
def __feature_extraction__(self, words):
return np.array([self.source_lex.loc[word] for word in words])
def fit(self, words, labels):
# self.model.fit(features, labels)
self.targets=labels.columns
features=self.__feature_extraction__(words)
self.model.fit(features, labels)
def predict(self, words):
features=self.__feature_extraction__(words)
preds=self.model.predict(features)
return pd.DataFrame(preds, columns=self.targets)
class Mapping_Model(Word_Model):
'''
Wrapper for Keras based MLP
'''
def __init__( self,
layers, #including input and output layer
activation,
dropout_hidden,
batch_size,
optimizer,
source_lexicon, # Emotion lexicon with entries in the
# source representation. Must also cover
# the entries in the test set.
verbose=0,
epochs=None,
train_steps=None,
kind='joint', #either "joint" or "separate"
):
'''
ARGS
batch_generation either 'serial' or 'radomreplace'
epochs Will be interpreted as training steps
if batch_generation is set to "randomreplace"
WATCH OUT!!!
'''
self.targets=None
self.source_lexicon=source_lexicon
self.epochs=epochs
self.train_steps=train_steps #will "round up" to make full epochs
self.batch_size=batch_size
self.verbose=verbose
self.layers=layers
self.activation=activation
self.dropout_hidden=dropout_hidden
self.optimizer=optimizer
self.kind=kind
self.kinds={'joint':self.__init_joint__, 'separate':self.__init_separate__}
assert (epochs is not None) or (train_steps is not None), 'Either epochs or train_streps must be set.'
assert not( epochs is not None and train_steps is not None ), 'You cannot specify both epochs and train_steps.'
self.initialize()
def __init_separate__(self):
input_layer = Input(shape=(self.layers[0],))
top_layer=[]
for i in range(self.layers[-1]):
curr_layers=[input_layer]
for j in range(len(self.layers)-2):
curr_layers.append(Dense(self.layers[j+1])(curr_layers[-1]))
curr_layers.append(Activation(self.activation)(curr_layers[-1]))
curr_layers.append(Dropout(rate=self.dropout_hidden)(curr_layers[-1]))
#last dense layer
top_layer.append(Dense(1)(curr_layers[-1]))
out=Concatenate()(top_layer)
self.model=keras.models.Model(inputs=[input_layer], outputs=out)
self.model.compile(optimizer=self.optimizer, loss='mse', metrics=[pearson])
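    # __init_separate__ builds one hidden stack per output dimension and concatenates the single-unit
    # heads, whereas __init_joint__ below shares a single stack across all outputs.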
def __init_joint__(self):
self.model=keras.models.Sequential()
self.model.add(Dense(self.layers[1], input_dim=self.layers[0]))
i=1
while i<len(self.layers)-1:
self.model.add(Activation(self.activation))
self.model.add(Dropout(rate=self.dropout_hidden))
self.model.add(Dense(self.layers[i+1]))
i+=1
self.model.compile(optimizer=self.optimizer, loss='mse', metrics=[pearson])
def initialize(self):
self.kinds[self.kind]()
def __feature_extraction__(self, words):
return np.array([self.source_lexicon.loc[word] for word in words])
def fit(self, words, labels):
self.targets=labels.columns
features=self.__feature_extraction__(words)
if bool(self.epochs)==True:
if self.verbose >0:
print('Using epoch wise training')
self.model.fit( features, labels,
epochs=self.epochs,
batch_size=self.batch_size,
verbose=self.verbose)
elif bool(self.train_steps)==True:
if self.verbose > 0:
print('Using step-wise training.')
bs=util.Serial_Batch_Gen( features=pd.DataFrame(features),
labels=pd.DataFrame(labels),
batch_size=self.batch_size)
for i_step in range(self.train_steps):
if i_step%100==0 and self.verbose>0:
print('Now at training step: '+str(i_step))
batch_features,batch_labels=bs.next()
self.model.train_on_batch(batch_features,batch_labels)
else:
            raise ValueError('Neither epochs nor train_steps are specified!')
def predict(self,words):
features=self.__feature_extraction__(words)
preds=self.model.predict(features)
return preds
def lexicon_creation(self, words, features):
preds=self.model.predict(features)
return pd.DataFrame(preds, index=words, columns=self.targets)
def test_at_steps(self, words, labels, test_split, test_steps, iterations):
# self.targets=labels.columns
# step 1 feature extraction
assert bool(self.train_steps)==True, 'Training must be specified by the number of training steps'
features=self.__feature_extraction__(words)
labels=pd.DataFrame(labels, index=words)
performance=pd.DataFrame(index=np.arange(1,iterations+1))
performance.index.names=['iteration']
for i in range(iterations):
number_of_iteration=i+1
# print(number_of_iteration)
features_train, features_test,\
labels_train,\
labels_test=util.train_test_split(
features, labels, test_size=test_split)
bs=util.Serial_Batch_Gen( features=pd.DataFrame(features_train),
labels=pd.DataFrame(labels_train),
batch_size=self.batch_size)
for i_steps in range(self.train_steps):
total_steps=i_steps+1
batch_features,batch_labels=bs.next()
self.model.train_on_batch(batch_features,batch_labels)
if total_steps in test_steps:
preds=pd.DataFrame(self.model.predict(features_test), columns=list(labels))
# print(preds)
perf=np.mean(util.eval(labels_test, preds))
performance.loc[number_of_iteration, total_steps]=perf
# resets model
self.initialize()
# print(performance)
perf_mean=pd.Series(performance.mean(axis=0))
return perf_mean
class Attention_Mapping_Model(Word_Model):
'''
Wrapper for Keras based MLP
'''
def __init__( self,
layers, #including input and output layer
activation,
dropout_hidden,
batch_size,
optimizer,
source_lexicon, # Emotion lexicon with entries in the
# source representation. Must also cover
# the entries in the test set.
verbose=0,
epochs=None,
train_steps=None,
kind='joint', #either "joint" or "separate"
):
'''
ARGS
batch_generation either 'serial' or 'radomreplace'
epochs Will be interpreted as training steps
if batch_generation is set to "randomreplace"
WATCH OUT!!!
'''
self.targets=None
self.source_lexicon=source_lexicon
self.epochs=epochs
self.train_steps=train_steps #will "round up" to make full epochs
self.batch_size=batch_size
self.verbose=verbose
self.layers=layers
self.activation=activation
self.dropout_hidden=dropout_hidden
self.optimizer=optimizer
self.kind=kind
self.kinds={'joint':self.__init_joint__, 'separate':self.__init_separate__}
assert (epochs is not None) or (train_steps is not None), 'Either epochs or train_streps must be set.'
assert not( epochs is not None and train_steps is not None ), 'You cannot specify both epochs and train_steps.'
self.initialize()
def __init_separate__(self):
print("attention separate")
input_layer = Input(shape=(self.layers[0],))
q = tf.keras.layers.Dense(128, activation=tf.nn.relu, name="linear_q")(input_layer)
k = tf.keras.layers.Dense(128, activation=tf.nn.relu, name="linear_k")(input_layer)
v = tf.keras.layers.Dense(128, activation=tf.nn.relu, name="linear_v")(input_layer)
q_k_attention = tf.keras.layers.Attention(name="dot_product_attention")([q, k])
x = tf.keras.layers.Concatenate(name="concat")([v, q_k_attention])
x = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="layer_normalization")(x)
x = tf.keras.layers.Dense(128, activation=tf.nn.relu, name="dense")(x)
top_layer=[]
for i in range(self.layers[-1]):
curr_layers=[input_layer]
for j in range(len(self.layers)-2):
curr_layers.append(Dense(self.layers[j+1])(curr_layers[-1]))
curr_layers.append(Activation(self.activation)(curr_layers[-1]))
curr_layers.append(Dropout(rate=self.dropout_hidden)(curr_layers[-1]))
#last dense layer
top_layer.append(Dense(1)(curr_layers[-1]))
out=Concatenate()(top_layer)
self.model=keras.models.Model(inputs=[input_layer], outputs=out)
self.model.compile(optimizer=self.optimizer, loss='mse', metrics=[pearson])
def __init_joint__(self):
print("attention joint")
self.model=keras.models.Sequential()
self.model.add(Dense(self.layers[1], input_dim=self.layers[0]))
i=1
while i<len(self.layers)-1:
self.model.add(Activation(self.activation))
self.model.add(Dropout(rate=self.dropout_hidden))
self.model.add(Dense(self.layers[i+1]))
i+=1
self.model.compile(optimizer=self.optimizer, loss='mse', metrics=[pearson])
def initialize(self):
self.kinds[self.kind]()
def __feature_extraction__(self, words):
return np.array([self.source_lexicon.loc[word] for word in words])
def fit(self, words, labels):
self.targets=labels.columns
features=self.__feature_extraction__(words)
if bool(self.epochs)==True:
if self.verbose >0:
print('Using epoch wise training')
self.model.fit( features, labels,
epochs=self.epochs,
batch_size=self.batch_size,
verbose=self.verbose)
elif bool(self.train_steps)==True:
if self.verbose > 0:
print('Using step-wise training.')
bs=util.Serial_Batch_Gen( features=pd.DataFrame(features),
labels=pd.DataFrame(labels),
batch_size=self.batch_size)
for i_step in range(self.train_steps):
if i_step%100==0 and self.verbose>0:
print('Now at training step: '+str(i_step))
batch_features,batch_labels=bs.next()
self.model.train_on_batch(batch_features,batch_labels)
else:
            raise ValueError('Neither epochs nor train_steps are specified!')
def predict(self,words):
features=self.__feature_extraction__(words)
preds=self.model.predict(features)
return preds
def lexicon_creation(self, words, features):
preds=self.model.predict(features)
return pd.DataFrame(preds, index=words, columns=self.targets)
def test_at_steps(self, words, labels, test_split, test_steps, iterations):
# self.targets=labels.columns
# step 1 feature extraction
assert bool(self.train_steps)==True, 'Training must be specified by the number of training steps'
features=self.__feature_extraction__(words)
labels=pd.DataFrame(labels, index=words)
performance=pd.DataFrame(index=np.arange(1,iterations+1))
performance.index.names=['iteration']
for i in range(iterations):
number_of_iteration=i+1
# print(number_of_iteration)
features_train, features_test,\
labels_train,\
labels_test=util.train_test_split(
features, labels, test_size=test_split)
bs=util.Serial_Batch_Gen( features=pd.DataFrame(features_train),
labels= | pd.DataFrame(labels_train) | pandas.DataFrame |
import os
import pandas as pd
from settings.config import DATASET_USAGE, K_FOLDS_VALUES, item_label, title_label, genre_label, algorithm_label, \
FAIRNESS_METRIC_LABEL, LAMBDA_LABEL, EVALUATION_METRIC_LABEL, EVALUATION_VALUE_LABEL, evaluation_label, \
LAMBDA_VALUE_LABEL, results_path, N_CORES
from conversions.pandas_to_models import transform_trainset
from graphics.experimental_evaluation import evaluation_linear_fairness_by_algo_over_lambda, evaluation_map_by_mc, \
evaluation_map_by_mace
from settings.language_strings import LANGUAGE_LOAD_DATA_SET, LANGUAGE_MOVIELENS_SELECTED, LANGUAGE_OMS_SELECTED, \
LANGUAGE_DATA_SET_MEMORY, LANGUAGE_PROCESSING_STEP_START, LANGUAGE_PROCESSING_STEP_STOP
from models.item import create_item_mapping
from preprocessing.load_database import movielens_load_data, oms_load_data, yahoo_load_data
from processing.step import recommender_algorithms
def save_recommender_results(evaluation_results_df, k):
for recommender in evaluation_results_df[algorithm_label].unique().tolist():
recommender_subset_df = evaluation_results_df[evaluation_results_df[algorithm_label] == recommender]
ave_dir = results_path + '/' + str(recommender)
if not os.path.exists(ave_dir):
os.makedirs(ave_dir)
rating_path = os.path.join(ave_dir, str(k) + '.csv')
recommender_subset_df.to_csv(rating_path, index=False)
def save_results(evaluation_results_df, k):
if not os.path.exists(results_path):
os.makedirs(results_path)
rating_path = os.path.join(results_path, str(k) + '.csv')
evaluation_results_df.to_csv(rating_path, index=False)
def k_fold_results_concat(evaluation_results_df):
k_results_df = pd.DataFrame()
print(evaluation_results_df)
for recommender in evaluation_results_df[algorithm_label].unique().tolist():
recommender_subset_df = evaluation_results_df[evaluation_results_df[algorithm_label] == recommender]
for distance_metric in recommender_subset_df[FAIRNESS_METRIC_LABEL].unique().tolist():
fairness_subset_df = recommender_subset_df[recommender_subset_df[FAIRNESS_METRIC_LABEL] == distance_metric]
for lambda_type in fairness_subset_df[LAMBDA_LABEL].unique().tolist():
lambda_subset_df = fairness_subset_df[fairness_subset_df[LAMBDA_LABEL] == lambda_type]
for lambda_value in lambda_subset_df[LAMBDA_VALUE_LABEL].unique().tolist():
lambda_value_subset_df = lambda_subset_df[lambda_subset_df[LAMBDA_VALUE_LABEL] == lambda_value]
for evaluation_metric in lambda_value_subset_df[EVALUATION_METRIC_LABEL].unique().tolist():
evaluation_subset_df = lambda_value_subset_df[
lambda_value_subset_df[EVALUATION_METRIC_LABEL] == evaluation_metric]
result = evaluation_subset_df[EVALUATION_VALUE_LABEL].mean()
k_results_df = pd.concat([k_results_df,
pd.DataFrame(
[[recommender,
distance_metric,
lambda_type,
lambda_value,
evaluation_metric,
result]],
columns=evaluation_label
)
])
print(k_results_df)
return k_results_df
def run_one_time(k=1):
print(LANGUAGE_LOAD_DATA_SET)
print('*' * 30)
print(str('-' * 13) + str(k) + str('-' * 13))
print('*' * 30)
trainset_df = | pd.DataFrame() | pandas.DataFrame |
from logic.helpers import *
import pandas as pd
from sklearn import svm, preprocessing
from scipy.stats import mode
from sklearn.model_selection import cross_validate
CLASS_LABEL = "class_label"
TIMESTAMP = "timestamp"
GROUP_INDEX = "group_index"
class ClassificationManager:
def __init__(self, windowSize=20):
self.calibrationData = pd.DataFrame()
self.calibrationLabels = []
self.mlData = None
self.clf = None
self.scaler = None
self.clf_stats = None
self.currentSamplingRate = None
self.windowSize = windowSize
self.currentPrediction = None
def makePrediction(self, data):
"""
        Uses the internally trained model (cf. trainClassifierModel) to predict classes for the given data. Returns 'None' if no classifier is present.
Executes a prediction for each EMG data sample (i.e. each row in data) and returns both a list of predictions and a voted (mode-based) final prediction.
Note that a prediction is only possible when the window (cf. windowsize) completely overlaps with the data. Hence, the number of predictions is len(data) - windowsize.
Accordingly, the first prediction corresponds to the element data[:][windowsize/2].
Parameters:
data: 2D-array containing recorded EMG data samples (rows) per channel (columns).
returns:
currentPrediction: voted (mode-based) prediction for the given data
prediction: list of predictions for valid window configurations; length = len(data) - windowsize
"""
if self.clf is None:
return 'None'
df = pd.DataFrame(data)
df = df.transpose()
apply_bandpass_filter(df, 2.0, self.currentSamplingRate / 2.0 - 1.0, self.currentSamplingRate)
apply_bandstop_filter(df, 49.0, 51.0, self.currentSamplingRate)
X = | pd.DataFrame() | pandas.DataFrame |
import argparse
import mplfinance as mpf
import numba as nb
import os
import pandas as pd
from pandas_datareader import data, wb
from pandas_datareader.nasdaq_trader import get_nasdaq_symbols
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.frequencies import to_offset
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from datetime import datetime, timedelta
import pytz
import sys
# %matplotlib inline
plt.rcParams['figure.figsize'] = [15, 15]
plt.style.use('ggplot')
# plt.style.use('seaborn')
from matplotlib.ticker import Formatter
class WeekdayDateFormatter(Formatter):
# https://matplotlib.org/gallery/ticks_and_spines/date_index_formatter.html
# the data is first plotted against an integer. The formatter changes the integer to the correct date.
def __init__(self, dates, fmt='%Y-%m-%d'):
self.dates = dates
self.fmt = fmt
def __call__(self, x, pos=0):
'Return the label for time x at position pos'
ind = int(round(x))
if ind >= len(self.dates) or ind < 0:
return ''
return (self.dates[ind]).strftime(self.fmt)
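# Hypothetical usage sketch (variable names assumed, not part of the original script):
#   fig, ax = plt.subplots()
#   ax.plot(range(len(ohlc)), ohlc['Close'])
#   ax.xaxis.set_major_formatter(WeekdayDateFormatter(ohlc.index))
# Plotting against integer positions and formatting ticks this way avoids weekend/holiday gaps.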
def search_stock_symbols(stock):
fn = 'symbols.csv'
if not os.path.exists(fn):
symbols = get_nasdaq_symbols()
symbols.to_csv(fn, index='Symbol')
else:
symbols = pd.read_csv(fn, index_col='Symbol')
if stock is None:
return symbols
stock = stock.upper()
hard_search = symbols[symbols['NASDAQ Symbol'] == stock]
if len(hard_search) == 1:
return 1, symbols[symbols['NASDAQ Symbol'] == stock]['Security Name'][stock]
else:
found = symbols[symbols['NASDAQ Symbol'].str.contains(stock)]
if found.empty:
return 0, None
else:
return len(found), found
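# Hedged usage sketch (ticker symbols are placeholders; actual return values
# depend on the downloaded Nasdaq symbol table):
#   n, name = search_stock_symbols('AAPL')     # exact match   -> (1, security name)
#   n, found = search_stock_symbols('AA')      # partial match -> (count, DataFrame of candidates)
#   n, none_ = search_stock_symbols('ZZZZZZ')  # no match      -> (0, None)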
def valid_time(arg):
try:
return datetime.strptime(arg, "%H:%M")
except ValueError:
msg = "Not a valid time: '{0}'.".format(arg)
raise argparse.ArgumentTypeError(msg)
def valid_date(arg):
try:
dt = datetime.strptime(arg, "%m/%d/%Y")
except ValueError:
msg = 'Not a valid date: "{0}".'.format(arg)
raise argparse.ArgumentTypeError(msg)
if dt.date() > datetime.now().date():
dt = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
msg = f'''A future date is not valid: "{arg}". Instead using "{dt.date().strftime('%m/%d/%Y')}"'''
print(msg)
return dt
def cli_parameters():
parser = argparse.ArgumentParser()
parser.add_argument('stock', metavar='N', type=str, nargs='*', help='create reports for all stocks entered')
parser.add_argument('--compare', action='store_true', default=False, help='compare the list of stocks')
parser.add_argument('--bb', action='store_true', default=False, help='show Bollinger Bands on stock chart')
parser.add_argument('--macd', action='store_true', default=False, help='show Moving Average Convergence/Divergence on separate chart')
parser.add_argument('--sto', action='store_true', default=False, help='show Stochastic on separate chart')
    parser.add_argument('--rsi', action='store_true', default=False, help='show Relative Strength Index on separate chart')
parser.add_argument('--cmf', action='store_true', default=False, help='show Chaikin Money Flow on separate chart')
parser.add_argument('--best', action='store_true', default=False, help='show BB, MACD, and RSI')
parser.add_argument('--save', action='store_true', default=False, help='Save plot to disk')
parser.add_argument('--show', action='store_true', default=False, help='Show interactive plot')
parser.add_argument('--weekly', action='store_true', default=False, help='Resample data into weekly charts')
parser.add_argument("--startdate", help="Start date - format MM/DD/YYYY",
default=datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) - timedelta(days=547), type=valid_date)
parser.add_argument("--enddate", help="End date - format MM/DD/YYYY",
default=datetime.now().replace(hour=0, minute=0, second=0, microsecond=0), type=valid_date)
parser.add_argument("--priceline", help="Insert a horizontal black line on plot at price (float)", type=float)
parser.add_argument("--dateline", help="Insert a vertical black line on plot - format MM/DD/YYYY", type=valid_date)
parser.add_argument('--daydelta', type=int, help='Days between start date and end date.')
parser.add_argument('--zoom', type=int, default=0, help='Zoom into the plot for the last number of days.')
args = parser.parse_args()
args.stock = sorted([i.upper() for i in args.stock])
if not args.save:
args.show = True
# if len(args.stock) > 1:
# args.save = True
# args.show = False
if args.startdate > args.enddate:
parser.error(f'Start date "{args.startdate}" can not be greater than End Date "{args.enddate}"')
if args.daydelta:
args.startdate = args.enddate - timedelta(days=args.daydelta)
if args.best:
args.bb = True
args.macd = True
args.rsi = True
# log_message(parser.parse_args().__str__())
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
return args
def remove_holidays_and_weekends(start, end, move_date_forward=True):
holidays = USFederalHolidayCalendar().holidays(start=start - timedelta(days=14), end=end + timedelta(days=14)).to_pydatetime()
if move_date_forward:
dt = start
else:
dt = end
while dt in holidays or dt.weekday() >= 5:
if move_date_forward:
dt += timedelta(days=1)
else:
dt -= timedelta(days=1)
return dt
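# Hedged example (dates are illustrative): 2020-07-04 fell on a Saturday, so
#   remove_holidays_and_weekends(datetime(2020, 7, 4), datetime(2020, 7, 10))
# should walk forward over the weekend and return Monday 2020-07-06, while
# move_date_forward=False would instead walk the end date backwards to the
# last regular trading weekday.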
class StockAnalysis:
def __init__(self, stock: str, start: datetime, end: datetime,
sma: list=[200, 100, 50, 5], close_col: str="Close", plot_type: str="line",
weekly: bool=False, priceline: float=None, dateline: datetime=None):
"""
Gather data for the stock between the given dates.
SMA: list of simple moving average days to plot
CLOSE_COL: column name to use for close data. Usually Close or Adj Close
PLOT_TYPE: how to plot the data. line or candlestick
"""
self.stock = stock
self.stock_name = None
self.stock_count = self.confirm_stock_symbol()
        if self.stock_count > 1:
            sys.exit(1)
self.start = start
self.end = end
self.close_col = close_col
self.sma = sma
self.ema = sma + [20]
self.plot_type = plot_type
self.weekly = weekly
self.priceline = priceline
self.dateline = dateline
self.df = self.get_data_frame(self.stock, self.start,
self.end, weekly=self.weekly)
self.set_day_color()
self.simple_moving_average()
self.exponential_moving_average()
def confirm_stock_symbol(self):
count, name = search_stock_symbols(self.stock)
if count == 0:
print(f'Symbol {self.stock} is not traded on the Nasdaq exchange')
elif count == 1:
self.stock_name = name
else:
print(f'Multiple stock symbols found for {self.stock}')
return count
def store_stock(self, stock: str, start: datetime, end: datetime, filename: str):
print(f"Pulling stock data for {stock} from {start} to {end}")
try:
# df = pd.DataFrame(data.DataReader(stock,'yahoo',start,end))
df = pd.DataFrame(data.DataReader(stock,'stooq',start,end))
except KeyError:
print("Stock out of range")
df = pd.DataFrame()
df = df.reset_index()
if os.path.exists(filename):
df_existing = pd.read_csv(filename, parse_dates=['Date'])
            df = pd.concat([df_existing, df]).reset_index(drop=True)
df = df.sort_values('Date')
if df.empty and self.stock_name == 0:
print(f"No data found for {self.stock}")
sys.exit(1)
# sometimes data is returned with two rows for the same date. The last row is the row to keep.
df = df[~df.Date.duplicated(keep='last')]
df_store = df.copy()
market_close = datetime.now().replace(hour=15, minute=5, second=0, microsecond=0)
# market_close = datetime.now().replace(hour=23, minute=59, second=59, microsecond=999999)
if (df.Date.sort_values().iloc[-1].date() == datetime.today().date()) and (datetime.now() < market_close):
# The market has not closed today so do not store today's data in csv.
df_store.drop(df_store.tail(1).index,inplace=True)
df_store.to_csv(filename, index=False)
return df
def get_data_frame(self, stock: str, start: datetime, end: datetime, get_most_recent_data: bool = True, weekly: bool = False):
"""
:stock: text stock ticker
:start: date to start stock data in format "MM-DD-YYYY" or python datetime
:end: date to end stock data in format "MM-DD-YYYY" or python datetime
# :get_most_recent_data: update stored data to have recent close data
"""
if '-usd' in stock.lower():
start_dt = start
end_dt = end
else:
            # for regular equities, snap start/end to the nearest trading days (currencies like ETH-USD trade every day and are handled above)
start_dt = remove_holidays_and_weekends(start, end, move_date_forward=True)
end_dt = remove_holidays_and_weekends(start, end, move_date_forward=False)
filename = f"data/{stock}.csv"
if os.path.exists(filename):
df = pd.read_csv(filename, parse_dates=['Date'])
if start_dt >= df.Date.min() and end_dt <= df.Date.max():
print(f"Using Stored Stock Data for {stock} from {start_dt.date()} to {end_dt.date()}")
if end_dt > df.Date.max():
interim_dt = remove_holidays_and_weekends(df.Date.max() + pd.Timedelta("1d"), end_dt, move_date_forward=True)
if interim_dt <= end_dt and interim_dt > df.Date.max():
df = self.store_stock(stock, interim_dt, end_dt, filename)
if start_dt < df.Date.min():
interim_dt = remove_holidays_and_weekends(start_dt, df.Date.min() - pd.Timedelta("1d"), move_date_forward=False)
if interim_dt >= start_dt and interim_dt < df.Date.min():
df = self.store_stock(stock, start_dt, interim_dt, filename)
else:
df = self.store_stock(stock, start_dt, end_dt, filename)
df.Date = pd.to_datetime(df.Date)
df = df[(df.Date >= start_dt) & (df.Date <= end_dt)].copy()
df.set_index('Date', inplace=True)
if weekly:
logic = {
'High' : 'max',
'Low' : 'min',
'Open' : 'first',
'Close' : 'last',
'Volume': 'sum'}
# df = df.resample('W', loffset="-3D").apply(logic)
df = df.resample("W").apply(logic)
df.index = df.index + to_offset("-3D")
df['TOpen'] = df.Open.shift(-1) # tomorrow's open
# df = df.asfreq('D')
return df
def simple_moving_average(self):
for sma in self.sma:
self.df[f'{sma} mavg'] = self.df[self.close_col].rolling(window=sma, min_periods=sma).mean()
def exponential_moving_average(self):
for ema in self.ema:
self.df[f'{ema} ema'] = self.df[self.close_col].ewm(span=ema, adjust=False).mean()
def set_day_color(self):
self.df['day_color'] = 'red'
self.df.loc[self.df[self.close_col] >= self.df.Open, 'day_color'] = 'green'
self.df.loc[self.df[self.close_col] == self.df.Open, 'day_color'] = 'gray'
def MACD(self, big_ema: int = 26, small_ema: int = 12, signal_span: int = 9):
"""
TREND INDICATOR
Moving Average Convergence/Divergence
"""
# period = 200
# sma = self.df[f'{period} mavg'][:period]
# rest_of_close = self.df[self.close_col][period:]
# series_for_ewm = pd.concat([sma, rest_of_close])
series_for_ewm = self.df[self.close_col]
self.df[f'{big_ema} ema'] = series_for_ewm.ewm(span=big_ema, adjust=False).mean()
self.df[f'{small_ema} ema'] = series_for_ewm.ewm(span=small_ema, adjust=False).mean()
self.df['MACD'] = (self.df[f'{small_ema} ema'] - self.df[f'{big_ema} ema'])
self.df['Signal'] = self.df['MACD'].ewm(span=signal_span, adjust=False).mean()
self.df['Crossover'] = self.df['MACD'] - self.df['Signal']
self.df['YCrossover'] = self.df.Crossover.shift() #yesterday crossover
self.df['MACD_indicator'] = 0
self.df.loc[(self.df.Crossover < 0) & (self.df.YCrossover > 0), 'MACD_indicator'] = 1 # Sell, cross line going negative
self.df.loc[(self.df.Crossover > 0) & (self.df.YCrossover < 0), 'MACD_indicator'] = 2 # Buy, cross line going positive
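        # In short: MACD = EMA(12) - EMA(26), Signal = EMA(9) of MACD, and
        # 'Crossover' is the MACD histogram (MACD - Signal).  Hedged usage
        # sketch (ticker and dates are placeholders):
        #   sa = StockAnalysis('MSFT', datetime(2020, 1, 2), datetime(2021, 1, 4))
        #   sa.MACD()   # adds MACD / Signal / Crossover / MACD_indicator columns
        #   sa.df[['MACD', 'Signal', 'MACD_indicator']].tail()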
def Bollinger_Bands(self, n: int = 20, ndev: float = 2.0):
"""
VOLATILITY INDICATOR
Bollinger Bands
"""
self.bb_mavg = f'{n} mavg'
self.df[self.bb_mavg] = self.df[self.close_col].rolling(window=n).mean()
# set .std(ddof=0) for population standard deviation instead of sample deviation
self.df['BBstd'] = self.df[self.close_col].rolling(window=n).std(ddof=0)
self.df['BBUpper'] = self.df[self.bb_mavg] + self.df.BBstd * ndev
self.df['BBLower'] = self.df[self.bb_mavg] - self.df.BBstd * ndev
self.df['BB_indicator'] = 0
self.df.loc[(self.df[self.close_col] < self.df['BBLower']), 'BB_indicator'] = 1 # close was below band
self.df.loc[(self.df[self.close_col] > self.df['BBUpper']), 'BB_indicator'] = 2 # close was above band
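        # Worked example (illustrative numbers): with a 20-day mean of 100 and
        # a 20-day standard deviation of 2, the bands are 100 +/- 2 * 2, i.e.
        # BBUpper = 104 and BBLower = 96; a close above 104 sets BB_indicator
        # to 2 and a close below 96 sets it to 1.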
def Stochastic(self, n: int=14, d_n: int=3, over: float=80, under: float=20):
"""
MOMENTUM INDICATOR
Stochastic Oscillator Indicator
STOL = Stochastic Low
STOH = Stochastic High
STOK = Stochastic %K
STOD = Stochastic %D
n = number of days to consider
d_n = rolling average for D line
over = above this percent value is over bought territory
under = below this percent value is over sold territory
"""
self.sto_over = over
self.sto_under = under
self.df['STOL'] = self.df['Low'].rolling(window=n).min()
self.df['STOH'] = self.df['High'].rolling(window=n).max()
self.df['STOK'] = 100 * ( (self.df[self.close_col] - self.df.STOL) / (self.df.STOH - self.df.STOL) ) # fast
self.df['STOD'] = self.df['STOK'].rolling(window=d_n).mean() # slow
self.df['STO_indicator'] = 0
self.df.loc[(self.df.STOK < self.df.STOD) &
(self.df.STOK.shift(1) > self.df.STOD.shift(1)) &
(self.df.STOD > over), 'STO_indicator'] = 1 # Sell, fast crosses below slow in the high range
self.df.loc[(self.df.STOK > self.df.STOD) &
(self.df.STOK.shift(1) < self.df.STOD.shift(1)) &
(self.df.STOD < under), 'STO_indicator'] = 2 # Buy, fast crosses up over slow in the low range
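        # Worked example (illustrative numbers): with a 14-day low of 90, a
        # 14-day high of 110 and a close of 105,
        # %K = 100 * (105 - 90) / (110 - 90) = 75, i.e. the close sits in the
        # upper quarter of the recent range; %D is the 3-day mean of %K.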
# @nb.jit(fastmath=True, nopython=True)
def RSI(self, n: int = 14, over: float=70, under: float=30):
"""
MOMENTUM INDICATOR
Relative Strength Index
over = above this percent line is over bought territory
under = below this percent line is over sold territory
"""
self.rsi_over = over
self.rsi_under = under
self.df['RSIchange'] = self.df[self.close_col].diff(1)
self.df['RSIgain'] = 0
self.df['RSIloss'] = 0
self.df.loc[self.df.RSIchange > 0, 'RSIgain'] = self.df.RSIchange
self.df.loc[self.df.RSIchange < 0, 'RSIloss'] = -self.df.RSIchange
self.df['AvgGain'] = self.df.RSIgain.ewm(com=n - 1, min_periods=n, adjust=False).mean()
self.df['AvgLoss'] = self.df.RSIloss.ewm(com=n - 1, min_periods=n, adjust=False).mean()
self.df['RSI'] = 100 - (100 / (1 + abs(self.df.AvgGain / self.df.AvgLoss)))
self.df['RSI_indicator'] = 0
self.df.loc[self.df.RSI > over, 'RSI_indicator'] = 1 # Sell, in overbought range
self.df.loc[self.df.RSI < under, 'RSI_indicator'] = 2 # Buy, in oversold range
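        # Worked example (illustrative numbers): with an average gain of 1.0
        # and an average loss of 0.5, RS = AvgGain / AvgLoss = 2 and
        # RSI = 100 - 100 / (1 + 2) ~= 66.7, still below the default
        # overbought threshold of 70.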
def CMF(self, n: int = 20, buffer: float=0.05):
"""
VOLUME INDICATOR
Chaikin Money Flow Indicator
        Money Flow Multiplier = ((Close value - Low value) - (High value - Close value)) / (High value - Low value)
Money Flow Volume = Money Flow Multiplier x Volume for the Period
CMF = n-day Average of the Daily Money Flow Volume / n-day Average of the Volume
buffer = above this buffer is bullish buy territory
below this negative buffer is bearish sell territory
"""
self.df["MFV"] = ((self.df[self.close_col] - self.df.Low) -
(self.df.High - self.df[self.close_col])) / (self.df.High - self.df.Low) * self.df.Volume
self.df["CMF"] = self.df.MFV.rolling(window=n, min_periods=n).mean() / self.df.Volume.rolling(window=n, min_periods=n).mean()
self.df["CMF_indicator"] = 0
self.df.loc[self.df.CMF < -buffer, "CMF_indicator"] = 1 # Sell, crossed into negative territory
self.df.loc[self.df.CMF > buffer, "CMF_indicator"] = 2 # Buy, crossed into positive territory
self.cmf_buffer = buffer
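        # Worked example (illustrative numbers): for a bar with High = 10,
        # Low = 8 and Close = 9.5, the Money Flow Multiplier is
        # ((9.5 - 8) - (10 - 9.5)) / (10 - 8) = 0.5, so half of that bar's
        # volume counts as buying pressure in the 20-day CMF average.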
def HA(self, ema: int=12):
"""
Heikin-Ashi Smoothed Buy Sell
"""
self.df["HA_Close"] = (self.df[self.close_col] +
self.df.Open + self.df.Low + self.df.High) / 4
self.df["HA_Open"] = (self.df[self.close_col].shift() +
self.df.Open.shift()) / 2
self.df.iloc[0, self.df.columns.get_loc("HA_Open")
] = (self.df.iloc[0]['Open'] +
self.df.iloc[0][self.close_col]
) / 2 # set value for the first HA_Open row
self.df["HA_Low"] = self.df[["Low", "HA_Open", "HA_Close"]].min(axis=1)
self.df["HA_High"] = self.df[["High", "HA_Open", "HA_Close"]].max(axis=1)
self.df['HA_day_color'] = 'red'
self.df.loc[self.df["HA_Close"] >= self.df["HA_Open"], 'HA_day_color'] = 'green'
self.df.loc[self.df["HA_Close"] == self.df["HA_Open"], 'HA_day_color'] = 'gray'
self.df[f'HA {ema} ema'] = self.df["HA_Close"].ewm(span=ema, adjust=False).mean()
def VolumeByPrice(self, start: datetime, end: datetime):
pass
def plot_data_mpf(self):
mc = mpf.make_marketcolors(up='g', down='r', edge='inherit', wick='inherit', volume='inherit', ohlc='inherit', alpha=0.5) #style="charles"
s = mpf.make_mpf_style(base_mpl_style='seaborn', marketcolors=mc)
mpf.plot(self.df, type='candlestick', mav=(1, 200), volume=True, style=s, figratio=(16,8), figscale=1, title=self.stock)
def candlestick_plot(self, open: str="Open",
close: str=None, low: str="Low", high: str="High",
day_color: str="day_color", ax=None,
positive_color: str='g', negative_color: str='r'):
if close is None:
close = self.close_col
width_bar = 0.8
width_stick = 0.15
self.df['bar_top'] = self.df[open]
self.df.loc[self.df[close] >= self.df[open], 'bar_top'] = self.df[close]
self.df['bar_bot'] = self.df[open]
self.df.loc[self.df[close] < self.df[open], 'bar_bot'] = self.df[close]
ax.bar(x=self.df.index,
height=self.df.bar_top - self.df.bar_bot,
width=width_bar,
bottom=self.df.bar_bot,
color=self.df[day_color],
edgecolor=self.df[day_color],
alpha=0.5)
ax.bar(x=self.df.index, height=self.df[high] - self.df.bar_top, width=width_stick, bottom=self.df.bar_top, color=self.df[day_color], edgecolor=self.df[day_color], alpha=0.5)
ax.bar(x=self.df.index, height=self.df[low] - self.df.bar_bot, width=width_stick, bottom=self.df.bar_bot, color=self.df[day_color], edgecolor=self.df[day_color], alpha=0.5)
return ax
def plot_data(self, show_plot: bool = False, save_plot: bool = True, zoom: int = 0):
# make Date a column in the DataFrame
self.df.reset_index(inplace=True)
# number of charts to create is determined by finding columns in the dataframe
extra_charts = len(set(["MACD", "STOK", "CMF", "RSI"]).intersection(set(self.df.columns)))
fig, axs = plt.subplots(2 + extra_charts, 1, sharex=True, gridspec_kw={'hspace': 0, 'height_ratios': [7, 1] + [2] * extra_charts})
axs_count = 0
text_offset = 1
text_lower = 0.05 * self.df[self.close_col].iloc[-1]
self.df.plot(y=self.close_col, ax=axs[0])
# axs[0] = self.candlestick_plot(ax=axs[0],
# open="HA_Open",
# close="HA_Close",
# low="HA_Low",
# high="HA_High",
# day_color="HA_day_color")
# self.df.plot(y=f'HA 12 ema', ax=axs[0], linestyle='-')
if self.plot_type == 'candlestick':
axs[0] = self.candlestick_plot(ax=axs[0])
else:
self.df.plot(y=self.close_col, ax=axs[0])
for sma in self.sma:
if self.df[f'{sma} mavg'].count() > 0:
self.df.plot(y=f'{sma} mavg', ax=axs[0], linestyle='--')
for ema in self.ema:
if self.df[f'{ema} ema'].count() > 0:
self.df.plot(y=f'{ema} ema', ax=axs[0], linestyle=':')
if self.priceline:
axs[0].axhline(y=self.priceline, color='k', linestyle='-', alpha=0.5)
if self.dateline and self.dateline >= self.df.Date.min() and self.dateline <= self.df.Date.max():
date_index = self.df.index[self.df.Date >= self.dateline].tolist()[0]
axs[0].axvline(x=date_index, color='k', linestyle='-', alpha=0.5)
annotate_date_text = axs[0].annotate(self.dateline.strftime("%b/%d/%Y"),
xy=(date_index - 2, self.df.Close.min() * 1.05),
xytext=(date_index -2, self.df.Close.min() * 1.05),
bbox=dict(boxstyle="round", fc="0.7")
)
annotate_date_text.set_alpha(.5)
# Plot the volume chart below the price chart
axs_count += 1
axs[axs_count].bar(x=self.df.index, height=self.df.Volume, width=1, color=self.df.day_color)
# self.df.plot(y='Volume', kind='bar', ax=axs[axs_count])
# axs0 = axs[0].twinx()
if "BBstd" in self.df.columns:
axs[0].fill_between(self.df.index, self.df.BBUpper, self.df.BBLower, color='gray', alpha=0.3)
self.df.plot(y=self.bb_mavg, ax=axs[0], linestyle="--")
if "MACD" in self.df.columns:
self.df.index.name = 'orig_index'
self.df.reset_index(inplace=True)
bull_bear_period_transparency = 0.2
bullish_momentum_periods = self.df[(self.df.MACD > 0) & (self.df.MACD > self.df.Signal)]['orig_index']
for row, dfg in bullish_momentum_periods.groupby((bullish_momentum_periods.diff() != 1).cumsum()):
axs[0].axvspan(dfg.index.min(), dfg.index.max(), color='green', alpha=bull_bear_period_transparency)
# bearish_momentum_periods = self.df[(self.df.MACD < 0) & (self.df.MACD < self.df.Signal)]['Date']
# for row, dfg in bearish_momentum_periods.groupby((bearish_momentum_periods.diff() != pd.Timedelta('1d')).cumsum()):
# axs[0].axvspan(dfg.index.min(), dfg.index.max(), color='red', alpha=bull_bear_period_transparency)
bearish_momentum_periods = self.df[(self.df.MACD < 0) & (self.df.MACD < self.df.Signal)]['orig_index']
for row, dfg in bearish_momentum_periods.groupby((bearish_momentum_periods.diff() != 1).cumsum()):
axs[0].axvspan(dfg.index.min(), dfg.index.max(), color='red', alpha=bull_bear_period_transparency)
axs_count += 1
# MACD buy sell indicators
for index, row in self.df[self.df.MACD_indicator == 2].iterrows():
axs[axs_count].text(index, row.MACD, 'B', color='g')
for index, row in self.df[self.df.MACD_indicator == 1].iterrows():
axs[axs_count].text(index, row.MACD, 'S', color='r')
# MACD bars
self.df["MACD Crossover diff"] = self.df.Crossover.diff(1)
self.df["MACD bar color"] = 'r'
self.df.loc[self.df["MACD Crossover diff"] > 0, "MACD bar color"] = 'g'
axs[axs_count].bar(self.df.index, self.df.Crossover, width=1, color=self.df["MACD bar color"])
axs[axs_count].axhline(y=0, color='gray', linestyle='-.')
self.df.plot(y=['MACD', 'Signal', 'Crossover'], ax=axs[axs_count])
axs[axs_count].legend(loc='center left')
if 'STOK' in self.df.columns:
axs_count += 1
axs[axs_count].axhline(y=self.sto_over, color='k', linestyle=':')
axs[axs_count].axhline(y=self.sto_under, color='k', linestyle=':')
axs[axs_count].fill_between(self.df.index, self.sto_over,
self.df.STOK, where=self.df.STOK > self.sto_over,
interpolate=True, color='red', alpha=0.5)
axs[axs_count].fill_between(self.df.index, self.sto_under,
self.df.STOK, where=self.df.STOK < self.sto_under,
interpolate=True, color='green', alpha=0.5)
self.df.plot(y=['STOK', 'STOD'], ax=axs[axs_count])
axs[axs_count].legend(loc='center left')
for index, row in self.df[self.df.STO_indicator == 2].iterrows():
axs[axs_count].text(index, row.STOK, 'B', color='g')
for index, row in self.df[self.df.STO_indicator == 1].iterrows():
axs[axs_count].text(index, row.STOK, 'S', color='r')
if 'CMF' in self.df.columns:
axs_count += 1
axs[axs_count].axhline(y=0, color='gray', linestyle='-.')
axs[axs_count].fill_between(self.df.index, self.cmf_buffer,
self.df.CMF, where=self.df.CMF > self.cmf_buffer,
interpolate=True, color='green', alpha=0.5)
axs[axs_count].fill_between(self.df.index, -self.cmf_buffer,
self.df.CMF, where=self.df.CMF < -self.cmf_buffer,
interpolate=True, color='red', alpha=0.5)
self.df.plot(y='CMF', ax=axs[axs_count])
axs[axs_count].legend(loc='center left')
if 'RSI' in self.df.columns:
axs_count += 1
axs[axs_count].axhline(y=80, color='k', linestyle=':', alpha=0.5)
axs[axs_count].axhline(y=self.rsi_over, color='k', linestyle=':')
axs[axs_count].axhline(y=50, color='gray', linestyle='-.')
axs[axs_count].axhline(y=self.rsi_under, color='k', linestyle=':')
axs[axs_count].axhline(y=20, color='k', linestyle=':', alpha=0.5)
axs[axs_count].fill_between(self.df.index, self.rsi_over,
self.df.RSI, where=self.df.RSI > self.rsi_over,
interpolate=True, color='red', alpha=0.7)
axs[axs_count].fill_between(self.df.index, self.rsi_under,
self.df.RSI, where=self.df.RSI < self.rsi_under,
interpolate=True, color='green', alpha=0.7)
self.df.plot(y='RSI', ax=axs[axs_count])
axs[axs_count].legend(loc='center left')
formatter = WeekdayDateFormatter(self.df.Date)
for ax in axs:
# turn on tick marks for the right side of the graph
ax.tick_params(labelright=True, right=True)
ax.yaxis.set_minor_locator(AutoMinorLocator(4))
ax.yaxis.set_tick_params(which='minor', right='on')
# Turn on the minor TICKS, which are required for the minor GRID
ax.minorticks_on()
# Customize the major grid
ax.grid(which='major', linestyle='-', linewidth='1')
# Customize the minor grid
ax.grid(which='minor', linestyle=':')
ax.xaxis.set_major_formatter(formatter)
fig.autofmt_xdate()
plt.xticks(rotation=5)
title = f'Stock {self.stock}'
if self.stock_name is not None:
title += f' - {self.stock_name}'
fig.suptitle(title)
fig.tight_layout()
figManager = plt.get_current_fig_manager()
figManager.window.state('zoomed') # maximize the window
# zoom to the last number of days indicated by zoom integer parameter
if zoom > 0 and self.df.index[-1] > zoom:
start_idx = zoom
plt.xlim(self.df.index[-start_idx], self.df.index[-1]+2)
if save_plot:
min_date = self.df.Date.min().strftime("%Y%m%d")
max_date = self.df.Date.max().strftime("%Y%m%d")
plt.savefig(f'img/{self.stock} - {min_date} - {max_date}.png')
if show_plot:
plt.show()
plt.close()
def make_figure(self):
self.fig, self.axs = plt.subplots(2, 1, sharex=True, gridspec_kw={'hspace': 0, 'height_ratios': [5, 2]})
def plot(self, ax):
self.df.reset_index(inplace=True)
def compare_stocks(args):
"""
    Compare all stocks in the list.
    Each stock shows the percent increase or decrease by day compared to the
    first closing day of the chosen range.
"""
fig, axs = plt.subplots(2, 1, sharex=True, gridspec_kw={'hspace': 0, 'height_ratios': [5, 2]})
objs = []
    df = pd.DataFrame()
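    # Hedged sketch of the comparison described in the docstring: each stock
    # would be rebased to its first close so the curves are directly
    # comparable, e.g. (variable names are assumptions)
    #   rebased = closes / closes.iloc[0] * 100 - 100   # % change since first close
    #   rebased.plot(ax=axs[0], label=stock)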
#!/usr/bin/env runaiida
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import shutil
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from aiida.orm import load_node
from aiida_yambo.utils.common_helpers import *
from aiida_yambo.parsers.utils import *
from aiida.orm.nodes.process.workflow.workchain import WorkChainNode
def take_fermi(calc_node_pk): # calc_node_pk = node_conv_wfl.outputs.last_calculation
node = load_node(calc_node_pk)
path_folder = node.outputs.retrieved._repository._repo_folder.abspath+'/path/'
for i in os.listdir(path_folder):
if 'r-aiida.out' in i:
file = open(path_folder+i,'r')
for line in file:
if '[X]Fermi Level' in line:
print('The Fermi level is {}'.format(line.split()[3]))
ef = float(line.split()[3])
if '[X] Fermi Level' in line:
print('The Fermi level is {}'.format(line.split()[4]))
ef = float(line.split()[4])
return ef
def collect_all_params(story, param_list=['BndsRnXp','GbndRnge','NGsBlkXp']):
if isinstance(story,WorkChainNode):
y = story.outputs.story.get_dict()
df= pd.DataFrame(y)
elif isinstance(story,int):
x = load_node(story)
y = x.outputs.story.get_dict()
df= pd.DataFrame(y)
elif isinstance(story,dict):
df= pd.DataFrame(story)
elif 'DataFrame' in str(type(story)):
df=story
else:
raise TypeError('You have to provide: node, node_pk, output_dict or dataframe')
list_for_df=[]
for calc in df['calc_pk']:
node = load_node(int(calc))
node_pw = find_pw_parent(node, calc_type=['nscf','scf'])
mesh = node_pw.inputs.kpoints.get_kpoints_mesh()[0]
distance_mesh = get_distance_from_kmesh(node_pw)
list_for_df.append([node.inputs.parameters.get_dict()[j] for j in param_list]+\
[mesh]+[distance_mesh]+df[df['calc_pk']==calc]['result_eV'].values.tolist()\
+[df[df['calc_pk']==calc]['useful'].values])
df_c=pd.DataFrame(list_for_df,columns=param_list+['mesh','distance_mesh']+['result_eV','useful'])
return df_c
def collect_2D_results(story=None, last_c=None, ef = 0): #returns array (val_1,val_2....,result_eV_1,...) and pandas DF to be further analyzed
if isinstance(story,WorkChainNode):
y = story.outputs.story.get_dict()
story=pd.DataFrame(y)
elif isinstance(story,int):
x = load_node(story)
y = x.outputs.story.get_dict()
story=pd.DataFrame(y)
elif isinstance(story,dict):
        story = pd.DataFrame(story)
#
# Data for analyzing causality.
# By <NAME>
#
# Classes:
# ccm
# embed
#
# Paper:
# Detecting Causality in Complex Ecosystems
# Ge<NAME> et al. 2012
#
# Thanks to <NAME> and <NAME>
#
# Notes:
# Originally I thought this could be made way faster by only calculating the
# distances once and then chopping them to a specific library length. It turns out
# that calculating the distances is cheaper than filtering the indices.
#
import numpy as np
from sklearn import neighbors
from sklearn import metrics
import skccm.utilities as ut
import pandas as pd
import time
class CCM:
"""
Convergent cross mapping for two embedded time series
"""
def __init__(self, weights='exp', score_metric='corrcoef', verbose=False):
"""
Parameters
----------
weights : weighting scheme for predictions
- exp : exponential weighting
score : how to score the predictions
-'score'
-'corrcoef'
verbose : prints out calculation status
"""
self.weights = weights
self.score_metric = score_metric
self.verbose = verbose
def fit(self,X1,X2):
"""
        Fit the training data for ccm. Creates separate near neighbor regressors
for X1 and X2 independently.
X1 : embedded time series of shape (num_samps,embed_dim)
X2 : embedded time series of shape (num_samps,embed_dim)
near_neighs : string
            - 'surround' : this is what the paper uses
- 'all' : calculate the distance to all near neighbors
"""
# Save X1_train and X2_train for prediction later. Confusing,
# but we need to make predictions about our testing set using these.
self.X1 = X1
self.X2 = X2
        # to surround a point, there must be ndim + 1 points
# we add two here because the closest neighbor is itself. so that is
# going to be dropped.
near_neighs = X1.shape[1] + 2
self.knn1 = neighbors.KNeighborsRegressor(near_neighs)
self.knn2 = neighbors.KNeighborsRegressor(near_neighs)
def predict_no_drop(self,lib_lengths):
"""
        Make a prediction for each library length, using the embeddings passed to fit.
        Parameters
        ----------
        lib_lengths : list of library lengths to test
"""
X1_pred = []
X2_pred = []
for liblen in lib_lengths:
x1_p = np.empty(self.X1.shape)
x2_p = np.empty(self.X2.shape)
#keep only the indices that are less than library length
self.knn1.fit(self.X1[:liblen], self.X1[:liblen])
self.knn2.fit(self.X2[:liblen], self.X2[:liblen])
dist1,ind1 = self.knn1.kneighbors(self.X1)
dist2,ind2 = self.knn2.kneighbors(self.X2)
#drop indices and distances to themselves
dist1 = dist1[:,1:]
dist2 = dist2[:,1:]
ind1 = ind1[:,1:]
ind2 = ind2[:,1:]
for j in range(self.X1.shape[1]):
W1 = ut.exp_weight(dist1)
W2 = ut.exp_weight(dist2)
#flip the weights and indices
x1_p[:, j] = np.sum(self.X1[ind2, j] * W2, axis=1)
x2_p[:, j] = np.sum(self.X2[ind1, j] * W1, axis=1)
X1_pred.append(x1_p)
X2_pred.append(x2_p)
self.X1_pred = X1_pred
self.X2_pred = X2_pred
return X1_pred, X2_pred
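        # Note (hedged): ut.exp_weight is expected to implement the usual CCM
        # weighting from Sugihara et al. (2012), u_i = exp(-d_i / d_1) over the
        # nearest-neighbor distances with w_i = u_i / sum(u), so each
        # cross-mapped estimate above is a weighted average of library points
        # selected through the *other* series' neighbor indices.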
def predict_drop_in_list(self,lib_lengths,emb_ind1,emb_ind2):
"""
        Make a prediction, but the same indices can't be matched with each other.
Parameters
----------
lib_lengths : library lengths to Test
e_ind1 : indices of the first embed time series.
e_ind2 : indices of the second embed time series.
"""
X1_pred = []
X2_pred = []
        # need to reset the class to use all neighbors so that the appropriate
# neighbors can be dropped for each class
self.knn1 = neighbors.KNeighborsRegressor(len(self.X1))
self.knn2 = neighbors.KNeighborsRegressor(len(self.X2))
self.knn1.fit(self.X1, self.X1)
self.knn2.fit(self.X2, self.X2)
dist1,ind1 = self.knn1.kneighbors(self.X1)
dist2,ind2 = self.knn2.kneighbors(self.X2)
#find the conflicting indices
conf1 = ut.conflicting_indices(emb_ind1)
conf2 = ut.conflicting_indices(emb_ind2)
#throw out the indices that are in the embedding
dist1, ind1 = ut.throw_out_nn_indices(dist1,ind1,conf1)
dist2, ind2 = ut.throw_out_nn_indices(dist2,ind2,conf2)
n_sorround = self.X1.shape[1] + 1
        # flipping allows for a faster implementation as we can feed
# ut.in_libary_len smaller and smaller arrays
for liblen in lib_lengths:
#keep only the indices that are less than library length
#t0 = time.time()
i_1, d_1 = ut.in_library_len_keep(ind1, dist1, liblen,n_sorround)
i_2, d_2 = ut.in_library_len_keep(ind2, dist2, liblen,n_sorround)
#t1 = time.time()
#t0 = time.time()
W1 = ut.exp_weight(d_1)
W2 = ut.exp_weight(d_2)
x1_p = np.empty(self.X1.shape)
x2_p = np.empty(self.X2.shape)
for j in range(self.X1.shape[1]):
#flip the weights and indices
x1_p[:, j] = np.sum(self.X1[i_2, j] * W2, axis=1)
x2_p[:, j] = np.sum(self.X2[i_1, j] * W1, axis=1)
#t1 = time.time()
#print('second_loop:',np.around(t1-t0,4))
X1_pred.append(x1_p)
X2_pred.append(x2_p)
self.X1_pred = X1_pred
self.X2_pred = X2_pred
if self.verbose: print("predictions made")
return X1_pred, X2_pred
def score(self,how='corrcoef'):
"""
        Evaluate the predictions. Calculates the skill down each column
and averages them together to get the total skill.
how : how to score the predictions
-'score'
-'corrcoef'
"""
num_preds = self.X1.shape[1]
score_1 = []
score_2 = []
for x1_p, x2_p in zip(self.X1_pred, self.X2_pred):
sc1 = np.empty(num_preds)
sc2 = np.empty(num_preds)
for ii in range(num_preds):
p1 = x1_p[:,ii]
p2 = x2_p[:,ii]
if self.score_metric == 'score':
sc1[ii] = ut.score(p1,self.X1[:,ii])
sc2[ii] = ut.score(p2,self.X2[:,ii])
if self.score_metric == 'corrcoef':
sc1[ii] = ut.corrcoef(p1,self.X1[:,ii])
sc2[ii] = ut.corrcoef(p2,self.X2[:,ii])
score_1.append( np.mean(sc1) )
score_2.append( np.mean(sc2) )
return score_1, score_2
class Embed:
def __init__(self,X):
"""
Parameters
----------
X : series or dataframe,
"""
        if isinstance(X, pd.DataFrame):
self.df = X
else:
self.X = X
def df_mutual_information(self,max_lag):
"""
        Calculates the mutual information for each column of the dataframe.
Ensure that the time series is continuous in time and sampled regularly.
You can resample it hourly, daily, minutely etc. if needed.
Parameters
----------
max_lag : int
maximum amount to shift the time series
Returns
-------
mi : dataframe, shape(max_lag,num_cols)
columns are the columns of the original dataframe with rows being
the mutual information
"""
cols = self.df.columns
mi = np.empty((max_lag, len(cols)))
for i,col in enumerate(cols):
self.X = self.df[col].values
mi[:,i] = self.mutual_information(max_lag)
        mi = pd.DataFrame(mi, columns=cols)
        return mi
import datetime as dt
import os
import pickle
from typing import Dict, List
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import activations
from dl_portfolio.logger import LOGGER
from dl_portfolio.data import get_features
from dl_portfolio.pca_ae import build_model
from dl_portfolio.regularizers import WeightsOrthogonality
from dl_portfolio.regressors.nonnegative_linear.ridge import NonnegativeRidge
from dl_portfolio.regressors.nonnegative_linear.base import NonnegativeLinear
from dl_portfolio.constant import BASE_FACTOR_ORDER_DATASET2, BASE_FACTOR_ORDER_DATASET1
from sklearn.linear_model import LinearRegression, Lasso
LOG_BASE_DIR = './dl_portfolio/log'
def build_linear_model(ae_config, reg_type: str, **kwargs):
if reg_type == 'nn_ridge':
if ae_config.l_name == 'l2':
alpha = kwargs.get('alpha', ae_config.l)
kwargs['alpha'] = alpha
else:
alpha = kwargs.get('alpha')
assert alpha is not None
model = NonnegativeRidge(**kwargs)
elif reg_type == 'nn_ls_custom':
model = NonnegativeLinear()
elif reg_type == 'nn_ls':
model = LinearRegression(positive=True, fit_intercept=False, **kwargs)
elif reg_type == 'nn_lasso':
if ae_config.l_name == 'l1':
alpha = kwargs.get('alpha', ae_config.l)
kwargs['alpha'] = alpha
else:
alpha = kwargs.get('alpha')
assert alpha is not None
model = Lasso(positive=True, fit_intercept=False, **kwargs)
else:
raise NotImplementedError(reg_type)
return model
def fit_nnls_one_cv(cv: int, test_set: str, data: pd.DataFrame, assets: List[str], base_dir: str,
ae_config, reg_type: str = 'nn_ridge', **kwargs):
model, scaler, dates, test_data, test_features, prediction, embedding, decoding = load_result(ae_config,
test_set,
data,
assets,
base_dir,
cv)
prediction -= scaler['attributes']['mean_']
prediction /= np.sqrt(scaler['attributes']['var_'])
mse_or = np.mean((test_data - prediction) ** 2, 0)
relu_activation_layer = tf.keras.Model(inputs=model.input, outputs=model.get_layer('encoder').output)
relu_activation = relu_activation_layer.predict(test_data)
relu_activation = pd.DataFrame(relu_activation, index=prediction.index)
# Fit linear encoder to the factors
# input_dim = model.layers[0].input_shape[0][-1]
# encoding_dim = model.layers[1].output_shape[-1]
# vlin_encoder = create_linear_encoder_with_constraint(input_dim, encoding_dim)
# lin_encoder.fit(test_data_i, relu_activation_i, batch_size = 1, epochs=500, verbose=2,
# max_queue_size=20, workers=2*os.cpu_count()-1, use_multiprocessing=True)
# factors_nnls_i = lin_encoder.predict(test_data_i)
# lin_embedding = pd.DataFrame(encoder.layers[1].weights[0].numpy(), index=embed.index)
# # Fit non-negative linear least square to the factor
reg_nnls = build_linear_model(ae_config, reg_type, **kwargs)
x = test_data.copy()
mean_ = np.mean(x, 0)
# Center the data as we do not fit intercept
x = x - mean_
reg_nnls.fit(x, relu_activation)
# Now compute intercept: it is just the mean of the dependent variable
intercept_ = np.mean(relu_activation).values
factors_nnls = reg_nnls.predict(x) + intercept_
factors_nnls = pd.DataFrame(factors_nnls, index=prediction.index)
# Get reconstruction error based on nnls embedding
if ae_config.model_type == "pca_ae_model":
# For PCA AE model encoder and decoder share weights
weights = reg_nnls.coef_.copy()
# Compute bias (reconstruction intercept)
bias = mean_ - np.dot(np.mean(factors_nnls, 0), weights)
elif ae_config.model_type == "ae_model":
weights = model.get_layer('decoder').get_weights()[0]
bias = model.get_layer('decoder').get_weights()[1]
else:
raise NotImplementedError(ae_config.model_type)
# Reconstruction
pred_nnls_model = np.dot(factors_nnls, weights) + bias
mse_nnls_model = np.mean((test_data - pred_nnls_model) ** 2, 0)
# pred_nnls_factors = pd.concat([pred_nnls_factors, pd.DataFrame(pred_nnls_factors_i,
# columns=pred.columns,
# index=pred.index)])
pred_nnls_model = pd.DataFrame(pred_nnls_model, columns=prediction.columns, index=prediction.index)
test_data = pd.DataFrame(test_data, columns=prediction.columns, index=prediction.index)
reg_coef = pd.DataFrame(weights.T, index=embedding.index)
return test_data, embedding, decoding, reg_coef, relu_activation, factors_nnls, prediction, pred_nnls_model, mse_or, mse_nnls_model
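# Note (hedged): because the regression above is fit without an intercept on
# mean-centred inputs (x - mean_), adding the mean of the dependent variable
# (intercept_) back to the fitted values recovers the usual least-squares
# intercept; for the non-negative variants this is an approximation rather
# than an exact identity.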
def get_nnls_analysis(test_set: str, data: pd.DataFrame, assets: List[str], base_dir: str, ae_config,
reg_type: str = 'nn_ridge', **kwargs):
"""
:param test_set:
:param data:
:param assets:
:param base_dir:
:param ae_config:
:param reg_type: regression type to fit "nn_ridge" for non negative Ridge or "nn_ls" for non negative LS
:return:
"""
test_data = pd.DataFrame()
prediction = pd.DataFrame()
# pred_nnls_factors = pd.DataFrame()
pred_nnls_model = pd.DataFrame()
factors_nnls = pd.DataFrame()
relu_activation = pd.DataFrame()
embedding = {}
decoding = {}
reg_coef = {}
mse = {
'original': [],
'nnls_factors': [],
'nnls_model': []
}
# cv = 0
for cv in ae_config.data_specs:
LOGGER.info(f'CV: {cv}')
test_data_i, embedding_i, decoding_i, reg_coef_i, relu_activation_i, factors_nnls_i, pred, pred_nnls_model_i, mse_or, mse_nnls_model = fit_nnls_one_cv(
cv,
test_set,
data,
assets,
base_dir,
ae_config,
reg_type=reg_type,
**kwargs)
embedding[cv] = embedding_i
decoding[cv] = decoding_i
reg_coef[cv] = reg_coef_i
relu_activation = pd.concat([relu_activation, relu_activation_i])
factors_nnls = pd.concat([factors_nnls, factors_nnls_i])
prediction = pd.concat([prediction, pred])
pred_nnls_model = pd.concat([pred_nnls_model, pred_nnls_model_i])
test_data = pd.concat([test_data, test_data_i])
mse['original'].append(mse_or)
mse['nnls_model'].append(mse_nnls_model)
results = {
'test_data': test_data,
'prediction': prediction,
# 'pred_nnls_factors': pred_nnls_factors,
'pred_nnls_model': pred_nnls_model,
'factors_nnls': factors_nnls,
'relu_activation': relu_activation,
'mse': mse,
'embedding': embedding,
'decoding': decoding,
'reg_coef': reg_coef
}
return results
def reorder_columns(data, new_order):
return data.iloc[:, new_order]
def load_result_wrapper(config, test_set: str, data: pd.DataFrame, assets: List[str], base_dir: str,
reorder_features: bool = True, first_cv=None):
test_data = pd.DataFrame()
prediction = pd.DataFrame()
features = pd.DataFrame()
relu_activation = pd.DataFrame()
residuals = pd.DataFrame()
embedding = {}
decoding = {}
cvs = list(config.data_specs.keys())
if first_cv:
cvs = [cv for cv in cvs if cv >= first_cv]
for cv in cvs:
embedding[cv] = {}
model, scaler, dates, t_data, f, pred, embed, decod, relu_act = load_result(config,
test_set,
data,
assets,
base_dir,
cv,
reorder_features)
t_data = pd.DataFrame(t_data, columns=pred.columns, index=pred.index)
t_data *= scaler["attributes"]["scale_"]
t_data += scaler["attributes"]["mean_"]
test_data = pd.concat([test_data, t_data])
prediction = pd.concat([prediction, pred])
features = pd.concat([features, f])
if relu_act is not None:
relu_activation = pd.concat([relu_activation, relu_act])
residuals = pd.concat([residuals, t_data - pred])
embedding[cv] = embed
decoding[cv] = decod
return test_data, prediction, features, residuals, embedding, decoding, relu_activation
def get_linear_encoder(config, test_set: str, data: pd.DataFrame, assets: List[str], base_dir: str, cv: str,
reorder_features=True):
"""
    :param config: experiment configuration; its model_type selects the autoencoder or NMF variant
:param test_set:
:param data:
:param assets:
:param base_dir:
:param cv:
:return:
"""
model_type = config.model_type
assert model_type in ["pca_ae_model", "ae_model", "convex_nmf", "semi_nmf"]
assert test_set in ["train", "val", "test"]
scaler = pickle.load(open(f'{base_dir}/{cv}/scaler.p', 'rb'))
input_dim = len(assets)
model, encoder, extra_features = build_model(config.model_type,
input_dim,
config.encoding_dim,
n_features=None,
extra_features_dim=1,
activation=config.activation,
batch_normalization=config.batch_normalization,
kernel_initializer=config.kernel_initializer,
kernel_constraint=config.kernel_constraint,
kernel_regularizer=config.kernel_regularizer,
activity_regularizer=config.activity_regularizer,
loss=config.loss,
uncorrelated_features=config.uncorrelated_features,
weightage=config.weightage)
model.load_weights(f'{base_dir}/{cv}/model.h5')
layer_name = list(filter(lambda x: 'uncorrelated_features_layer' in x, [l.name for l in model.layers]))[0]
encoder = tf.keras.Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
dense_layer = tf.keras.Model(inputs=model.input, outputs=model.get_layer('encoder').output)
dense_layer.layers[-1].activation = activations.linear
assert dense_layer.layers[-1].activation == activations.linear
assert encoder.layers[1].activation == activations.linear
data_spec = config.data_specs[cv]
if test_set == 'test':
_, _, test_data, _, dates, _ = get_features(data,
data_spec['start'],
data_spec['end'],
assets,
val_start=data_spec['val_start'],
test_start=data_spec.get('test_start'),
scaler=scaler)
elif test_set == 'val':
_, test_data, _, _, dates, _ = get_features(data,
data_spec['start'],
data_spec['end'],
assets,
val_start=data_spec['val_start'],
test_start=data_spec.get('test_start'),
scaler=scaler)
elif test_set == 'train':
# For first cv: predict on train data then for the others used previous validation data for prediction
test_data, _, _, _, dates, _ = get_features(data,
data_spec['start'],
data_spec['end'],
assets,
val_start=data_spec['val_start'],
test_start=data_spec.get('test_start'),
scaler=scaler)
else:
raise NotImplementedError(test_set)
# Prediction
test_features = encoder.predict(test_data)
lin_activation = dense_layer.predict(test_data)
index = dates[test_set]
test_features = pd.DataFrame(test_features, index=index)
lin_activation = pd.DataFrame(lin_activation, index=index)
if reorder_features:
embedding = pd.read_pickle(f'{base_dir}/{cv}/encoder_weights.p')
if config.dataset == "dataset1":
base_order = BASE_FACTOR_ORDER_DATASET1
elif config.dataset == "dataset2":
base_order = BASE_FACTOR_ORDER_DATASET2
else:
raise NotImplementedError()
new_order = [embedding.loc[c].idxmax() for c in base_order]
test_features = reorder_columns(test_features, new_order)
test_features.columns = base_order
lin_activation = reorder_columns(lin_activation, new_order)
lin_activation.columns = base_order
return model, test_features, lin_activation
def load_result(config, test_set: str, data: pd.DataFrame, assets: List[str], base_dir: str, cv: str,
reorder_features=True):
"""
    :param config: experiment configuration; its model_type selects the autoencoder or NMF variant
:param test_set:
:param data:
:param assets:
:param base_dir:
:param cv:
:return:
"""
model_type = config.model_type
assert model_type in ["pca_ae_model", "ae_model", "convex_nmf", "semi_nmf"]
assert test_set in ["train", "val", "test"]
scaler = pickle.load(open(f'{base_dir}/{cv}/scaler.p', 'rb'))
input_dim = len(assets)
if "ae" in model_type:
embedding = pd.read_pickle(f'{base_dir}/{cv}/encoder_weights.p')
if model_type == "pca_ae_model":
decoding = embedding.copy()
elif model_type == "ae_model":
decoding = pd.read_pickle(f'{base_dir}/{cv}/decoder_weights.p')
else:
pass
model, encoder, extra_features = build_model(config.model_type,
input_dim,
config.encoding_dim,
n_features=None,
extra_features_dim=1,
activation=config.activation,
batch_normalization=config.batch_normalization,
kernel_initializer=config.kernel_initializer,
kernel_constraint=config.kernel_constraint,
kernel_regularizer=config.kernel_regularizer,
activity_regularizer=config.activity_regularizer,
loss=config.loss,
uncorrelated_features=config.uncorrelated_features,
weightage=config.weightage)
model.load_weights(f'{base_dir}/{cv}/model.h5')
layer_name = list(filter(lambda x: 'uncorrelated_features_layer' in x, [l.name for l in model.layers]))[0]
encoder = tf.keras.Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
elif model_type == "convex_nmf":
model = pickle.load(open(f'{base_dir}/{cv}/model.p', "rb"))
embedding = model.encoding.copy()
embedding = pd.DataFrame(embedding, index=assets)
decoding = model.components.copy()
decoding = pd.DataFrame(decoding, index=assets)
elif model_type == "semi_nmf":
model = pickle.load(open(f'{base_dir}/{cv}/model.p', "rb"))
decoding = model.components.copy()
decoding = pd.DataFrame(decoding, index=assets)
embedding = decoding.copy()
else:
raise NotImplementedError(model_type)
data_spec = config.data_specs[cv]
if test_set == 'test':
_, _, test_data, _, dates, _ = get_features(data,
data_spec['start'],
data_spec['end'],
assets,
val_start=data_spec['val_start'],
test_start=data_spec.get('test_start'),
scaler=scaler)
elif test_set == 'val':
_, test_data, _, _, dates, _ = get_features(data,
data_spec['start'],
data_spec['end'],
assets,
val_start=data_spec['val_start'],
test_start=data_spec.get('test_start'),
scaler=scaler)
elif test_set == 'train':
# For first cv: predict on train data then for the others used previous validation data for prediction
if cv == 0:
test_data, _, _, _, dates, _ = get_features(data,
data_spec['start'],
data_spec['end'],
assets,
val_start=data_spec['val_start'],
test_start=data_spec.get('test_start'),
scaler=scaler)
else:
data_spec = config.data_specs[cv - 1]
_, test_data, _, _, dates, _ = get_features(data,
data_spec['start'],
data_spec['end'],
assets,
val_start=data_spec['val_start'],
test_start=data_spec.get('test_start'),
scaler=scaler)
else:
raise NotImplementedError(test_set)
# Prediction
if "ae" in model_type:
pred = model.predict(test_data)
test_features = encoder.predict(test_data)
relu_activation_layer = tf.keras.Model(inputs=model.input, outputs=model.get_layer('encoder').output)
relu_activation = relu_activation_layer.predict(test_data)
elif "nmf" in model_type:
test_features = model.transform(test_data)
pred = model.inverse_transform(test_features)
else:
raise NotImplementedError(model_type)
pred *= np.sqrt(scaler['attributes']['var_'])
pred += scaler['attributes']['mean_']
if test_set == "train" and cv > 0:
index = dates["val"]
else:
index = dates[test_set]
    pred = pd.DataFrame(pred, columns=assets, index=index)
"""
July 2021
This code retrieves the calculation of sand use for concrete and glass production in the building sector in 26 global regions. For the original code & latest updates, see: https://github.com/
The dynamic material model is based on the BUMA model developed by <NAME>, Leiden University, the Netherlands. For the original code & latest updates, see: https://github.com/SPDeetman/BUMA
The dynamic stock model is based on the ODYM model developed by <NAME>, Uni Freiburg, Germany. For the original code & latest updates, see: https://github.com/IndEcol/ODYM
*NOTE: Insert location of GloBus-main folder in 'dir_path' (line 23) before running the code
Software version: Python 3.7
"""
#%% GENERAL SETTING & STATEMENTS
import pandas as pd
import numpy as np
import os
import ctypes
import math
# set current directory
dir_path = ""
os.chdir(dir_path)
# Set general constants
regions = 26 #26 IMAGE regions
building_types = 4 #4 building types: detached, semi-detached, appartments & high-rise
area = 2 #2 areas: rural & urban
materials = 2 #2 materials: Concrete, Glass
inflation = 1.2423 #gdp/cap inflation correction between 2005 (IMAGE data) & 2016 (commercial calibration) according to https://www.bls.gov/data/inflation_calculator.htm
# Set Flags for sensitivity analysis
flag_alpha = 0 # switch for the sensitivity analysis on alpha, if 1 the maximum alpha is 10% above the maximum found in the data
flag_ExpDec = 0 # switch to choose between Gompertz and Exponential Decay function for commercial floorspace demand (0 = Gompertz, 1 = Expdec)
flag_Normal = 0 # switch to choose between Weibull and Normal lifetime distributions (0 = Weibull, 1 = Normal)
flag_Mean = 0 # switch to choose between material intensity settings (0 = regular regional, 1 = mean, 2 = high, 3 = low, 4 = median)
#%%Load files & arrange tables ----------------------------------------------------
if flag_Mean == 0:
file_addition = ''
elif flag_Mean == 1:
file_addition = '_mean'
elif flag_Mean ==2:
file_addition = '_high'
elif flag_Mean ==3:
file_addition = '_low'
else:
file_addition = '_median'
# Load Population, Floor area, and Service value added (SVA) Database csv-files
pop = pd.read_csv('files_population/pop.csv', index_col = [0]) # Pop; unit: million of people; meaning: global population (over time, by region)
rurpop = pd.read_csv('files_population/rurpop.csv', index_col = [0]) # rurpop; unit: %; meaning: the share of people living in rural areas (over time, by region)
housing_type = pd.read_csv('files_population\Housing_type.csv') # Housing_type; unit: %; meaning: the share of the NUMBER OF PEOPLE living in a particular building type (by region & by area)
floorspace = pd.read_csv('files_floor_area/res_Floorspace.csv') # Floorspace; unit: m2/capita; meaning: the average m2 per capita (over time, by region & area)
floorspace = floorspace[floorspace.Region != regions + 1] # Remove empty region 27
avg_m2_cap = pd.read_csv('files_floor_area\Average_m2_per_cap.csv') # Avg_m2_cap; unit: m2/capita; meaning: average square meters per person (by region & area (rural/urban) & building type)
sva_pc_2005 = pd.read_csv('files_GDP/sva_pc.csv', index_col = [0])
sva_pc = sva_pc_2005 * inflation # we use the inflation corrected SVA to adjust for the fact that IMAGE provides gdp/cap in 2005 US$
# load material density data csv-files
building_materials_concrete = pd.read_csv('files_material_density\Building_materials_concrete' + file_addition + '.csv') # Building_materials; unit: kg/m2; meaning: the average material use per square meter (by building type, by region & by area)
building_materials_glass = pd.read_csv('files_material_density\Building_materials_glass' + file_addition + '.csv') # Building_materials; unit: kg/m2; meaning: the average material use per square meter (by building type, by region & by area)
materials_commercial_concrete = pd.read_csv('files_material_density\materials_commercial_concrete' + file_addition + '.csv', index_col = [0]) # 7 building materials in 4 commercial building types; unit: kg/m2; meaning: the average material use per square meter (by commercial building type)
materials_commercial_glass = pd.read_csv('files_material_density\materials_commercial_glass' + file_addition + '.csv', index_col = [0]) # 7 building materials in 4 commercial building types; unit: kg/m2; meaning: the average material use per square meter (by commercial building type)
# Load fitted regression parameters for comercial floor area estimate
if flag_alpha == 0:
gompertz = pd.read_csv('files_floor_area//files_commercial/Gompertz_parameters.csv', index_col = [0])
else:
gompertz = pd.read_csv('files_floor_area//files_commercial/Gompertz_parameters_alpha.csv', index_col = [0])
# Ensure full time series for pop & rurpop (interpolation, some years are missing)
rurpop2 = rurpop.reindex(list(range(1970,2061,1))).interpolate()
pop2 = pop.reindex(list(range(1970,2061,1))).interpolate()
# Remove 1st year, to ensure same Table size as floorspace data (from 1971)
pop2 = pop2.iloc[1:]
rurpop2 = rurpop2.iloc[1:]
#pre-calculate urban population
urbpop = 1 - rurpop2 # urban population is 1 - the fraction of people living in rural areas (rurpop)
# Restructure the tables to regions as columns; for floorspace
floorspace_rur = floorspace.pivot(index="t", columns="Region", values="Rural")
floorspace_urb = floorspace.pivot(index="t", columns="Region", values="Urban")
# Restructuring for square meters (m2/cap)
avg_m2_cap_urb = avg_m2_cap.loc[avg_m2_cap['Area'] == 'Urban'].drop('Area', 1).T # Remove area column & Transpose
avg_m2_cap_urb.columns = list(map(int,avg_m2_cap_urb.iloc[0])) # name columns according to the row containing the region-labels
avg_m2_cap_urb2 = avg_m2_cap_urb.drop(['Region']) # Remove idle row
avg_m2_cap_rur = avg_m2_cap.loc[avg_m2_cap['Area'] == 'Rural'].drop('Area', 1).T # Remove area column & Transpose
avg_m2_cap_rur.columns = list(map(int,avg_m2_cap_rur.iloc[0])) # name columns according to the row containing the region-labels
avg_m2_cap_rur2 = avg_m2_cap_rur.drop(['Region']) # Remove idle row
# Restructuring for the Housing types (% of population living in them)
housing_type_urb = housing_type.loc[housing_type['Area'] == 'Urban'].drop('Area', 1).T # Remove area column & Transpose
housing_type_urb.columns = list(map(int,housing_type_urb.iloc[0])) # name columns according to the row containing the region-labels
housing_type_urb2 = housing_type_urb.drop(['Region']) # Remove idle row
housing_type_rur = housing_type.loc[housing_type['Area'] == 'Rural'].drop('Area', 1).T # Remove area column & Transpose
housing_type_rur.columns = list(map(int,housing_type_rur.iloc[0])) # name columns according to the row containing the region-labels
housing_type_rur2 = housing_type_rur.drop(['Region']) # Remove idle row
#%% COMMERCIAL building space demand (stock) calculated from Gomperz curve (fitted, using separate regression model)
# Select gompertz curve paramaters for the total commercial m2 demand (stock)
alpha = gompertz['All']['a'] if flag_ExpDec == 0 else 25.601
beta = gompertz['All']['b'] if flag_ExpDec == 0 else 28.431
gamma = gompertz['All']['c'] if flag_ExpDec == 0 else 0.0415
# find the total commercial m2 stock (in Millions of m2)
commercial_m2_cap = pd.DataFrame(index=range(1971,2061), columns=range(1,27))
for year in range(1971,2061):
for region in range(1,27):
if flag_ExpDec == 0:
commercial_m2_cap[region][year] = alpha * math.exp(-beta * math.exp((-gamma/1000) * sva_pc[str(region)][year]))
else:
commercial_m2_cap[region][year] = max(0.542, alpha - beta * math.exp((-gamma/1000) * sva_pc[str(region)][year]))
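# Note (hedged): the Gompertz relation used above has the form
#   m2_per_cap = a * exp(-b * exp(-(c / 1000) * SVA_pc)),
# an S-shaped curve in service value added per capita: it tends to a (the
# saturation level in m2/cap) as SVA_pc grows large and to a * exp(-b)
# (near zero for large b) as SVA_pc approaches zero.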
# Subdivide the total across Offices, Retail+, Govt+ & Hotels+
commercial_m2_cap_office = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Offices
commercial_m2_cap_retail = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Retail & Warehouses
commercial_m2_cap_hotels = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Hotels & Restaurants
commercial_m2_cap_govern = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Hospitals, Education, Government & Transportation
minimum_com_office = 25
minimum_com_retail = 25
minimum_com_hotels = 25
minimum_com_govern = 25
for year in range(1971,2061):
for region in range(1,27):
# get the square meter per capita floorspace for 4 commercial applications
office = gompertz['Office']['a'] * math.exp(-gompertz['Office']['b'] * math.exp((-gompertz['Office']['c']/1000) * sva_pc[str(region)][year]))
retail = gompertz['Retail+']['a'] * math.exp(-gompertz['Retail+']['b'] * math.exp((-gompertz['Retail+']['c']/1000) * sva_pc[str(region)][year]))
hotels = gompertz['Hotels+']['a'] * math.exp(-gompertz['Hotels+']['b'] * math.exp((-gompertz['Hotels+']['c']/1000) * sva_pc[str(region)][year]))
govern = gompertz['Govt+']['a'] * math.exp(-gompertz['Govt+']['b'] * math.exp((-gompertz['Govt+']['c']/1000) * sva_pc[str(region)][year]))
        # calculate minimum values for later use in the historic tail (Region 20: China @ 134 $/cap SVA)
minimum_com_office = office if office < minimum_com_office else minimum_com_office
minimum_com_retail = retail if retail < minimum_com_retail else minimum_com_retail
minimum_com_hotels = hotels if hotels < minimum_com_hotels else minimum_com_hotels
minimum_com_govern = govern if govern < minimum_com_govern else minimum_com_govern
# Then use the ratio's to subdivide the total commercial floorspace into 4 categories
commercial_sum = office + retail + hotels + govern
commercial_m2_cap_office[region][year] = commercial_m2_cap[region][year] * (office/commercial_sum)
commercial_m2_cap_retail[region][year] = commercial_m2_cap[region][year] * (retail/commercial_sum)
commercial_m2_cap_hotels[region][year] = commercial_m2_cap[region][year] * (hotels/commercial_sum)
commercial_m2_cap_govern[region][year] = commercial_m2_cap[region][year] * (govern/commercial_sum)
#%% Add historic tail (1720-1970) + 100 yr initial --------------------------------------------
# load historic population development
hist_pop = pd.read_csv('files_initial_stock\hist_pop.csv', index_col = [0]) # initial population as a percentage of the 1970 population; unit: %; according to the Maddison Project Database (MPD) 2018 (Groningen University)
# Determine the historical average global trend in floorspace/cap & the regional rural population share based on the last 10 years of IMAGE data
floorspace_urb_trend_by_region = [0 for j in range(0,26)]
floorspace_rur_trend_by_region = [0 for j in range(0,26)]
rurpop_trend_by_region = [0 for j in range(0,26)]
commercial_m2_cap_office_trend = [0 for j in range(0,26)]
commercial_m2_cap_retail_trend = [0 for j in range(0,26)]
commercial_m2_cap_hotels_trend = [0 for j in range(0,26)]
commercial_m2_cap_govern_trend = [0 for j in range(0,26)]
# For the RESIDENTIAL & COMMERCIAL floorspace: Derive the annual trend (in m2/cap) over the initial 10 years of IMAGE data
for region in range(1,27):
floorspace_urb_trend_by_year = [0 for i in range(0,10)]
floorspace_rur_trend_by_year = [0 for i in range(0,10)]
commercial_m2_cap_office_trend_by_year = [0 for j in range(0,10)]
commercial_m2_cap_retail_trend_by_year = [0 for i in range(0,10)]
commercial_m2_cap_hotels_trend_by_year = [0 for j in range(0,10)]
commercial_m2_cap_govern_trend_by_year = [0 for i in range(0,10)]
# Get the growth by year (for the first 10 years)
for year in range(1970,1980):
floorspace_urb_trend_by_year[year-1970] = floorspace_urb[region][year+1]/floorspace_urb[region][year+2]
floorspace_rur_trend_by_year[year-1970] = floorspace_rur[region][year+1]/floorspace_rur[region][year+2]
commercial_m2_cap_office_trend_by_year[year-1970] = commercial_m2_cap_office[region][year+1]/commercial_m2_cap_office[region][year+2]
commercial_m2_cap_retail_trend_by_year[year-1970] = commercial_m2_cap_retail[region][year+1]/commercial_m2_cap_retail[region][year+2]
commercial_m2_cap_hotels_trend_by_year[year-1970] = commercial_m2_cap_hotels[region][year+1]/commercial_m2_cap_hotels[region][year+2]
commercial_m2_cap_govern_trend_by_year[year-1970] = commercial_m2_cap_govern[region][year+1]/commercial_m2_cap_govern[region][year+2]
rurpop_trend_by_region[region-1] = ((1-(rurpop[str(region)][1980]/rurpop[str(region)][1970]))/10)*100
floorspace_urb_trend_by_region[region-1] = sum(floorspace_urb_trend_by_year)/10
floorspace_rur_trend_by_region[region-1] = sum(floorspace_rur_trend_by_year)/10
commercial_m2_cap_office_trend[region-1] = sum(commercial_m2_cap_office_trend_by_year)/10
commercial_m2_cap_retail_trend[region-1] = sum(commercial_m2_cap_retail_trend_by_year)/10
commercial_m2_cap_hotels_trend[region-1] = sum(commercial_m2_cap_hotels_trend_by_year)/10
commercial_m2_cap_govern_trend[region-1] = sum(commercial_m2_cap_govern_trend_by_year)/10
# Average global annual decline in floorspace/cap in %, rural: 1%; urban 1.2%; commercial: 1.26-2.18% /yr
floorspace_urb_trend_global = (1-(sum(floorspace_urb_trend_by_region)/26))*100 # in % decrease per annum
floorspace_rur_trend_global = (1-(sum(floorspace_rur_trend_by_region)/26))*100 # in % decrease per annum
commercial_m2_cap_office_trend_global = (1-(sum(commercial_m2_cap_office_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_retail_trend_global = (1-(sum(commercial_m2_cap_retail_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_hotels_trend_global = (1-(sum(commercial_m2_cap_hotels_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_govern_trend_global = (1-(sum(commercial_m2_cap_govern_trend)/26))*100 # in % decrease per annum
# define historic floorspace (1820-1970) in m2/cap
floorspace_urb_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=floorspace_urb.columns)
floorspace_rur_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=floorspace_rur.columns)
rurpop_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=rurpop.columns)
pop_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=pop2.columns)
commercial_m2_cap_office_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_office.columns)
commercial_m2_cap_retail_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_retail.columns)
commercial_m2_cap_hotels_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_hotels.columns)
commercial_m2_cap_govern_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_govern.columns)
# Find minimum or maximum values in the original IMAGE data (just for residential; commercial minimum values have been calculated above)
minimum_urb_fs = floorspace_urb.values.min() # Region 20: China
minimum_rur_fs = floorspace_rur.values.min() # Region 20: China
maximum_rurpop = rurpop.values.max() # Region 9 : Eastern Africa
# Calculate the actual values used between 1820 & 1970, given the trends & the min/max values
for region in range(1,regions+1):
for year in range(1820,1971):
# MAX of 1) the MINimum value & 2) the calculated value
floorspace_urb_1820_1970[region][year] = max(minimum_urb_fs, floorspace_urb[region][1971] * ((100-floorspace_urb_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
floorspace_rur_1820_1970[region][year] = max(minimum_rur_fs, floorspace_rur[region][1971] * ((100-floorspace_rur_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_office_1820_1970[region][year] = max(minimum_com_office, commercial_m2_cap_office[region][1971] * ((100-commercial_m2_cap_office_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_retail_1820_1970[region][year] = max(minimum_com_retail, commercial_m2_cap_retail[region][1971] * ((100-commercial_m2_cap_retail_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_hotels_1820_1970[region][year] = max(minimum_com_hotels, commercial_m2_cap_hotels[region][1971] * ((100-commercial_m2_cap_hotels_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_govern_1820_1970[region][year] = max(minimum_com_govern, commercial_m2_cap_govern[region][1971] * ((100-commercial_m2_cap_govern_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
# MIN of 1) the MAXimum value & 2) the calculated value
rurpop_1820_1970[str(region)][year] = min(maximum_rurpop, rurpop[str(region)][1970] * ((100+rurpop_trend_by_region[region-1])/100)**(1970-year)) # average annual INcrease by region
# just add the tail to the population (no min/max & trend is pre-calculated in hist_pop)
pop_1820_1970[str(region)][year] = hist_pop[str(region)][year] * pop[str(region)][1970]
urbpop_1820_1970 = 1 - rurpop_1820_1970
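# Illustrative helper (not part of the model run, added as documentation): the backward
# extrapolation used in the loop above. Starting from the 1971 value, floorspace per capita
# is reduced by a constant annual percentage going back in time and floored at the observed
# minimum; the rural population share uses the same idea with an annual increase and a cap.
def _extrapolate_backwards(value_1971, annual_decline_pct, year, floor_value):
    return max(floor_value, value_1971 * ((100 - annual_decline_pct) / 100) ** (1971 - year))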
# To avoid full model setup in 1820 (all required stock gets built in yr 1) we assume another tail that linearly increases to the 1820 value over a 100 year time period, so 1720 = 0
floorspace_urb_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=floorspace_urb.columns)
floorspace_rur_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=floorspace_rur.columns)
rurpop_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=rurpop.columns)
urbpop_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=urbpop.columns)
pop_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=pop2.columns)
commercial_m2_cap_office_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_office.columns)
commercial_m2_cap_retail_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_retail.columns)
commercial_m2_cap_hotels_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_hotels.columns)
commercial_m2_cap_govern_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_govern.columns)
for region in range(1,27):
for time in range(1721,1820):
# MAX(0,...) Because of floating point deviations, leading to negative stock in some cases
floorspace_urb_1721_1820[int(region)][time] = max(0.0, floorspace_urb_1820_1970[int(region)][1820] - (floorspace_urb_1820_1970[int(region)][1820]/100)*(1820-time))
floorspace_rur_1721_1820[int(region)][time] = max(0.0, floorspace_rur_1820_1970[int(region)][1820] - (floorspace_rur_1820_1970[int(region)][1820]/100)*(1820-time))
rurpop_1721_1820[str(region)][time] = max(0.0, rurpop_1820_1970[str(region)][1820] - (rurpop_1820_1970[str(region)][1820]/100)*(1820-time))
urbpop_1721_1820[str(region)][time] = max(0.0, urbpop_1820_1970[str(region)][1820] - (urbpop_1820_1970[str(region)][1820]/100)*(1820-time))
pop_1721_1820[str(region)][time] = max(0.0, pop_1820_1970[str(region)][1820] - (pop_1820_1970[str(region)][1820]/100)*(1820-time))
commercial_m2_cap_office_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_office_1820_1970[region][1820] - (commercial_m2_cap_office_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_retail_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_retail_1820_1970[region][1820] - (commercial_m2_cap_retail_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_hotels_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_hotels_1820_1970[region][1820] - (commercial_m2_cap_hotels_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_govern_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_govern_1820_1970[region][1820] - (commercial_m2_cap_govern_1820_1970[region][1820]/100)*(1820-time))
# combine the 1721-1820 run-up, the 1820-1970 historic tail and the IMAGE data (1971 onwards) into single frames
rurpop_tail = rurpop_1721_1820.append(rurpop_1820_1970.append(rurpop2, ignore_index=False), ignore_index=False)
urbpop_tail = urbpop_1721_1820.append(urbpop_1820_1970.append(urbpop, ignore_index=False), ignore_index=False)
pop_tail = pop_1721_1820.append(pop_1820_1970.append(pop2, ignore_index=False), ignore_index=False)
floorspace_urb_tail = floorspace_urb_1721_1820.append(floorspace_urb_1820_1970.append(floorspace_urb, ignore_index=False), ignore_index=False)
floorspace_rur_tail = floorspace_rur_1721_1820.append(floorspace_rur_1820_1970.append(floorspace_rur, ignore_index=False), ignore_index=False)
commercial_m2_cap_office_tail = commercial_m2_cap_office_1721_1820.append(commercial_m2_cap_office_1820_1970.append(commercial_m2_cap_office, ignore_index=False), ignore_index=False)
commercial_m2_cap_retail_tail = commercial_m2_cap_retail_1721_1820.append(commercial_m2_cap_retail_1820_1970.append(commercial_m2_cap_retail, ignore_index=False), ignore_index=False)
commercial_m2_cap_hotels_tail = commercial_m2_cap_hotels_1721_1820.append(commercial_m2_cap_hotels_1820_1970.append(commercial_m2_cap_hotels, ignore_index=False), ignore_index=False)
commercial_m2_cap_govern_tail = commercial_m2_cap_govern_1721_1820.append(commercial_m2_cap_govern_1820_1970.append(commercial_m2_cap_govern, ignore_index=False), ignore_index=False)
#%% SQUARE METER Calculations -----------------------------------------------------------
# adjust the share for urban/rural only (shares in the csv are a percentage of the total (Rur + Urb); we need to adjust the urban shares to add up to 1, same for rural)
housing_type_rur3 = housing_type_rur2/housing_type_rur2.sum()
housing_type_urb3 = housing_type_urb2/housing_type_urb2.sum()
# calculate the total rural/urban population (pop2 = millions of people, rurpop2 = % of people living in rural areas)
people_rur = pd.DataFrame(rurpop_tail.values*pop_tail.values, columns=pop_tail.columns, index=pop_tail.index)
people_urb = pd.DataFrame(urbpop_tail.values*pop_tail.values, columns=pop_tail.columns, index=pop_tail.index)
# calculate the total number of people (urban/rural) BY HOUSING TYPE (the sum of det,sem,app & hig equals the total population e.g. people_rur)
people_det_rur = pd.DataFrame(housing_type_rur3.iloc[0].values*people_rur.values, columns=people_rur.columns, index=people_rur.index)
people_sem_rur = | pd.DataFrame(housing_type_rur3.iloc[1].values*people_rur.values, columns=people_rur.columns, index=people_rur.index) | pandas.DataFrame |
import pytest
import jax.numpy as np
import pandas as pd
from pzflow import Flow
from pzflow.bijectors import Chain, Reverse, Scale
from pzflow.distributions import *
@pytest.mark.parametrize(
"data_columns,bijector,info,file",
[
(None, None, None, None),
(("x", "y"), None, None, None),
(None, Reverse(), None, None),
(("x", "y"), None, None, "file"),
(None, Reverse(), None, "file"),
(None, None, "fake", "file"),
],
)
def test_bad_inputs(data_columns, bijector, info, file):
with pytest.raises(ValueError):
Flow(data_columns, bijector=bijector, info=info, file=file)
@pytest.mark.parametrize(
"flow",
[
Flow(("redshift", "y"), Reverse(), latent=Normal(2)),
Flow(("redshift", "y"), Reverse(), latent=Tdist(2)),
Flow(("redshift", "y"), Reverse(), latent=Uniform((-3, 3), (-3, 3))),
],
)
def test_returns_correct_shape(flow):
xarray = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
x = pd.DataFrame(xarray, columns=("redshift", "y"))
conditions = flow._get_conditions(x, xarray.shape[0])
x_with_errs = flow._array_with_errs(x)
assert x_with_errs.shape == (3, 4)
x_with_errs = flow._array_with_errs(x, skip="redshift")
assert x_with_errs.shape == (3, 3)
xfwd, xfwd_log_det = flow._forward(flow._params, xarray, conditions=conditions)
assert xfwd.shape == x.shape
assert xfwd_log_det.shape == (x.shape[0],)
xinv, xinv_log_det = flow._inverse(flow._params, xarray, conditions=conditions)
assert xinv.shape == x.shape
assert xinv_log_det.shape == (x.shape[0],)
J = flow._jacobian(flow._params, xarray, conditions=conditions)
assert J.shape == (3, 2, 2)
nsamples = 4
assert flow.sample(nsamples).shape == (nsamples, x.shape[1])
assert flow.log_prob(x).shape == (x.shape[0],)
grid = np.arange(0, 2.1, 0.12)
pdfs = flow.posterior(x, column="y", grid=grid)
assert pdfs.shape == (x.shape[0], grid.size)
pdfs = flow.posterior(x.iloc[:, 1:], column="redshift", grid=grid)
assert pdfs.shape == (x.shape[0], grid.size)
pdfs = flow.posterior(x.iloc[:, 1:], column="redshift", grid=grid, batch_size=2)
assert pdfs.shape == (x.shape[0], grid.size)
assert len(flow.train(x, epochs=11, verbose=True)) == 12
def test_error_convolution():
xarray = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
x = pd.DataFrame(xarray, columns=("redshift", "y"))
flow = Flow(("redshift", "y"), Reverse(), latent=Normal(2))
assert flow.log_prob(x, convolve_err=True).shape == (x.shape[0],)
assert np.allclose(
flow.log_prob(x, convolve_err=True),
flow.log_prob(x, convolve_err=False),
)
grid = np.arange(0, 2.1, 0.12)
pdfs = flow.posterior(x, column="y", grid=grid, convolve_err=True)
assert pdfs.shape == (x.shape[0], grid.size)
assert (
len(flow.train(x, epochs=11, convolve_err=True, burn_in_epochs=4, verbose=True))
== 17
)
flow = Flow(("redshift", "y"), Reverse(), latent=Tdist(2))
with pytest.raises(ValueError):
flow.log_prob(x, convolve_err=True).shape
with pytest.raises(ValueError):
flow.posterior(x, column="y", grid=grid, convolve_err=True)
with pytest.raises(ValueError):
flow.train(x, epochs=11, convolve_err=True, burn_in_epochs=4, verbose=True)
def test_columns_with_errs():
columns = ("redshift", "y")
flow = Flow(columns, Reverse())
xarray = np.array([[1, 2, 0.4, 0.2], [3, 4, 0.1, 0.9]])
x = pd.DataFrame(xarray, columns=("redshift", "y", "y_err", "redshift_err"))
x_with_errs = flow._array_with_errs(x)
assert np.allclose(x_with_errs, np.array([[1, 2, 0.2, 0.4], [3, 4, 0.9, 0.1]]))
xarray = np.array([[1, 2, 0.4], [3, 4, 0.1]])
x = pd.DataFrame(xarray, columns=("redshift", "y", "y_err"))
x_with_errs = flow._array_with_errs(x)
assert np.allclose(x_with_errs, np.array([[1, 2, 0, 0.4], [3, 4, 0, 0.1]]))
xarray = np.array([[1, 2, 0.4, 0.2], [3, 4, 0.1, 0.9]])
x = pd.DataFrame(xarray, columns=("redshift", "y", "y_err", "redshift_err"))
x_with_errs = flow._array_with_errs(x, skip="redshift")
assert np.allclose(x_with_errs, np.array([[2, 0, 0.4], [4, 0, 0.1]]))
def test_jacobian():
columns = ("redshift", "y")
flow = Flow(columns, Chain(Reverse(), Scale(2.0)))
xarray = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
conditions = flow._get_conditions(None, xarray.shape[0])
J = flow._jacobian(flow._params, xarray, conditions=conditions)
assert np.allclose(
J,
np.array([[[0, 2.0], [2.0, 0]], [[0, 2.0], [2.0, 0]], [[0, 2.0], [2.0, 0]]]),
)
def test_posterior_batch():
columns = ("redshift", "y")
flow = Flow(columns, Reverse())
xarray = np.array([[1, 2], [3, 4], [5, 6]])
x = | pd.DataFrame(xarray, columns=columns) | pandas.DataFrame |
import json
import os
import random
from random import sample
import numpy as np
import numpy.random
import re
from collections import Counter
import inspect
import pandas as pd
import matplotlib.pyplot as plt
import requests
from IPython.display import HTML
import seaborn as sns
import networkx as nx
from pylab import rcParams
try:
from wordcloud import WordCloud
except ImportError:
print("wordcloud er ikke installert, kan ikke lage ordskyer")
#************** For defining wordbag search
def dict2pd(dictionary):
    """Convert a dict of dicts into a DataFrame, append a row of column means ('snitt'), and return the frame transposed and sorted by that mean"""
    res = pd.DataFrame.from_dict(dictionary).fillna(0)
s = (res.mean(axis=0))
s = s.rename('snitt')
res = res.append(s)
return res.sort_values(by='snitt', axis=1, ascending=False).transpose()
def def2dict(ddef):
    """Parse a wordbag definition of the form 'head1: word1, word2; head2: word3, ...' into a dict {head: [word, ...]}, adding a capitalized variant of each word"""
    res = dict()
defs = ddef.split(';')
for d in defs:
lex = d.split(':')
if len(lex) == 2:
#print('#'.join(lex))
hyper = lex[0].strip()
occurrences = [x.strip() for x in lex[1].split(',')]
res[hyper] = occurrences
for x in res:
for y in res[x]:
if y.capitalize() not in res[x]:
res[x].append(y.capitalize())
return res
def wordbag_eval(wordbag, urns):
if type(urns) is list:
if isinstance(urns[0], list):
urns = [u[0] for u in urns]
else:
urns = urns
else:
urns = [urns]
param = dict()
param['wordbags'] = wordbag
param['urns'] = urns
r = requests.post("https://api.nb.no/ngram/wordbags", json = param)
return dict2pd(r.json())
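# Illustrative usage sketch (not part of the original module): a wordbag definition string
# is parsed with def2dict() and evaluated over a set of books with wordbag_eval().
# The definition and the URN serial numbers below are placeholders.
def _example_wordbag_search():
    bag = def2dict('sjøliv: hav, båt, fisk; landliv: jord, skog, åker')
    urns = ['2008091004047', '2010051903041']  # hypothetical URNs
    return wordbag_eval(bag, urns)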
def wordbag_eval_para(wordbag, urns):
if type(urns) is list:
if isinstance(urns[0], list):
urns = [u[0] for u in urns]
else:
urns = urns
else:
urns = [urns]
param = dict()
param['wordbags'] = wordbag
param['urns'] = urns
r = requests.post("https://api.nb.no/ngram/wordbags_para", json = param)
return r.json()
def get_paragraphs(urn, paras):
"""Return paragraphs for urn"""
param = dict()
param['paragraphs'] = paras
param['urn'] = urn
r = requests.get("https://api.nb.no/ngram/paragraphs", json=param)
return dict2pd(r.json())
### ******************* wordbag search end
def ner(text = None, dist=False):
"""Analyze text for named entities - set dist = True will return the four values that go into decision"""
r = []
if text != None:
r = requests.post("https://api.nb.no/ngram/ner", json={'text':text,'dist':dist})
return r.json()
#**** names ****
def check_navn(navn, limit=2, remove='Ja Nei Nå Dem De Deres Unnskyld Ikke Ah Hmm <NAME> Jaja Jaha'.split()):
"""Removes all items in navn with frequency below limit and words in all case as well as all words in list 'remove'"""
r = {x:navn[x] for x in navn if navn[x] > limit and x.upper() != x and not x in remove}
return r
def sentences(urns, num=300):
if isinstance(urns[0], list):
urns = [str(x[0]) for x in urns]
params = {'urns':urns,
'num':num}
res = requests.get("https://api.nb.no/ngram/sentences", params=params)
return res.json()
def names(urn, ratio = 0.3, cutoff = 2):
""" Return namens in book with urn. Returns uni- , bi-, tri- and quadgrams """
if type(urn) is list:
urn = urn[0]
r = requests.get('https://api.nb.no/ngram/names', json={'urn':urn, 'ratio':ratio, 'cutoff':cutoff})
x = r.json()
result = (
Counter(x[0][0]),
Counter({tuple(x[1][i][0]):x[1][i][1] for i in range(len(x[1]))}),
Counter({tuple(x[2][i][0]):x[2][i][1] for i in range(len(x[2]))}),
Counter({tuple(x[3][i][0]):x[3][i][1] for i in range(len(x[3]))})
)
return result
def name_graph(name_struct):
m = []
for n in name_struct[0]:
m.append(frozenset([n]))
for n in name_struct[1:]:
m += [frozenset(x) for x in n]
G = []
for x in m:
for y in m:
if x < y:
G.append((' '.join(x), ' '.join(y)))
N = []
for x in m:
N.append(' '.join(x))
Gg = nx.Graph()
Gg.add_nodes_from(N)
Gg.add_edges_from(G)
return Gg
def aggregate_urns(urnlist):
"""Sum up word frequencies across urns"""
if isinstance(urnlist[0], list):
urnlist = [u[0] for u in urnlist]
r = requests.post("https://api.nb.no/ngram/book_aggregates", json={'urns':urnlist})
return r.json()
# Norwegian word bank
def word_variant(word, form):
""" Find alternative form for a given word form, e.g. word_variant('spiste', 'pres-part') """
r = requests.get("https://api.nb.no/ngram/variant_form", params={'word':word, 'form':form})
return r.json()
def word_paradigm(word):
""" Find alternative form for a given word form, e.g. word_variant('spiste', 'pres-part') """
r = requests.get("https://api.nb.no/ngram/paradigm", params = {'word': word})
return r.json()
def word_form(word):
""" Find alternative form for a given word form, e.g. word_variant('spiste', 'pres-part') """
r = requests.get("https://api.nb.no/ngram/word_form", params = {'word': word})
return r.json()
def word_lemma(word):
""" Find lemma form for a given word form """
r = requests.get("https://api.nb.no/ngram/word_lemma", params = {'word': word})
return r.json()
def word_freq(urn, words):
""" Find frequency of words within urn """
params = {'urn':urn, 'words':words}
r = requests.post("https://api.nb.no/ngram/freq", json=params)
return dict(r.json())
def tot_freq(words):
""" Find total frequency of words """
params = {'words':words}
r = requests.post("https://api.nb.no/ngram/word_frequencies", json=params)
return dict(r.json())
def book_count(urns):
params = {'urns':urns}
r = requests.post("https://api.nb.no/ngram/book_count", json=params)
return dict(r.json())
def sttr(urn, chunk=5000):
r = requests.get("https://api.nb.no/ngram/sttr", json = {'urn':urn, 'chunk':chunk})
return r.json()
def totals(top=200):
r = requests.get("https://api.nb.no/ngram/totals", json={'top':top})
return dict(r.json())
def navn(urn):
if type(urn) is list:
urn = urn[0]
r = requests.get('https://api.nb.no/ngram/tingnavn', json={'urn':urn})
return dict(r.json())
def digibokurn_from_text(T):
"""Return URNs as 13 digits (any sequence of 13 digits is counted as an URN)"""
return re.findall("(?<=digibok_)[0-9]{13}", T)
def urn_from_text(T):
"""Return URNs as 13 digits (any sequence of 13 digits is counted as an URN)"""
return re.findall("[0-9]{13}", T)
def metadata(urn=None):
urns = pure_urn(urn)
#print(urns)
r = requests.post("https://api.nb.no/ngram/meta", json={'urn':urns})
return r.json()
def pure_urn(data):
"""Convert URN-lists with extra data into list of serial numbers.
Args:
        data: May be a list of URNs, a list of lists with URNs as their
            initial element, a string of raw text containing URNs,
            or any pandas DataFrame or Series (URNs must be in the first column of a DataFrame).
    Returns:
        List[str]: A list of URNs. Empty list if the input is in the wrong
            format or contains no URNs
"""
korpus_def = []
if isinstance(data, list):
if not data: # Empty list
korpus_def = []
if isinstance(data[0], list): # List of lists
try:
korpus_def = [str(x[0]) for x in data]
except IndexError:
korpus_def = []
else: # Assume data is already a list of URNs
korpus_def = [str(int(x)) for x in data]
elif isinstance(data, str):
korpus_def = [str(x) for x in urn_from_text(data)]
elif isinstance(data, (int, np.integer)):
korpus_def = [str(data)]
elif isinstance(data, pd.DataFrame):
col = data.columns[0]
urns = pd.to_numeric(data[col])
korpus_def = [str(int(x)) for x in urns.dropna()]
elif isinstance(data, pd.Series):
korpus_def = [str(int(x)) for x in data.dropna()]
return korpus_def
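# Illustrative usage sketch (not part of the original module): the different input formats
# pure_urn() accepts all reduce to a flat list of URN serial numbers as strings.
# The URNs below are placeholders.
def _example_pure_urn():
    from_list = pure_urn([2008091004047, 2010051903041])
    from_metadata = pure_urn([[2008091004047, 'Forfatter', 'Tittel', 1999]])
    from_text = pure_urn('see URN:NBN:no-nb_digibok_2008091004047 for details')
    return from_list, from_metadata, from_text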
#### N-Grams from fulltext updated
def unigram(word, period=(1950, 2020), media = 'bok', ddk=None, topic=None, gender=None, publisher=None, lang=None, trans=None):
r = requests.get("https://api.nb.no/ngram/unigrams", params={
'word':word,
'ddk':ddk,
'topic':topic,
'gender':gender,
'publisher':publisher,
'lang':lang,
'trans':trans,
'period0':period[0],
'period1':period[1],
'media':media
})
return frame(dict(r.json()))
def bigram(first,second, period=(1950, 2020), media = 'bok', ddk=None, topic=None, gender=None, publisher=None, lang=None, trans=None):
r = requests.get("https://api.nb.no/ngram/bigrams", params={
'first':first,
'second':second,
'ddk':ddk,
'topic':topic,
'gender':gender,
'publisher':publisher,
'lang':lang,
'trans':trans,
'period0':period[0],
'period1':period[1],
'media':media
})
return frame(dict(r.json()))
def book_counts(period=(1800, 2050)):
r = requests.get("https://api.nb.no/ngram/book_counts", params={
'period0':period[0],
'period1':period[1],
})
return frame(dict(r.json()))
####
def difference(first, second, rf, rs, years=(1980, 2000),smooth=1, corpus='bok'):
"""Compute difference of difference (first/second)/(rf/rs)"""
try:
a_first = nb_ngram(first, years=years, smooth=smooth, corpus=corpus)
a_second = nb_ngram(second, years=years, smooth=smooth, corpus=corpus)
a = a_first.join(a_second)
b_first = nb_ngram(rf, years=years, smooth=smooth, corpus=corpus)
b_second = nb_ngram(rs, years=years, smooth=smooth, corpus=corpus)
if rf == rs:
b_second.columns = [rs + '2']
b = b_first.join(b_second)
s_a = a.mean()
s_b = b.mean()
f1 = s_a[a.columns[0]]/s_a[a.columns[1]]
f2 = s_b[b.columns[0]]/s_b[b.columns[1]]
res = f1/f2
except:
        res = 'Missing data for some of the terms - only have: ' + ', '.join([x for x in a.columns.append(b.columns)])
return res
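# Illustrative usage sketch (not part of the original module): difference() compares the
# ratio of the mean relative frequencies of first/second against the reference ratio rf/rs
# over the same period, i.e. (mean(first)/mean(second)) / (mean(rf)/mean(rs)).
# The words below are placeholders.
def _example_difference():
    return difference('bil', 'sykkel', 'han', 'hun', years=(1950, 2000), corpus='bok')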
def df_combine(array_df):
    """Combine a list of one-column dataframes into a single dataframe, renaming duplicate columns"""
    cols = []
    for i in range(len(array_df)):
        if array_df[i].columns[0] in cols:
            array_df[i].columns = [array_df[i].columns[0] + '_' + str(i)]
        cols.append(array_df[i].columns[0])
    return pd.concat(array_df, axis=1, sort=True)
def col_agg(df, col='sum'):
c = df.sum(axis=0)
c = pd.DataFrame(c)
c.columns = [col]
return c
def row_agg(df, col='sum'):
c = df.sum(axis=1)
c = pd.DataFrame(c)
c.columns = [col]
return c
def get_freq(urn, top=50, cutoff=3):
"""Get frequency list for urn"""
if isinstance(urn, list):
urn = urn[0]
r = requests.get("https://api.nb.no/ngram/urnfreq", json={'urn':urn, 'top':top, 'cutoff':cutoff})
return Counter(dict(r.json()))
####=============== GET URNS ==================##########
def book_corpus(words = None, author = None,
title = None, subtitle = None, ddk = None, subject = None,
period=(1100, 2020),
gender=None,
lang = None,
trans= None,
limit=20 ):
return frame(book_urn(words, author, title, subtitle, ddk, subject, period, gender, lang, trans, limit),
"urn author title year".split())
def book_urn(words = None, author = None,
title = None, subtitle = None, ddk = None, subject = None,
period=(1100, 2020),
gender=None,
lang = None,
trans= None,
limit=20 ):
"""Get URNs for books with metadata"""
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
query = {i:values[i] for i in args if values[i] != None and i != 'period'}
query['year'] = period[0]
query['next'] = period[1] - period[0]
return get_urn(query)
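# Illustrative usage sketch (not part of the original module): book_urn() builds a metadata
# query from its keyword arguments and returns matching URNs, while book_corpus() wraps the
# same result in a dataframe. The author pattern and period below are placeholders.
def _example_book_query():
    urns = book_urn(author='Ibsen%', period=(1870, 1900), limit=10)
    corpus_frame = book_corpus(author='Ibsen%', period=(1870, 1900), limit=10)
    return urns, corpus_frame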
def unique_urns(korpus, newest=True):
author_title = {(c[1],c[2]) for c in korpus}
corpus = {(c[0], c[1]):[d for d in korpus if c[0] == d[1] and c[1]==d[2]] for c in author_title }
for c in corpus:
corpus[c].sort(key=lambda c: c[3])
if newest == True:
res = [corpus[c][-1] for c in corpus]
else:
res = [corpus[c][0] for c in corpus]
return res
def refine_book_urn(urns = None, words = None, author = None,
title = None, ddk = None, subject = None, period=(1100, 2020), gender=None, lang = None, trans= None, limit=20 ):
"""Refine URNs for books with metadata"""
# if empty urns nothing to refine
if urns is None or urns == []:
return []
# check if urns is a metadata list, and pick out first elements if that is the case
if isinstance(urns[0], list):
urns = [x[0] for x in urns]
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
query = {i:values[i] for i in args if values[i] != None and i != 'period' and i != 'urns'}
query['year'] = period[0]
query['next'] = period[1] - period[0]
#print(query)
return refine_urn(urns, query)
def best_book_urn(word = None, author = None,
title = None, ddk = None, subject = None, period=(1100, 2020), gender=None, lang = None, trans= None, limit=20 ):
"""Get URNs for books with metadata"""
if word is None:
return []
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
query = {i:values[i] for i in args if values[i] != None and i != 'period' and i != 'word'}
query['year'] = period[0]
query['next'] = period[1] - period[0]
return get_best_urn(word, query)
def get_urn(metadata=None):
"""Get urns from metadata"""
if metadata is None:
metadata = {}
if not ('next' in metadata or 'neste' in metadata):
metadata['next'] = 100
if not 'year' in metadata:
metadata['year'] = 1900
r = requests.get('https://api.nb.no/ngram/urn', json=metadata)
return r.json()
def refine_urn(urns, metadata=None):
"""Refine a list urns using extra information"""
if metadata is None:
metadata = {}
metadata['urns'] = urns
if not ('words' in metadata):
metadata['words'] = []
if not ('next' in metadata or 'neste' in metadata):
metadata['next'] = 520
if not 'year' in metadata:
metadata['year'] = 1500
r = requests.post('https://api.nb.no/ngram/refineurn', json=metadata)
return r.json()
def get_best_urn(word, metadata=None):
"""Get the best urns from metadata containing a specific word"""
metadata['word'] = word
if not ('next' in metadata or 'neste' in metadata):
metadata['next'] = 600
if not 'year' in metadata:
metadata['year'] = 1500
r = requests.get('https://api.nb.no/ngram/best_urn', json=metadata)
return r.json()
def get_papers(top=5, cutoff=5, navn='%', yearfrom=1800, yearto=2020, samplesize=100):
"""Get newspapers"""
div = lambda x, y: (int(x/y), x % y)
chunks = 20
# split samplesize into chunks, go through the chunks and then the remainder
(first, second) = div(samplesize, chunks)
r = []
# collect chunkwise
for i in range(first):
r += requests.get("https://api.nb.no/ngram/avisfreq", json={'navn':navn, 'top':top, 'cutoff':cutoff,
'yearfrom':yearfrom, 'yearto':yearto,'samplesize':chunks}
).json()
# collect the remainder
r += requests.get("https://api.nb.no/ngram/avisfreq", json={'navn':navn, 'top':top, 'cutoff':cutoff,
'yearfrom':yearfrom, 'yearto':yearto,'samplesize':second}
).json()
return [dict(x) for x in r]
def urn_coll(word, urns=[], after=5, before=5, limit=1000):
"""Find collocations for word in a set of book URNs. Only books at the moment"""
if isinstance(urns[0], list): # urns assumed to be list of list with urn-serial as first element
urns = [u[0] for u in urns]
r = requests.post("https://api.nb.no/ngram/urncoll", json={'word':word, 'urns':urns,
'after':after, 'before':before, 'limit':limit})
res = pd.DataFrame.from_dict(r.json(), orient='index')
if not res.empty:
res = res.sort_values(by=res.columns[0], ascending = False)
return res
def urn_coll_words(words, urns=None, after=5, before=5, limit=1000):
"""Find collocations for a group of words within a set of books given by a list of URNs. Only books at the moment"""
coll = pd.DataFrame()
if urns != None:
if isinstance(urns[0], list): # urns assumed to be list of list with urn-serial as first element
urns = [u[0] for u in urns]
colls = Counter()
if isinstance(words, str):
words = words.split()
res = Counter()
for word in words:
try:
res += Counter(
requests.post(
"https://api.nb.no/ngram/urncoll",
json={
'word':word,
'urns':urns,
'after':after,
'before':before,
'limit':limit}
).json()
)
except:
True
coll = pd.DataFrame.from_dict(res, orient='index')
if not coll.empty:
coll = coll.sort_values(by=coll.columns[0], ascending = False)
return coll
def get_aggregated_corpus(urns, top=0, cutoff=0):
res = Counter()
if isinstance(urns[0], list): # urns assumed to be list of list with urn-serial as first element
urns = [u[0] for u in urns]
for u in urns:
#print(u)
res += get_freq(u, top = top, cutoff = cutoff)
return pd.DataFrame.from_dict(res, orient='index').sort_values(by=0, ascending = False)
def compare_word_bags(bag_of_words, another_bag_of_words, first_freq = 0, another_freq = 1, top=100, first_col = 0, another_col= 0):
"""Compare two columns taken from two or one frame. Parameters x_freq are frequency limits used to cut down candidate words
from the bag of words. Compare along the columns where first_col and another_col are column numbers. Typical situation is that
bag_of_words is a one column frame and another_bag_of_words is another one column frame. When the columns are all from one frame,
just change column numbers to match the columns"""
diff = bag_of_words[bag_of_words > first_freq][bag_of_words.columns[first_col]]/another_bag_of_words[another_bag_of_words > another_freq][another_bag_of_words.columns[another_col]]
return frame(diff, 'diff').sort_values(by='diff', ascending=False)[:top]
def collocation(
word,
yearfrom=2010,
yearto=2018,
before=3,
after=3,
limit=1000,
corpus='avis',
lang='nob',
title='%',
ddk='%',
subtitle='%'):
"""Defined collects frequencies for a given word"""
data = requests.get(
"https://api.nb.no/ngram/collocation",
params={
'word':word,
'corpus':corpus,
'yearfrom':yearfrom,
'before':before,
'after':after,
'limit':limit,
'yearto':yearto,
'title':title,
'ddk':ddk,
'subtitle':subtitle}).json()
return pd.DataFrame.from_dict(data['freq'], orient='index')
def collocation_data(words, yearfrom = 2000, yearto = 2005, limit = 1000, before = 5, after = 5, title = '%', corpus='bok'):
"""Collocation for a set of words sum up all the collocations words is a list of words or a blank separated string of words"""
import sys
a = dict()
if isinstance(words, str):
words = words.split()
for word in words:
print(word)
try:
a[word] = collocation(
word,
yearfrom = yearfrom, yearto = yearto, limit = limit,
corpus = corpus, before = before,
after = after, title = title
)
a[word].columns = [word]
except:
            print(word, ' error situation', sys.exc_info())
result = pd.DataFrame()
for w in a:
result = result.join(a[w], how='outer')
return pd.DataFrame(result.sum(axis=1)).sort_values(by=0, ascending=False)
class CollocationCorpus:
from random import sample
def __init__(self, corpus = None, name='', maximum_texts = 500):
urns = pure_urn(corpus)
if len(urns) > maximum_texts:
            selection = sample(urns, maximum_texts)
else:
selection = urns
self.corpus_def = selection
self.corpus = get_aggregated_corpus(self.corpus_def, top=0, cutoff=0)
def summary(self, head=10):
info = {
'corpus_definition':self.corpus[:head],
'number_of_words':len(self.corpus)
}
return info
def collocation_old(word, yearfrom=2010, yearto=2018, before=3, after=3, limit=1000, corpus='avis'):
data = requests.get(
"https://api.nb.no/ngram/collocation",
params={
'word':word,
'corpus':corpus,
'yearfrom':yearfrom,
'before':before,
'after':after,
'limit':limit,
'yearto':yearto}).json()
return pd.DataFrame.from_dict(data['freq'], orient='index')
def heatmap(df, color='green'):
return df.fillna(0).style.background_gradient(cmap=sns.light_palette(color, as_cmap=True))
def get_corpus_text(urns, top = 0, cutoff=0):
k = dict()
if isinstance(urns, list):
# a list of urns, or a korpus with urns as first element
if isinstance(urns[0], list):
urns = [u[0] for u in urns]
else:
# assume it is a single urn, text or number
urns = [urns]
for u in urns:
#print(u)
k[u] = get_freq(u, top = top, cutoff = cutoff)
df = pd.DataFrame(k)
res = df.sort_values(by=df.columns[0], ascending=False)
return res
def normalize_corpus_dataframe(df):
colsums = df.sum()
for x in colsums.index:
#print(x)
df[x] = df[x].fillna(0)/colsums[x]
return True
def show_korpus(korpus, start=0, size=4, vstart=0, vsize=20, sortby = ''):
"""Show corpus as a panda dataframe
start = 0 indicates which dokument to show first, dataframe is sorted according to this
size = 4 how many documents (or columns) are shown
top = 20 how many words (or rows) are shown"""
if sortby != '':
val = sortby
else:
val = korpus.columns[start]
return korpus[korpus.columns[start:start+size]].sort_values(by=val, ascending=False)[vstart:vstart + vsize]
def aggregate(korpus):
"""Make an aggregated sum of all documents across the corpus, here we use average"""
return pd.DataFrame(korpus.fillna(0).mean(axis=1))
def convert_list_of_freqs_to_dataframe(referanse):
"""The function get_papers() returns a list of frequencies - convert it"""
res = []
for x in referanse:
res.append( dict(x))
result = pd.DataFrame(res).transpose()
normalize_corpus_dataframe(result)
return result
def get_corpus(top=0, cutoff=0, navn='%', corpus='avis', yearfrom=1800, yearto=2020, samplesize=10):
if corpus == 'avis':
result = get_papers(top=top, cutoff=cutoff, navn=navn, yearfrom=yearfrom, yearto=yearto, samplesize=samplesize)
res = convert_list_of_freqs_to_dataframe(result)
else:
urns = get_urn({'author':navn, 'year':yearfrom, 'neste':yearto-yearfrom, 'limit':samplesize})
res = get_corpus_text([x[0] for x in urns], top=top, cutoff=cutoff)
return res
class Cluster:
def __init__(self, word = '', filename = '', period = (1950,1960) , before = 5, after = 5, corpus='avis', reference = 200,
word_samples=1000):
if word != '':
self.collocates = collocation(word, yearfrom=period[0], yearto = period[1], before=before, after=after,
corpus=corpus, limit=word_samples)
self.collocates.columns = [word]
if type(reference) is pd.core.frame.DataFrame:
reference = reference
elif type(reference) is int:
reference = get_corpus(yearfrom=period[0], yearto=period[1], corpus=corpus, samplesize=reference)
else:
reference = get_corpus(yearfrom=period[0], yearto=period[1], corpus=corpus, samplesize=int(reference))
self.reference = aggregate(reference)
self.reference.columns = ['reference_corpus']
self.word = word
self.period = period
self.corpus = corpus
else:
if filename != '':
self.load(filename)
def cluster_set(self, exponent=1.1, top = 200, aslist=True):
combo_corp = self.reference.join(self.collocates, how='outer')
normalize_corpus_dataframe(combo_corp)
korpus = compute_assoc(combo_corp, self.word, exponent)
korpus.columns = [self.word]
if top <= 0:
res = korpus.sort_values(by=self.word, ascending=False)
else:
res = korpus.sort_values(by=self.word, ascending=False).iloc[:top]
if aslist == True:
res = HTML(', '.join(list(res.index)))
return res
def add_reference(self, number=20):
ref = get_corpus(yearfrom=self.period[0], yearto=self.period[1], samplesize=number)
ref = aggregate(ref)
ref.columns = ['add_ref']
normalize_corpus_dataframe(ref)
self.reference = aggregate(self.reference.join(ref, how='outer'))
return True
def save(self, filename=''):
if filename == '':
filename = "{w}_{p}-{q}.json".format(w=self.word,p=self.period[0], q = self.period[1])
model = {
'word':self.word,
'period':self.period,
'reference':self.reference.to_dict(),
'collocates':self.collocates.to_dict(),
'corpus':self.corpus
}
with open(filename, 'w', encoding = 'utf-8') as outfile:
            print('saving to:', filename)
outfile.write(json.dumps(model))
return True
def load(self, filename):
with open(filename, 'r') as infile:
try:
model = json.loads(infile.read())
#print(model['word'])
self.word = model['word']
self.period = model['period']
self.corpus = model['corpus']
self.reference = pd.DataFrame(model['reference'])
self.collocates = pd.DataFrame(model['collocates'])
except:
                print('something went wrong')
return True
def search_words(self, words, exponent=1.1):
if type(words) is str:
words = [w.strip() for w in words.split()]
df = self.cluster_set(exponent=exponent, top=0, aslist=False)
sub= [w for w in words if w in df.index]
res = df.transpose()[sub].transpose().sort_values(by=df.columns[0], ascending=False)
return res
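# Illustrative usage sketch (not part of the original class): build a collocation cluster
# for one word in a period, inspect the most strongly associated words, and save the cluster
# for later reuse. The word, period and file name below are placeholders.
def _example_cluster():
    cl = Cluster('frihet', period=(1950, 1959), corpus='avis', reference=50, word_samples=500)
    top_words = cl.cluster_set(top=20, aslist=False)
    cl.save('frihet_1950-1959.json')
    return top_words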
def wildcardsearch(params=None):
if params is None:
params = {'word': '', 'freq_lim': 50, 'limit': 50, 'factor': 2}
res = requests.get('https://api.nb.no/ngram/wildcards', params=params)
if res.status_code == 200:
result = res.json()
else:
result = {'status':'feil'}
resultat = pd.DataFrame.from_dict(result, orient='index')
if not(resultat.empty):
resultat.columns = [params['word']]
return resultat
def sorted_wildcardsearch(params):
res = wildcardsearch(params)
if not res.empty:
res = res.sort_values(by=params['word'], ascending=False)
return res
def make_newspaper_network(key, wordbag, titel='%', yearfrom='1980', yearto='1990', limit=500):
if type(wordbag) is str:
wordbag = wordbag.split()
r = requests.post("https://api.nb.no/ngram/avisgraph", json={
'key':key,
'words':wordbag,
'yearto':yearto,
'yearfrom':yearfrom,
'limit':limit})
G = nx.Graph()
if r.status_code == 200:
G.add_weighted_edges_from([(x,y,z) for (x,y,z) in r.json() if z > 0 and x != y])
else:
print(r.text)
return G
def make_network(urn, wordbag, cutoff=0):
if type(urn) is list:
urn = urn[0]
if type(wordbag) is str:
wordbag = wordbag.split()
G = make_network_graph(urn, wordbag, cutoff)
return G
def make_network_graph(urn, wordbag, cutoff=0):
r = requests.post("https://api.nb.no/ngram/graph", json={'urn':urn, 'words':wordbag})
G = nx.Graph()
G.add_weighted_edges_from([(x,y,z) for (x,y,z) in r.json() if z > cutoff and x != y])
return G
def make_network_name_graph(urn, tokens, tokenmap=None, cutoff=2):
if isinstance(urn, list):
urn = urn[0]
# tokens should be a list of list of tokens. If it is list of dicts pull out the keys (= tokens)
if isinstance(tokens[0], dict):
tokens = [list(x.keys()) for x in tokens]
r = requests.post("https://api.nb.no/ngram/word_graph", json={'urn':urn, 'tokens':tokens, 'tokenmap':tokenmap})
#print(r.text)
G = nx.Graph()
G.add_weighted_edges_from([(x,y,z) for (x,y,z) in r.json() if z > cutoff and x != y])
return G
def token_convert_back(tokens, sep='_'):
""" convert a list of tokens to string representation"""
res = [tokens[0]]
for y in tokens:
res.append([tuple(x.split(sep)) for x in y])
l = len(res)
for x in range(1, 4-l):
res.append([])
return res
def token_convert(tokens, sep='_'):
""" convert back to tuples """
tokens = [list(x.keys()) for x in tokens]
tokens = [[(x,) for x in tokens[0]], tokens[1], tokens[2], tokens[3]]
conversion = []
for x in tokens:
conversion.append([sep.join(t) for t in x])
return conversion
def token_map_to_tuples(tokens_as_strings, sep='_', arrow='==>'):
tuples = []
for x in tokens_as_strings:
token = x.split(arrow)[0].strip()
mapsto = x.split(arrow)[1].strip()
tuples.append((tuple(token.split(sep)), tuple(mapsto.split(sep))))
#tuples = [(tuple(x.split(arrow).strip()[0].split(sep)), tuple(x.split(arrow)[1].strip().split(sep))) for x in tokens_as_strings]
return tuples
def token_map(tokens, strings=False, sep='_', arrow= '==>'):
""" tokens as from nb.names()"""
if isinstance(tokens[0], dict):
# get the keys(), otherwise it is already just a list of tokens up to length 4
tokens = [list(x.keys()) for x in tokens]
# convert tokens to tuples and put them all in one list
tokens = [(x,) for x in tokens[0]] + tokens[1] + tokens[2] + tokens[3]
tm = []
#print(tokens)
for token in tokens:
if isinstance(token, str):
trep = (token,)
elif isinstance(token, list):
trep = tuple(token)
token = tuple(token)
else:
trep = token
n = len(trep)
#print(trep)
if trep[-1].endswith('s'):
cp = list(trep[:n-1])
cp.append(trep[-1][:-1])
cp = tuple(cp)
#print('copy', cp, trep)
if cp in tokens:
#print(trep, cp)
trep = cp
larger = [ts for ts in tokens if set(ts) >= set(trep)]
#print(trep, ' => ', larger)
larger.sort(key=lambda x: len(x), reverse=True)
tm.append((token,larger[0]))
res = tm
if strings == True:
res = [sep.join(x[0]) + ' ' + arrow + ' ' + sep.join(x[1]) for x in tm]
return res
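# Illustrative usage sketch (not part of the original module): names() returns uni-, bi-,
# tri- and quadgram name candidates for a book, token_map() maps every candidate to the
# longest name containing it, and the result can be fed to make_network_name_graph().
# The URN below is a placeholder.
def _example_name_graph(urn='2008091004047'):
    name_structure = names(urn, cutoff=2)
    mapping = token_map(name_structure)
    return make_network_name_graph(urn, list(name_structure), tokenmap=mapping, cutoff=2)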
def draw_graph_centrality(G, h=15, v=10, fontsize=20, k=0.2, arrows=False, font_color='black', threshold=0.01):
node_dict = nx.degree_centrality(G)
subnodes = dict({x:node_dict[x] for x in node_dict if node_dict[x] >= threshold})
x, y = rcParams['figure.figsize']
rcParams['figure.figsize'] = h, v
pos =nx.spring_layout(G, k=k)
ax = plt.subplot()
ax.set_xticks([])
ax.set_yticks([])
G = G.subgraph(subnodes)
nx.draw_networkx_labels(G, pos, font_size=fontsize, font_color=font_color)
nx.draw_networkx_nodes(G, pos, alpha=0.5, nodelist=subnodes.keys(), node_size=[v * 1000 for v in subnodes.values()])
nx.draw_networkx_edges(G, pos, alpha=0.7, arrows=arrows, edge_color='lightblue', width=1)
rcParams['figure.figsize'] = x, y
return True
def combine(clusters):
"""Make new collocation analyses from data in clusters"""
colls = []
collocates = clusters[0].collocates
for c in clusters[1:]:
collocates = collocates.join(c.collocates, rsuffix='-' + str(c.period[0]))
return collocates
def cluster_join(cluster):
clusters = [cluster[i] for i in cluster]
clst = clusters[0].cluster_set(aslist=False)
for c in clusters[1:]:
clst = clst.join(c.cluster_set(aslist=False), rsuffix = '_'+str(c.period[0]))
return clst
def serie_cluster(word, startår, sluttår, inkrement, before=5, after=5, reference=150, word_samples=500):
tidscluster = dict()
for i in range(startår, sluttår, inkrement):
tidscluster[i] = Cluster(
word,
corpus='avis',
period=(i, i + inkrement - 1),
            before=before,
after=after,
reference=reference,
word_samples=word_samples)
print(i, i+inkrement - 1)
return tidscluster
def save_serie_cluster(tidscluster):
for i in tidscluster:
tidscluster[i].save()
return 'OK'
def les_serie_cluster(word, startår, sluttår, inkrement):
tcluster = dict()
for i in range(startår, sluttår, inkrement):
print(i, i+inkrement - 1)
tcluster[i] = Cluster(filename='{w}_{f}-{t}.json'.format(w=word, f=i,t=i+inkrement - 1))
return tcluster
def make_cloud(json_text, top=100, background='white', stretch=lambda x: 2**(10*x), width=500, height=500, font_path=None):
pairs0 = Counter(json_text).most_common(top)
pairs = {x[0]:stretch(x[1]) for x in pairs0}
wc = WordCloud(
font_path=font_path,
background_color=background,
width=width,
#color_func=my_colorfunc,
ranks_only=True,
height=height).generate_from_frequencies(pairs)
return wc
def draw_cloud(sky, width=20, height=20, fil=''):
plt.figure(figsize=(width,height))
plt.imshow(sky, interpolation='bilinear')
figplot = plt.gcf()
if fil != '':
figplot.savefig(fil, format='png')
return
def cloud(pd, column='', top=200, width=1000, height=1000, background='black', file='', stretch=10, font_path=None):
if column == '':
column = pd.columns[0]
data = json.loads(pd[column].to_json())
a_cloud = make_cloud(data, top=top,
background=background, font_path=font_path,
stretch=lambda x: 2**(stretch*x), width=width, height=height)
draw_cloud(a_cloud, fil=file)
return
def make_a_collocation(word, period=(1990, 2000), before=5, after=5, corpus='avis', samplesize=100, limit=2000):
collocates = collocation(word, yearfrom=period[0], yearto=period[1], before=before, after=after,
corpus=corpus, limit=limit)
collocates.columns = [word]
reference = get_corpus(yearfrom=period[0], yearto=period[1], samplesize=samplesize)
ref_agg = aggregate(reference)
ref_agg.columns = ['reference_corpus']
return ref_agg
def compute_assoc(coll_frame, column, exponent=1.1, refcolumn = 'reference_corpus'):
return pd.DataFrame(coll_frame[column]**exponent/coll_frame.mean(axis=1))
class Corpus:
def __init__(self, filename = '', target_urns = None, reference_urns = None, period = (1950,1960), author='%',
title='%', ddk='%', gender='%', subject='%', reference = 100, max_books=100):
params = {
'year':period[0],
'next': period[1]-period[0],
'subject':subject,
'ddk':ddk,
'author':author,
            #'gender':gender,  # does not seem to work for get_urn - check the API
'title':title,
'limit':max_books,
'reference':reference
}
self.params = params
self.coll = dict()
self.coll_graph = dict()
if filename == '':
if target_urns != None:
målkorpus_def = target_urns
else:
målkorpus_def = get_urn(params)
#print("Antall bøker i målkorpus ", len(målkorpus_def))
if isinstance(målkorpus_def[0], list):
målkorpus_urn = [str(x[0]) for x in målkorpus_def]
#print(målkorpus_urn)
else:
målkorpus_urn = målkorpus_def
if len(målkorpus_urn) > max_books and max_books > 0:
target_urn = list(numpy.random.choice(målkorpus_urn, max_books))
else:
target_urn = målkorpus_urn
if reference_urns != None:
referansekorpus_def = reference_urns
else:
                # select from the period; usually used only if the target is given by metadata
referansekorpus_def = get_urn({'year':period[0], 'next':period[1]-period[0], 'limit':reference})
#print("<NAME> i referanse: ", len(referansekorpus_def))
# referansen skal være distinkt fra målkorpuset
referanse_urn = [str(x[0]) for x in referansekorpus_def]
self.reference_urn = referanse_urn
self.target_urn = target_urn
# make sure there is no overlap between target and reference
#
referanse_urn = list(set(referanse_urn) - set(target_urn))
målkorpus_txt = get_corpus_text(target_urn)
normalize_corpus_dataframe(målkorpus_txt)
if referanse_urn != []:
referanse_txt = get_corpus_text(referanse_urn)
normalize_corpus_dataframe(referanse_txt)
combo = målkorpus_txt.join(referanse_txt)
else:
referanse_txt = målkorpus_txt
combo = målkorpus_txt
self.combo = combo
self.reference = referanse_txt
self.target = målkorpus_txt
self.reference = aggregate(self.reference)
self.reference.columns = ['reference_corpus']
            ## document frequencies
mål_docf = pd.DataFrame(pd.DataFrame(målkorpus_txt/målkorpus_txt).sum(axis=1))
combo_docf = pd.DataFrame(pd.DataFrame(combo/combo).sum(axis=1))
ref_docf = pd.DataFrame(pd.DataFrame(referanse_txt/referanse_txt).sum(axis=1))
            ### Normalize the document frequencies
normalize_corpus_dataframe(mål_docf)
normalize_corpus_dataframe(combo_docf)
normalize_corpus_dataframe(ref_docf)
self.målkorpus_tot = aggregate(målkorpus_txt)
self.combo_tot = aggregate(combo)
self.mål_docf = mål_docf
self.combo_docf = combo_docf
self.lowest = self.combo_tot.sort_values(by=0)[0][0]
else:
self.load(filename)
return
def difference(self, freq_exp=1.1, doc_exp=1.1, top = 200, aslist=True):
res = pd.DataFrame(
(self.målkorpus_tot**freq_exp/self.combo_tot)*(self.mål_docf**doc_exp/self.combo_docf)
)
res.columns = ['diff']
if top > 0:
res = res.sort_values(by=res.columns[0], ascending=False).iloc[:top]
else:
res = res.sort_values(by=res.columns[0], ascending=False)
if aslist == True:
res = HTML(', '.join(list(res.index)))
return res
def save(self, filename):
model = {
'params':self.params,
'target': self.målkorpus_tot.to_json(),
'combo': self.combo_tot.to_json(),
'target_df': self.mål_docf.to_json(),
'combo_df': self.combo_docf.to_json()
}
with open(filename, 'w', encoding = 'utf-8') as outfile:
outfile.write(json.dumps(model))
return True
def load(self, filename):
with open(filename, 'r') as infile:
try:
model = json.loads(infile.read())
#print(model['word'])
self.params = model['params']
#print(self.params)
self.målkorpus_tot = pd.read_json(model['target'])
#print(self.målkorpus_tot[:10])
self.combo_tot = pd.read_json(model['combo'])
self.mål_docf = pd.read_json(model['target_df'])
self.combo_docf = pd.read_json(model['combo_df'])
except:
                print('something went wrong')
return True
def collocations(self, word, after=5, before=5, limit=1000):
"""Find collocations for word in a set of book URNs. Only books at the moment"""
r = requests.post(
"https://api.nb.no/ngram/urncoll",
json={
'word': word,
'urns': self.target_urn,
'after': after,
'before': before,
'limit': limit
}
)
temp = pd.DataFrame.from_dict(r.json(), orient='index')
normalize_corpus_dataframe(temp)
self.coll[word] = temp.sort_values(by = temp.columns[0], ascending = False)
return True
def conc(self, word, before=8, after=8, size=10, combo=0):
if combo == 0:
urns = self.target_urn + self.reference_urn
elif combo == 1:
urns = self.target_urn
else:
urns = self.reference_urn
if len(urns) > 300:
urns = list(numpy.random.choice(urns, 300, replace=False))
return get_urnkonk(word, {'urns':urns, 'before':before, 'after':after, 'limit':size})
def sort_collocations(self, word, comparison = None, exp = 1.0, above = None):
if comparison == None:
comparison = self.combo_tot[0]
try:
res = pd.DataFrame(self.coll[word][0]**exp/comparison)
except KeyError:
print('Constructing a collocation for {w} with default parameters.'.format(w=word))
self.collocations(word)
res = pd.DataFrame(self.coll[word][0]**exp/comparison)
if above == None:
above = self.lowest
res = res[self.combo_tot > above]
return res.sort_values(by = 0, ascending = False)
def search_collocations(self, word, words, comparison = None, exp = 1.0):
if comparison == None:
comparison = self.combo_tot[0]
try:
res = pd.DataFrame(self.coll[word][0]**exp/comparison)
except KeyError:
print('Constructing a collocation for {w} with default parameters.'.format(w=word))
self.collocations(word)
res = pd.DataFrame(self.coll[word][0]**exp/comparison)
search_items = list(set(res.index) & set(words))
return res.transpose()[search_items].transpose().sort_values(by = 0, ascending = False)
def summary(self, head=10):
info = {
'parameters':self.params,
'target_urn':self.target_urn[:head],
'reference urn':self.reference_urn[:head],
}
return info
def search_words(self, words, freq_exp=1.1, doc_exp=1.1):
if type(words) is str:
words = [w.strip() for w in words.split()]
df = self.difference(freq_exp = freq_exp, doc_exp=doc_exp,top=0, aslist=False)
sub = [w for w in words if w in df.index]
res = df.transpose()[sub].transpose().sort_values(by=df.columns[0], ascending=False)
return res
def make_collocation_graph(self, target_word, top = 15, before = 4, after = 4, limit = 1000, exp=1):
"""Make a cascaded network of collocations"""
self.collocations(target_word, before=before, after=after, limit=limit)
coll = self.sort_collocations(target_word, exp = exp)
target_graf = dict()
edges = []
for word in coll[:top].index:
edges.append((target_word, word))
if word.isalpha():
self.collocations(word, before=before, after=after, limit=limit)
for w in self.sort_collocations(word, exp = exp)[:top].index:
if w.isalpha():
edges.append((word, w))
target_graph = nx.Graph()
target_graph.add_edges_from(edges)
self.coll_graph[target_word] = target_graph
return target_graph
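# Illustrative usage sketch (not part of the original class): build a target corpus from
# metadata, contrast it with a random reference corpus from the same period, and list the
# most characteristic words. The subject, period and sizes below are placeholders.
def _example_corpus():
    korpus = Corpus(subject='fiske', period=(1950, 1960), max_books=30, reference=50)
    return korpus.difference(top=50, aslist=False)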
def vekstdiagram(urn, params=None):
if params is None:
params = {}
# if urn is the value of get_urn() it is a list
# otherwise it just passes
if type(urn) is list:
urn = urn[0]
para = params
para['urn']= urn
r = requests.post('https://api.nb.no/ngram/vekstdiagram', json = para)
return pd.DataFrame(r.json())
def plot_book_wordbags(urn, wordbags, window=5000, pr = 100):
"""Generate a diagram of wordbags in book """
return plot_sammen_vekst(urn, wordbags, window=window, pr=pr)
def plot_sammen_vekst(urn, ordlister, window=5000, pr = 100):
"""Plott alle seriene sammen"""
rammer = []
c = dict()
if isinstance(ordlister, list):
if isinstance(ordlister[0], list):
for l in ordlister:
if l != []:
c[l[0]] = l
else:
c[ordlister[0]] = ordlister
else:
c = ordlister
for key in c:
vekst = vekstdiagram(urn, params = {'words': c[key], 'window':window, 'pr': pr} )
vekst.columns = [key]
rammer.append(vekst)
return pd.concat(rammer)
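# Illustrative usage sketch (not part of the original module): plot_book_wordbags() runs
# vekstdiagram() for each word bag, counting occurrences through the book in windows of
# `window` words sampled every `pr` words. The URN and the word bags below are placeholders.
def _example_growth_diagram(urn='2008091004047'):
    bags = {'krig': ['krig', 'soldat', 'våpen'], 'fred': ['fred', 'forsoning']}
    return plot_book_wordbags(urn, bags, window=5000, pr=100)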
def spurious_names(n=300):
topwords = totals(n)
Removals = [x.capitalize() for x in topwords if x.isalpha()]
return Removals
def relaterte_ord(word, number = 20, score=False):
G = make_graph(word)
res = Counter(nx.eigenvector_centrality(G)).most_common(number)
if score == False:
res = [x[0] for x in res]
return res
def check_words(urn, ordbag):
if type(urn) is list:
urn = urn[0]
ordliste = get_freq(urn, top=50000, cutoff=0)
res = Counter()
for w in ordbag:
res[w] = ordliste[w]
for p in res.most_common():
if p[1] != 0:
print(p[0], p[1])
else:
break
return True
def nb_ngram(terms, corpus='bok', smooth=3, years=(1810, 2010), mode='relative'):
df = ngram_conv(get_ngram(terms, corpus=corpus), smooth=smooth, years=years, mode=mode)
df.index = df.index.astype(int)
return df
def get_ngram(terms, corpus='avis'):
req = requests.get(
"http://www.nb.no/sp_tjenester/beta/ngram_1/ngram/query?terms={terms}&corpus={corpus}".format(
terms=terms,
corpus=corpus
))
if req.status_code == 200:
res = req.text
else:
res = "[]"
return json.loads(res)
def ngram_conv(ngrams, smooth=1, years=(1810,2013), mode='relative'):
ngc = {}
# check if relative frequency or absolute frequency is in question
if mode.startswith('rel') or mode=='y':
arg = 'y'
else:
arg = 'f'
for x in ngrams:
if x != []:
ngc[x['key']] = {z['x']:z[arg] for z in x['values'] if int(z['x']) <= years[1] and int(z['x']) >= years[0]}
return pd.DataFrame(ngc).rolling(window=smooth, win_type='triang').mean()
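# Illustrative usage sketch (not part of the original module): nb_ngram() chains get_ngram()
# and ngram_conv() and returns a smoothed dataframe of yearly relative frequencies, one column
# per search term. The terms below are placeholders.
def _example_ngram():
    return nb_ngram('frihet,likhet', corpus='bok', smooth=3, years=(1850, 2000))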
def make_graph(words, lang='nob', cutoff=20, leaves=0):
"""Get galaxy from ngram-database. English and German provided by Google N-gram.
Set leaves=1 to get the leaves. Parameter cutoff only works for lang='nob'.
Specify English by setting lang='eng' and German by lang='ger'"""
params = dict()
params['terms'] = words
params['corpus'] = lang
params['limit'] = cutoff
params['leaves'] = leaves
result = requests.get("https://www.nb.no/sp_tjenester/beta/ngram_1/galaxies/query", params=params)
G = nx.DiGraph()
edgelist = []
if result.status_code == 200:
graph = json.loads(result.text)
#print(graph)
nodes = graph['nodes']
edges = graph['links']
for edge in edges:
edgelist += [(nodes[edge['source']]['name'], nodes[edge['target']]['name'], abs(edge['value']))]
#print(edgelist)
G.add_weighted_edges_from(edgelist)
return G
def urn_concordance(urns = None, word = None, size = 5, before = None, after = None ):
""" Find a concordance within a corpus as list of URNs. This is a wrapper for get_urnkonk """
# exit if list of urns is empty
if urns is None or word is None:
return []
# The URNs may be presented in different ways.
urns = pure_urn(urns)
# find values and feed everything to get_urnkonk
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
query = {i:values[i] for i in args if values[i] is not None and i != 'word'}
return get_urnkonk(word, query)
def konk(word, urns=None, before=5, after=5):
if urns is None:
print('Missing URNs')
return
urner = refine_book_urn(words=[word], urns=urns)
return urn_concordance(word=word, urns = sample(urner, min(20, len(urner))),before = before, after = after)
def concordance(word = None, corpus='bok', author=None, title=None, subtitle=None, lang=None, ddk=None, subject=None,
yearfrom = None, yearto=None, before=None, after=None, size=5, gender=None, offset=None, kind='html'):
if word is None:
return []
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
query = {i:values[i] for i in args if values[i] is not None and i != 'word' and i != 'kind'}
return get_konk(word, query, kind=kind)
def get_konk(word, params=None, kind='html'):
if params is None:
params = {}
para = params
para['word']= word
corpus = 'bok'
if 'corpus' in para:
corpus = para['corpus']
else:
para['corpus'] = corpus
r = requests.get('https://api.nb.no/ngram/konk', params=para)
if kind=='html':
rows = ""
row_template = ("<tr>"
"<td><a href='{urn}?searchText={kw}' target='_'>{urnredux}</a></td>"
"<td>{b}</td>"
"<td>{w}</td>"
"<td style='text-align:left'>{a}</td>"
"</tr>\n")
if corpus == 'bok':
for x in r.json():
rows += row_template.format(
kw = word,
urn=x['urn'],
urnredux=','.join([x['author'], x['title'], str(x['year'])]),
b=x['before'],
w=x['word'],
a=x['after'])
else:
#print(r.json())
for x in r.json():
rows += row_template.format(
kw = word,
urn=x['urn'],
urnredux='-'.join(x['urn'].split('_')[2:6:3]),
b=x['before'],
w=x['word'],
a=x['after'])
res = "<table>{rows}</table>".format(rows=rows)
res = HTML(res)
elif kind == 'json':
res = r.json()
else:
try:
if corpus == 'bok':
res = pd.DataFrame(r.json())
res = res[['urn','author','title','year','before','word','after']]
else:
res = pd.DataFrame(r.json())
res = res[['urn','before','word','after']]
except Exception:
res = pd.DataFrame()
#r = r.style.set_properties(subset=['after'],**{'text-align':'left'})
return res
def konk_to_html(jsonkonk):
rows = ""
row_template = ("<tr>"
"<td><a href='{urn}' target='_'>{urnredux}</a></td>"
"<td>{b}</td>"
"<td>{w}</td>"
"<td style='text-align:left'>{a}</td>"
"</tr>\n")
for x in jsonkonk:
rows += row_template.format(
urn=x['urn'], urnredux=x['urn'], b=x['before'], w=x['word'], a=x['after'])
res = "<table>{rows}</table>".format(rows=rows)
return res
def central_characters(graph, n=10):
res = Counter(nx.degree_centrality(graph)).most_common(n)
return res
def central_betweenness_characters(graph, n=10):
res = Counter(nx.betweenness_centrality(graph)).most_common(n)
return res
def get_urnkonk(word, params=None, html=True):
if params is None:
params = {}
para = params
para['word']= word
try:
para['urns'] = pure_urn(para['urns'])
except:
print('Parameter urns missing')
r = requests.post('https://api.nb.no/ngram/urnkonk', json = para)
if html:
rows = ""
for x in r.json():
rows += """<tr>
<td>
<a href='{urn}?searchText={kw}' target='_blank' style='text-decoration:none'>{urnredux}</a>
</td>
<td>{b}</td>
<td>{w}</td>
<td style='text-align:left'>{a}</td>
</tr>\n""".format(kw=word,
urn=x['urn'],
urnredux="{t}, {f}, {y}".format(t=x['title'], f=x['author'], y=x['year']),
b=x['before'],
w=x['word'],
a=x['after']
)
res = """<table>{rows}</table>""".format(rows=rows)
res = HTML(res)
else:
res = pd.DataFrame(r.json())
res = res[['urn','before','word','after']]
#r = r.style.set_properties(subset=['after'],**{'text-align':'left'})
return res
def frame(something, name = None):
"""Try to make a frame out of something and name columns according to name, which should be a string or a list of strings,
one for each column. Mismatch in numbers is taken care of."""
if isinstance(something, dict):
res = pd.DataFrame.from_dict(something, orient='index')
else:
res = pd.DataFrame(something)
import pandas as pd
import scipy.stats
import numpy as np
import datetime
pd.set_option('display.width', 1000)
pd.set_option('max.columns', 100)
class HistoricGames(object):
def __init__(self, league, season, bookmaker='BbAv'):
"""
:param league: The league for which historical games should be retrieved
per football-data.co.uk standards. 'E0' for the English Premier League,
'D1' for the German Bundesliga etc.
:param season: A four digit integer or a list of four digit integers
corresponding to the final year of the season of interest.
If the season finishes in 2017, season should be 2017.
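:param bookmaker: Prefix of the odds columns on football-data.co.uk to use,
e.g. 'BbAv' for the Betbrain average odds.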
"""
self.league = league
self.season = season
self.bookmaker = bookmaker
def get_data(self):
"""
:return: A pandas DataFrame with historic results for the league and
season along with outcome probabilities for the chosen bookie.
"""
data = pd.DataFrame()
if type(self.season) == int:
seasons = [self.season]
else:
seasons = self.season
for s in seasons:
base_url = "http://www.football-data.co.uk/mmz4281/"
url = base_url + str(s - 1)[-2:] + str(s)[-2:] + '/' + self.league + '.csv'
tmp = pd.read_csv(url)
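# Usage sketch (my addition; it assumes get_data goes on to append each season's CSV to
# `data` and attach the chosen bookmaker's outcome probabilities before returning it):
epl = HistoricGames('E0', [2016, 2017], bookmaker='BbAv')
games = epl.get_data()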
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as graph  # the Python counterpart of MATLAB-style plotting
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd  # for reading the CSV file
import csv
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures
#import datamodify as dat
def datatobeTaken():
data = pd.read_csv("turboazmodified.csv")
dataframe = pd.DataFrame(data, columns=['Yurush', 'Qiymet', 'Buraxilish ili'])
yurush = data['Yurush']
qiymet = data['Qiymet']
buraxilishili = data['Buraxilish ili']
yurush = (yurush - yurush.mean()) / yurush.std()
yurush = np.c_[np.ones(yurush.shape[0]),yurush]
qiymet = (qiymet - qiymet.mean()) / qiymet.std()
buraxilishili = (buraxilishili - buraxilishili.mean()) / buraxilishili.std()
yurush = yurush.astype(float)
m = len(qiymet)
return yurush, qiymet, buraxilishili;
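# A small sketch (my addition, not in the original script): with the standardized design
# matrix X (bias column + mileage) and the standardized price y returned above, ordinary
# least squares has the closed-form normal-equation solution used below.
X, y, _ = datatobeTaken()
theta = inv(X.T @ X) @ X.T @ y.values
print("intercept and slope in standardized units:", theta)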
data = | pd.read_csv("turboazmodified.csv") | pandas.read_csv |
import rba
import copy
import pandas
import time
import numpy
import seaborn
import matplotlib.pyplot as plt
from .rba_Session import RBA_Session
from sklearn.linear_model import LinearRegression
# import matplotlib.pyplot as plt
def find_ribosomal_proteins(rba_session, model_processes=['TranslationC', 'TranslationM'], external_annotations=None):
out = []
for i in model_processes:
out += [rba_session.ModelStructure.ProteinInfo.Elements[j]['ProtoID']
for j in list(rba_session.ModelStructure.ProcessInfo.Elements[i]['Composition'].keys()) if j in rba_session.ModelStructure.ProteinInfo.Elements.keys()]
if external_annotations is not None:
out += list(external_annotations['ID'])
return(list(set(out)))
def build_model_compartment_map(rba_session):
out = {rba_session.ModelStructure.ProteinInfo.Elements[i]['ProtoID']: rba_session.ModelStructure.ProteinInfo.Elements[i]['Compartment'] for i in list(
rba_session.ModelStructure.ProteinInfo.Elements.keys())}
return(out)
def build_compartment_annotations(Compartment_Annotations_external, model_protein_compartment_map):
for i in Compartment_Annotations_external.index:
if Compartment_Annotations_external.loc[i, 'ID'] in list(model_protein_compartment_map.keys()):
Compartment_Annotations_external.loc[i, 'modelproteinannotation'] = 1
else:
Compartment_Annotations_external.loc[i, 'modelproteinannotation'] = 0
Compartment_Annotations_internal = pandas.DataFrame()
Compartment_Annotations_internal['ID'] = list(model_protein_compartment_map.keys())
Compartment_Annotations_internal['ModelComp'] = list(model_protein_compartment_map.values())
Compartment_Annotations = pandas.concat(
[Compartment_Annotations_internal, Compartment_Annotations_external.loc[Compartment_Annotations_external['modelproteinannotation'] == 0, ['ID', 'ModelComp']]], axis=0)
return(Compartment_Annotations)
def build_dataset_annotations(input, ID_column, Uniprot, Compartment_Annotations, model_protein_compartment_map, ribosomal_proteins):
print('riboprots-----------------')
print(ribosomal_proteins)
out = pandas.DataFrame()
for g in list(input[ID_column]):
out.loc[g, 'ID'] = g
matches = [i for i in list(Uniprot.loc[pandas.isna(
Uniprot['Gene names']) == False, 'Gene names']) if g in i]
mass_prot = numpy.nan
if len(matches) > 0:
mass_prot = len(Uniprot.loc[Uniprot['Gene names'] == matches[0], 'Sequence'].values[0])
out.loc[g, 'AA_residues'] = mass_prot
if g in list(Compartment_Annotations['ID']):
out.loc[g, 'Location'] = Compartment_Annotations.loc[Compartment_Annotations['ID']
== g, 'ModelComp'].values[0]
in_model = 0
if g in model_protein_compartment_map.keys():
in_model = 1
is_ribosomal = 0
if g in ribosomal_proteins:
is_ribosomal = 1
out.loc[g, 'InModel'] = in_model
out.loc[g, 'IsRibosomal'] = is_ribosomal
return(out)
def build_full_annotations_from_dataset_annotations(annotations_list):
out = pandas.concat(annotations_list, axis=0)
return(out)
""" Official evaluation script for v1.0 of the ComplexWebQuestions dataset. """
import unicodedata
import re
import json
import pandas as pd
def proprocess(answer):
proc_answer = unicodedata.normalize('NFKD', answer).encode('ascii', 'ignore').decode(encoding='UTF-8')
# removing common endings such as "f.c."
proc_answer = re.sub(r'\W', ' ', proc_answer).lower().strip()
# removing The, a, an from begining of answer as proposed by SQuAD dataset answer comparison
if proc_answer.startswith('the '):
proc_answer = proc_answer[4:]
if proc_answer.startswith('a '):
proc_answer = proc_answer[2:]
if proc_answer.startswith('an '):
proc_answer = proc_answer[3:]
return proc_answer
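# Illustrative behaviour of the normalisation rules above (the examples are my own):
assert proprocess('The Beatles') == 'beatles'
assert proprocess('Arsenal F.C.') == 'arsenal f c'
assert proprocess('  An Answer  ') == 'answer'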
def compute_P1(matched_answers, golden_answer_list, pred_answer):
P1 = 0
if len(matched_answers) > 0:
P1 = 100
return P1
def compare_span_to_answer(spans, gold_answers, question, question_annotated=None):
""" Compares one answers to spans, multiple matches are possible
spans是预测里面的list
"""
if len(spans) == 0:
return []
found_answers = pd.DataFrame(columns=['span', 'answer', 'span_index'])
spans_series = pd.Series(spans)
# -*- coding: utf-8 -*-
"""
These test the private routines in types/cast.py
"""
import pytest
from datetime import datetime, timedelta, date
import numpy as np
import pandas as pd
from pandas import (Timedelta, Timestamp, DatetimeIndex,
DataFrame, NaT, Period, Series)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
maybe_convert_objects,
cast_scalar_to_array,
infer_dtype_from_scalar,
infer_dtype_from_array,
maybe_convert_string_to_object,
maybe_convert_scalar,
find_common_type)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
PeriodDtype)
from pandas.core.dtypes.common import (
is_dtype_equal)
from pandas.util import testing as tm
class TestMaybeDowncast(object):
def test_downcast_conv(self):
# test downcasting
arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
assert (np.array_equal(result, arr))
arr = np.array([8., 8., 8., 8., 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
arr = np.array([8., 8., 8., 8., 9.0000000000005])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
# GH16875 coercing of bools
ser = Series([True, True, False])
result = maybe_downcast_to_dtype(ser, np.dtype(np.float64))
expected = ser
tm.assert_series_equal(result, expected)
# conversions
expected = np.array([1, 2])
for dtype in [np.float64, object, np.int64]:
arr = np.array([1.0, 2.0], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected, check_dtype=False)
for dtype in [np.float64, object]:
expected = np.array([1.0, 2.0, np.nan], dtype=dtype)
arr = np.array([1.0, 2.0, np.nan], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected)
# empties
for dtype in [np.int32, np.float64, np.float32, np.bool_,
np.int64, object]:
arr = np.array([], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'int64')
tm.assert_almost_equal(result, np.array([], dtype=np.int64))
assert result.dtype == np.int64
def test_datetimelikes_nan(self):
arr = np.array([1, 2, np.nan])
exp = np.array([1, 2, np.datetime64('NaT')], dtype='datetime64[ns]')
res = maybe_downcast_to_dtype(arr, 'datetime64[ns]')
tm.assert_numpy_array_equal(res, exp)
exp = np.array([1, 2, np.timedelta64('NaT')], dtype='timedelta64[ns]')
res = maybe_downcast_to_dtype(arr, 'timedelta64[ns]')
tm.assert_numpy_array_equal(res, exp)
def test_datetime_with_timezone(self):
# GH 15426
ts = Timestamp("2016-01-01 12:00:00", tz='US/Pacific')
exp = DatetimeIndex([ts, ts])
res = maybe_downcast_to_dtype(exp, exp.dtype)
tm.assert_index_equal(res, exp)
res = maybe_downcast_to_dtype(exp.asi8, exp.dtype)
tm.assert_index_equal(res, exp)
class TestInferDtype(object):
def testinfer_dtype_from_scalar(self):
# Test that infer_dtype_from_scalar is returning correct dtype for int
# and float.
for dtypec in [np.uint8, np.int8, np.uint16, np.int16, np.uint32,
np.int32, np.uint64, np.int64]:
data = dtypec(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == type(data)
data = 12
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.int64
for dtypec in [np.float16, np.float32, np.float64]:
data = dtypec(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == dtypec
data = np.float(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.float64
for data in [True, False]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.bool_
for data in [np.complex64(1), np.complex128(1)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.complex_
for data in [np.datetime64(1, 'ns'), Timestamp(1),
datetime(2000, 1, 1, 0, 0)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == 'M8[ns]'
for data in [np.timedelta64(1, 'ns'), Timedelta(1),
timedelta(1)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == 'm8[ns]'
for tz in ['UTC', 'US/Eastern', 'Asia/Tokyo']:
dt = Timestamp(1, tz=tz)
dtype, val = infer_dtype_from_scalar(dt, pandas_dtype=True)
assert dtype == 'datetime64[ns, {0}]'.format(tz)
assert val == dt.value
dtype, val = infer_dtype_from_scalar(dt)
assert dtype == np.object_
assert val == dt
for freq in ['M', 'D']:
p = Period('2011-01-01', freq=freq)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from catboost import CatBoostRegressor
from tqdm import tqdm
import gc
import datetime as dt
print('Loading Properties ...')
properties2016 = pd.read_csv('../input/properties_2016.csv', low_memory = False)
properties2017 = pd.read_csv('../input/properties_2017.csv', low_memory = False)
print('Loading Train ...')
train2016 = pd.read_csv('../input/train_2016_v2.csv', parse_dates=['transactiondate'], low_memory=False)
"""
Extract sampled parameters of selected traces and prepare simulation input files with fitted parameters
Outputs:
- 2 csvs with fitting parameters for a) the single best fit and b) the n best fits
- 2 csvs with sampled parameters that can be used as input csvs for subsequent simulations (for a and b as above)
- 1 emodl with fitting parameters renamed for each grp for next simulation
- 2 batch files to submit run scenarios for either a) or b) from above
"""
import argparse
import os
import pandas as pd
import numpy as np
import sys
import subprocess
sys.path.append('../')
from load_paths import load_box_paths
from processing_helpers import *
from simulation_helpers import shell_header
from sample_parameters import make_identifier, gen_combos
def parse_args():
description = "Simulation run for modeling Covid-19"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-s",
"--stem",
type=str,
help="Name of simulation experiment"
)
parser.add_argument(
"-loc",
"--Location",
type=str,
help="Local or NUCLUSTER",
default = "Local"
)
parser.add_argument(
"--traces_to_keep_ratio",
type=int,
help="Ratio of traces to keep out of all trajectories",
default=10
)
parser.add_argument(
"--traces_to_keep_min",
type=int,
help="Minimum number of traces to keep, might overwrite traces_to_keep_ratio for small simulations",
default=5
)
parser.add_argument(
"--trace_to_run",
type=str,
choices=["ntraces", "besttrace",None],
help="Whether to run single best trace or n best traces as defined in traces_to_keep_ratio ",
default=None
)
return parser.parse_args()
def modify_emodl_and_save(exp_name,output_path):
"""Reads in emodl file and renames the parameters that had been identified in exact_sample_traces
with grp_suffix to have grp specific parameters.
Assumptions:
1 - each fitting parameters occurs once or twice the lengths as the defined groups (i.e. EMS-1 to 11)
2 - if parameters occur twice for each group, they do that in repeated order (i.e. EMS-1, EMS-1, EMS-2, EMS-2 ...)
3 - duplicated group names are not wanted and removed if accidentally added (i.e. EMS-1_EMS-1)
"""
grp_list = get_grp_list(exp_name)
grp_suffix = grp_list[-1].split('_')[0]
param_cols = pd.read_csv(os.path.join(output_path, f'fitted_parameters_besttrace.csv')).columns
param_cols = [i for i in param_cols if grp_suffix in i]
param_cols_unique = param_cols
for grp in reversed(grp_list):
param_cols_unique = [col.replace(f'_{grp}', '') for col in param_cols_unique]
param_cols_unique = list(set(param_cols_unique))
emodl_name = [file for file in os.listdir(output_path) if 'emodl' in file][0].replace('.emodl','')
emodl_name_new = f'{emodl_name}_resim'
fin = open(os.path.join(output_path, f'{emodl_name}.emodl'), "rt")
emodl_txt = fin.read()
fin.close()
emodl_chunks = emodl_txt.split('@')
sample_cols=[]
for col in param_cols_unique:
col_pos = []
for i, chunk in enumerate(emodl_chunks):
if col in chunk:
col_pos = col_pos + [i]
for i, pos in enumerate(col_pos):
#print(emodl_chunks[pos])
if len(col_pos) <len(grp_list):
sample_cols = sample_cols + [col]
if len(col_pos) == len(grp_list):
emodl_chunks[pos] = f'{emodl_chunks[pos]}_{grp_list[i]}'
if len(col_pos) == len(grp_list)*2:
"""assuming if occuring twice, its the same grp in two consecutive instances"""
grp_list_dup = [grp for grp in grp_list for i in range(2)]
emodl_chunks[pos] = f'{emodl_chunks[pos]}_{grp_list_dup[i]}'
#print(emodl_chunks[pos])
emodl_txt_new = '@'.join(emodl_chunks)
for grp in grp_list:
emodl_txt_new = emodl_txt_new.replace(f'{grp}_{grp}',f'{grp}')
fin = open(os.path.join(output_path, f'{emodl_name_new}.emodl'), "w")
fin.write(emodl_txt_new)
fin.close()
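# Toy illustration (my addition, not part of the pipeline): the function above splits the
# emodl text on '@' and appends a group suffix to every chunk that matches a fitted
# parameter before re-joining, e.g.
toy = "(param @Ki@ 0.1) (param @Ki@ 0.1)"
chunks = toy.split('@')
chunks[1] += '_EMS-1'
chunks[3] += '_EMS-2'
print('@'.join(chunks))  # (param @Ki_EMS-1@ 0.1) (param @Ki_EMS-2@ 0.1)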
def write_submission_file(trace_selection,Location, r= 'IL',model='locale'):
"""Writes batch file that copies required input csvs and emodl to the corresponding location in git_dir
Assumptions:
Running location fixed to IL for spatial model (momentarily)
"""
emodl_name = [file for file in os.listdir(output_path) if 'emodl' in file][0].replace('.emodl','')
sample_csv = f'sample_parameters_{trace_selection}.csv'
input_csv_str = f' --sample_csv {sample_csv}'
model_str = f' --model {model}'
new_exp_name = f'{exp_name}_resim_{trace_selection}'
csv_from = os.path.join(output_path, sample_csv ).replace("/","\\")
csv_to = os.path.join(git_dir,"experiment_configs","input_csv").replace ("/","\\")
emodl_from = os.path.join(output_path,emodl_name+"_resim.emodl")
emodl_to = os.path.join(git_dir,"emodl",emodl_name+"_resim.emodl").replace("/","\\")
if Location =='Local':
file = open(os.path.join(output_path, 'bat', f'00_runScenarios_{trace_selection}.bat'), 'w')
file.write(
f'copy {csv_from} {csv_to}\n'
f'copy {emodl_from} {emodl_to}\n'
f'cd {git_dir} \n python runScenarios.py -r {r} '
f'-e {str(emodl_name)}_resim.emodl -n {new_exp_name} {model_str} {input_csv_str} \npause')
file.close()
if Location =='NUCLUSTER':
csv_from = csv_from.replace("\\","/")
csv_to = csv_to.replace("\\","/")
emodl_to = emodl_to.replace("\\","/")
jobname = 'runFittedParamSim'
header = shell_header(job_name=jobname)
commands = f'\ncp {csv_from} {csv_to}\n' \
f'\ncp {emodl_from} {emodl_to}\n' \
f'\ncd {git_dir} \n python runScenarios.py -r {r} ' \
f'-e {str(emodl_name)}_resim.emodl -n {new_exp_name} {model_str} {input_csv_str}'
file = open(os.path.join(output_path,'sh', f'00_runScenarios_{trace_selection}.sh'), 'w')
file.write(header + commands)
file.close()
file = open(os.path.join(output_path, f'submit_runScenarios_{trace_selection}.sh'), 'w')
file.write(
f'cd {os.path.join(output_path,"sh")}\n'
f'sbatch 00_runScenarios_{trace_selection}.sh\n')
file.close()
def extract_sample_traces(exp_name,traces_to_keep_ratio, traces_to_keep_min):
"""Identifies parameters that vary as fitting parameters and writes them out into csvs.
Combines fitting parameters with sampled parameters to set up the 'full' simulation.
Assumption:
Parameters that should not be group specific were fixed
(they could be aggregated before fitting, which would need to be edited together with trace_selection.py)
"""
df_samples = pd.read_csv(os.path.join(output_path, 'sampled_parameters.csv'))
"""Drop parameter columns that have equal values in all scenarios (rows) to assess fitted parameters"""
nunique = df_samples.apply(pd.Series.nunique)
cols_to_drop = nunique[nunique == 1].index
df_samples = df_samples.drop(cols_to_drop, axis=1)
grp_list = get_grp_list(exp_name)
df_traces = pd.DataFrame()
for grp in grp_list:
grp_nr = grp.split('_')[-1]
grp_suffix= grp.split('_')[0]
"""Drop parameters that correspond to other regions"""
grp_channels = [i for i in df_samples.columns if grp_suffix in i]
grp_cols_to_drop = [i for i in grp_channels if grp_nr != i.split('_')[-1]]
df_samples_sub = df_samples.drop(grp_cols_to_drop, axis=1)
rank_export_df = pd.read_csv(os.path.join(output_path, f'traces_ranked_region_{str(grp_nr)}.csv'))
n_traces_to_keep = int(len(rank_export_df) / traces_to_keep_ratio)
if n_traces_to_keep < traces_to_keep_min and len(rank_export_df) >= traces_to_keep_min:
n_traces_to_keep = traces_to_keep_min
if len(rank_export_df) < traces_to_keep_min:
n_traces_to_keep = len(rank_export_df)
df_samples_sub = pd.merge(how='left', left=rank_export_df[['scen_num','norm_rank']], left_on=['scen_num'], right=df_samples_sub, right_on=['scen_num'])