prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars) |
---|---|---|
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from results.test_dicts import marker_styles, draw_order, metric_ylim, best_function, is_metric_increasing, \
metric_short_name, metric_en_name
excluded_methods = ['Simple split']
RESULTS_ROOT_DIR = 'detailed_results'
ROOT_DIR = 'std_out'
SCENARIO = ''
BOXPLOTS_ROOT_DIR = os.path.join(ROOT_DIR, 'boxplots')
BOXPLOTS_SVG_ROOT_DIR = os.path.join(BOXPLOTS_ROOT_DIR, 'svg')
DETAILED_PLOTS_ROOT_DIR = os.path.join(ROOT_DIR, 'detailed_plots')
DETAILED_PLOTS_SVG_ROOT_DIR = os.path.join(DETAILED_PLOTS_ROOT_DIR, 'svg')
LATEX_ROOT_DIR = os.path.join(ROOT_DIR, 'latex')
CSV_ROOT_DIR = os.path.join(ROOT_DIR, 'csv')
# PLOTS_ROOT_DIR = os.path.join(ROOT_DIR, 'plots')
def read_results(root_dir):
oracle_dfs = []
metrics_dfs = []
for dataset in os.listdir(root_dir):
for file in os.listdir(os.path.join(root_dir, dataset)):
if os.path.isdir(os.path.join(root_dir, dataset, file)): # oracle metrics
for oracle_file in os.listdir(os.path.join(root_dir, dataset, file)):
df = pd.read_csv(os.path.join(root_dir, dataset, file, oracle_file))
oracle_dfs.append(df)
else: # method metrics
df = pd.read_csv(os.path.join(root_dir, dataset, file))
metrics_dfs.append(df)
oracle_df = pd.concat(oracle_dfs)
metrics_df = | pd.concat(metrics_dfs) | pandas.concat |
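This first record walks per-dataset result folders and stacks the collected CSVs with `pandas.concat`. A minimal sketch of the same pattern follows; the helper name, the `.csv` filter, and the idea of keying rows by their dataset folder are illustrative assumptions, not part of the record (which additionally separates oracle subfolders).

```python
import os
import pandas as pd

def concat_metric_csvs(root_dir):
    # Collect the CSVs found under each dataset folder.
    frames = {}
    for dataset in os.listdir(root_dir):
        for name in os.listdir(os.path.join(root_dir, dataset)):
            if name.endswith('.csv'):
                frames.setdefault(dataset, []).append(
                    pd.read_csv(os.path.join(root_dir, dataset, name)))
    # Concatenate within each dataset, then across datasets with keys so every
    # row keeps track of which dataset folder it came from.
    per_dataset = {k: pd.concat(v, ignore_index=True) for k, v in frames.items()}
    return pd.concat(per_dataset, names=['dataset', 'row'])
```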
import sys
from intopt_energy_mlp import intopt_energy
sys.path.insert(0,'../..')
sys.path.insert(0,"../../Interior")
sys.path.insert(0,"../../EnergyCost")
from intopt_energy_mlp import *
from KnapsackSolving import *
from get_energy import *
from ICON import *
import itertools
import scipy as sp
import numpy as np
import time,datetime
import pandas as pd
import logging
from scipy.stats import poisson
from get_energy import get_energy
import time,datetime
import logging
from get_energy import get_energy
import time,datetime
import logging
from scipy.stats import expon
from scipy.stats import beta
from scipy.stats import poisson
if __name__ == '__main__':
formatter = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(filename='ICONexp.log', level=logging.INFO,format=formatter)
(X_1gtrain, y_train, X_1gtest, y_test) = get_energy("prices2013.dat")
X_1gvalidation = X_1gtest[0:2880,:]
y_validation = y_test[0:2880]
y_test= y_test[2880:]
X_1gtest = X_1gtest[2880:,:]
weights = [[1 for i in range(48)]]
weights = np.array(weights)
X_1gtrain = X_1gtrain[:,1:]
X_1gvalidation = X_1gvalidation[:,1:]
X_1gtest = X_1gtest[:,1:]
file = "../../EnergyCost/load1/instance34.txt"
param = data_reading(file)
## twostage
# clf = twostage_energy(input_size=X_1gtrain.shape[1], param=param,hidden_size=1,
# optimizer= optim.SGD, lr=0.1,num_layers=1,epochs=3,validation_relax=False)
# clf.fit(X_1gtrain,y_train)
# test_rslt = clf.validation_result(X_1gtest,y_test)
#
# two_stage_rslt = {'model':'Two-stage','MSE-loss':test_rslt [1],'Regret':test_rslt[0]}
#
# # SPO
# clf = SPO_energy(input_size=X_1gtrain.shape[1], param=param,hidden_size=1,
# optimizer= optim.Adam, lr=0.7,num_layers=1,epochs=5,validation_relax=False)
# clf.fit(X_1gtrain,y_train)
# test_rslt = clf.validation_result(X_1gtest,y_test)
# spo_rslt = {'model':'SPO','MSE-loss':test_rslt [1],'Regret':test_rslt[0] }
## Intopt HSD
clf = intopt_energy(input_size=X_1gtrain.shape[1], param=param,hidden_size=1,
optimizer= optim.Adam, lr=0.7,num_layers=1,epochs=8,
damping= 1e-6,thr = 0.1,validation_relax=False)
clf.fit(X_1gtrain,y_train)
test_rslt = clf.validation_result(X_1gtest,y_test)
intopt_rslt = {'model':'IntOpt','MSE-loss':test_rslt [1],'Regret':test_rslt[0]}
# QPT
clf = qptl_energy(input_size=X_1gtrain.shape[1], param=param,hidden_size=1,num_layers=1,
optimizer= optim.Adam, lr=0.1,epochs= 6,tau=100000,validation_relax=False)
clf.fit(X_1gtrain,y_train,X_test= X_1gtest,y_test= y_test)
test_rslt = clf.validation_result(X_1gtest,y_test)
qpt_rslt = {'model':'QPTL','MSE-loss':test_rslt [1],'Regret':test_rslt[0] }
rslt = pd.DataFrame([intopt_rslt, qpt_rslt])  # the layer-0 two-stage and SPO runs are commented out above, so only these two results exist here
with open("layer0Result.csv", 'a') as f:
rslt.to_csv(f,index=False, header=f.tell()==0)
# layer-1
# twostage
clf = twostage_energy(input_size=X_1gtrain.shape[1], param=param,hidden_size=100,
optimizer= optim.Adam, lr=0.01,num_layers=2,epochs=15,validation_relax=False)
clf.fit(X_1gtrain,y_train)
test_rslt = clf.validation_result(X_1gtest,y_test)
two_stage_rslt = {'model':'Two-stage','MSE-loss':test_rslt [1],'Regret':test_rslt[0]}
# SPO
clf = SPO_energy(input_size=X_1gtrain.shape[1], param=param,hidden_size=100,
optimizer= optim.Adam, lr=0.1,num_layers=2,epochs=5,validation_relax=False)
clf.fit(X_1gtrain,y_train)
test_rslt = clf.validation_result(X_1gtest,y_test)
spo_rslt = {'model':'SPO','MSE-loss':test_rslt [1],'Regret':test_rslt[0] }
# Intopt HSD
clf = intopt_energy(input_size=X_1gtrain.shape[1], param=param,hidden_size=100,
optimizer= optim.Adam, lr=0.1,num_layers=2,epochs=8,
damping=0.00001,thr = 0.1,validation_relax=False)
clf.fit(X_1gtrain,y_train)
test_rslt = clf.validation_result(X_1gtest,y_test)
intopt_rslt = {'model':'IntOpt','MSE-loss':test_rslt [1],'Regret':test_rslt[0] }
# QPT
clf = qptl_energy(input_size=X_1gtrain.shape[1], param=param,hidden_size=100,num_layers=2,
optimizer= optim.Adam, lr=0.1,epochs=6,tau=100000,validation_relax=False)
clf.fit(X_1gtrain,y_train)
test_rslt = clf.validation_result(X_1gtest,y_test)
qpt_rslt = {'model':'QPTL','MSE-loss':test_rslt [1],'Regret':test_rslt[0] }
rslt= | pd.DataFrame([two_stage_rslt, spo_rslt,qpt_rslt,intopt_rslt]) | pandas.DataFrame |
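This record appends each batch of model results to a CSV, writing the header only when the file is still empty via `f.tell() == 0`. A small sketch of that append-once-header pattern; the helper name, file name, and toy result dict are assumptions for illustration.

```python
import pandas as pd

def append_results(rows, path):
    # 'rows' is a list of result dicts such as {'model': 'IntOpt', 'MSE-loss': 1.2, 'Regret': 3.4}.
    df = pd.DataFrame(rows)
    with open(path, 'a') as f:
        # f.tell() == 0 only when the file is still empty, so the header is written exactly once.
        df.to_csv(f, index=False, header=f.tell() == 0)

append_results([{'model': 'IntOpt', 'MSE-loss': 1.2, 'Regret': 3.4}], 'layer_results_demo.csv')
```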
import json
import pandas as pd
import time
"""
需要一下文件:
1、预测的json:bbox_level{}_test_results.json
2、test集的json:test.json
3、sample_submission.csv
"""
LABLE_LEVEL = 4
SCORE_THRESHOLD = 0.001
def json_to_dict(json_file_dir):
with open(json_file_dir, "r") as json_file:
json_dict = json.load(json_file)
json_file.close()
return json_dict
def get_threshold_result_list(label_level=LABLE_LEVEL, score_threshold=SCORE_THRESHOLD):
detect_result_list = json_to_dict('bbox_level{}_test_results.json'.format(label_level))  # detect_result_list is a list of dicts, e.g. {'image_id': 2020005391, 'category_id': 43, 'bbox': [150.59866333007812, 332.810791015625, 370.6794128417969, 480.145263671875], 'score': 0.007447981275618076}
result_Threshold_list = []
for result in detect_result_list:
if result['score'] > score_threshold:
result_Threshold_list.append(result)
print("There are {} bboxes".format(len(result_Threshold_list)))
return result_Threshold_list
def get_images_categories_info(label_level=LABLE_LEVEL):
image_name_id_dict = {}
image_id_name_dict = {}
image_id_WH_dict = {}
original_id_dict = {}
id_original_dict = {}
images_and_categories_dict = json_to_dict('test.json')
images = images_and_categories_dict['images']
categories = images_and_categories_dict['categories']
for i in images:
image_name_id_dict[i['file_name']] = i['id']
image_id_name_dict[i['id']] = i['file_name']
image_id_WH_dict[i['id']] = [i["width"], i["height"]]  # a [width, height] list
for i in categories:
original_id_dict[i["original_id"]] = i['id']
id_original_dict[i['id']] = i['original_id']
return image_name_id_dict, image_id_name_dict, image_id_WH_dict, original_id_dict, id_original_dict
def write_jsonresult_to_csv():
ImageId = []
PredictionString = []
result_Threshold_list = get_threshold_result_list(LABLE_LEVEL, SCORE_THRESHOLD)
_, image_id_name_dict, image_id_WH_dict, _, id_original_dict = get_images_categories_info(label_level=LABLE_LEVEL)
for bbox in result_Threshold_list:
image_id = bbox['image_id']
ImageId.append(image_id_name_dict[image_id][:-4])
image_W = image_id_WH_dict[image_id][0]
image_H = image_id_WH_dict[image_id][1]
bbox_xmin = bbox['bbox'][0]/image_W
bbox_ymin = bbox['bbox'][1]/image_H
bbox_xmax = (bbox['bbox'][0] + bbox['bbox'][2])/image_W
bbox_ymax = (bbox['bbox'][1] + bbox['bbox'][3])/image_H
original_label = id_original_dict[bbox['category_id']]
Confidence = bbox['score']
predictionstring = original_label + ' ' + str(Confidence) + ' ' + str(bbox_xmin)+ ' ' + str(bbox_ymin)+ ' ' + str(bbox_xmax)+ ' ' + str(bbox_ymax) + ' '
PredictionString.append(predictionstring)
print(len(ImageId))
print(len(PredictionString))
sample_csv = pd.read_csv('sample_submission.csv')
sample_csv["PredictionString"] = ""
# sample_ImageId = sample_csv["ImageId"].values.tolist()
series_imageid = pd.Series(ImageId)
series_predictionstring = pd.Series(PredictionString)
# Write the results and merge them by image name
data = {'ImageId':series_imageid, "PredictionString":series_predictionstring}
df = pd.DataFrame(data)
df = pd.concat([sample_csv, df], ignore_index=True)
df = df.groupby(by="ImageId")['PredictionString'].sum()
series_imageid = df.index.tolist()
series_imageid = | pd.Series(series_imageid) | pandas.Series |
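The record above turns per-box detections into one prediction string per image by concatenating strings and then summing them within a `groupby`. A toy sketch of that string-concatenation-by-group step; the image ids and prediction strings below are made up.

```python
import pandas as pd

# Made-up per-box rows: one prediction string per detected box.
rows = pd.DataFrame({
    'ImageId': ['img_1', 'img_1', 'img_2'],
    'PredictionString': ['/m/01 0.9 0.1 0.1 0.5 0.5 ',
                         '/m/02 0.4 0.2 0.2 0.6 0.6 ',
                         '/m/01 0.8 0.3 0.3 0.7 0.7 '],
})
# Summing object-dtype strings concatenates them, leaving one row per image.
submission = rows.groupby('ImageId')['PredictionString'].sum().reset_index()
print(submission)
```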
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
from enum import Enum
from typing import Union, Iterable
import datetime as dt
from gs_quant.data import Dataset
import pandas as pd
import pytz
import numpy as np
from gs_quant.data import DataFrequency
class MissingDataStrategy(Enum):
fill_forward = 'fill_forward'
interpolate = 'interpolate'
fail = 'fail'
class DataSource:
def get_data(self, state):
raise RuntimeError("Implemented by subclass")
class GsDataSource(DataSource):
def __init__(self, data_set: str, asset_id: str, min_date: datetime.date = None, max_date: datetime.date = None,
value_header: str = 'rate'):
self._data_set = data_set
self._asset_id = asset_id
self._min_date = min_date
self._max_date = max_date
self._value_header = value_header
self._loaded_data = None
def get_data(self, state: Union[datetime.date, datetime.datetime] = None):
if self._loaded_data is None:
ds = Dataset(self._data_set)
if self._min_date:
self._loaded_data = ds.get_data(self._min_date, self._max_date, assetId=(self._asset_id,))
else:
return ds.get_data(state, state, assetId=(self._asset_id,))[self._value_header]
return self._loaded_data[self._value_header].at[pd.to_datetime(state)]
class GenericDataSource(DataSource):
def __init__(self, data_set: pd.Series, missing_data_strategy: MissingDataStrategy = MissingDataStrategy.fail):
"""
A data source which holds a pandas series indexed by date or datetime
:param data_set: a pandas dataframe indexed by date or datetime
:param missing_data_strategy: MissingDataStrategy which defines behaviour if data is missing, will only take
effect if using get_data, gat_data_range has no expectations of the number of
expected data points.
"""
self._data_set = data_set
self._missing_data_strategy = missing_data_strategy
self._tz_aware = isinstance(self._data_set.index[0],
datetime.datetime) and self._data_set.index[0].tzinfo is not None
if self._missing_data_strategy == MissingDataStrategy.interpolate:
self._data_set = self._data_set.interpolate()  # interpolate() returns a new object, so assign it back
elif self._missing_data_strategy == MissingDataStrategy.fill_forward:
self._data_set = self._data_set.ffill()  # likewise, ffill() is not in-place
def get_data(self, state: Union[datetime.date, datetime.datetime, Iterable]):
"""
Get the value of the dataset at a time or date. If a list of dates or times is provided return the avg value
:param state: a date, datetime or a list of dates or datetimes
:return: float value
"""
if isinstance(state, Iterable):
return [self.get_data(i) for i in state]
if self._tz_aware:
state = pytz.utc.localize(state)
if pd.Timestamp(state) in self._data_set:
return self._data_set[ | pd.Timestamp(state) | pandas.Timestamp |
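`GenericDataSource` above wraps a date- or datetime-indexed Series and looks up (or averages over) values on demand. A minimal usage sketch with toy values, assuming the masked final line completes the `self._data_set[pd.Timestamp(state)]` lookup:

```python
import datetime
import pandas as pd

# Toy daily series standing in for real market data.
series = pd.Series([1.00, 1.10, 1.20],
                   index=pd.to_datetime(['2021-01-04', '2021-01-05', '2021-01-06']))
source = GenericDataSource(series, MissingDataStrategy.fill_forward)

print(source.get_data(datetime.date(2021, 1, 5)))      # single-date lookup -> 1.10
print(source.get_data([datetime.date(2021, 1, 4),
                       datetime.date(2021, 1, 6)]))     # list input -> list of values
```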
'''
Created on Jan 11, 2016
@author: jch
'''
import numpy
import pandas
from collections import Mapping, OrderedDict
from blocks.log.log import TrainingLogBase
class _TimeSlice(Mapping):
def __init__(self, time, log):
self._time = time
self._columns = log._columns
assert isinstance(self._columns, OrderedDict)
def __getitem__(self, item):
ndarr = self._columns[item]
time = self._time
idx = ndarr['idx'].searchsorted(time)
if idx < ndarr.shape[0]:
row = ndarr[idx]
if row['idx'] == time:
return row['val']
raise KeyError
def __iter__(self):
time = self._time
for k, ndarr in self._columns.iteritems():
times = ndarr['idx']
idx = times.searchsorted(time)
if idx < times.shape[0] and times[idx] == time:
yield k
def __len__(self):
l = 0
time = self._time
for ndarr in self._columns.itervalues():
times = ndarr['idx']
idx = times.searchsorted(time)
if idx < times.shape[0] and times[idx] == time:
l += 1
return l
class NDarrayLog(TrainingLogBase):
"""Better Training log
Columns are stored as ndarrays. Binary search is used to find
historical times.
"""
def get_dtype(self, obj):
if hasattr(obj, 'dtype'):
return (obj.dtype, obj.shape)
DTYPES = {
int: numpy.int,
float: numpy.float,
bool: numpy.bool}
return DTYPES.get(type(obj), numpy.dtype('object'))
def __init__(self):
self._columns = OrderedDict()
self._col_tops = {}
self.status = {}
self._current_time = 0
self._current_dict = {}
TrainingLogBase.__init__(self)
def __getitem__(self, time):
self._check_time(time)
if time == self._current_time:
return self._current_dict
elif time > self._current_time:
# Append the last value to column arrays
for k, v in self._current_dict.iteritems():
if k in self._columns:
col = self._columns[k]
if col.dtype[1] != self.get_dtype(v):
new_dtype = [
('idx', col.dtype[0]),
('val', numpy.promote_types(col.dtype[1],
self.get_dtype(v)))
]
self._columns[k] = col.astype(new_dtype, copy=False)
col = self._columns[k]
idx = self._col_tops[k]
self._col_tops[k] = idx + 1
if idx >= col.shape[0]:
col2 = numpy.empty(int(1.3 * idx), col.dtype)  # grow by ~30%; the array size must be an int
col2[:idx] = col
col2[idx:]['idx'] = 2147483647
col = col2
self._columns[k] = col2
col[idx] = (self._current_time, v)
else:
self._columns[k] = numpy.empty(
(10,),
dtype=[('idx', numpy.int32),
('val', self.get_dtype(v))])
self._columns[k]['idx'][:] = 2147483647
self._columns[k][0] = (self._current_time, v)
self._col_tops[k] = 1
self._current_time = time
self._current_dict = {}
return self._current_dict
else:
return _TimeSlice(time, self)
def __setitem__(self, time, value):
self._check_time(time)
if time == self._current_time:
self._current_dict = value
else:
raise KeyError("Can't modify log entries for the past")
def to_pandas(self):
"""
Return a pandas DataFrame view of the log.
"""
# Write down the last record
if self._current_dict:
# Executes if self._current_dict has uncommitted changes
unused_dict = self[self._current_time + 1]
series = {}
for name, col in self._columns.iteritems():
col = col[:self._col_tops[name]].copy()
if col['val'].ndim == 1:
dtype = col['val'].dtype
data = col['val']
else:
dtype = 'object'
data = list(col['val'])
s = | pandas.Series(data, index=col['idx'], dtype=dtype) | pandas.Series |
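`_TimeSlice` and `NDarrayLog` rely on binary search (`searchsorted`) over the sorted `'idx'` field of a structured array to find historical entries. A self-contained sketch of that lookup, with made-up times and values:

```python
import numpy as np

# A structured column like the ones NDarrayLog keeps: sorted 'idx' times plus values.
col = np.array([(0, 1.5), (3, 2.5), (7, 4.0)],
               dtype=[('idx', np.int32), ('val', np.float64)])

def lookup(col, time):
    # Binary search on the sorted 'idx' field: O(log n) per historical lookup.
    i = col['idx'].searchsorted(time)
    if i < col.shape[0] and col[i]['idx'] == time:
        return col[i]['val']
    raise KeyError(time)

print(lookup(col, 3))   # -> 2.5
```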
### Twitter Data Tools
## <NAME>
## Created: 8/15/2018
## Updated: 8/23/2018
import os
import re
import sys
import math
import nltk
import errno
import tarfile
import unidecode
import numpy as np
import pandas as pd
import subprocess as sb
def get_id_sets(data):
parent = list(data['tweet']['tweet_id']['parent'].keys())
retweet = list(data['tweet']['tweet_id']['retweet'].keys())
reply = list(data['tweet']['tweet_id']['reply'].keys())
replies = []
for i in reply:
replies.extend(data['tweet']['tweet_id']['reply'][i])
replies = np.unique(replies)
normal = list(set(parent) - set(retweet) - set(reply) - set(replies))
sets_ids = [retweet,reply,replies,normal]
sets_label = ['retweet','reply','replies','normal']
tweet_id_sets = {}
code = {}
for i, j in enumerate(sets_label):
tweet_id_sets[j] = sets_ids[i]
for k in sets_ids[i]:
if k not in code.keys():
code[k] = np.zeros(len(sets_label),dtype=int)
code[k][i] = 1
else:
code[k][i] = 1
tweet_id_sets['set-code'] = code
tweet_id_sets['set-label'] = sets_label
return tweet_id_sets
def read(STDVID,date_range):
try:
directory = '/twitter-tweets-stream/'+STDVID+'-processed/'
filename = 'keywordStream'+STDVID+'Tweets_'+date_range+'_processed.npy.tar.gz'
filename_tar = directory+filename
filename_tar_directory = os.path.abspath(os.path.join(os.getcwd(), os.pardir))+filename_tar
file0_tar = tarfile.open(filename_tar_directory)
file0_tar.extract(filename.replace('.tar.gz',''))
out = np.load(filename.replace('.tar.gz',''),allow_pickle=True).item()
os.remove(filename.replace('.tar.gz',''))
file0_tar.close()
except FileNotFoundError:
try:
directory = '/raw-data/twitter-tweets-stream/'+STDVID+'-processed/'
filename = 'keywordStream'+STDVID+'Tweets_'+date_range+'_processed.npy.tar.gz'
filename_tar = directory+filename
filename_tar_directory = os.path.abspath(os.path.join(os.getcwd(), os.pardir))+filename_tar
file0_tar = tarfile.open(filename_tar_directory)
file0_tar.extract(filename.replace('.tar.gz',''))
out = np.load(filename.replace('.tar.gz',''),allow_pickle=True).item()
os.remove(filename.replace('.tar.gz',''))
file0_tar.close()
except:
print('Error: The '+STDVID+'-processed directory can not be found or the dataset does not exist anywhere.')
print('Please refer to the README.md file.')
try:
sys.exit()
except SystemExit:
sys.exit
# tweet id sets
tweet_id_sets = get_id_sets(out)
return out, tweet_id_sets
def tweet_id_categories(table,dtype=str):
tweet_ids = {}
if dtype != str:
tweet_ids['retweets'] = table.index[table['RTT'] == 1].tolist()
tweet_ids['replied'] = table.index[table['RPT'] == 1].tolist()
tweet_ids['replies'] = table.index[table['TRP'] == 1].tolist()
tweet_ids['normal'] = table.index[table['NRT'] == 1].tolist()
else:
tweet_ids['retweets'] = table.index[table['RTT'] == '1'].tolist()
tweet_ids['replied'] = table.index[table['RPT'] == '1'].tolist()
tweet_ids['replies'] = table.index[table['TRP'] == '1'].tolist()
tweet_ids['normal'] = table.index[table['NRT'] == '1'].tolist()
return tweet_ids
def read_csv(stream,date_range,dtype=str,subset=False,which_set='subset',which_set_params=[]):
out_T = None
out_U = None
if subset == False:
try:
directory_1 = stream+'-tabulated/'
pathway_T = os.path.abspath(os.path.join(os.getcwd(), os.pardir))+directory_1+stream+'-processed-'+date_range+'_tabulated-tweet.csv.gz'
pathway_U = os.path.abspath(os.path.join(os.getcwd(), os.pardir))+directory_1+stream+'-processed-'+date_range+'_tabulated-user.csv.gz'
out_T = | pd.read_csv(pathway_T,compression='gzip',sep=',',index_col=0,header=0,dtype=str) | pandas.read_csv |
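`read_csv` in this record loads gzipped, tabulated tweet files with `dtype=str` so that 64-bit tweet and user IDs are not coerced to floats. A short sketch of the same call; the file path is hypothetical, and it assumes the tabulated file carries the RTT/RPT/TRP/NRT flag columns that the record's `tweet_id_categories` helper expects.

```python
import pandas as pd

# Hypothetical path; dtype=str keeps 64-bit tweet/user IDs from being mangled into floats.
path = 'stream-tabulated/stream-processed-2020-01-01_tabulated-tweet.csv.gz'
table = pd.read_csv(path, compression='gzip', sep=',', index_col=0, header=0, dtype=str)
# Reuses the helper defined in the record above.
ids_by_category = tweet_id_categories(table, dtype=str)
```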
import requests
from bs4 import BeautifulSoup
import pandas as pd
import datetime
import os
def crawling(id_, page, lastupdate=None):
headers = {
'authority': 'feedback.aliexpress.com',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'origin': 'https://feedback.aliexpress.com',
'content-type': 'application/x-www-form-urlencoded',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-gpc': '1',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'navigate',
'sec-fetch-user': '?1',
'sec-fetch-dest': 'document',
'referer': 'https://feedback.aliexpress.com/display/productEvaluation.htm',
'accept-language': 'zh-CN,zh;q=0.9',
}
data = {
'ownerMemberId': '235231370',
'memberType': 'seller',
'productId': str(id_),
'companyId': '',
'evaStarFilterValue': 'all Stars',
'evaSortValue': 'sortlarest@feedback',
'page': str(page),
'currentPage': str(page),
'startValidDate': '',
'i18n': 'true',
'withPictures': 'false',
'withAdditionalFeedback': 'false',
'onlyFromMyCountry': 'false',
'version': '',
'isOpened': 'true',
'translate': ' Y ',
'jumpToTop': 'true',
'v': '2'
}
response = requests.post('https://feedback.aliexpress.com/display/productEvaluation.htm', headers=headers, data=data)
soup = BeautifulSoup(response.content, 'html.parser')
if lastupdate != None:
dates = [datetime.datetime.strptime(temp.text, '%d %b %Y %H:%M').replace(hour=0, minute=0) for temp in soup.find_all('span', class_='r-time-new') if datetime.datetime.strptime(temp.text, '%d %b %Y %H:%M') >= lastupdate]
else:
dates = [datetime.datetime.strptime(temp.text, '%d %b %Y %H:%M').replace(hour=0, minute=0) for temp in soup.find_all('span', class_='r-time-new')]
countries = [temp.text for temp in soup.find_all('div', class_='user-country')][:len(dates)]
return dates, countries
def save_file(company, name, id_, df, now):
idx = | pd.date_range(df.index[0], df.index[-1]) | pandas.date_range |
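`save_file` starts by building a complete daily index with `pd.date_range` over the crawled review dates (the rest of the function is masked). A toy sketch of counting reviews per day and reindexing them onto that full range so missing days show up as zeros; the dates below are made up.

```python
import pandas as pd

# Toy review dates standing in for the values returned by crawling().
dates = pd.to_datetime(['2021-07-01', '2021-07-01', '2021-07-04'])
counts = pd.Series(dates).value_counts().sort_index()
# Reindex onto the complete daily range so days with no reviews appear as zero.
full_idx = pd.date_range(counts.index[0], counts.index[-1])
daily_counts = counts.reindex(full_idx, fill_value=0)
print(daily_counts)   # 2021-07-01: 2, then two zero days, 2021-07-04: 1
```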
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import numpy as np
import pandas as pd
import warnings
from sklearn.linear_model import LinearRegression
import scipy.cluster.hierarchy as sch
import datetime
import random
class backtest_model:
"""
Given a user-defined portfolio construction strategy (a function that takes in stock-related data and returns portfolio weights) and
the data that the user wishes to test the strategy on, calculate several evaluation metrics of the portfolio, including
net_returns, sharpe ratio, certainty equivalent returns, turnover, etc.
Various inputs can be modified to suit the needs of strategy and backtesting scenarios, such as price-impact models,
transaction costs, etc.
Initiate the model with the strategy function, and clarify involved data types needed, whose sequence MUST be consistent
with that of the list of dataframes used inside strategy function
:param strategy: user-defined function that serves as portfolio construction strategy
:type strategy: function
:param involved_data_type: a list of strings that indicate the type of data {'price','return','ex_return'} used in the strategy, the order of the strings will be the order that data are passed to the strategy
:type involved_data_type: list
:param need_extra_data: indicate whether the strategy need extra_data (data other than {'price','return','ex_return'}) to function. Note: 1. the datetime index of extra_data must match that of the provided data. 2. change-of-frequency functionality will be suspended if extra data is needed
:type need_extra_data: bool
:param trace_back: indicate whether the strategy need to trace back to past portfolios to function. Note: please handle the boundary situation where past portfolios is empty in the strategy function
:type trace_back: bool
:param name: name of the strategy to be tested
:type name: str
:param missing_val : indicate whether user strategy function can handle missing values in the data on its own. True means the function can deal with missing values. False means it cannot
:type missing_val: bool
"""
def __init__(self, strategy, involved_data_type, need_extra_data=False, trace_back=False, name='Unnamed', missing_val=False):
"""
Initiate the model with the strategy function, and clarify involved data types needed, whose sequence MUST be consistent
with that of the list of dataframes used inside strategy function
:param strategy: user-defined function that serves as portfolio construction strategy
:type strategy: function
:param involved_data_type: a list of strings that indicate the type of data {'price','return','ex_return'} used in the strategy, the order of the strings will be the order that data are passed to the strategy
:type involved_data_type: list
:param need_extra_data: indicate whether the strategy need extra_data (data other than {'price','return','ex_return'}) to function. Note: 1. the datetime index of extra_data must match that of the provided data. 2. change-of-frequency functionality will be suspended if extra data is needed
:type need_extra_data: bool
:param trace_back: indicate whether the strategy need to trace back to past portfolios to function. Note: please handle the boundary situation where past portfolios is empty in the strategy function
:type trace_back: bool
:param name: name of the strategy to be tested
:type name: str
:param missing_val : indicate whether user strategy function can handle missing values in the data on its own. True means the function can deal with missing values. False means it cannot. A wrapper function would be applied to the strategy function to deal with missing data. It will only pass in columns with full data and assign to other assets weight 0 while keeping the relative position the same. Warning: 1. The wrapper will slow the running speed significantly. 2. The wrapper does not cover missing data in "extra_data"..
:type missing_val: bool
"""
def wrapper(function, list_df, extra_data=pd.DataFrame(), historical_portfolios=pd.DataFrame()):
length = list_df[0].shape[1]
for frame in list_df:
if length >= len(frame.columns[frame.isna().any() == False]):
length = len(frame.columns[frame.isna().any() == False])
position_nan = frame.isna().any().values
w = np.zeros(list_df[0].shape[1])
if need_extra_data:
if trace_back:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df],extra_data, historical_portfolios)
else:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df],extra_data)
else:
if trace_back:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df],historical_portfolios)
else:
w[position_nan == False] = function([frame[frame.columns[position_nan == False]] for frame in list_df])
return w
if not missing_val:
if name not in ['naive allocation portfolio',
'inverse variance allocation portfolio',
'min. variance allocation portfolio',
'basic mean-variance allocation portfolio',
'Fama-French 3-factor model portfolio',
'hierarchical-risk-parity portfolio',
'Bayes_Stein_shrinkage portfolio']:
warnings.warn('The library will deal with missing data. Running speed will be significantly reduced!')
if need_extra_data:
if trace_back:
self.__strategy = lambda x,y,z: wrapper(strategy, x,extra_data=y,historical_portfolios=z)
else:
self.__strategy = lambda x,y: wrapper(strategy, x,extra_data=y)
else:
if trace_back:
self.__strategy = lambda x,z: wrapper(strategy, x,historical_portfolios=z)
else:
self.__strategy = lambda x: wrapper(strategy, x)
else:
self.__strategy = strategy
if type(involved_data_type) != list:
raise Exception('"involved_data_type" must be given in a list')
else:
self.__involved_data_type = involved_data_type
if type(need_extra_data) != bool:
raise Exception('"need_extra_data" must be a bool variable')
else:
self.__need_extra_data = need_extra_data
if type(trace_back) != bool:
raise Exception('"trace_back" must be a bool variable')
else:
self.__trace_back = trace_back
if type(name) != str:
raise Exception('"name" must be a string variable')
else:
self.name = name
self.__last_test_frequency = None
self.__last_test_portfolios = None
self.__price_impact = False
self.__sharpe = None
self.__ceq = None
self.__average_turnover = None
self.__total_turnover = None
self.__net_returns = None
self.__net_excess_returns = None
# function to prepare data, including change of frequency, convert between price, return and ex_return
def __prepare_data(self, data, freq_data, data_type, rf, interval, window, freq_strategy,
volume=pd.DataFrame(), price_impact=False):
if not isinstance(data, pd.DataFrame):
raise Exception('Please provide correct format of test data!')
try:
data.index = pd.to_datetime(data.index)
except:
print(
'Invalid index provided in your test data, please make sure that index is in compatible datetime format')
volume.index = pd.to_datetime(volume.index)
data = data.copy()
if data_type == 'return':
if freq_data != freq_strategy:
warnings.warn(
'data_type==return with interval>1 or change of frequency, Expect large amount of computational error')
data['###rf'] = rf # add 'rf' to the dataframe to go through transformation together
data = (1 + data).apply(lambda x: np.cumprod(x))
data = data.resample(freq_strategy).ffill().fillna(method='ffill').pct_change(fill_method=None).dropna(axis=0, how='all')
normal_return_df = data.iloc[:,:-1]
risk_free_df=data.iloc[:,-1]
excess_return_df = normal_return_df.sub(risk_free_df.values, axis=0).dropna(axis=0, how='all')
return (normal_return_df, excess_return_df, risk_free_df,
pd.DataFrame(index=normal_return_df.index))
else:
normal_return_df = data
excess_return_df = normal_return_df.sub(rf.values, axis=0)
return (normal_return_df, excess_return_df, rf.loc[normal_return_df.index],
pd.DataFrame(index=normal_return_df.index))
elif data_type == 'ex_return':
if freq_data != freq_strategy:
warnings.warn(
'data_type==ex_return with interval>1 or change of frequency, Expect large amount of computational error')
data = data.add(rf, axis=0)
data['###rf'] = rf # add 'rf' to the dataframe to go through transformation together
data = (1 + data).apply(lambda x: np.cumprod(x))
data = data.resample(freq_strategy).ffill().fillna(method='ffill').pct_change(fill_method=None).dropna(axis=0, how='all')
normal_return_df = data.iloc[:, :-1]
risk_free_df = data.iloc[:, -1]
excess_return_df = normal_return_df.sub(risk_free_df.values, axis=0).dropna(axis=0, how='all')
return (normal_return_df, excess_return_df, risk_free_df,
pd.DataFrame(index=normal_return_df.index))
else:
excess_return_df = data
normal_return_df = excess_return_df.add(rf, axis=0)
return (normal_return_df, excess_return_df, rf.loc[normal_return_df.index],
pd.DataFrame(index=normal_return_df.index))
elif data_type == 'price':
#data['###rf'] = rf # add 'rf' to the dataframe to go through transformation together
rf_df=np.cumprod(1+rf)
if freq_data != freq_strategy:
data = data.resample(freq_strategy).ffill().fillna(method='ffill')
rf_df=rf_df.resample(freq_strategy).ffill().fillna(method='ffill')
if price_impact:
volume = volume.resample(freq_strategy).mean()
normal_return_df = data.pct_change(fill_method=None).dropna(axis=0, how='all')
risk_free_df=rf_df.pct_change(fill_method=None).dropna(axis=0,how='all').loc[normal_return_df.index]
excess_return_df = normal_return_df.sub(risk_free_df.values, axis=0)
if price_impact:
return (normal_return_df, excess_return_df, volume.loc[normal_return_df.index],
risk_free_df,
data.loc[normal_return_df.index])
else:
return (normal_return_df, excess_return_df, risk_free_df,
data.loc[normal_return_df.index])
# rebalance function to be applied to each rolling window of length (window)
def __rebalance(self, ex_return_df, normal_return_df, price_df, window, extra_data=None):
historical_portfolios = []
map = {'price': price_df, 'ex_return': ex_return_df, 'return': normal_return_df}
if self.__need_extra_data:
if self.__trace_back:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type],
extra_data.loc[df.index],
historical_portfolios))
else:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type],
extra_data.loc[df.index]))
else:
if self.__trace_back:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type],
historical_portfolios))
else:
for df in ex_return_df.rolling(window):
if df.shape[0] >= window:
historical_portfolios.append(
self.__strategy([map[i].loc[df.index] for i in self.__involved_data_type]))
return historical_portfolios
def __test_price_impact(self, data, freq_data, data_type, rf, interval, window, freq_strategy, ptc_buy,
ptc_sell, ftc, volume, c, initial_wealth, extra_data, price_impact_model='default',power=0.6):
# prepare data
normal_return_df, excess_return_df, volume, risk_free_rate, price_df = self.__prepare_data(data, freq_data,
data_type, rf,
interval, window,
freq_strategy,
volume,
price_impact=True)
T = excess_return_df.shape[0] # length of dataset
N = excess_return_df.shape[1] # number of assets
if window < N:
warnings.warn('window length smaller than the number of assets, may not get feasible portfolios')
if window >= T - 2: # 2 here can change later
raise Exception(
'Too few samples to test on will result in poor performance : reduce window or decrease interval or '
'increase length of data')
# apply rolling windows with __rebalance
portfolios = self.__rebalance(excess_return_df, normal_return_df, price_df, window, extra_data)
try:
assert sum(portfolios[0]) <= 1 + 0.000001
except:
raise Exception(
'Please make sure your strategy builds a portfolios whose sum of weights does not exceed 1!')
portfolios = pd.DataFrame(portfolios).iloc[::interval]
# save the portfolios for calling
self.__last_test_portfolios = portfolios.set_axis(excess_return_df.columns.values, axis='columns').set_axis(
excess_return_df.iloc[window - 1::interval].index.values, axis='index')
if interval > 1:
if price_df.empty:
df=normal_return_df.join(risk_free_rate)
df=(1+df.iloc[window-1:]).apply(lambda x:np.cumprod(x)).iloc[::interval].pct_change(fill_method=None).dropna(axis=0,how='all')
normal_return_df=df.iloc[:,:-1]
risk_free_rate=df.iloc[:,-1]
excess_return_df = normal_return_df.sub(risk_free_rate.values, axis=0)
price_df = price_df.iloc[window - 1::interval].iloc[1:]
else:
price_df = price_df.iloc[window - 1::interval]
normal_return_df=price_df.pct_change(fill_method=None).dropna(axis=0,how='all')
risk_free_rate=np.cumprod(1+risk_free_rate[window-1:]).iloc[::interval].pct_change(fill_method=None).dropna(axis=0,how='all')
excess_return_df=normal_return_df.sub(risk_free_rate.values, axis=0)
price_df=price_df.iloc[1:]
else:
excess_return_df = excess_return_df.iloc[window:]
normal_return_df = normal_return_df.iloc[window:]
risk_free_rate = risk_free_rate.iloc[window:]
price_df = price_df.iloc[window:]
# pre_balance portfolios that serves as denominators
pre_balance_portfolios = (1 + normal_return_df).mul(portfolios.iloc[:-1].values)
# turnover
# normalise portfolio weight before rebalancing at the start of each period
# note that turnover ratio is not affected by price-impact model
pre_balance_portfolios = pre_balance_portfolios.div(pre_balance_portfolios.sum(axis=1).values, axis=0)
diff = (portfolios.iloc[1:].sub(pre_balance_portfolios.values)).dropna(axis=0, how='all')
self.__total_turnover = abs(diff).sum(axis=1).sum()
self.__average_turnover = self.__total_turnover / (T - window)
# pre_balance portfolios that serves as nominators
pre_balance_portfolios_2 = (1 + normal_return_df.iloc[1:]).mul(portfolios.iloc[1:-1].values)
# factor in the initial_wealth for all 'diff','portfolios'
portfolios *= initial_wealth
pre_balance_portfolios *= initial_wealth
pre_balance_portfolios_2 *= initial_wealth
diff *= initial_wealth
# transform volume to average volume
volume = volume.rolling(window).mean().dropna(axis=0, how='all').fillna(method='ffill').loc[normal_return_df.index]
# evolution of money account
pre_balance_money = np.zeros(risk_free_rate.shape[0])
# Money account value after each period, before rebalancing
pi_models = {'default': {'buy': 1 + c * (diff[diff > 0].div((volume * price_df).values)) ** power,
'sell': 1 - c * (abs(diff[diff < 0]).div((volume * price_df).values)) ** power}}
pi_buy, pi_sell = pi_models[price_impact_model]['buy'], pi_models[price_impact_model]['sell']
# sell = ((abs(diff[diff < 0]).mul(1 - ptc_sell)) * (
# 1 - c * (abs(diff[diff < 0]).div((volume * price_df).values)) ** 0.6)).sum(axis=1)
# buy = ((diff[diff >= 0].mul(1 + ptc_buy)) * (
# 1 + c * (diff[diff >= 0].div((volume * price_df).values)) ** 0.6)).sum(axis=1)
sell = ((abs(diff[diff < 0]).mul(1 - ptc_sell)) * pi_sell).sum(axis=1)
buy = ((diff[diff > 0].mul(1 + ptc_buy)) * pi_buy).sum(axis=1)
fixed = diff[diff != 0].count(axis=1).mul(ftc)
after_balance_money = pre_balance_money + sell - buy - fixed
pre_balance_money_2 = after_balance_money[:-1].mul((1 + risk_free_rate.iloc[1:]).values)
# net_returns
self.__net_returns = (pre_balance_portfolios_2.sum(axis=1).add(pre_balance_money_2.values)).div(
pre_balance_portfolios.sum(axis=1).add(pre_balance_money).iloc[:-1].values) - 1
self.__net_excess_returns = self.__net_returns.sub(risk_free_rate.iloc[1:].values)
self.__sharpe = np.mean(self.__net_excess_returns) / np.std(self.__net_excess_returns, ddof=1)
def __test_no_price_impact(self, data, freq_data, data_type, rf, interval, window, freq_strategy, ptc_buy,
ptc_sell, ftc, initial_wealth, extra_data):
# prepare data
normal_return_df, excess_return_df, risk_free_rate, price_df = self.__prepare_data(data, freq_data,
data_type, rf,
interval, window,
freq_strategy)
T = excess_return_df.shape[0] # length of dataset
N = excess_return_df.shape[1] # number of assets
if window < N:
warnings.warn('window length smaller than the number of assets, may not get feasible portfolios')
if window >= T - 2: # 3 here can change later
raise Exception(
'Too few samples to test on will result in poor performance : reduce window or decrease interval or '
'increase length of data')
# apply rolling windows with __rebalance
portfolios = self.__rebalance(excess_return_df, normal_return_df, price_df, window, extra_data)
try:
assert sum(portfolios[0]) <= 1 + 0.000001
except:
raise Exception(
'Please make sure your strategy builds a portfolios whose sum of weights does not exceed 1!')
portfolios = pd.DataFrame(portfolios).iloc[::interval]
# save the portfolios for calling
self.__last_test_portfolios = portfolios.set_axis(excess_return_df.columns.values, axis='columns').set_axis(
excess_return_df.iloc[window - 1::interval].index.values, axis='index')
if interval > 1:
if price_df.empty:
df = normal_return_df.join(risk_free_rate)
df = (1 + df.iloc[window - 1:]).apply(lambda x: np.cumprod(x)).iloc[::interval].pct_change(fill_method=None).dropna(
axis=0, how='all')
normal_return_df = df.iloc[:, :-1]
risk_free_rate = df.iloc[:, -1]
excess_return_df = normal_return_df.sub(risk_free_rate.values, axis=0)
price_df = price_df.iloc[window - 1::interval].iloc[1:]
else:
price_df = price_df.iloc[window - 1::interval]
normal_return_df = price_df.pct_change(fill_method=None).dropna(axis=0, how='all')
risk_free_rate=np.cumprod(1+risk_free_rate[window-1:]).iloc[::interval].pct_change(fill_method=None).dropna(axis=0,how='all')
excess_return_df = normal_return_df.sub(risk_free_rate.values, axis=0)
price_df = price_df.iloc[1:]
else:
excess_return_df = excess_return_df.iloc[window:]
normal_return_df = normal_return_df.iloc[window:]
risk_free_rate = risk_free_rate.iloc[window:]
price_df = price_df.iloc[window:]
# pre_balance portfolios that serves as denominators
pre_balance_portfolios = (1 + normal_return_df).mul(portfolios.iloc[:-1].values)
# turnover
# normalise portfolio weight before rebalancing at the start of each period
# note that turnover ratio is not affected by price-impact model
pre_balance_portfolios = pre_balance_portfolios.div(pre_balance_portfolios.sum(axis=1).values, axis=0)
diff = (portfolios.iloc[1:].sub(pre_balance_portfolios.values)).dropna(axis=0, how='all')
self.__total_turnover = abs(diff).sum(axis=1).sum()
self.__average_turnover = self.__total_turnover / (T - window)
# pre_balance portfolios that serves as nominators
pre_balance_portfolios_2 = (1 + normal_return_df.iloc[1:]).mul(portfolios.iloc[1:-1].values)
# if ftc != 0:
# # factor in the initial_wealth for all 'diff','portfolios'
# portfolios *= initial_wealth
# pre_balance_portfolios *= initial_wealth
# pre_balance_portfolios_2 *= initial_wealth
# diff *= initial_wealth
#
# # transaction cost impacts
# sell = (abs(diff[diff < 0]).mul(1 - ptc_sell)).sum(axis=1)
# buy = (diff[diff >= 0].mul(1 + ptc_buy)).sum(axis=1)
# fixed = diff[diff != 0].count(axis=1).mul(ftc)
# # evolution of money account
# pre_balance_money = np.zeros(risk_free_rate.shape[0])
# after_balance_money = pre_balance_money + sell - buy - fixed
# pre_balance_money_2 = after_balance_money[:-1].mul((1 + risk_free_rate.iloc[1:]).values)
#
# self.__net_returns = (pre_balance_portfolios_2.sum(axis=1).add(pre_balance_money_2.values)).div(
# pre_balance_portfolios.sum(axis=1).add(pre_balance_money).iloc[:-1].values) - 1
#
# self.__net_excess_returns = self.__net_returns.sub(risk_free_rate.iloc[1:].values)
# else:
# # transaction cost impacts
# sell = (abs(diff[diff < 0]).mul(1 - ptc_sell)).sum(axis=1)
# buy = (diff[diff >= 0].mul(1 + ptc_buy)).sum(axis=1)
# # evolution of money account
# pre_balance_money = np.zeros(risk_free_rate.shape[0])
# after_balance_money = pre_balance_money + sell - buy
# pre_balance_money_2 = after_balance_money[:-1].mul((1 + risk_free_rate.iloc[1:]).values)
#
# self.__net_returns = (pre_balance_portfolios_2.sum(axis=1).add(pre_balance_money_2.values)).div(
# pre_balance_portfolios.sum(axis=1).add(pre_balance_money).iloc[:-1].values) - 1
#
# self.__net_excess_returns = self.__net_returns.sub(risk_free_rate.iloc[1:].values)
portfolios *= initial_wealth
pre_balance_portfolios *= initial_wealth
pre_balance_portfolios_2 *= initial_wealth
diff *= initial_wealth
# transaction cost impacts
sell = (abs(diff[diff < 0]).mul(1 - ptc_sell)).sum(axis=1)
buy = (diff[diff >= 0].mul(1 + ptc_buy)).sum(axis=1)
fixed = diff[diff != 0].count(axis=1).mul(ftc)
# evolution of money account
pre_balance_money = np.zeros(risk_free_rate.shape[0])
after_balance_money = pre_balance_money + sell - buy - fixed
pre_balance_money_2 = after_balance_money[:-1].mul((1 + risk_free_rate.iloc[1:]).values)
self.__net_returns = (pre_balance_portfolios_2.sum(axis=1).add(pre_balance_money_2.values)).div(
pre_balance_portfolios.sum(axis=1).add(pre_balance_money).iloc[:-1].values) - 1
self.__net_excess_returns = self.__net_returns.sub(risk_free_rate.iloc[1:].values)
self.__sharpe = np.mean(self.__net_excess_returns) / np.std(self.__net_excess_returns, ddof=1)
def backtest(self, data, freq_data, volume=pd.DataFrame(), data_type='price', rf=pd.Series(dtype='float'),
interval=1, window=60,
freq_strategy='D',
price_impact=False, ptc_buy=0, ptc_sell=0, ftc=0, c=1, initial_wealth=1E6,
extra_data=pd.DataFrame(), price_impact_model='default',power=0.6):
"""
Start the backtesting process with the built model. The function itself will not return anything. To get the results,
please call respective functions.
:param data: historical data that the strategy to be tested on. Index must be datetime format compatible
:type data: pd.DataFrame
:param freq_data: The frequency of the data provided, choose between {'D','W','M'}, where 'D' for day,'W' for week and 'M' for month. 'data' must be taken in the smallest unit of respective frequency, e.g. the frequency 'M' means the data is taken at each month
:type freq_data: str
:param volume: trading volume of each asset during each period (array of size T*N), or average trading volume for each asset over all periods (N-d array). If passing in as pd.DataFrame, then its index must match that of the data.
:type volume: pd.DataFrame or list or np.ndarray or pd.Series
:param data_type: choose from {'price','return','ex_return'} where 'price' stands for price data of assets at each timestamp, 'return' stands for normal percentage return of each asset in each period, 'ex_return' stands for percentage return net of risk-free rate
:type data_type: str
:param rf: data for risk-free rate in each period. Note: if 'rf' is passed in as a dataframe or series, the index of 'rf' must match that of 'data'
:type rf: pd.Series or pd.DataFrame or int or float
:param interval: number of periods that users want their portfolios to be rebalanced, the unit is based on 'freq_strategy'. e.g. If 'freq_data' is 'D', while 'freq_strategy' is 'M', and 'interval' is 2, then the portfolio will be rebalanced every 2 months using the user-defined portfolio-construction strategy
:type interval: int
:param window: length of rolling windows of 'data' wanted to feed into 'strategy' function. e.g. 'window'=60 means each time during rebalancing, past 60 periods of 'data' will be passed into user-defined strategy function
:type window: int
:param freq_strategy: The frequency on which the user want to use 'strategy' to rebalance the portfolio, choose between {'D','W','M'}. If "freq_strategy" is different from "freq_data", the library will resample data on "freq_strategy". Note: 'freq_data' should be smaller than 'freq_strategy' with the sequence 'D' < 'W' < 'M'
:type freq_strategy: str
:param price_impact: indicate whether to use price-impact model or not
:type price_impact: bool
:param ptc_buy: proportional transaction cost of buying each asset, measured in basis points. Can be a Series or array that provides one cost for each asset, or a single variable that stands for a universal transaction cost. Note: Cannot be a list, and must not provide labels
:type ptc_buy: pd.Series or np.ndarray or int or float
:param ptc_sell: proportional transaction cost of selling each asset, measured in basis points. Can be a Series or array that provides one cost for each asset, or a single variable that stands for a universal transaction cost. Note: Cannot be a list, and must not provide labels
:type ptc_sell: pd.Series or np.ndarray or int or float
:param ftc: dollar value of fixed transaction cost of each transaction, measured in one unit of any currency.
:type ftc: int or float
:param c: market depth indicators. Can be a Series or array that provide one market depth for each asset, or a single variable that stands for universal market depth. Note: Do NOT provide labels
:type c: pd.Series or int or np.ndarray or float
:param initial_wealth: dollar value of initial wealth of testing when 'price-impact' is true or 'ftc'!=0
:type initial_wealth: int or float
:param extra_data: extra_data to be passed into 'strategy' only when 'need_extra_data'==True. Note: 1. the datetime index of extra_data must match that of the provided data. 2. change-of-frequency functionality will be suspended if extra data is needed
:type extra_data: pd.DataFrame
:param price_impact_model: choose the price impact model you want to use from {'default'} (testing feature, to be built on)
:type price_impact_model: str
:return: None
"""
random.seed(1)
if price_impact_model not in {'default'}:
raise Exception('Unknown type of "price_impact_model"!')
if type(initial_wealth) != int and type(initial_wealth) != float:
raise Exception('Wrong type of "initial_wealth" given!')
if type(c) != float and type(c) != int and not isinstance(c, pd.Series) and not isinstance(c, np.ndarray):
raise Exception("Wrong type of 'c' given!")
if type(ftc) != int and type(ftc) != float:
raise Exception("Wrong type of 'ftc' given!")
if type(ptc_buy) != int and type(ptc_buy) != float and not isinstance(ptc_buy, pd.Series) and not isinstance(
ptc_buy,
np.ndarray):
raise Exception("Wrong type of 'ptc_buy' provided!")
else:
ptc_buy /= 10000
if type(ptc_sell) != int and type(ptc_sell) != float and not isinstance(ptc_sell, pd.Series) and not isinstance(
ptc_sell,
np.ndarray):
raise Exception("Wrong type of 'ptc_sell' provided!")
else:
ptc_sell /= 10000
if type(price_impact) != bool:
raise Exception("'price_impact' must be a boolean variable")
if freq_data not in {'D', 'W', 'M'}:
raise Exception("'freq_data' must be chosen from {'D','W','M'}")
if freq_strategy not in {'D', 'W', 'M'}:
raise Exception("'freq_strategy' must be chosen from {'D','W','M'}")
if freq_data == 'W' and freq_strategy == 'D':
raise Exception("'freq_data' should be smaller than 'freq_strategy' with the sequence 'D' < 'W' < 'M'")
if freq_data == 'M' and freq_strategy in {'D', 'W'}:
raise Exception("'freq_data' should be smaller than 'freq_strategy' with the sequence 'D' < 'W' < 'M'")
if type(window) != int:
raise Exception("'window' must be an 'int' variable")
if type(interval) != int:
raise Exception("'interval' must be an 'int' variable")
if initial_wealth == 1E6:
if price_impact == True or ftc != 0:
warnings.warn('Using default initial_wealth value @1E6!')
if self.__need_extra_data == True:
if isinstance(extra_data, pd.DataFrame) or isinstance(extra_data, pd.Series):
if extra_data.empty:
raise Exception('Please provide extra_data as dataframe')
try:
extra_data.index = pd.to_datetime(extra_data.index)
except:
print(
'Invalid index provided in your "extra_data", please make sure that index is in compatible datetime format')
else:
raise Exception(
'"extra_data" need to be a Series or DataFrame with datetime index corresponding to test data provided')
# if user-defined strategy need extra_data to operate, the library will NOT provide change of frequency functionality
if freq_strategy != freq_data:
raise Exception(
'If "extra_data" needed for your strategy, please make sure "freq_strategy" matches "freq_data"!')
if not extra_data.index.equals(data.index):
raise IndexError('Index of extra_data and index of data do not match!')
if (data_type == 'return' or data_type == 'ex_return') and ('price' in self.__involved_data_type):
raise Exception('"price" data type is involved in your strategy, please provide data with type "price"')
if isinstance(rf, pd.Series) or isinstance(rf, pd.DataFrame):
# if rf.empty and (('ex_return' in self.__involved_data_type) or ('return' in self.__involved_data_type)):
if rf.empty:
raise Exception(
'Please provide risk-free rate! (Set it to 0 if you do not want to consider it. Note that in this case, net_returns and net_excess_returns will be the same)')
if not rf.index.equals(data.index):
raise IndexError('Index of "rf" and index of "data" do not match!')
elif type(rf) == int or type(rf) == float:
rf = pd.Series([rf] * data.shape[0], index=data.index)
else:
raise Exception('Wrong format of "rf" is given.')
# if ftc != 0:
# if data_type != 'price':
# raise Exception('data_type must be "price" when using fixed transaction cost (ftc!=0)')
# divide into price_impact model and no_price_impact model
self.__price_impact = price_impact
frequency_map = {'D': 'Day', 'W': 'Week', 'M': 'Month'}
if price_impact == False:
self.__last_test_frequency = f'{interval} {frequency_map[freq_strategy]}'
self.__test_no_price_impact(data, freq_data, data_type, rf, interval, window, freq_strategy,
ptc_buy, ptc_sell, ftc, initial_wealth, extra_data)
else:
if isinstance(volume, pd.DataFrame):
if not volume.index.equals(data.index):
raise Exception('Index of "volume" and "index" of data do not match!')
elif isinstance(volume, pd.Series) or isinstance(volume, np.ndarray):
try:
volume = pd.DataFrame(volume.reshape(1, -1), columns=data.columns)
except:
print('Check your volume data!')
volume = pd.concat([volume] * data.shape[0]).set_index(data.index)
elif isinstance(volume, list):
try:
volume = | pd.DataFrame([volume], columns=data.columns) | pandas.DataFrame |
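`backtest_model` expects a strategy function that receives a list of rolling-window DataFrames and returns portfolio weights summing to at most 1. Following the class docstring, a minimal 1/N usage sketch; `prices` and `riskfree` are placeholders for real inputs, so the final call is left commented.

```python
import numpy as np

# A naive 1/N strategy: it receives a list with one rolling-window DataFrame
# (excess returns here) and returns equal weights across the assets.
def naive_equal_weight(list_df):
    n_assets = list_df[0].shape[1]
    return np.ones(n_assets) / n_assets

model = backtest_model(naive_equal_weight, ['ex_return'], name='naive 1/N')
# 'prices' and 'riskfree' are placeholders for real daily data:
# model.backtest(prices, freq_data='D', data_type='price', rf=riskfree, window=60)
```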
import numpy as np
import pandas as pd
import random
import time
from sklearn.utils import shuffle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random
from torch.utils.data import DataLoader
from torch.nn.functional import relu,leaky_relu
from torch.nn import Linear
from torch.nn import BatchNorm1d
import networkx as nx
from rdkit import Chem
from torch_geometric.nn import global_max_pool as gmp
from torch_geometric import data as DATA
from torch_geometric.data import Data, DataLoader
from math import sqrt
from rdkit.Chem import AllChem
from torch_geometric.nn import GATConv
from torch_geometric.nn import global_add_pool, global_mean_pool
import matplotlib.pyplot as plt
import pickle
#Convert SMILES to graph representation
def smile_to_graph(smile):
mol = Chem.MolFromSmiles(smile)
if(mol is None):
return None
else:
c_size = mol.GetNumAtoms()
features = []
for atom in mol.GetAtoms():
feature = atom_features(atom)
features.append( feature / sum(feature) )
edges = []
for bond in mol.GetBonds():
edges.append([bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()])
g = nx.Graph(edges).to_directed()
edge_index = []
for e1, e2 in g.edges:
edge_index.append([e1, e2])
return c_size, features, edge_index
# +
#Get compound features
def one_of_k_encoding_unk(x, allowable_set):
"""Maps inputs not in the allowable set to the last element."""
if x not in allowable_set:
x = allowable_set[-1]
return list(map(lambda s: x == s, allowable_set))
def one_of_k_encoding(x, allowable_set):
if x not in allowable_set:
raise Exception("input {0} not in allowable set{1}:".format(x, allowable_set))
return list(map(lambda s: x == s, allowable_set))
def atom_features(atom):
return np.array(one_of_k_encoding_unk(atom.GetSymbol(),['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na','Ca', 'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb','Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H','Li', 'Ge', 'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn', 'Zr','Cr', 'Pt', 'Hg', 'Pb', 'Unknown']) +
one_of_k_encoding(atom.GetDegree(), [0, 1, 2, 3, 4, 5, 6,7,8,9,10]) +
one_of_k_encoding_unk(atom.GetTotalNumHs(), [0, 1, 2, 3, 4, 5, 6,7,8,9,10]) +
one_of_k_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5, 6,7,8,9,10]) +
[atom.GetIsAromatic()])
# -
#Model architecture
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
# SMILES graph branch
#self.n_output = n_output
self.conv1 = GATConv(78, 78, heads=2, dropout=0.1)
self.conv2 = GATConv(78*2, 78*3, dropout=0.1)
self.conv3 = GATConv(78*3, 78 * 4, dropout=0.1)
self.fc_g1 = torch.nn.Linear(78*4, 256)
self.bn2 = BatchNorm1d(256)
self.fc_g2 = Linear(256, 64)
## Protein Sequences
n_filters = 128
self.embedding_xt = nn.Embedding(21 + 1, 128)
self.conv_xt_1 = nn.Conv1d(in_channels=2000, out_channels=n_filters, kernel_size=3)
self.conv_xt_2 = nn.Conv1d(in_channels= 128, out_channels= 128, kernel_size=5)
self.conv_xt_3 = nn.Conv1d(in_channels=128, out_channels=32, kernel_size=8)
self.fc1_xt1 = nn.Linear(32*11, 256)
self.bn3 = BatchNorm1d(256)
self.fc1_xt2 = nn.Linear(256,64)
self.fc12 = nn.Linear(2*64, 128)
self.fc22 = nn.Linear(128, 64)
self.out3 = nn.Linear(64, 1)
def forward(self, data):
# get graph input
self.relu = leaky_relu
flat = nn.Flatten()
x, edge_index, batch = data.x, data.edge_index, data.batch
# get protein input
target = data.target
x = self.conv1(x, edge_index)
x = F.relu(x)
x = self.conv2(x, edge_index)
x = F.relu(x)
x = self.conv3(x, edge_index)
x = F.relu(x)
x = gmp(x, batch) # global max pooling
# flatten
x = F.relu(self.fc_g1(x))
x = F.dropout(x, p=0.1)
x = F.relu(self.fc_g2(x))
# Proteins
embedded_xt = self.embedding_xt(target)
conv_xt = F.relu(F.max_pool1d(self.conv_xt_1(embedded_xt),2))
conv_xt = F.relu(F.max_pool1d(self.conv_xt_2(conv_xt),2))
conv_xt = F.relu(F.max_pool1d(self.conv_xt_3(conv_xt), 2))
#print("Shape of Conv layer: ", conv_xt.shape)
#xt = flat(conv_xt)
xt = conv_xt.view(-1, 32*11)
#print("Flatten XT shape: ", xt.shape)
xt = F.relu(self.fc1_xt1(xt))
xt = F.dropout(xt, p=0.1)
xt = F.relu(self.fc1_xt2(xt))
xt = F.dropout(xt, p=0.1)
xc = torch.cat((x, xt), 1)
# add some dense layers
xc = F.relu(self.fc12(xc))
#xc = F.relu(xc)
xc = F.dropout(xc, p=0.1)
xc = F.relu(self.fc22(xc))
xc = F.dropout(xc, p=0.1)
out = self.out3(xc)
return out
# +
#Calculate loss function
loss_fn = nn.MSELoss()
best_mse = 1000
calculated_mse = 1000
def mse(y,f):
mse = ((y - f)**2).mean(axis=0)
return mse
# -
# ################################## Test Mode #####################################
# +
#Option 0 then use test set else use the sars_cov_2 test set
option=1
if (option==0):
df = pd.read_csv("../data/Test_Compound_Viral_interactions_for_Supervised_Learning.csv")
else:
df = pd.read_csv("../data/sars_cov_2_Compound_Viral_interactions_for_Supervised_Learning.csv")
protein_seqs = df['Sequence'].values.tolist()
seq_voc_dic = "ACDEFGHIKLMNPQRSTVWXY"
seq_dict = {voc:idx for idx,voc in enumerate(seq_voc_dic)}
seq_dict_len = len(seq_dict)
max_seq_len = 2000
# +
#Process the protein sequence
def seq_dict_fun(prot):
x = np.zeros(max_seq_len)
x += 21
for i, ch in enumerate(prot[:max_seq_len]):
x[i] = seq_dict[ch]
return x
# Replace any residue outside the vocabulary with 'X' (strings are immutable, so rebuild them)
protein_seqs = [''.join(ch if ch in seq_voc_dic else 'X' for ch in seq) for seq in protein_seqs]
PS = [seq_dict_fun(k) for k in protein_seqs]
pt = []
for i in range(len(PS)):
pt.append(PS[i])
protein_inputs = np.array(pt)
# -
smiles = df['canonical_smiles'].values.tolist()
y = df['pchembl_value'].values.tolist()
uniprot = df['uniprot_accession']
inchi = df['standard_inchi_key']
# +
#Get the features from graph to be used in the GAT model
smile_graph = {}
none_smiles = []
got_g = []
for smile in smiles:
g = smile_to_graph(smile)
if(g is None):
print(smile)
none_smiles.append(smile)
else:
got_g.append(smile)
smile_graph[smile] = g
# -
#Get the features from graph model
data_features = []
data_edges = []
data_c_size = []
labels = []
data_list = []
for i in range(len(smiles)):
if(smiles[i] == 'Nc1ccc([S+]2(=O)Nc3nccc[n+]3['):
print(i)
else:
c_size, features, edge_index = smile_graph[smiles[i]]
data_features.append(features)
data_edges.append(edge_index)
data_c_size.append(c_size)
labels = y[i]
target = protein_inputs[i]
GCNData = DATA.Data(x=torch.Tensor(features),
edge_index=torch.LongTensor(edge_index).transpose(1, 0),
y=torch.FloatTensor([labels]))
GCNData.target = torch.LongTensor([target])
GCNData.__setitem__('c_size', torch.LongTensor([c_size]))
data_list.append(GCNData)
# +
#Load the test set and model
test_X = data_list
test_loader = DataLoader(test_X, batch_size=1, shuffle=False, drop_last=False)
device = torch.device('cpu')
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
model.load_state_dict(torch.load('../models/gat_cnn_models/GAT_CNN_2000_3_pooling_checkpoint.pt',map_location=device))#['state_dict'])
# +
#Make the predictions on the test set
model.eval()
print("Predicting...")
total_pred = []
total_labels = []
with torch.no_grad():
for data in test_loader:
data = data.to(device)
output = model(data)
total_labels.append(data.y.cpu().data.numpy().tolist()[0])
total_pred.append(output.cpu().data.numpy()[0].tolist()[0])
t = np.array(total_labels)
p = np.array(total_pred)
pred1 = mse(t,p)
print("Saving results...")
scores = []
for i in range(len(p)):
tk = []
tk.append(uniprot[i])
tk.append(inchi[i])
tk.append(p[i])
tk.append(t[i])
scores.append(tk)
f1 = pd.DataFrame(scores)
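#The lines below are a sketch, not part of the original script: they label the
#collected columns and write the predictions to disk. The output file name is
#an assumption.
f1.columns = ['uniprot_accession', 'standard_inchi_key', 'predicted_pchembl', 'actual_pchembl']
f1.to_csv("gat_cnn_test_predictions.csv", index=False)
print("Test MSE: ", pred1)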
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
qualifier.py
@author <NAME>
<EMAIL>
* qualifier * -> mpi_prep -> mpi_run -> mpi_read -> jackknifer
Program Use Instructions:
Instructions are found in qualifier.ini, as well as space for user input.
Program Description:
Prepares the random and data catalogs for use by mpi_prep.
-sorts data catalog by pixel, redshift
-gets out pixel and redshift distribution of data
-normalizes random catalog to large scale distribution of data
-sorts random catalog by pixel
Directory info:
parent
│
├── corrset
│ ├── qualifier.py #This python program
│ └── ...
└── cats
├── raw
├── matrices
└── ready #Where outputs are saved along with meta hdf
├──meta.hdf
├──C0_D0_zbin1_pix1
├──C0_D0_zbin2_pix1
└── ...
meta.hdf looks like this:
hdfs: 'abins' : (float 'edges'),
'zbins' : (float 'edges'),
'terms' : (str 'name', str 'path', int 'num', str 'type'),
'other' : (int 'nside')
'pops' : (ints... terms...)
terms format:
index name path num type
1. 'D1D2' '/path...' 0 'cross'
2. 'D1R2' '/path...' 0 'cross'
3. 'D0D0' '/path...' 1 'auto'
... ... ... ... ...
corrs format [indexing by correlation number]
index type D0 D1 D2 R0 R1 R2
1. 'cross' '' '/path...' '/path...' ... ... ...
2. 'auto' '/path...' '' '' ... ... ...
... ... ... ... ... ... ... ...
"""
import file_manager
import mp_manager
import numpy as np
import os
import time
import hdf_sorter
import pandas as pd
def current_time(start_time):
return (int(time.time()) - start_time)
def str2bins(string):
"""
Parses string as a list of floats, comma separated
as in: "1.2,3,6,7" -> [1.2, 3.0, 6.0, 7.0]
"""
return [float(s) for s in string.split(",")]
def corr_cfg_read(n, cfg):
"""
Given a corr number and the cfg dictionary from
file_manager.read_config_file(), read the type of corr, the terms, and the
file paths of the catalogs used for this corr
@returns
corr type ('auto' or 'cross')
corr terms (e.g. 'C1D1', 'C1R1', 'C1D2', 'C1R2' for n=1 cross)
term files (e.g. /D1/path, /R1/path, /D2/path, /R2/path)
"""
#Get current directory information
directory = os.path.dirname(os.path.realpath(__file__))
parent = os.path.dirname(directory)
#Figure out what the correlation type is and prepare to read it
c = str("C" + str(n))
corr_type = cfg[c + "_type"].strip(' ')
terms, dat = [], []
#Read the file paths to the terms specified by the correlation type
if corr_type == "cross": # ( D1D2 - D1R2 - D2R1 + R1R2 ) / R1R2
terms = [c+'D1', c+'D2', c+'R1', c+'R2']
dat = [cfg[terms[0]], cfg[terms[1]],
cfg[terms[2]], cfg[terms[3]]]
elif corr_type == "auto": # ( DD - DR + RR ) / RR
terms = [c+'D0', c+'R0']
dat = [cfg[terms[0]], cfg[terms[1]]]
else:
raise ValueError("Provided corr_type in qualifier.ini for n = " +
str(n) + " of " + corr_type + " is invalid.")
#For dat, if 'PARENT/' is in the file name, replace with the parent directory
for i in range(len(dat)):
if dat[i][0:7] == 'PARENT/':
dat[i] = dat[i].replace('PARENT', parent)
#Get the miscellaneous information
misc = [cfg[c+"_str"], cfg[c+"_use_zbins"], cfg[c+"_normalize"]]
#with whether or not to use zbins or normalize, specified by 'y' and 'n'
for i in [1, 2]:
if misc[i] == 'y':
misc[i] = True
else:
misc[i] = False
return corr_type, terms, dat, misc
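#Example (sketch): for a qualifier.ini with C0_type = cross, a call such as
#    corr_type, terms, dat, misc = corr_cfg_read(0, cfg)
#returns corr_type == 'cross', terms == ['C0D1', 'C0D2', 'C0R1', 'C0R2'], the
#four catalog paths in dat, and the [name, use_zbins, normalize] flags in misc.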
def meta_hdf():
"""
Load all of the meta information from qualifier.ini and save to meta.hdf
meta.hdf looks like this:
hdfs: 'abins' : (float 'edges'),
'zbins' : (float 'edges'),
'terms' : (str 'name', str 'path', int 'num', str 'type'),
'other' : (int 'nside')
'pops' : (ints... terms...) #(populated after)
terms format: [indexing by term]
index name path num type
0. 'C0D1' '/path...' 0 'cross'
1. 'C0R1' '/path...' 0 'cross'
2. 'C0D2' '/path...' 0 'cross'
3. 'C0R2' '/path...' 0 'cross'
4. 'C1D0' '/path...' 1 'auto'
... ... ... ... ...
corrs format [indexing by correlation number]
index type D0 D1 D2 R0 R1 R2
0. 'cross' '' '/path...' '/path...' ... ... ...
1. 'auto' '/path...' '' '' ... ... ...
... ... ... ... ... ... ... ...
[continued]
index str use_zbins normalize
0. 'name' True True
1. 'name' False True
... ... ...
"""
#Step 1: Load up qualifier.ini with the file manager
directory = os.path.dirname(os.path.realpath(__file__))
parent = os.path.dirname(directory)
cfg = file_manager.read_config_file(directory + "/qualifier.ini")
#Step 2: Prepare the meta.hdf HDFStore
meta_st = pd.HDFStore(parent + "/cats/ready/meta.hdf", mode='w')
#Step 3. Get and append abins and zbins
meta_st.put("abins", pd.DataFrame(str2bins(cfg['abins']),
columns=['abins']), format='table')
meta_st.put("zbins", pd.DataFrame(str2bins(cfg['zbins']),
columns=['zbins']), format='table')
#Step 4. Get the other meta information (n_corrs, nside) and append to meta
    other_df = pd.DataFrame()
import operator
import re
import numpy as np
import pandas as pd
import utils
def get_sites_from_kd_dict(transcript_id, sequence, kd_dict, overlap_dist):
if len(sequence) < 9:
return pd.DataFrame(None)
mir_info = {
'prev_loc': -100,
'prev_seq': '',
'prev_kd': 100,
'keep_kds': [],
'keep_locs': [],
'keep_seqs': []
}
pad_seq = 'XXX' + sequence + 'XXX'
seq = 'A' + pad_seq[:11]
# iterate through 12mers in the sequence
for loc, nt in enumerate(pad_seq[11:]):
seq = seq[1:] + nt
if seq in kd_dict:
new_kd = kd_dict[seq]
# if new site is too close to previous site, take site with higher affinity
if (loc - mir_info['prev_loc']) <= overlap_dist:
if new_kd < mir_info['prev_kd']:
mir_info['keep_kds'][-1] = new_kd
mir_info['keep_locs'][-1] = loc
mir_info['keep_seqs'][-1] = seq
mir_info['prev_loc'] = loc
mir_info['prev_kd'] = new_kd
# print('replace')
else:
# print('skipped')
continue
else:
# print('added')
mir_info['keep_kds'].append(new_kd)
mir_info['keep_locs'].append(loc)
mir_info['keep_seqs'].append(seq)
mir_info['prev_loc'] = loc
mir_info['prev_kd'] = new_kd
all_sites = pd.DataFrame({
'transcript': transcript_id,
'12mer': mir_info['keep_seqs'],
'log_kd': mir_info['keep_kds'],
'loc': mir_info['keep_locs']
})
return all_sites
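# Example (sketch): with a toy affinity dictionary of 12mer -> log KD values,
#   sites = get_sites_from_kd_dict('tx1', utr_seq, kd_dict, overlap_dist=12)
# returns a DataFrame with columns ['transcript', '12mer', 'log_kd', 'loc'];
# candidate sites closer than overlap_dist keep only the stronger (lower) log_kd.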
def get_sites_from_kd_dict_improved(transcript_id, sequence, kd_dict, overlap_dist):
if len(sequence) < 9:
return pd.DataFrame(None)
pad_seq = 'XXX' + sequence + 'XXX'
seq = 'A' + pad_seq[:11]
all_sites = []
# iterate through 12mers in the sequence
for loc, nt in enumerate(pad_seq[11:]):
seq = seq[1:] + nt
if seq in kd_dict:
new_kd = kd_dict[seq]
all_sites.append([seq, new_kd, loc])
if len(all_sites) == 0:
return pd.DataFrame(None)
all_sites = pd.DataFrame(all_sites, columns=['12mer','log_kd','loc']).sort_values('log_kd')
all_locs = all_sites['loc'].values
keep_locs = [all_locs[0]]
for loc in all_locs[1:]:
if np.min([np.abs(x - loc) for x in keep_locs]) > overlap_dist:
keep_locs.append(loc)
all_sites = all_sites[all_sites['loc'].isin(keep_locs)]
all_sites['transcript'] = transcript_id
return all_sites.sort_values('loc')
def _priority_order(locs, overlap_dist):
"""Helper function for get_sites_from_utr"""
    temp = pd.DataFrame({'loc': locs})
from dotenv import load_dotenv
load_dotenv()
import os, re, json, unicodedata
import tweepy
from tweepy import Stream,OAuthHandler
from datetime import datetime, timedelta
import nltk
# nltk.download('stopwords')
from nltk.corpus import stopwords
stop_words = stopwords.words('spanish')
from nltk.tokenize import TweetTokenizer
tweet_tokenizer = TweetTokenizer(strip_handles=True, reduce_len=True)
import gensim
import gensim.corpora as corpora
from gensim.utils import lemmatize, simple_preprocess
import pandas as pd
import numpy as np
from more_itertools import sliced
from sklearn.manifold import TSNE
####------------- PARAMETERS -------------####
DEBUG = True
CONSUMER_KEY = os.getenv("TWITTER_CONSUMER_KEY")
CONSUMER_SECRET = os.getenv("TWITTER_CONSUMER_SECRET")
ACCESS_TOKEN = os.getenv("TWITTER_ACCESS_TOKEN_KEY")
ACCESS_TOKEN_SECRET = os.getenv("TWITTER_ACCESS_TOKEN_SECRET")
CURRENT_TIME = datetime.utcnow() # current date and time in UTC
SEARCH_TIME = CURRENT_TIME - timedelta(hours=24) # look for tweets one hour back in time
# SEARCH_TIME = CURRENT_TIME - timedelta(minutes=30) # look for tweets one hour back in time
### Search API parameters
SEARCH_QUERY = ''
GEOCODE = '6.244203,-75.5812119,40km'
LANG = 'es'
RESULT_TYPE = 'recent' # mixed, recent or popular
RESULTS_PER_CALL = 100 # Max is 100 for Standard API
FILENAME = 'tweets.json' # Where the Tweets should be saved
PRINT_AFTER_X = 100 # Script prints an update to the CLI every time it collected another X Tweets
# LDA topics
NUM_TOPICS = 10
####------------- CODE -------------####
def login():
# Log in
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth,
# support for multiple authentication handlers
# retry 3 times with 5 seconds delay when getting these error codes
# For more details see
# https://dev.twitter.com/docs/error-codes-responses
retry_count=3,retry_delay=5,retry_errors=set([401, 404, 500, 503]),
# monitor remaining calls and block until replenished
wait_on_rate_limit=True,
wait_on_rate_limit_notify=True
)
json_str = json.dumps(api.me()._json, ensure_ascii=False)
parsed_json = json.loads(json_str)
if DEBUG: print( 'Logged to Twitter API with {} account'.format(parsed_json['name']) )
return api
def tweet_collection(api):
if DEBUG: print('Starting Tweet Collection from {} until {}'.format(CURRENT_TIME,SEARCH_TIME))
last_tweet_id = False
args = {}
tweets_counter = 0
tweets_data = []
keep_whiling = True
while True:
if last_tweet_id: args = {'max_id':last_tweet_id}
tweets = tweepy.Cursor(
api.search,
q = SEARCH_QUERY,
count = RESULTS_PER_CALL,
result_type = RESULT_TYPE,
include_entities = True,
geocode = GEOCODE,
lang = LANG,
tweet_mode = 'extended',
**args
).items()
for tweet in tweets:
text, tweet_id, date = parse_tweet(tweet._json)
tweets_data.append({
'text': text,
'id': tweet_id,
'date': date
})
tweets_counter += 1
if tweets_counter % PRINT_AFTER_X == 0:
# print('{}:\t {}\t {}'.format(tweets_counter, date.isoformat(), text))
if DEBUG: print(date - SEARCH_TIME, end='\r')
if not(date > SEARCH_TIME):
return tweets_data
def parse_tweet(tweet_json):
tweet_json_str = json.dumps(tweet_json, ensure_ascii=False).encode('utf8')
tweet = json.loads(tweet_json_str.decode('utf8'))
tweet_id = tweet['id']
tweet_date = datetime.strptime(tweet['created_at'],"%a %b %d %H:%M:%S +0000 %Y")
tweet_text_list = []
# If tweet is truncated and has extended_tweet entity
if tweet['truncated'] and 'extended_tweet' in tweet:
# parse tweet entities from extended_tweet
tweet_text_list.append(tweet['extended_tweet']['full_text'])
else:
# keep tweet entities from root level
tweet_text_list.append(tweet['full_text'])
if 'retweeted_status' in tweet:
tweet_text_list.append(tweet['retweeted_status']['full_text'])
if 'retweeted_status' in tweet and 'extended_tweet' in tweet['retweeted_status']:
tweet_text_list.append(tweet['retweeted_status']['extended_tweet']['full_text'])
if 'quoted_status' in tweet:
tweet_text_list.append(tweet['quoted_status']['full_text'])
if 'quoted_status' in tweet and 'extended_tweet' in tweet['quoted_status']:
tweet_text_list.append(tweet['quoted_status']['extended_tweet']['full_text'])
tweet_text = max(tweet_text_list, key=len)
return tweet_text, tweet_id, tweet_date
def remove_emoji(string):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002500-\U00002BEF" # chinese char
u"\U00002702-\U000027B0"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
u"\U0001f926-\U0001f937"
u"\U00010000-\U0010ffff"
u"\u2640-\u2642"
u"\u2600-\u2B55"
u"\u200d"
u"\u23cf"
u"\u23e9"
u"\u231a"
u"\ufe0f" # dingbats
u"\u3030"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', string)
def remove_accents(input_str):
nfkd_form = unicodedata.normalize('NFKD', input_str)
return u"".join([c for c in nfkd_form if not unicodedata.combining(c)])
def tokenize(text):
punctuation='[^-¡!()%/—.,¿?«»;"’:“”_...|]+'
text = re.sub('RT', '', text) # remove retweet tag
text = text.lower() # lowercase text
text = re.sub(r"(?:\@|http?\://|https?\://|www|\#[\w\_])\S+", "", str(text)) # remove urls, mentions and hashtags
text = re.sub(r'\s+', ' ', text) # remove newline chars
text = re.sub(r"\'", "", text) # remove single quotes
text = re.sub(r'\"', "", text) # remove double quotes
text = re.sub(r'\b(a*ha+h[ha]*|a*ja+j[ja]*)\b', "", text) # remove jajaja
text = remove_emoji(text)
text = remove_accents(text)
tokens = tweet_tokenizer.tokenize(text) # tokenize
tokens = [token for token in tokens if (token not in punctuation)] # remove punctuation
tokens = [token for token in tokens if (token not in stop_words)] # remove stopwords
return tokens
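# Example (sketch): tokenize("RT @user ¡Hola Medellín! jajaja https://t.co/x")
# drops the RT tag, mention, URL and laughter, lowercases, strips accents,
# punctuation and Spanish stopwords, returning roughly ['hola', 'medellin'].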
def explore_topic(lda_model, topic_number, topn, output=True):
"""
    Accept an LDA model, a topic number and the number of top terms of interest;
    prints a formatted list of the topn terms and returns them.
"""
terms = []
for term, frequency in lda_model.show_topic(topic_number, topn=topn):
terms += [term]
if output and DEBUG:
print(u'{:20} {:.3f}'.format(term, round(frequency, 3)))
return terms
if __name__ == "__main__":
api = login()
tweets = tweet_collection(api)
tweets_df = pd.DataFrame( tweets , columns=['text', 'id', 'date'])
# tokenize
tweets_df['tokens'] = tweets_df['text'].apply(tokenize)
documents = tweets_df['tokens'].values
id2word = corpora.Dictionary(documents) # Create Dictionary
corpus = [id2word.doc2bow(text) for text in documents] # Create Corpus: Term Document Frequency
# build LDA model
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=id2word,
num_topics=NUM_TOPICS,
random_state=254,
update_every=1,
chunksize=1000,
passes=2,
alpha=0.01,
eta=0.1,
iterations=100,
per_word_topics=True)
# if DEBUG: print(lda_model.print_topics())
# topic_summaries = []
# if DEBUG: print(u'{:20} {}'.format(u'term', u'frequency') + u'\n')
# for i in range(NUM_TOPICS):
# print('Topic '+str(i)+' |---------------------\n')
# tmp = explore_topic(lda_model,topic_number=i, topn=10, output=True )
# topic_summaries += [tmp[:5]]
# Get topic weights
topic_weights = []
for i, row_list in enumerate( lda_model[ corpus ] ):
topic_weights.append([w for i, w in row_list[0]])
    arr = pd.DataFrame(topic_weights)
from .indicator import Indicator
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
"""
Moving Average Crossover
-
Buy when short-term moving average > long-term moving average,
sell when short-term moving average < long-term moving average.
"""
class MovingAverageCrossover(Indicator):
def __init__(self, df, short_window=40, long_window=100):
self.df = df
self.short_window = short_window
self.long_window = long_window
def gen_signals(self):
# Initialize the `signals` DataFrame with the `signal` column
        signals = pd.DataFrame(index=self.df.index)
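        # Sketch of the remaining logic (not in the original excerpt); it assumes
        # the price column is named 'Close' -- adjust to the actual column name.
        signals['signal'] = 0.0
        # Short and long simple moving averages of the closing price
        signals['short_mavg'] = self.df['Close'].rolling(
            window=self.short_window, min_periods=1).mean()
        signals['long_mavg'] = self.df['Close'].rolling(
            window=self.long_window, min_periods=1).mean()
        # Buy (1.0) when the short average is above the long average, ignoring
        # the warm-up period of the short window
        signals.loc[signals.index[self.short_window:], 'signal'] = np.where(
            signals['short_mavg'][self.short_window:] >
            signals['long_mavg'][self.short_window:], 1.0, 0.0)
        # Orders are the day-to-day change in the signal: +1 buy, -1 sell
        signals['positions'] = signals['signal'].diff()
        return signals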
# -*- coding: utf-8 -*-
"""
Get historical information of a series from /mints. Eg: https://cryptoslam.io/cryptopunks/mints
@author: HP
"""
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import pandas
import time
import requests
from bs4 import BeautifulSoup
from selenium.common.exceptions import ElementClickInterceptedException,StaleElementReferenceException,ElementNotInteractableException,NoSuchElementException
from datetime import datetime, timedelta
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
output_directory = dir_path+"\\cryptoslam_mints" # Data will be outputed here
if not os.path.exists(output_directory): # create the folder if not exists already
os.mkdir(output_directory)
def get_transaction_time_from_etherscan(etherscan_links):
transaction_time_list = list()
for link in etherscan_links:
start = time.time()
browser = webdriver.Firefox()
browser.get(link)
time.sleep(1)
transaction_time = browser.find_element_by_xpath("/html/body/div[1]/main/div[3]/div[1]/div[2]/div[1]/div/div[4]/div/div[2]/i")
        transaction_time_list.append(transaction_time)
        # Close this per-link browser so a new Firefox instance is not leaked
        # on every iteration
        browser.quit()
        end = time.time()
print("one request took "+ str(end - start) + " seconds")
return transaction_time_list
def find_transaction_time(table_time_column): # NOT ACCURATE
now = datetime.today()
dates = ["minute","hour","day","month","year"]
timestamps = []
for cell in table_time_column:
splitted = cell.split(" ")
integer_time_value = int(splitted[0])
date = splitted[1]
if "second" in cell:
d = datetime.today() - timedelta(seconds=integer_time_value)
if "minute" in cell:
d =datetime.today() - timedelta(minutes=integer_time_value)
elif "hour" in cell:
d =datetime.today() - timedelta(hours=integer_time_value)
elif "day" in cell:
d =datetime.today() - timedelta(days=integer_time_value)
elif "month" in cell:
d =datetime.today() - timedelta(days=30*integer_time_value)
elif "year" in cell:
d =datetime.today() - timedelta(days=360*integer_time_value)
timestamps.append(d)
return timestamps
def obtain_series_links(series_names):
"""
obtain links of mint pages from series names.
returns a list
"""
links = []
for product in series_names[0]:
product = product.lower()
splitted = product.split()
product = "-".join(splitted)
series_link = "https://cryptoslam.io/" + product + "/mints"
links.append((product,series_link))
return links
series_names = pandas.read_pickle("series_names.pkl") # Get series names (cryptopunks, art blocks etc.)
series_main_pages = obtain_series_links(series_names) # contains tuples [("art-blocks","https://cryptoslam.io/art-blocks/mints"),(,)...]
test = [('cryptopunks', 'https://cryptoslam.io/cryptopunks/mints')]
for page in test:
    series_name = page[0]  # single name; avoids shadowing the series_names DataFrame above
    urlpage = page[1]
    # If we have it, skip
    if os.path.exists(str(output_directory+"\\cryptoslam_"+series_name+"_mints.xlsx")):
continue
options = webdriver.FirefoxOptions()
# options.headless = True
browser = webdriver.Firefox(options=options)
browser.get(urlpage)
# browser.find_element_by_xpath("//select[@name='element_name']/option[text()='option_text']").click()
time.sleep(6)
table_list = []
start = time.time()
# Get 1000 rows (only do it once per series)
try:
ddelement= Select(browser.find_element_by_xpath('/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[1]/div[1]/div/label/select'))
ddelement.select_by_visible_text("1000")
except ElementNotInteractableException as e:
print(e)
time.sleep(2)
ddelement= Select(browser.find_element_by_xpath('/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[1]/div[1]/div/label/select'))
ddelement.select_by_visible_text("1000")
except NoSuchElementException as e:
print(e)
time.sleep(2)
ddelement= Select(browser.find_element_by_xpath('/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[1]/div[1]/div/label/select'))
ddelement.select_by_visible_text("1000")
time.sleep(10) # wait for the page to load 1000 rows
while True : # Keep until all the pages are scraped
soup = BeautifulSoup(browser.page_source)
soup_table = soup.find_all("table")[-1]
soup_table = soup.find("table")
tables = pandas.read_html(str(soup_table))
table = tables[0]
columns_len = len(table.columns)
results_original_owner = browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[3]/div/table/tbody/tr/td["+str(columns_len+1)+"]/a")
results_nft = browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[3]/div/table/tbody/tr/td[3]/a")
results_etherscan_link = browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[3]/div/table/tbody/tr/td[2]/a")
original_owner_data = list()
nft_data = list()
etherscan_links = list()
try:
for result in results_etherscan_link:
link = result.get_attribute("href")
etherscan_links.append(link)
for result in results_original_owner:
product_link = result.get_attribute("data-original-title")
original_owner_data.append(product_link)
for result in results_nft:
product_link = result.get_attribute("href")
nft_data.append(product_link)
except StaleElementReferenceException as e:
print(e)
time.sleep(10)
results_original_owner = browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[3]/div/table/tbody/tr/td["+str(columns_len+1)+"]/a")
results_nft = browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[3]/div/table/tbody/tr/td[3]/a")
results_etherscan_link = browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[3]/div/table/tbody/tr/td[2]/a")
original_owner_data = list()
nft_data = list()
etherscan_links = list()
for result in results_etherscan_link:
link = result.get_attribute("href")
etherscan_links.append(link)
for result in results_original_owner:
product_link = result.get_attribute("data-original-title")
original_owner_data.append(product_link)
for result in results_nft:
product_link = result.get_attribute("href")
nft_data.append(product_link)
    table = pandas.read_html(browser.page_source)
import pandas as pd
import numpy as np
from functools import wraps
import copy
# Pass through pd.DataFrame methods for a (1,1,o,d) shaped triangle:
df_passthru = ['to_clipboard', 'to_csv', 'to_pickle', 'to_excel', 'to_json',
'to_html', 'to_dict', 'unstack', 'pivot', 'drop_duplicates',
'describe', 'melt']
# Aggregate method overridden to the 4D Triangle Shape
agg_funcs = ['sum', 'mean', 'median', 'max', 'min', 'prod', 'var', 'std']
agg_funcs = {item: 'nan'+item for item in agg_funcs}
# def check_triangle_postcondition(f):
# ''' Post-condition check to ensure the integrity of the triangle object
# remains intact. (used for debugging)
# '''
# @wraps(f)
# def wrapper(*args, **kwargs):
# X = f(*args, **kwargs)
# if not hasattr(X, 'triangle'):
# raise ValueError('X is missing triangle attribute')
# if X.triangle.ndim != 4:
# raise ValueError('X.triangle must be a 4-dimensional array')
# if len(X.kdims) != X.triangle.shape[0]:
# raise ValueError('X.index and X.triangle are misaligned')
# if len(X.vdims) != X.triangle.shape[1]:
# raise ValueError('X.columns and X.triangle are misaligned')
# return X
# return wrapper
class TriangleBase:
def __init__(self, data=None, origin=None, development=None,
columns=None, index=None):
# Sanitize Inputs
columns = [columns] if type(columns) is str else columns
origin = [origin] if type(origin) is str else origin
if development is not None and type(development) is str:
development = [development]
key_gr = origin if not development else origin+development
if not index:
index = ['Total']
data_agg = data.groupby(key_gr).sum().reset_index()
data_agg[index[0]] = 'Total'
else:
data_agg = data.groupby(key_gr+index) \
.sum().reset_index()
# Convert origin/development to dates
origin_date = TriangleBase.to_datetime(data_agg, origin)
self.origin_grain = TriangleBase.get_grain(origin_date)
# These only work with valuation periods and not lags
if development:
development_date = TriangleBase.to_datetime(data_agg, development,
period_end=True)
self.development_grain = TriangleBase.get_grain(development_date)
col = 'development'
else:
development_date = origin_date
self.development_grain = self.origin_grain
col = None
# Prep the data for 4D Triangle
data_agg = self.get_axes(data_agg, index, columns,
origin_date, development_date)
data_agg = pd.pivot_table(data_agg, index=index+['origin'],
columns=col, values=columns,
aggfunc='sum')
# Assign object properties
self.kdims = np.array(data_agg.index.droplevel(-1).unique())
self.odims = np.array(data_agg.index.levels[-1].unique())
if development:
self.ddims = np.array(data_agg.columns.levels[-1].unique())
self.ddims = self.ddims*({'Y': 12, 'Q': 3, 'M': 1}
[self.development_grain])
self.vdims = np.array(data_agg.columns.levels[0].unique())
else:
self.ddims = np.array([None])
self.vdims = np.array(data_agg.columns.unique())
self.ddims = self.ddims
self.valuation_date = development_date.max()
self.key_labels = index
self.iloc = _Ilocation(self)
self.loc = _Location(self)
# Create 4D Triangle
triangle = \
np.reshape(np.array(data_agg), (len(self.kdims), len(self.odims),
len(self.vdims), len(self.ddims)))
triangle = np.swapaxes(triangle, 1, 2)
# Set all 0s to NAN for nansafe ufunc arithmetic
triangle[triangle == 0] = np.nan
self.triangle = triangle
# Used to show NANs in lower part of triangle
self.nan_override = False
self.valuation = self._valuation_triangle()
# ---------------------------------------------------------------- #
# ----------------------- Class Properties ----------------------- #
# ---------------------------------------------------------------- #
def _len_check(self, x, y):
if len(x) != len(y):
            raise ValueError(f'Length mismatch: Expected axis has '
                             f'{len(x)} elements, new values have '
                             f'{len(y)} elements')
@property
def shape(self):
return self.triangle.shape
@property
def index(self):
return pd.DataFrame(list(self.kdims), columns=self.key_labels)
@property
def columns(self):
return self.idx_table().columns
@columns.setter
def columns(self, value):
self._len_check(self.columns, value)
self.vdims = [value] if type(value) is str else value
@property
def origin(self):
return pd.DatetimeIndex(self.odims, name='origin')
@origin.setter
def origin(self, value):
self._len_check(self.origin, value)
self.odims = [value] if type(value) is str else value
@property
def development(self):
return pd.Series(list(self.ddims), name='development').to_frame()
@development.setter
def development(self, value):
self._len_check(self.development, value)
self.ddims = [value] if type(value) is str else value
@property
def latest_diagonal(self):
return self.get_latest_diagonal()
@property
# @check_triangle_postcondition
def link_ratio(self):
obj = copy.deepcopy(self)
temp = obj.triangle.copy()
temp[temp == 0] = np.nan
val_array = obj.valuation.values.reshape(obj.shape[-2:],order='f')[:, 1:]
obj.triangle = temp[..., 1:]/temp[..., :-1]
obj.ddims = np.array([f'{obj.ddims[i]}-{obj.ddims[i+1]}'
for i in range(len(obj.ddims)-1)])
# Check whether we want to eliminate the last origin period
if np.max(np.sum(~np.isnan(self.triangle[..., -1, :]), 2)-1) == 0:
obj.triangle = obj.triangle[..., :-1, :]
obj.odims = obj.odims[:-1]
val_array = val_array[:-1, :]
obj.valuation = pd.DatetimeIndex(pd.DataFrame(val_array).unstack().values)
return obj
@property
def age_to_age(self):
return self.link_ratio
# ---------------------------------------------------------------- #
# ---------------------- End User Methods ------------------------ #
# ---------------------------------------------------------------- #
# @check_triangle_postcondition
def get_latest_diagonal(self, compress=True):
''' Method to return the latest diagonal of the triangle. Requires
self.nan_overide == False.
'''
obj = copy.deepcopy(self)
diagonal = obj[obj.valuation == obj.valuation_date].triangle
if compress:
diagonal = np.expand_dims(np.nansum(diagonal, 3), 3)
obj.ddims = ['Latest']
obj.valuation = pd.DatetimeIndex(
[pd.to_datetime(obj.valuation_date)]*len(obj.odims))
obj.triangle = diagonal
return obj
# @check_triangle_postcondition
def incr_to_cum(self, inplace=False):
"""Method to convert an incremental triangle into a cumulative triangle.
Parameters
----------
inplace: bool
Set to True will update the instance data attribute inplace
Returns
-------
Updated instance of triangle accumulated along the origin
"""
if inplace:
np.cumsum(np.nan_to_num(self.triangle), axis=3, out=self.triangle)
self.triangle = self.expand_dims(self.nan_triangle())*self.triangle
self.triangle[self.triangle == 0] = np.nan
return self
else:
new_obj = copy.deepcopy(self)
return new_obj.incr_to_cum(inplace=True)
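    # Example (sketch): if `tri` holds incremental amounts, `tri.incr_to_cum()`
    # returns a new Triangle accumulated along development, and
    # `tri.incr_to_cum().cum_to_incr()` recovers the original incremental values.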
# @check_triangle_postcondition
def cum_to_incr(self, inplace=False):
"""Method to convert an cumlative triangle into a incremental triangle.
Parameters
----------
inplace: bool
Set to True will update the instance data attribute inplace
Returns
-------
Updated instance of triangle accumulated along the origin
"""
if inplace:
temp = np.nan_to_num(self.triangle)[..., 1:] - \
np.nan_to_num(self.triangle)[..., :-1]
temp = np.concatenate((self.triangle[..., 0:1], temp), axis=3)
temp = temp*self.expand_dims(self.nan_triangle())
temp[temp == 0] = np.nan
self.triangle = temp
return self
else:
new_obj = copy.deepcopy(self)
return new_obj.cum_to_incr(inplace=True)
# @check_triangle_postcondition
def grain(self, grain='', incremental=False, inplace=False):
"""Changes the grain of a cumulative triangle.
Parameters
----------
grain : str
The grain to which you want your triangle converted, specified as
'O<x>D<y>' where <x> and <y> can take on values of ``['Y', 'Q', 'M']``
For example, 'OYDY' for Origin Year/Development Year, 'OQDM' for
Origin quarter, etc.
incremental : bool
Not implemented yet
inplace : bool
Whether to mutate the existing Triangle instance or return a new
one.
Returns
-------
Triangle
"""
if inplace:
origin_grain = grain[1:2]
development_grain = grain[-1]
new_tri, o = self._set_ograin(grain=grain, incremental=incremental)
# Set development Grain
dev_grain_dict = {'M': {'Y': 12, 'Q': 3, 'M': 1},
'Q': {'Y': 4, 'Q': 1},
'Y': {'Y': 1}}
if self.shape[3] != 1:
keeps = dev_grain_dict[self.development_grain][development_grain]
keeps = np.where(np.arange(new_tri.shape[3]) % keeps == 0)[0]
keeps = -(keeps + 1)[::-1]
new_tri = new_tri[..., keeps]
self.ddims = self.ddims[keeps]
self.odims = np.unique(o)
self.origin_grain = origin_grain
self.development_grain = development_grain
self.triangle = self._slide(new_tri, direction='l')
self.triangle[self.triangle == 0] = np.nan
self.valuation = self._valuation_triangle()
return self
else:
new_obj = copy.deepcopy(self)
new_obj.grain(grain=grain, incremental=incremental, inplace=True)
return new_obj
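    # Example (sketch): for a triangle at origin/development grain 'OQDQ',
    # `tri.grain('OYDY')` returns a copy aggregated to origin year by
    # development year; only coarser grains than the current one are meaningful.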
def trend(self, trend=0.0, axis=None):
""" Allows for the trending along origin or development
Parameters
----------
trend : float
The amount of the trend
axis : str ('origin' or 'development')
The axis along which to apply the trend factors. The latest period
of the axis is the trend-to period.
Returns
-------
Triangle updated with multiplicative trend applied.
"""
axis = {'origin': -2, 'development': -1}.get(axis, None)
if axis is None:
if self.shape[-2] == 1 and self.shape[-1] != 1:
axis = -1
elif self.shape[-2] != 1 and self.shape[-1] == 1:
axis = -2
else:
raise ValueError('Cannot infer axis, please supply')
trend = (1+trend)**np.arange(self.shape[axis])[::-1]
trend = np.expand_dims(self.expand_dims(trend), -1)
if axis == -1:
trend = np.swapaxes(trend, -2, -1)
obj = copy.deepcopy(self)
obj.triangle = obj.triangle*trend
return obj
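    # Example (sketch): `tri.trend(0.05, axis='origin')` multiplies each origin
    # period by 1.05**k, where k counts periods back from the latest origin
    # (the trend-to period), so the latest period is left unchanged.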
def rename(self, axis, value):
if axis == 'index' or axis == 0:
self.index = value
if axis == 'columns' or axis == 1:
self.columns = value
if axis == 'origin' or axis == 2:
self.origin = value
if axis == 'development' or axis == 3:
self.development = value
return self
# ---------------------------------------------------------------- #
# ------------------------ Display Options ----------------------- #
# ---------------------------------------------------------------- #
def __repr__(self):
if (self.triangle.shape[0], self.triangle.shape[1]) == (1, 1):
data = self._repr_format()
return data.to_string()
else:
data = 'Valuation: ' + self.valuation_date.strftime('%Y-%m') + \
'\nGrain: ' + 'O' + self.origin_grain + \
'D' + self.development_grain + \
'\nShape: ' + str(self.shape) + \
'\nindex: ' + str(self.key_labels) + \
'\ncolumns: ' + str(list(self.vdims))
return data
def _repr_html_(self):
''' Jupyter/Ipython HTML representation '''
if (self.triangle.shape[0], self.triangle.shape[1]) == (1, 1):
data = self._repr_format()
if np.nanmean(abs(data)) < 10:
fmt_str = '{0:,.4f}'
elif np.nanmean(abs(data)) < 1000:
fmt_str = '{0:,.2f}'
else:
fmt_str = '{:,.0f}'
if len(self.ddims) > 1 and type(self.ddims[0]) is int:
data.columns = [['Development Lag'] * len(self.ddims),
self.ddims]
default = data.to_html(max_rows=pd.options.display.max_rows,
max_cols=pd.options.display.max_columns,
float_format=fmt_str.format) \
.replace('nan', '')
return default.replace(
f'<th></th>\n <th>{self.development.values[0][0]}</th>',
f'<th>Origin</th>\n <th>{self.development.values[0][0]}</th>')
else:
data = pd.Series([self.valuation_date.strftime('%Y-%m'),
'O' + self.origin_grain + 'D'
+ self.development_grain,
self.shape, self.key_labels, list(self.vdims)],
index=['Valuation:', 'Grain:', 'Shape',
'Index:', "Columns:"],
name='Triangle Summary').to_frame()
pd.options.display.precision = 0
return data.to_html(max_rows=pd.options.display.max_rows,
max_cols=pd.options.display.max_columns)
def _repr_format(self):
''' Flatten to 2D DataFrame '''
x = self.triangle[0, 0]
if type(self.odims[0]) == np.datetime64:
origin = pd.Series(self.odims).dt.to_period(self.origin_grain)
else:
origin = pd.Series(self.odims)
return pd.DataFrame(x, index=origin, columns=self.ddims)
# ---------------------------------------------------------------- #
# ----------------------- Pandas Passthrus ----------------------- #
# ---------------------------------------------------------------- #
def to_frame(self, *args, **kwargs):
""" Converts a triangle to a pandas.DataFrame. Requires an individual
index and column selection to appropriately grab the 2D DataFrame.
Returns
-------
pandas.DataFrame representation of the Triangle.
"""
axes = [num for num, item in enumerate(self.shape) if item > 1]
if self.shape[:2] == (1, 1):
return self._repr_format()
elif len(axes) == 2:
tri = np.squeeze(self.triangle)
axes_lookup = {0: self.kdims, 1: self.vdims,
2: self.odims, 3: self.ddims}
return pd.DataFrame(tri, index=axes_lookup[axes[0]],
columns=axes_lookup[axes[1]])
else:
raise ValueError('len(index) and len(columns) must be 1.')
def plot(self, *args, **kwargs):
""" Passthrough of pandas functionality """
return self.to_frame().plot(*args, **kwargs)
@property
def T(self):
""" Passthrough of pandas functionality """
return self.to_frame().T
# ---------------------------------------------------------------- #
# ---------------------- Arithmetic Overload --------------------- #
# ---------------------------------------------------------------- #
def _validate_arithmetic(self, obj, other):
other = copy.deepcopy(other)
ddims = None
odims = None
if type(other) not in [int, float, np.float64, np.int64]:
if len(self.vdims) != len(other.vdims):
                raise ValueError(
                    'Triangles must have the same number of columns')
if len(self.kdims) != len(other.kdims):
                raise ValueError(
                    'Triangles must have the same number of index entries')
if len(self.vdims) == 1:
other.vdims = np.array([None])
# If broadcasting doesn't work, then try intersecting before
# failure
a, b = self.shape[-2:], other.shape[-2:]
if not (a[0] == 1 or b[0] == 1 or a[0] == b[0]) and \
not (a[1] == 1 or b[1] == 1 or a[1] == b[1]):
ddims = set(self.ddims).intersection(set(other.ddims))
odims = set(self.odims).intersection(set(other.odims))
# Need to set string vs int type-casting
obj = obj[obj.origin.isin(odims)][obj.development.isin(ddims)]
other = other[other.origin.isin(odims)][other.development.isin(ddims)]
obj.odims = np.sort(np.array(list(odims)))
obj.ddims = np.sort(np.array(list(ddims)))
other = other.triangle
return obj, other
# @check_triangle_postcondition
def __add__(self, other):
obj = copy.deepcopy(self)
obj, other = self._validate_arithmetic(obj, other)
obj.triangle = np.nan_to_num(obj.triangle) + np.nan_to_num(other)
obj.triangle = obj.triangle * self.expand_dims(obj.nan_triangle())
obj.triangle[obj.triangle == 0] = np.nan
obj.vdims = [None] if len(obj.vdims) == 1 else obj.vdims
return obj
# @check_triangle_postcondition
def __radd__(self, other):
return self if other == 0 else self.__add__(other)
# @check_triangle_postcondition
def __sub__(self, other):
obj = copy.deepcopy(self)
obj, other = self._validate_arithmetic(obj, other)
obj.triangle = np.nan_to_num(obj.triangle) - \
np.nan_to_num(other)
obj.triangle = obj.triangle * self.expand_dims(obj.nan_triangle())
obj.triangle[obj.triangle == 0] = np.nan
obj.vdims = [None] if len(obj.vdims) == 1 else obj.vdims
return obj
# @check_triangle_postcondition
def __rsub__(self, other):
obj = copy.deepcopy(self)
obj, other = self._validate_arithmetic(obj, other)
obj.triangle = np.nan_to_num(other) - \
np.nan_to_num(obj.triangle)
obj.triangle = obj.triangle * self.expand_dims(obj.nan_triangle())
obj.triangle[obj.triangle == 0] = np.nan
obj.vdims = [None] if len(obj.vdims) == 1 else obj.vdims
return obj
def __len__(self):
return self.shape[0]
# @check_triangle_postcondition
def __neg__(self):
obj = copy.deepcopy(self)
obj.triangle = -obj.triangle
return obj
# @check_triangle_postcondition
def __pos__(self):
return self
# @check_triangle_postcondition
def __mul__(self, other):
obj = copy.deepcopy(self)
obj, other = self._validate_arithmetic(obj, other)
obj.triangle = np.nan_to_num(obj.triangle)*other
obj.triangle = obj.triangle * self.expand_dims(obj.nan_triangle())
obj.triangle[obj.triangle == 0] = np.nan
obj.vdims = [None] if len(obj.vdims) == 1 else obj.vdims
return obj
# @check_triangle_postcondition
def __rmul__(self, other):
return self if other == 1 else self.__mul__(other)
# @check_triangle_postcondition
def __truediv__(self, other):
obj = copy.deepcopy(self)
obj, other = self._validate_arithmetic(obj, other)
obj.triangle = np.nan_to_num(obj.triangle)/other
obj.triangle[obj.triangle == 0] = np.nan
obj.vdims = [None] if len(obj.vdims) == 1 else obj.vdims
return obj
# @check_triangle_postcondition
def __rtruediv__(self, other):
obj = copy.deepcopy(self)
obj.triangle = other / self.triangle
obj.triangle[obj.triangle == 0] = np.nan
return obj
def __eq__(self, other):
if np.all(np.nan_to_num(self.triangle) ==
np.nan_to_num(other.triangle)):
return True
else:
return False
def quantile(self, q, *args, **kwargs):
if self.shape[:2] == (1, 1):
return self.to_frame().quantile(q, *args, **kwargs)
return _TriangleGroupBy(self, by=-1).quantile(q, axis=1)
def groupby(self, by, *args, **kwargs):
if self.shape[:2] == (1, 1):
return self.to_frame().groupby(*args, **kwargs)
return _TriangleGroupBy(self, by)
def idx_table_format(self, idx):
if type(idx) is pd.Series:
# One row or one column selection is it k or v?
if len(set(idx.index).intersection(set(self.vdims))) == len(idx):
# One column selection
idx = idx.to_frame().T
idx.index.names = self.key_labels
else:
# One row selection
idx = idx.to_frame()
elif type(idx) is tuple:
# Single cell selection
idx = self.idx_table().iloc[idx[0]:idx[0] + 1,
idx[1]:idx[1] + 1]
return idx
def idx_table(self):
idx = self.kdims
temp = pd.DataFrame(list(idx), columns=self.key_labels)
for num, item in enumerate(self.vdims):
temp[item] = list(zip(np.arange(len(temp)),
(np.ones(len(temp))*num).astype(int)))
temp.set_index(self.key_labels, inplace=True)
return temp
def __getitem__(self, key):
''' Function for pandas style column indexing'''
if type(key) is pd.DataFrame and 'development' in key.columns:
return self._slice_development(key['development'])
if type(key) is np.ndarray:
# Presumes that if I have a 1D array, I will want to slice origin.
if len(key) == self.shape[-2]*self.shape[-1] and self.shape[-1] > 1:
return self._slice_valuation(key)
return self._slice_origin(key)
if type(key) is pd.Series:
return self.iloc[list(self.index[key].index)]
if key in self.key_labels:
# Boolean-indexing of a particular key
return self.index[key]
idx = self.idx_table()[key]
idx = self.idx_table_format(idx)
return _LocBase(self).get_idx(idx)
def __setitem__(self, key, value):
''' Function for pandas style column indexing setting '''
idx = self.idx_table()
idx[key] = 1
self.vdims = np.array(idx.columns.unique())
self.triangle = np.append(self.triangle, value.triangle, axis=1)
# @check_triangle_postcondition
def append(self, obj, index):
return_obj = copy.deepcopy(self)
x = pd.DataFrame(list(return_obj.kdims), columns=return_obj.key_labels)
new_idx = pd.DataFrame([index], columns=return_obj.key_labels)
x = x.append(new_idx)
x.set_index(return_obj.key_labels, inplace=True)
return_obj.triangle = np.append(return_obj.triangle, obj.triangle,
axis=0)
return_obj.kdims = np.array(x.index.unique())
return return_obj
# @check_triangle_postcondition
def _slice_origin(self, key):
obj = copy.deepcopy(self)
obj.odims = obj.odims[key]
obj.triangle = obj.triangle[..., key, :]
return self._cleanup_slice(obj)
# @check_triangle_postcondition
def _slice_valuation(self, key):
obj = copy.deepcopy(self)
obj.valuation_date = obj.valuation[key].max()
key = key.reshape(self.shape[-2:], order='f')
nan_tri = np.ones(self.shape[-2:])
nan_tri = key*nan_tri
nan_tri[nan_tri == 0] = np.nan
o, d = nan_tri.shape
o_idx = np.arange(o)[list(np.sum(np.isnan(nan_tri), 1) != d)]
d_idx = np.arange(d)[list(np.sum(np.isnan(nan_tri), 0) != o)]
obj.odims = obj.odims[np.sum(np.isnan(nan_tri), 1) != d]
if len(obj.ddims) > 1:
obj.ddims = obj.ddims[np.sum(np.isnan(nan_tri), 0) != o]
obj.triangle = (obj.triangle*nan_tri)
obj.triangle = np.take(np.take(obj.triangle, o_idx, -2), d_idx, -1)
return self._cleanup_slice(obj)
# @check_triangle_postcondition
def _slice_development(self, key):
obj = copy.deepcopy(self)
obj.ddims = obj.ddims[key]
obj.triangle = obj.triangle[..., key]
return self._cleanup_slice(obj)
def _cleanup_slice(self, obj):
obj.valuation = obj._valuation_triangle()
if hasattr(obj, '_nan_triangle'):
# Force update on _nan_triangle at next access.
del obj._nan_triangle
obj._nan_triangle = obj.nan_triangle()
return obj
# ---------------------------------------------------------------- #
# ------------------- Data Ingestion Functions ------------------- #
# ---------------------------------------------------------------- #
def get_date_axes(self, origin_date, development_date):
''' Function to find any missing origin dates or development dates that
would otherwise mess up the origin/development dimensions.
'''
def complete_date_range(origin_date, development_date,
origin_grain, development_grain):
''' Determines origin/development combinations in full. Useful for
when the triangle has holes in it. '''
origin_unique = \
pd.period_range(start=origin_date.min(),
end=origin_date.max(),
freq=origin_grain).to_timestamp()
development_unique = \
pd.period_range(start=origin_date.min(),
end=development_date.max(),
freq=development_grain).to_timestamp()
development_unique = TriangleBase.period_end(development_unique)
# Let's get rid of any development periods before origin periods
cart_prod = TriangleBase.cartesian_product(origin_unique,
development_unique)
cart_prod = cart_prod[cart_prod[:, 0] <= cart_prod[:, 1], :]
return pd.DataFrame(cart_prod, columns=['origin', 'development'])
cart_prod_o = \
complete_date_range(pd.Series(origin_date.min()), development_date,
self.origin_grain, self.development_grain)
cart_prod_d = \
complete_date_range(origin_date, pd.Series(origin_date.max()),
self.origin_grain, self.development_grain)
cart_prod_t = pd.DataFrame({'origin': origin_date,
'development': development_date})
cart_prod = cart_prod_o.append(cart_prod_d) \
.append(cart_prod_t).drop_duplicates()
cart_prod = cart_prod[cart_prod['development'] >= cart_prod['origin']]
return cart_prod
def get_axes(self, data_agg, groupby, columns,
origin_date, development_date):
''' Preps axes for the 4D triangle
'''
date_axes = self.get_date_axes(origin_date, development_date)
kdims = data_agg[groupby].drop_duplicates()
kdims['key'] = 1
date_axes['key'] = 1
all_axes = pd.merge(date_axes, kdims, on='key').drop('key', axis=1)
data_agg = \
all_axes.merge(data_agg, how='left',
left_on=['origin', 'development'] + groupby,
right_on=[origin_date, development_date] + groupby) \
.fillna(0)[['origin', 'development'] + groupby + columns]
data_agg['development'] = \
TriangleBase.development_lag(data_agg['origin'],
data_agg['development'])
return data_agg
# ---------------------------------------------------------------- #
# ------------------- Class Utility Functions -------------------- #
# ---------------------------------------------------------------- #
def nan_triangle(self):
'''Given the current triangle shape and grain, it determines the
appropriate placement of NANs in the triangle for future valuations.
This becomes useful when managing array arithmetic.
'''
if self.triangle.shape[2] == 1 or \
self.triangle.shape[3] == 1 or \
self.nan_override:
# This is reserved for summary arrays, e.g. LDF, Diagonal, etc
# and does not need nan overrides
return np.ones(self.triangle.shape[2:])
if len(self.valuation) != len(self.odims)*len(self.ddims) or not \
hasattr(self, '_nan_triangle'):
self.valuation = self._valuation_triangle()
val_array = self.valuation
val_array = val_array.values.reshape(self.shape[-2:], order='f')
nan_triangle = np.array(
pd.DataFrame(val_array) > self.valuation_date)
nan_triangle = np.where(nan_triangle, np.nan, 1)
self._nan_triangle = nan_triangle
return self._nan_triangle
def _valuation_triangle(self, ddims=None):
''' Given origin and development, develop a triangle of valuation
dates.
'''
ddims = self.ddims if ddims is None else ddims
if ddims[0] is None:
ddims = pd.Series([self.valuation_date]*len(self.origin))
return pd.DatetimeIndex(ddims.values)
special_cases = dict(Ultimate='2262-03-01', Latest=self.valuation_date)
if ddims[0] in special_cases.keys():
return pd.DatetimeIndex([pd.to_datetime(special_cases[ddims[0]])] *
len(self.origin))
if type(ddims[0]) is np.str_:
ddims = [int(item[:item.find('-'):]) for item in ddims]
origin = pd.PeriodIndex(self.odims, freq=self.origin_grain) \
.to_timestamp(how='s')
origin = pd.Series(origin)
# Limit origin to valuation date
origin[origin > self.valuation_date] = self.valuation_date
next_development = origin+pd.DateOffset(days=-1, months=ddims[0])
val_array = np.expand_dims(np.array(next_development), -1)
for item in ddims[1:]:
if item == 9999:
next_development = pd.Series([pd.to_datetime('2262-03-01')] *
len(origin))
next_development = np.expand_dims(np.array(
next_development), -1)
else:
next_development = np.expand_dims(
np.array(origin+pd.DateOffset(days=-1, months=item)), -1)
val_array = np.concatenate((val_array, next_development), -1)
        return pd.DatetimeIndex(pd.DataFrame(val_array).unstack().values)
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from itertools import islice
from gensim.models.doc2vec import TaggedDocument, Doc2Vec
from gensim.parsing.preprocessing import preprocess_string
from sklearn.base import BaseEstimator
from sklearn import utils as skl_utils
from tqdm import tqdm
from sklearn.neural_network import MLPRegressor
import multiprocessing
import numpy as np
tos_df = pd.read_csv('./whole.csv', index_col=False, header=0)
from Gridworld import Gridworld
from MonteCarlo import MonteCarlo
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
env = Gridworld(shape=[5,5], initialState=25)
print("------------------------------epsilon=0.01-------------------------------------")
MC_1 = MonteCarlo(grid_world = env, epsilon=0.01, alpha=0.1, gamma=0.99, num_trials = 20)
print("------------------------------epsilon=0.1-------------------------------------")
MC_2 = MonteCarlo(grid_world = env, epsilon=0.1, alpha=0.1, gamma=0.99, num_trials = 20)
print("------------------------------epsilon=0.25-------------------------------------")
MC_3 = MonteCarlo(grid_world = env, epsilon=0.25, alpha=0.1, gamma=0.99, num_trials = 20)
print("------------------------------RESULTS-------------------------------------")
print("------------------------------EPSILON = 0.01 --------------------------------")
policy_1 = MC_1.getPolicy()
q1 = MC_1.getQValues()
frames = []
for x in range(3):
q = {}
for state in range(25):
q[state+1] = q1[x][(state+1, policy_1[x][0][state])]
df = pd.DataFrame(q, index = [x])
frames.append(df)
result_1 = pd.concat(frames)
print("Most common 3 policies: ", policy_1)
print("q values: \n", result_1)
print("------------------------------EPSILON = 0.1 --------------------------------")
policy_2 = MC_2.getPolicy()
q2 = MC_2.getQValues()
frames = []
for x in range(3):
q = {}
for state in range(25):
q[state+1] = q2[x][(state+1, policy_2[x][0][state])]
df = pd.DataFrame(q, index = [x])
frames.append(df)
result_2 = pd.concat(frames)
# -*- coding: utf-8 -*-
import pandas as pd
from flask import Flask, jsonify, render_template
from yahoofinancials import YahooFinancials
import numpy as np
from datetime import date
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
# identify how many days into the future do we want to predict
future = int(30)
# identify the stocks
tickers = ['AAPL', 'DIS', 'TXN', 'CAT', 'NVDA', 'V', 'CMCSA', 'LMT', 'GOOG', 'TSLA']
names_dict = {'AAPL':'Apple', 'DIS':'Disney', 'TXN':'Texas Instruments', 'CAT':'Caterpillar', 'NVDA':'Nvidia', 'V':'Visa', 'CMCSA':'Comcast', 'LMT':'Lockheed Martin', 'GOOG':'Google', 'TSLA':'Tesla'}
sent_dict = {'AAPL':'Apple',
'DIS':'Disney',
'TXN':"Texas Instruments Inc. (stock ticker: TI) is an American technology company that designs and manufactures semiconductors and various integrated circuits, which it sells to electronics designers and manufacturers globally. Headquartered in Dallas, Texas, United States, TI is one of the top ten semiconductor companies worldwide, based on sales volume. Texas Instruments' focus is on developing analog chips and embedded processors, which accounts for more than 80% of their revenue. TI also produces TI digital light processing (DLP) technology and education technology products including calculators, microcontrollers and multi-core processors. To date, TI has more than 43,000 patents worldwide.",
             'CAT':'Caterpillar',
             'NVDA':'Nvidia',
'V': 'Visa',
'CMCSA':"Comcast (CMSCA) is the world's largest broadcasting and cable corporation. Comcast is the largest cable television and internet provider services. The company began in 1963 in Tupelo, Mississippi and went public in 1972 with an initial price of $7 per share. Comcast's stock price has risen steadily since it was initially offered and peaked for $42 a share in February 2018.",
'LMT':'<NAME>',
'GOOG':'Google',
'TSLA':'Tesla'}
# identify the date interval
date1 = '2016-01-01'
date2 = str(date.today())
# adjclose is the same as close
# initialize empty list to append
ti = []
acc = []
pred = []
act = []
for ticker in tickers:
dat = pd.DataFrame()
yahoo_financials = YahooFinancials(ticker)
result = yahoo_financials.get_historical_price_data(date1, date2, 'daily')
    df = pd.DataFrame(data=result[ticker]['prices'])
"""Основные метрики доходности на базе ML-модели"""
from functools import lru_cache
import numpy as np
import pandas as pd
from local import moex
from metrics.portfolio import CASH
from metrics.portfolio import PORTFOLIO
from metrics.portfolio import Portfolio
from metrics.returns_metrics import AbstractReturnsMetrics
from ml.returns.manager import ReturnsMLDataManager
from settings import T_SCORE
MONTH_TO_OPTIMIZE = 10
class MLReturnsMetrics(AbstractReturnsMetrics):
def __init__(self, portfolio: Portfolio):
super().__init__(portfolio)
        manager = ReturnsMLDataManager(portfolio.positions[:-2], pd.Timestamp(portfolio.date))
from collections import namedtuple
import re
import time
import warnings
from geopy.distance import geodesic
from geopy.exc import GeocoderUnavailable
from geopy.geocoders import Nominatim
import pandas
import requests
Location = namedtuple("Location", ["latitude", "longitude"])
def clean_address(problem_address):
"""Format the address so it is usable by the geocoder.
Parameters
----------
problem_address: str
The address that needs to be cleaned.
Returns
-------
str: The cleaned address.
"""
# clean the Apartment info out
gg = re.search(
r"\s(?:apt|apt\.|apartment|unit|unt|bldg\.|bldg|building|#)([\s#]+)?\w+\s",
problem_address,
re.IGNORECASE,
)
if gg:
gg = gg.group()
problem_address = " ".join(
[piece.strip().strip(",") for piece in problem_address.split(gg)]
)
# clean the extra zip code out
city_info_results = re.search(r"\w+,\s+\w+(\s+\d+)?", problem_address)
if not city_info_results:
warnings.warn("Error cleaning: {}".format(problem_address))
return problem_address
city_info = city_info_results.group()
fixed_address = problem_address.split(city_info)[0].strip() + " " + city_info
return fixed_address
def get_census_address(input_address):
"""
Get the geocode location from census.gov
Parameters
----------
input_address: str
The address to use to find the latlon location.
Returns
-------
dict: Dictionary with latitude and longitude of address.
"""
response = requests.get(
"https://geocoding.geo.census.gov/geocoder/locations/"
"onelineaddress?benchmark=4&format=json",
params={"address": input_address},
)
try:
geo_data = response.json()["result"]["addressMatches"][0]["coordinates"]
except IndexError:
return None
return Location(latitude=geo_data["y"], longitude=geo_data["x"])
def get_location(address):
"""
Return the location in latitude and longitude of the address.
Parameters
----------
address: str
The address to find the location for.
Returns
-------
tuple(:obj:`Location`, str): The location of the address.
"""
location = get_census_address(address)
try:
if location is None:
location = Nominatim(user_agent="member_proximity_py").geocode(address)
except GeocoderUnavailable:
pass
# maximum of 1 request per second
# https://operations.osmfoundation.org/policies/nominatim/
time.sleep(1)
return location
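# Example (sketch): get_location("350 Fifth Ave, New York, NY 10118") tries the
# census.gov geocoder first, falls back to Nominatim, and returns an object with
# .latitude and .longitude attributes (or None if neither service finds a match).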
def generate_address_latlon(ward_directory_export, recalc=False):
"""
Adds latitude and longitudes to the ward directory file.
Parameters
----------
ward_directory_export: str
Path to the ward directory export csv file.
recalc: bool
If True, it will recalculate rows without locations.
"""
ward_directory_df = pandas.read_csv(ward_directory_export)
# make sure location columns added
location_columns = ("cleaned_address", "latitude", "longitude")
for location_column in location_columns:
if location_column not in ward_directory_df.columns:
ward_directory_df[location_column] = None
recalc = True
if not recalc:
return
def add_locations(in_row):
if pandas.isnull(in_row["cleaned_address"]):
in_row["cleaned_address"] = clean_address(in_row["Family Address"])
if pandas.isnull(in_row["latitude"]) or pandas.isnull(in_row["longitude"]):
location = get_location(in_row["cleaned_address"])
            if location is None:
                warnings.warn(
                    "Location not found: {}".format(
                        [in_row["cleaned_address"], in_row["Family Address"]]
                    )
                )
            else:
                in_row["latitude"] = location.latitude
                in_row["longitude"] = location.longitude
return in_row
ward_directory_df = ward_directory_df.apply(add_locations, axis=1)
ward_directory_df.to_csv(ward_directory_export, index=False)
def generate_address_distance(
input_address, ward_directory_export, output_distance_file, recalc=False
):
"""
Creates a file of distances to the input address sorted with the closest at the top.
Parameters
----------
input_address: str
The address to use as a base to generate distances from.
ward_directory_export: str
Path to the ward directory export csv file.
output_distance_file: str
Path to output csv file with distances to the original input_address.
recalc: bool
If True, it will recalculate rows without locations.
"""
input_address = clean_address(input_address.strip())
generate_address_latlon(ward_directory_export, recalc)
address_latlon_df = pandas.read_csv(ward_directory_export)
# use calculated location if address already calculated
existing_addresses = address_latlon_df.loc[
address_latlon_df.cleaned_address == input_address
]
try:
existing_address = existing_addresses.iloc[0]
location = Location(
latitude=existing_address["latitude"],
longitude=existing_address["longitude"],
)
except IndexError:
# look up location if not found in addresses
location = get_location(input_address)
if not location:
raise RuntimeError("Address location not found.")
def calc_distance(in_row):
start = (location.latitude, location.longitude)
end = (
0 if | pandas.isnull(in_row["latitude"]) | pandas.isnull |
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
df=pd.read_csv('TrainingData.csv')
#Change strings to numbers
from sklearn import preprocessing
lb = preprocessing.LabelBinarizer()
wi=lb.fit_transform(np.array(df.loc[:,['Working Ion']]))
cs=lb.fit_transform(np.array(df.loc[:,['Crystal System']]))
sn=lb.fit_transform(np.array(df.loc[:,['Spacegroup Number']]))
el=np.array(df.loc[:,['mean_Number', 'mean_MendeleevNumber',
'mean_AtomicWeight', 'mean_MeltingT', 'mean_Column', 'mean_Row',
'mean_CovalentRadius', 'mean_Electronegativity', 'mean_NsValence',
'mean_NpValence', 'mean_NdValence', 'mean_NfValence', 'mean_NValance',
'mean_NsUnfilled', 'mean_NpUnfilled', 'mean_NdUnfilled',
'mean_NfUnfilled', 'mean_NUnfilled', 'mean_GSvolume_pa',
'mean_GSbandgap', 'mean_GSmagmom', 'mean_SpaceGroupNumber',
'dev_Number', 'dev_MendeleevNumber', 'dev_AtomicWeight', 'dev_MeltingT',
'dev_Column', 'dev_Row', 'dev_CovalentRadius', 'dev_Electronegativity',
'dev_NsValence', 'dev_NpValence', 'dev_NdValence', 'dev_NfValence',
'dev_NValance', 'dev_NsUnfilled', 'dev_NpUnfilled', 'dev_NdUnfilled',
'dev_NfUnfilled', 'dev_NUnfilled', 'dev_GSvolume_pa', 'dev_GSbandgap',
'dev_GSmagmom', 'dev_SpaceGroupNumber', 'mean_Number.1',
'mean_MendeleevNumber.1', 'mean_AtomicWeight.1', 'mean_MeltingT.1',
'mean_Column.1', 'mean_Row.1', 'mean_CovalentRadius.1',
'mean_Electronegativity.1', 'mean_NsValence.1', 'mean_NpValence.1',
'mean_NdValence.1', 'mean_NfValence.1', 'mean_NValance.1',
'mean_NsUnfilled.1', 'mean_NpUnfilled.1', 'mean_NdUnfilled.1',
'mean_NfUnfilled.1', 'mean_NUnfilled.1', 'mean_GSvolume_pa.1',
'mean_GSbandgap.1', 'mean_GSmagmom.1', 'mean_SpaceGroupNumber.1',
'dev_Number.1', 'dev_MendeleevNumber.1', 'dev_AtomicWeight.1',
'dev_MeltingT.1', 'dev_Column.1', 'dev_Row.1', 'dev_CovalentRadius.1',
'dev_Electronegativity.1', 'dev_NsValence.1', 'dev_NpValence.1',
'dev_NdValence.1', 'dev_NfValence.1', 'dev_NValance.1',
'dev_NsUnfilled.1', 'dev_NpUnfilled.1', 'dev_NdUnfilled.1',
'dev_NfUnfilled.1', 'dev_NUnfilled.1', 'dev_GSvolume_pa.1',
'dev_GSbandgap.1', 'dev_GSmagmom.1', 'dev_SpaceGroupNumber.1']])
prop=np.hstack((wi, cs, sn, el))
#Use StandardScaler
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
pss = ss.fit_transform(prop)
pca = PCA(n_components=165)
newdata=pca.fit_transform(pss)
newdf = | pd.DataFrame(newdata) | pandas.DataFrame |
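# Illustrative continuation of the PCA fit above (an assumption, not in the original
# snippet): the variance retained by the 165 components could be checked with
#   cumulative = pca.explained_variance_ratio_.cumsum()
#   print(cumulative[-1])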
'''
Download PDF files for a series of law chapter numbers
found in our training CSV file provided by partners.
Then extract the chapter texts from those PDFs.
Finally insert the chapter text into a new column 'Text' of our training CSV.
Note: each PDF contains a bit of the previous chapter and of the following one.
'''
import difflib
import math
import os
import re
from collections import Counter
import pandas as pd
import settings
import urllib.request, urllib
import utils
DOWNLOAD_PATH = os.path.join(settings.DATA_PATH, 'in', 'download')
os.makedirs(DOWNLOAD_PATH, exist_ok=True)
TITLES_FILENAME = 'titles-1398.csv'
def download_and_extract():
titles_df = pd.read_csv(os.path.join(settings.DATA_PATH, 'in', TITLES_FILENAME))
# download all the pdfs
download_pdfs(titles_df)
# Extract the texts from PDF into .txt files
# Using two complementary techniques:
# 1. extract embedded text from PDF
# 2. OCR with Tesseract
# 2. is better than 1. in general but not always.
for i in [0, 1]:
extract_texts_from_pdfs(titles_df, use_tesseract=i)
extract_chapters_from_texts(titles_df, limit=None)
# write the new dataframe back into the input csv
titles_df.to_csv(os.path.join(settings.DATA_PATH, 'in', TITLES_FILENAME), index=False)
def extract_chapters_from_texts(titles_df, limit=None):
'''Extract the text of the chapters from the texts converted from the PDFs
and insert them into a new column in titles_df dataframe called Text.'''
found = 0
total = len(titles_df)
titles_df['Text'] = titles_df['Title']
if limit is None:
limit = total
for i, row in titles_df.iterrows():
if i >= limit:
break
chapter_number = row['Chapter']
# print(str(chapter_number) + ' ' + '=' * 40)
best = None
options = []
# We extract the desired chapter with Tesseract first.
# If that doesn't work we fall back to the text embedded in the PDF.
for j, prefix in enumerate(['', 'tes-']):
filename = f"{prefix}{chapter_number}.txt"
content = utils.read_file(os.path.join(DOWNLOAD_PATH, filename))
chapter, chapter_first_line = extract_chapter_from_text(content, chapter_number)
options.append({
'filename': filename,
'chapter': chapter,
'first_line': chapter_first_line,
})
if options[-1]['chapter']:
best = j
if best is None:
print(f'WARNING: chapter roman number not found {chapter_number}')
else:
found += 1
text = utils.repair_ocred_text(options[best]['chapter'])
titles_df.loc[titles_df['Chapter'] == chapter_number, "Text"] = text
titles_df.loc[titles_df['Chapter'] == chapter_number, "First line"] = options[best]['first_line']
# print('INFO: {} - {}'.format(chapter_number, options[best]['filename']))
print('INFO: {} parsed. {} not found.'.format(limit, limit-found))
def extract_chapter_from_text(text, chapter_number):
'''
:param text: textual content of a pdf file
:param chapter_number: number of a chapter to extract from text
:return: a string with the content of the extracted chapter
Method:
All chapters in a text start with a line like this:
CHAPTER MCDLXXXIV
Which is the roman number of the chapter.
    We use a text-similarity function to find all the lines that look like
    a chapter heading (chapters). Then we look for a perfect match on the
    number we are after, or on the following one.
We replace some characters and patterns which are often misencoded
or badly OCRed in the pdf to improve the chances of a match.
'''
ret = ''
roman = utils.get_roman_from_int(chapter_number)
'''
e.g. 1484
['CHAPTER MODLXXXIV', 'CHAPTER MCDLXXXIIL', 'CHAPTER MCDLXXXY¥']
e.g. 1485
# First match is 1486, second is what we want for 1485.
['CHAPTER MCDLXXXVI', 'CHAPTER MCDLXXXY¥']
'''
# clean up to facilitate matches of chapter number
# Tesseract often reads O when it is written C
text_normalised = re.sub(r'[^A-Z1-9\n]+', r' ', text.upper().replace('1', 'I'))
text_normalised = utils.normalise_roman_number(text_normalised)
lines = [line.strip('. ') for line in text_normalised.split('\n')]
marker = utils.normalise_roman_number(f'CHAPTER {roman}')
# list of lines with CHAPTER XXX
chapters = [
c for c in difflib.get_close_matches(marker, lines)
# exclude footnotes, e.g. *CHAPTER 1478
if not(re.search(r'\d', c) and len(c) < 16)
]
# sort them by appearance rather than similarity.
# Similarity is not reliable due to corruption of the numbers by OCR.
chapters = [line for line in lines if line in chapters]
start = None
end = None
warning = 'NOT FOUND'
if len(chapters) == 1:
# only one match, we assume it's what we are after even if not perfect
start = lines.index(chapters[0])
end = len(lines)
elif len(chapters) > 1:
# return a line which ends with the same roman number
start = find_exact_chapter_number(chapter_number, chapters, lines)
# line for next chapter number
end = find_exact_chapter_number(chapter_number + 1, chapters, lines)
if start is not None and end is not None and not(chapters.index(lines[end]) == chapters.index(lines[start]) + 1):
warning = 'NEXT != THIS + 1'
start = None
end = None
if start is None:
if end is not None:
start = max([
lines.index(ch)
for ch in chapters
if lines.index(ch) < end
] + [
-1
])
if start == -1:
warning = 'NEXT is first'
start = None
if start is None:
# heuristic: if no good match AND we have two candidate chapters
# then pick the first candidate.
if len(chapters) == 2:
print(chapters)
start = lines.index(chapters[0])
end = lines.index(chapters[1], start+1)
# now get all the lines between start and end
first_line = ''
if start is not None:
if end is None:
end = min([
lines.index(ch)
for ch in chapters
if lines.index(ch) > start
] + [
len(lines)
])
if end != len(lines) and lines[end]:
if re.findall(r'\d', lines[end]):
warning = 'END might be a footnote'
print(warning, lines[end])
if re.findall(r'\d', lines[start]):
warning = 'START might be a footnote'
print(warning, lines[start])
# extract lines [start:end] from the non-normalised text
lines = text.split('\n')
ret = '\n'.join(lines[start+1:end]).strip('\n ')
first_line = lines[start].strip('\n ')
if not ret:
print(chapter_number, repr(marker), chapters, warning)
return ret, first_line
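# Hypothetical usage sketch (the file name is made up; only helpers already used above appear):
#   content = utils.read_file(os.path.join(DOWNLOAD_PATH, '1484.txt'))
#   chapter_text, first_line = extract_chapter_from_text(content, 1484)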
def find_exact_chapter_number(number, candidates_lines, all_lines):
'''return index of the line with an exact match for given number'''
ret = None
roman = utils.get_roman_from_int(number)
roman_normalised = utils.normalise_roman_number(roman)
exact_chapters = [
ch for ch in candidates_lines
if ch.endswith(roman_normalised)
]
if len(exact_chapters) == 1:
ret = all_lines.index(exact_chapters[0])
return ret
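# For reference (an assumption about the helpers, consistent with the docstrings above):
#   utils.get_roman_from_int(1484) should yield 'MCDLXXXIV', and
#   utils.normalise_roman_number() is expected to map OCR-confusable characters so that
#   a line like 'CHAPTER MODLXXXIV' can still end with the normalised roman number.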
def download_pdfs(titles_df):
'''
Download the PDFs from palrb.us website.
Skip files already on disk.
'''
total = len(titles_df)
for i, row in titles_df.iterrows():
yd = str(row['Session'])[1]
url = f"http://www.palrb.us/statutesatlarge/1{yd}001{yd}99/{row['Session']}/0/act/{row['Chapter']}.pdf"
out_path = os.path.join(DOWNLOAD_PATH, f"{row['Chapter']}.pdf")
if utils.download(url, out_path) == 2:
print(f"{i}/{total} {url}")
def extract_texts_from_pdfs(titles_df, reprocess=False, read_only=False, limit=None, use_tesseract=False):
found = 0
total = len(titles_df)
if limit is None:
limit = total
prefix = ''
if use_tesseract:
prefix = 'tes-'
for i, row in titles_df.iterrows():
if i >= limit:
break
pdf_path = os.path.join(DOWNLOAD_PATH, f"{row['Chapter']}.pdf")
txt_path = os.path.join(DOWNLOAD_PATH, f"{prefix}{row['Chapter']}.txt")
if not os.path.exists(pdf_path):
            print(f'WARNING: {pdf_path} not found.')
if reprocess or not os.path.exists(txt_path):
print(f"{i}/{total} {pdf_path}")
content = utils.extract_text_from_pdf(
pdf_path,
use_tesseract=use_tesseract
)
if content and not read_only:
with open(txt_path, 'wt') as fh:
fh.write(content)
warnings_count = limit
def get_first_chapter_from_pdf_text(text, print_chapters=False, chapter_number=None):
ret = text
'''
Passed December 9, 1789.
Recorded L. B. No. 4, p. 56.
'''
# chapters = re.split(r'(?ism)(?:passed|approved) .{,30}recorded.*?$', text)
# chapters = re.split(r'(?sm)^.{,2}APTER (M[^\n]{,12})$', text)
if chapter_number:
roman = utils.get_roman_from_int(chapter_number)
# chapters = re.split(r'(?sm)^.{,2}APTER (M[^\n]{,12})$', text)
pattern = r'(?sm)\s{}\.'.format(re.escape(roman))
print(pattern)
chapters = re.findall(pattern, text)
if print_chapters:
for i, c in enumerate(chapters):
print(str(i) + '-'*40)
print(c)
if len(chapters) < 1:
ret = None
else:
ret = text
return ret
def check_extraction_quality(warnings_to_show=None):
titles_df = pd.read_csv(os.path.join(settings.DATA_PATH, 'in', TITLES_FILENAME))
def find_warnings(text, chapter_number, first_line):
'''returns a list of warning flags'''
ret = []
if | pd.isna(first_line) | pandas.isna |
from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from collections import Counter
import pandas as pd
from datetime import datetime
import numpy as np
import pygsheets as pyg
import pkg.SC_config as cnf
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets','https://www.googleapis.com/auth/spreadsheets.readonly']
# SCOPES=[]
#%%
def readPaylog():
''' Read google sheets online paylog and return as dataframe
and as pygsheet
'''
creds = getGoogleCreds() # google.oauth2.credentials
gc = pyg.authorize(custom_credentials=creds) # pygsheets client
sheetID ='<KEY>' # paylog sheet
sh = gc.open_by_key(sheetID)
myPygSheet=sh[0]
mycols=myPygSheet.get_row(1) # gets column names
paylog=pd.DataFrame(myPygSheet.get_all_records())
paylog=paylog[mycols] # reorder cols
def convInt(val):
try:
return int(val)
except:
return np.nan
def convDate(val):
try:
return datetime.strptime(val, '%m/%d/%Y')
except:
try:
return datetime.strptime(val, '%m/%d/%y')
except:
try:
return datetime.strptime(val.split(' ')[0], '%Y-%m-%d')
except:
print('Error converting', val)
return val
paylog['Date']=paylog['Date'].apply(lambda x: convDate(x))
paylog['Year']=paylog['Year'].apply(lambda x: convInt(x))
paylog['Amount']=paylog['Amount'].apply(lambda x: convInt(x))
paylog['Deposit']=paylog['Deposit'].apply(lambda x: convInt(x))
paylog['Paykey']=paylog['Paykey'].apply(lambda x: convInt(x))
paylog['Famkey']=paylog['Famkey'].apply(lambda x: convInt(x))
paylog['Plakey']=paylog['Plakey'].apply(lambda x: convInt(x))
return myPygSheet, paylog
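# Illustrative call (sketch): readPaylog returns both the pygsheets worksheet and a
# typed DataFrame, so a caller might do
#   sheet, paylog = readPaylog()
#   paylog[['Date', 'Amount', 'Deposit']].head()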
def readUniList():
    ''' Read the current version of unilist (master list with each unique uniform and
    the person to whom it is assigned)
    '''
sheetID ='<KEY>' #
rangeName = 'Unilist!A:M'
    # Read the current version of unilist from google sheets
unilist = downloadSheet(sheetID, rangeName)
def convInt(val):
try:
return int(val)
except:
return np.nan
def convDate(val):
try:
return datetime.strptime(val, '%m/%d/%Y')
except:
try:
return datetime.strptime(val, '%m/%d/%y')
except:
print('Error converting', val)
return val
unilist['Date']=unilist['Date'].apply(lambda x: convDate(x))
unilist['Plakey']=unilist['Plakey'].apply(lambda x: convInt(x))
unilist['PriorKey']=unilist['PriorKey'].apply(lambda x: convInt(x))
unilist['Number']=unilist['Number'].apply(lambda x: convInt(x))
return unilist
def readInventory():
''' Read results of recent inventories
'''
sheetID ='<KEY>' #
rangeName = 'Inventory!A:E'
inventory = downloadSheet(sheetID, rangeName)
# Transform inventory into Setname, Size, Number, Date, Location =in
grouped=inventory.groupby(['Setname','Size'])
unis=[]
for (sn, size), gr in grouped:
# TODO keep only most recent version of inventory (by date)
thisDate=gr.iloc[0]['Date']
try:
thisDate=datetime.strptime(thisDate,'%m/%d/%y')
except:
pass
nums=gr.iloc[0]['Numberlist']
if ',' in nums:
nums=nums.split(',')
try:
nums=[int(i) for i in nums]
except:
# Maybe a trailing comma problem
print('error for', nums)
else:
nums=[nums] # single valued list
for num in nums:
thisUni={'Setname':sn, 'Size':size, 'Number':num,'Date': thisDate,'Location':'in'}
unis.append(thisUni)
unis=pd.DataFrame(unis)
return unis
def getGoogleCreds():
''' Load and process credentials.json (generated by Google API)
Enables creation of google Service object to access online google sheets
'''
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
tokenFile=cnf._INPUT_DIR+'\\token.pickle'
if os.path.exists(tokenFile):
with open(tokenFile, 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(cnf._INPUT_DIR +
'\\credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(tokenFile, 'wb') as token:
pickle.dump(creds, token)
return creds
def changeColNames(headers):
''' Transform column names (google form questions) to standard abbrev versions
after Google Sheets API file download
'''
# Find header entries (google form questions) that are duplicates
dups = [k for k,v in Counter(headers).items() if v>1]
for dup in dups:
matchinds=[i for i, val in enumerate(headers) if val==dup]
# Replace 2nd instance in header list with val_2
headers=[val if i !=matchinds[1] else val+'_2' for i, val in enumerate(headers) ]
# handle duplicates
renameDict={'Player First Name':'First','Player Last Name':'Last', 'Player Date of Birth':'DOB',
'School Player Attends':'School', 'Grade Level':'Grade','Street Address':'Address',
'Zip Code':'Zip','Parish of Registration':'Parish','Alternate Placement':'AltPlacement',
'Other Roster Status':'Ocstatus', 'Other Contact':'Othercontact',
'Parent/Guardian First Name':'Pfirst1', 'Parent/Guardian First Name_2':'Pfirst2',
'Parent/Guardian Last Name':'Plast1','Parent/Guardian Last Name_2':'Plast2',
'Primary Phone':'Phone1','Primary Phone_2':'Phone2','Textable':'Text1','Textable_2':'Text2',
'Primary Email':'Email1','Primary Email_2':'Email2',
'Primary Email (enter "None" if you do not use e-mail)':'Email1',
'Would you be willing to act as a coach or assistant':'Coach',
'Would you be willing to act as a coach or assistant_2':'Coach2',
"Player's Uniform Size":'Unisize',
"Does your child already have an":'Unineed'}
# substring matching
rename_close={'grade level':'Grade', 'uniform size':'Unisize',
'child already have':'Unineed'}
newNames=[]
for val in headers:
if val in renameDict:
newNames.append(renameDict.get(val))
elif len([i for i in list(rename_close.keys()) if i in val.lower()])>0:
if len([i for i in list(rename_close.keys()) if i in val.lower()])>1:
                print('Multiple close colname matches for {}'.format(val))
newNames.append(val)
continue
else:
matchkey=[i for i in list(rename_close.keys()) if i in val.lower()][0]
newNames.append(rename_close.get(matchkey))
else: # not found in any rename dicts so just keep
newNames.append(val)
unchanged=['Timestamp','Gender','Sport','Gkey','Plakey','Famkey']
# check for invalid header names
validNames=list(renameDict.values()) + unchanged
badNames=[i for i in newNames if i not in validNames]
if len(badNames)>0:
print('Invalid column names:',', '.join(badNames))
return newNames
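# Worked example (hypothetical input, using only the mappings above): a duplicated
# 'Textable' question becomes 'Textable' and 'Textable_2' before renaming, so
#   changeColNames(['Player First Name', 'Textable', 'Textable'])
#   -> ['First', 'Text1', 'Text2']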
def downloadSignups(sheetID, rangeName):
''' Download all from current season's signups
'''
creds = getGoogleCreds() # google.oauth2.credentials
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=sheetID,
range=rangeName).execute()
values = result.get('values', []) # list of lists
if len(values)==0:
print('Signup data not found')
return pd.DataFrame()
headers = changeColNames(values[0])
# Google API retrieved rows each become lists truncated at last value
newValList=[]
for vallist in values[1:]:
while len(vallist)<len(headers):
vallist.append('') # add blanks for missing/optional answer
newEntry={}
for i, val in enumerate(vallist):
newEntry[headers[i]]= val
newValList.append(newEntry)
signups= | pd.DataFrame(newValList, columns=headers) | pandas.DataFrame |
import pandas as pd
import numpy as np
from custom_stuff import Alone
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
data = | pd.read_csv('./train.csv', index_col='PassengerId') | pandas.read_csv |
from __future__ import absolute_import
import datetime
from copy import deepcopy
import numpy as np
import pandas as pd
from scipy.stats import norm
import expan.core.statistics as statx
from expan.core.debugging import Dbg
from expan.core.version import __version__
class Results(object):
"""
A Results instance represents the results of a series of analyses such as
SGA or deltaKPI.
Q: could we make this a subclass of DataFrame (i.e. provide this class in an
'is-a' relationship with DataFrame, rather than the 'has-a' relationship it
has now? It seems to be a difficult thing to subclass the DataFrame object
for some reason. https://github.com/pydata/pandas/pull/4271
For now, will leave as a 'has-a' class.
Todo:
Can we remove the 'value' level from the columns, so that the columns of the dataframe are simply the names of the variants?
This will make the columns a normal index rather than a multi-index. Currently, always a multi-index with second level only containing a single value 'value'.
"""
# TODO: maybe move these two to the __init__?
mandatory_index_levels = [
'metric',
'subgroup_metric',
'subgroup',
'statistic',
'pctile']
mandatory_column_levels = ['variant']
def __init__(self, df, metadata={}, dbg=None):
"""
Want to be able to create results from just a single dataframe.
Args:
df (pandas.DataFrame): input dataframe
metadata (dict): input metadata
dbg:
"""
self.df = df
self.metadata = metadata
self.metadata['version'] = __version__
self.metadata['errors'] = {}
self.metadata['warnings'] = {}
self.dbg = dbg or Dbg()
@property
def binning(self):
"""Return the binning object."""
return self.metadata['binning']
def set_binning(self, binning):
"""Store a binning object in the metadata."""
self.metadata['binning'] = binning
def _sortlevels(self):
self.df.sortlevel(axis=0, inplace=True, sort_remaining=True)
self.df.sortlevel(axis=1, inplace=True, sort_remaining=True)
def append_delta(self, metric, variant, mu, pctiles,
samplesize_variant,
samplesize_baseline,
subgroup_metric='-',
subgroup=None):
"""
Appends the results of a delta.
Modifies (or creates) the results data (df).
Args:
metric:
variant:
mu:
pctiles:
samplesize_variant:
samplesize_baseline:
subgroup_metric:
subgroup:
"""
df = self._delta_to_dataframe(metric, variant, mu, pctiles,
samplesize_variant,
samplesize_baseline,
subgroup_metric='-',
subgroup=None)
if self.df is None:
self.df = df
else:
self.df = self.df.append(df)
self._sortlevels()
def variants(self):
"""
Return the variants represented in this object
"""
return self.df.columns.levels[0]
def index_values(self, level='metric'):
"""
Return the metrics represented in this Results object
"""
return self.df.index.get_level_values(level=level).unique()
def relative_uplift(self, analysis_type, metric=None, subgroup_metric='-'):
"""Calculate the relative uplift for the given metrics and subgroup
metrics.
"""
uplifts = self.statistic(analysis_type, 'uplift', metric, subgroup_metric)
baseline_abs = self.statistic(analysis_type, 'variant_mean', metric,
subgroup_metric).loc[:, ('value', self.metadata['baseline_variant'])]
val = uplifts.values / baseline_abs.values[:, np.newaxis]
df = pd.DataFrame(val, columns=uplifts.columns)
# reconstruct indices
for i in self.mandatory_index_levels:
df[i] = uplifts.index.get_level_values(i)
df.set_index(self.mandatory_index_levels, inplace=True)
# df.index.set_levels(['uplift_rel']*df.index.shape[0], level='statistic', inplace=True)
# TODO: do we return a data frame or a Results object here?
return df
def calculate_prob_uplift_over_zero(self):
"""
"""
# check if the subgroup index is NaN
# NB: this will NOT work if we store delta and SGA results in the same object
# if the subgroup index contains only NaNs
if len(self.df.index.levels[2]) == 0:
df = self.df.groupby(level=['metric']).apply(
lambda x: self._prob_uplift_over_zero_single_metric(x, self.metadata['baseline_variant']))
# remove redundant levels (coming from groupby)
df.reset_index(level=0, drop=True, inplace=True)
else:
df = self.df.groupby(level=['metric', 'subgroup_metric', 'subgroup']).apply(
lambda x: self._prob_uplift_over_zero_single_metric(x, self.metadata['baseline_variant']))
# remove redundant levels (coming from groupby)
df.reset_index(level=[0, 1, 2], drop=True, inplace=True)
self.df = df
def delta_means(self, metric=None, subgroup_metric='-'):
"""
Args:
metric:
subgroup_metric:
Returns:
"""
return self.statistic('delta', 'variant_mean', metric, subgroup_metric)
def sga_means(self, metric=None, subgroup_metric='-'):
"""
Args:
metric:
subgroup_metric:
Returns:
"""
return self.statistic('sga', 'variant_mean', metric, subgroup_metric)
def uplifts(self, metric=None, subgroup_metric='-'):
"""
Args:
metric:
subgroup_metric:
Returns:
"""
return self.statistic('delta', 'uplift', metric, subgroup_metric)
def sga_uplifts(self, metric=None, subgroup_metric='-'):
"""
Args:
metric:
subgroup_metric:
Returns:
"""
return self.statistic('sga', 'uplift', metric, subgroup_metric)
def sample_sizes(self, analysis_type='delta', metric=None, subgroup_metric='-'):
"""
Args:
analysis_type:
metric:
subgroup_metric:
Returns:
"""
return self.statistic(analysis_type, 'sample_size', metric, subgroup_metric)
def statistic(self, analysis_type, statistic=None, metric=None,
subgroup_metric='-',
time_since_treatment='-',
include_pctiles=True):
"""
This is just a basic 'formatter' to allow easy access to results without
knowing the ordering of the index, etc. and to have sensible defaults.
All of this can be accomplished with fancy indexing on the dataframe
directly, but this should just serve as a convenience and an obvious
place to 'document' what the typical use-case is.
For all arguments, None means all, and '-' means only those for which
this particular argument is undefined (e.g. subgroup_metric='-')
Args:
analysis_type (string): the type of analysis that produced the TODO: implement this!
results (several can be present in a single Result object). Must be
one of the following:
- 'delta': only those with no time_since_treatment information, and no subgroup defined
- 'sga': only those with subgroup defined
- 'trend': only those with time_since_treatment defined
- None: no restriction done
statistic (string): the type of data you want, such as 'uplift'
metric (string): which metrics you are interested in
time_since_treatment (int?): TODO: implement
include_pctiles (bool): some statistics (e.g. 'uplift') can be present
with percentiles defined, as opposed to just a mean. If this is true,
they'll be returned also. TODO: implement this!
"""
# NOTE: throws AssertionError for trend results and unittest example results
# assert (self.df.index.names[0:4] == Results.mandatory_index_levels[0:4])
# assert (self.df.columns.names[1] == Results.mandatory_column_levels[0])
mean_results = self.df.xs((statistic, metric, subgroup_metric),
level=('statistic', 'metric', 'subgroup_metric'), drop_level=False)
# metric_slicer = metric or slice(None)
# statistic_slicer = stat or slice(None)
# rows = (metric,subgroup_metric,slice(None),statistic_slicer)
# levels_to_drop = ['subgroup_metric', 'statistic']
# if subgroup_metric == '-':
# levels_to_drop.append('subgroup')
# if metric is not None:
# levels_to_drop.insert(0, 'metric')
# if stat == 'pctile':
# cols = (slice(None))
# else:
# cols = (slice(None),'value')
# levels_to_drop.append('pctile')
# mean_results = self.df.loc[rows, cols]
# #mean_results.index = mean_results.reset_index(levels_to_drop,drop=True)
# mean_results = mean_results.reset_index(levels_to_drop,drop=True)
# mean_results.columns = mean_results.columns.droplevel(1)
return mean_results
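    # Illustrative call (a sketch, assuming a populated Results object `res` and a
    # hypothetical metric name 'conversion'):
    #   res.statistic('delta', 'uplift', metric='conversion')
    # selects the 'uplift' rows for that metric with no subgroup ('-').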
def bounds(self, metric=None, subgroup_metric='-'):
"""
Args:
metric:
subgroup_metric:
Returns:
"""
if False:
rows = (slice(None), '-', slice(None), ['uplift', 'uplift_pctile'])
cols = (slice(None), 'value')
results = self.df.loc[rows, cols].unstack(['statistic', 'pctile'])
results.columns = results.columns.droplevel(1)
if subgroup_metric == '-':
results.reset_index(['subgroup', 'subgroup_metric'], drop=True)
else:
pctiles = self.statistic('pctile').unstack('pctile')
mns = self.uplifts()
mns.columns = pd.MultiIndex.from_product(
(self.means().columns, ['mean']), names=['variant', 'statistic'])
results = pd.concat((pctiles, mns), axis=1)
results.columns.names = ['variant', 'statistic']
return results
def __str__(self):
return 'Results for \'{}\' with {:d} variants, {:d} metrics, {:d} subgroup metrics. Means are:\n{}'.format(
str(self.metadata.get('experiment')),
len(self.variants()),
len(self.index_values('metric')),
len(self.index_values('subgroup_metric')),
str(self.means()),
)
def __repr__(self):
return 'Results(metadata={}, \ndf={})'.format(repr(self.metadata),
repr(self.df.unstack('pctile')))
def to_csv(self, fpath):
"""
Persist to a csv file, losing metadata.
Args:
fpath: file path where the csv should be created
Returns:
csv file
Note:
This will lose all metadata.
"""
res = deepcopy(self.df)
res.columns = res.columns.droplevel(0)
res = res.reset_index()
res.to_csv(fpath, index=False)
def to_hdf(self, fpath):
"""
Persist to an HDF5 file, preserving metadata.
MetaData is stored as attributes on a Group called 'metadata'. This group
doesn't include any datasets, but was used to avoid interfering with the
attributes that pandas stores on the 'data' Group.
Args:
fpath:
Returns:
"""
import h5py
store = pd.HDFStore(fpath)
store['data'] = self.df # write to HDF5
store.close()
# TODO: surely can do this via single interface rather than opening and closing?
hfile = h5py.File(fpath)
md = hfile.require_group('metadata')
datetime_conversions = set(md.attrs.get('_datetime_attributes', set()))
for k, v in list(self.metadata.items()):
if k == '_datetime_attributes':
continue
if v is None:
continue
self.dbg(3, 'to_hdf: storing metadata {}'.format(k))
if isinstance(v, pd.Timestamp) or isinstance(v, datetime.datetime):
v = str(v)
self.dbg(3, ' -> converted datetime/timestamp to string')
datetime_conversions.add(k)
md.attrs[k] = v
if len(datetime_conversions) > 0:
md.attrs['_datetime_attributes'] = [str(x) for x in datetime_conversions]
hfile.close()
def to_json(self, fpath=None):
"""
Produces either a JSON string (if there is no filepath specified)
or a JSON file containing the results.
Args:
fpath: filepath where the result JSON file should be stored
Returns:
string: JSON string with the results
file: JSON file with the results
"""
import json
# copy results dataframe so that we don't perform modifications on original
df = deepcopy(self.df)
# reindex manually to remove one level of nesting
try:
for column in df.index.names:
df[column] = df.index.get_level_values(column)
except AttributeError:
# trend() results are stored a bit differently, this needs to be addressed
self.dbg(-1, "trend() results are not supported yet")
return None
# reset the index
df = df.reset_index(drop=True).copy()
# fill numpy nans with string nans
df.fillna("nan", inplace=True)
# ----------------------------------------------------------------------------------------------------------------------------------
# | | | |
# | labels for the new dimension | next dimension | data frame index mask |
# | | | |
# ----------------------------------------------------------------------------------------------------------------------------------
table = [(lambda x: df.value.keys(), 'variant', lambda x: True),
(lambda x: df.metric.unique(), 'metric', lambda x: df.metric == x),
(lambda x: df[df.metric == x].subgroup_metric.unique(), 'subgroup_metric',
lambda x: df.subgroup_metric == x),
(lambda x: df[df.subgroup_metric == x].subgroup.unique(), 'subgroup', lambda x: df.subgroup == x),
(lambda x: df[df.subgroup == x].statistic.unique(), 'statistic', lambda x: df.statistic == x),
(lambda x: df[df.statistic == x].pctile.unique(), 'pctile', lambda x: df.pctile == x)]
# traverse the tree of dimensions aka indices
# in parallel refining the data frame view mask
def go(table, name=None, ixes=[], mask=pd.Series([True] * len(df))):
if not table:
variant = dict(ixes)['variant']
return {"name": str(name), "value": df[mask].value[variant].values[0]}
else:
head, tail = table[0], table[1:]
f, nextDim, flter = head[0], head[1], head[2]
val = [go(tail, n, [(nextDim, n)] + ixes, flter(n) & mask) for n in f(name)]
return {"name": name, nextDim + "s": val}
json_tree = {'variants': go(table, 'none')['variants']}
# store metadata in temporary variable as UserWarning() needs to be converted to string so that JSON serialization can continue
metadata = self.metadata
for m in set(metadata.keys()).intersection(['errors', 'warnings']):
for k in metadata[m]:
metadata[m][k] = str(metadata[m][k])
json_tree['metadata'] = metadata
json_string = json.dumps(json_tree)
try:
json.loads(json_string)
except ValueError as e:
self.dbg(-2, 'Invalid json created in expan.results.to_json(): %s' % e)
return None
if fpath:
with open(fpath, 'w') as json_file:
json.dump(obj=json_tree, fp=json_file)
else:
return json_string
    # Fixme: deprecated?
def from_hdf(self, fpath, dbg=None):
"""
Restores a Results object from HDF5 as created by the to_hdf method.
Args:
fpath:
dbg:
Returns:
"""
if dbg is None:
dbg = Dbg()
import h5py
data = pd.read_hdf(fpath, 'data')
hfile = h5py.File(fpath)
md = hfile['metadata']
datetime_conversions = set(md.attrs.get('_datetime_attributes', set()))
metadata = {}
for k, v in list(md.attrs.items()):
if k == '_datetime_attributes':
continue
dbg(3, 'from_hdf: retrieving metadata {}'.format(k))
if k in datetime_conversions:
dbg(3, ' -> converting to Timestamp')
v = | pd.Timestamp(v) | pandas.Timestamp |
"""
This file tests the utilities stored in cassiopeia/data/utilities.py
"""
import unittest
from typing import Dict, Optional
import networkx as nx
import numpy as np
import pandas as pd
from cassiopeia.data import CassiopeiaTree
from cassiopeia.data import utilities as data_utilities
from cassiopeia.preprocess import utilities as preprocessing_utilities
class TestDataUtilities(unittest.TestCase):
def setUp(self):
# this should obey PP for easy checking of ancestral states
self.character_matrix = pd.DataFrame.from_dict(
{
"node3": [1, 0, 0, 0, 0, 0, 0, 0],
"node7": [1, 1, 0, 0, 0, 0, 0, 0],
"node9": [1, 1, 1, 0, 0, 0, 0, 0],
"node11": [1, 1, 1, 1, 0, 0, 0, 0],
"node13": [1, 1, 1, 1, 1, 0, 0, 0],
"node15": [1, 1, 1, 1, 1, 1, 0, 0],
"node17": [1, 1, 1, 1, 1, 1, 1, 0],
"node18": [1, 1, 1, 1, 1, 1, 1, 1],
"node5": [2, 0, 0, 0, 0, 0, 0, 0],
"node6": [2, 2, 0, 0, 0, 0, 0, 0],
},
orient="index",
)
self.priors = {
0: {1: 0.5, 2: 0.5},
1: {1: 0.4, 2: 0.6},
2: {1: 1.0},
3: {1: 1.0},
4: {1: 1.0},
5: {1: 1.0},
6: {1: 1.0},
7: {1: 1.0},
}
# Test allele table
at_dict = {
"cellBC": ["cellA", "cellA", "cellA", "cellB", "cellC"],
"intBC": ["A", "B", "C", "A", "C"],
"r1": ["None", "ATC", "GGG", "ATA", "GAA"],
"r2": ["None", "AAA", "GAA", "TTT", "GAA"],
"r3": ["ATC", "TTT", "ATA", "ATA", "ATA"],
"UMI": [5, 10, 1, 30, 30],
}
self.allele_table = pd.DataFrame.from_dict(at_dict)
self.indel_to_prior = pd.DataFrame.from_dict(
{
"ATC": 0.5,
"GGG": 0.2,
"GAA": 0.1,
"AAA": 0.05,
"TTT": 0.05,
"ATA": 0.1,
},
orient="index",
columns=["freq"],
)
# Test allele table without normal cassiopeia columns
self.non_cassiopeia_allele_table = self.allele_table.copy()
self.non_cassiopeia_allele_table.rename(
columns={"r1": "cs1", "r2": "cs2", "r3": "cs3"}, inplace=True
)
def test_bootstrap_character_matrices_no_priors(self):
random_state = np.random.RandomState(123431235)
bootstrap_samples = data_utilities.sample_bootstrap_character_matrices(
self.character_matrix, num_bootstraps=10, random_state=random_state
)
self.assertEqual(len(bootstrap_samples), 10)
for (bootstrap_matrix, bootstrap_priors) in bootstrap_samples:
self.assertCountEqual(
self.character_matrix.index, bootstrap_matrix.index
)
self.assertEqual(
self.character_matrix.shape[1], bootstrap_matrix.shape[1]
)
self.assertRaises(
AssertionError,
pd.testing.assert_frame_equal,
self.character_matrix,
bootstrap_matrix,
)
def test_bootstrap_character_matrices_with_priors(self):
random_state = np.random.RandomState(12345)
bootstrap_samples = data_utilities.sample_bootstrap_character_matrices(
self.character_matrix,
num_bootstraps=10,
prior_probabilities=self.priors,
random_state=random_state,
)
self.assertEqual(len(bootstrap_samples), 10)
for (bootstrap_matrix, bootstrap_priors) in bootstrap_samples:
self.assertCountEqual(
self.character_matrix.index, bootstrap_matrix.index
)
self.assertEqual(
self.character_matrix.shape[1], bootstrap_matrix.shape[1]
)
self.assertRaises(
AssertionError,
pd.testing.assert_frame_equal,
self.character_matrix,
bootstrap_matrix,
)
self.assertEqual(
len(bootstrap_priors), self.character_matrix.shape[1]
)
def test_bootstrap_allele_tables(self):
random_state = np.random.RandomState(123431235)
(
character_matrix,
_,
_,
) = preprocessing_utilities.convert_alleletable_to_character_matrix(
self.allele_table
)
bootstrap_samples = data_utilities.sample_bootstrap_allele_tables(
self.allele_table, num_bootstraps=10, random_state=random_state
)
self.assertEqual(len(bootstrap_samples), 10)
for (
bootstrap_matrix,
bootstrap_priors,
            bootstrap_state_to_indel,
bootstrap_intbcs,
) in bootstrap_samples:
self.assertEqual(
len(bootstrap_intbcs),
len(self.allele_table["intBC"].unique()) * 3,
)
self.assertCountEqual(
character_matrix.index, bootstrap_matrix.index
)
self.assertEqual(
character_matrix.shape[1], bootstrap_matrix.shape[1]
)
self.assertRaises(
AssertionError,
pd.testing.assert_frame_equal,
character_matrix,
bootstrap_matrix,
)
def test_bootstrap_allele_tables_non_cassiopeia_allele_table(self):
random_state = np.random.RandomState(123431235)
(
character_matrix,
_,
_,
) = preprocessing_utilities.convert_alleletable_to_character_matrix(
self.non_cassiopeia_allele_table, cut_sites=["cs1", "cs2", "cs3"]
)
bootstrap_samples = data_utilities.sample_bootstrap_allele_tables(
self.non_cassiopeia_allele_table,
num_bootstraps=10,
random_state=random_state,
cut_sites=["cs1", "cs2", "cs3"],
)
self.assertEqual(len(bootstrap_samples), 10)
for (
bootstrap_matrix,
bootstrap_priors,
            bootstrap_state_to_indel,
bootstrap_intbcs,
) in bootstrap_samples:
self.assertEqual(
len(bootstrap_intbcs),
len(self.non_cassiopeia_allele_table["intBC"].unique()) * 3,
)
self.assertCountEqual(
character_matrix.index, bootstrap_matrix.index
)
self.assertEqual(
character_matrix.shape[1], bootstrap_matrix.shape[1]
)
self.assertRaises(
AssertionError,
pd.testing.assert_frame_equal,
character_matrix,
bootstrap_matrix,
)
def test_bootstrap_allele_tables_priors(self):
random_state = np.random.RandomState(12345)
(
character_matrix,
_,
_,
) = preprocessing_utilities.convert_alleletable_to_character_matrix(
self.allele_table
)
bootstrap_samples = data_utilities.sample_bootstrap_allele_tables(
self.allele_table,
num_bootstraps=10,
indel_priors=self.indel_to_prior,
random_state=random_state,
)
self.assertEqual(len(bootstrap_samples), 10)
for (
bootstrap_matrix,
bootstrap_priors,
            bootstrap_state_to_indel,
bootstrap_intbcs,
) in bootstrap_samples:
self.assertEqual(
len(bootstrap_intbcs),
len(self.allele_table["intBC"].unique()) * 3,
)
self.assertCountEqual(
character_matrix.index, bootstrap_matrix.index
)
self.assertEqual(
character_matrix.shape[1], bootstrap_matrix.shape[1]
)
self.assertRaises(
AssertionError,
pd.testing.assert_frame_equal,
character_matrix,
bootstrap_matrix,
)
self.assertEqual(len(bootstrap_priors), character_matrix.shape[1])
def test_to_newick_no_branch_lengths(self):
tree = nx.DiGraph()
tree.add_nodes_from(["A", "B", "C", "D", "E", "F"])
tree.add_edge("F", "A", length=0.1)
tree.add_edge("F", "B", length=0.2)
tree.add_edge("F", "E", length=0.5)
tree.add_edge("E", "C", length=0.3)
tree.add_edge("E", "D", length=0.4)
newick_string = data_utilities.to_newick(tree)
self.assertEqual(newick_string, "(A,B,(C,D));")
def test_to_newick_branch_lengths(self):
tree = nx.DiGraph()
tree.add_nodes_from(["A", "B", "C", "D", "E", "F"])
tree.add_edge("F", "A", length=0.1)
tree.add_edge("F", "B", length=0.2)
tree.add_edge("F", "E", length=0.5)
tree.add_edge("E", "C", length=0.3)
tree.add_edge("E", "D", length=0.4)
newick_string = data_utilities.to_newick(
tree, record_branch_lengths=True
)
self.assertEqual(newick_string, "(A:0.1,B:0.2,(C:0.3,D:0.4):0.5);")
def test_lca_characters(self):
vecs = [[1, 0, 3, 4, 5], [1, -1, -1, 3, -1], [1, 2, 3, 2, -1]]
ret_vec = data_utilities.get_lca_characters(
vecs, missing_state_indicator=-1
)
self.assertEqual(ret_vec, [1, 0, 3, 0, 5])
def test_lca_characters_ambiguous(self):
vecs = [
[(1, 1), (0, 2), (3,), (4,), (5,)],
[1, -1, -1, 3, -1],
[1, 2, 3, 2, -1],
]
ret_vec = data_utilities.get_lca_characters(
vecs, missing_state_indicator=-1
)
self.assertEqual(ret_vec, [1, 0, 3, 0, 5])
def test_resolve_most_abundant(self):
state = (1, 2, 3, 3)
self.assertEqual(data_utilities.resolve_most_abundant(state), 3)
def test_simple_phylogenetic_weights_matrix(self):
tree = nx.DiGraph()
tree.add_nodes_from(["A", "B", "C", "D", "E", "F"])
tree.add_edge("F", "A", length=0.1)
tree.add_edge("F", "B", length=0.2)
tree.add_edge("F", "E", length=0.5)
tree.add_edge("E", "C", length=0.3)
tree.add_edge("E", "D", length=0.4)
tree = CassiopeiaTree(tree=tree)
weight_matrix = data_utilities.compute_phylogenetic_weight_matrix(tree)
expected_weight_matrix = pd.DataFrame.from_dict(
{
"A": [0.0, 0.3, 0.9, 1.0],
"B": [0.3, 0.0, 1.0, 1.1],
"C": [0.9, 1.0, 0.0, 0.7],
"D": [1.0, 1.1, 0.7, 0.0],
},
orient="index",
columns=["A", "B", "C", "D"],
)
pd.testing.assert_frame_equal(weight_matrix, expected_weight_matrix)
def test_simple_phylogenetic_weights_matrix_inverse(self):
tree = nx.DiGraph()
tree.add_nodes_from(["A", "B", "C", "D", "E", "F"])
tree.add_edge("F", "A", length=0.1)
tree.add_edge("F", "B", length=0.2)
tree.add_edge("F", "E", length=0.5)
tree.add_edge("E", "C", length=0.3)
tree.add_edge("E", "D", length=0.4)
tree = CassiopeiaTree(tree=tree)
weight_matrix = data_utilities.compute_phylogenetic_weight_matrix(
tree, inverse=True
)
expected_weight_matrix = pd.DataFrame.from_dict(
{
"A": [0.0, 1.0 / 0.3, 1.0 / 0.9, 1.0],
"B": [1.0 / 0.3, 0.0, 1.0, 1.0 / 1.1],
"C": [1.0 / 0.9, 1.0, 0.0, 1.0 / 0.7],
"D": [1.0, 1.0 / 1.1, 1.0 / 0.7, 0.0],
},
orient="index",
columns=["A", "B", "C", "D"],
)
| pd.testing.assert_frame_equal(weight_matrix, expected_weight_matrix) | pandas.testing.assert_frame_equal |
from datetime import timedelta,datetime
import pandas as pd
from database.market import Market
class Analyzer(object):
@classmethod
def pv_analysis(self,portfolio):
stuff = []
total_cash = 100
trades = portfolio.trades
trades = trades[(trades["date"] >= portfolio.start) & (trades["date"] <= portfolio.end)]
trades["quarter"] = [x.quarter for x in trades["sell_date"]]
trades["year"] = [x.year for x in trades["sell_date"]]
if trades.index.size < 1:
return pd.DataFrame([{"message":"no trades..."}])
number_of_strats = len(portfolio.strats.keys())
for strategy in list(portfolio.strats.keys()):
strat_trades = trades[trades["strategy"]==strategy]
cash = []
for seat in range(portfolio.seats):
initial = float(total_cash / number_of_strats / portfolio.seats )
seat_trades = strat_trades[strat_trades["seat"]==seat]
for delta in seat_trades["delta"]:
initial = initial * (1+delta)
cash.append(initial)
strat_trades["pv"] = cash
stuff.append(strat_trades)
analysis = pd.concat(stuff).pivot_table(index=["strategy","date"],columns="seat",values="pv").fillna(method="ffill").fillna(float(total_cash / number_of_strats / portfolio.seats )).reset_index()
analysis["pv"] = [sum([row[1][i] for i in range(portfolio.seats)]) for row in analysis.iterrows()]
final = analysis.pivot_table(index="date",columns="strategy",values="pv").fillna(method="ffill").fillna(float(total_cash / number_of_strats)).reset_index()
return final
@classmethod
def industry_analysis(self,portfolio):
market = Market()
market.connect()
sp5 = market.retrieve("sp500")
market.disconnect()
sp5.rename(columns={"Symbol":"ticker"},inplace=True)
trades = portfolio.trades
trades["quarter"] = [x.quarter for x in trades["sell_date"]]
trades["year"] = [x.year for x in trades["sell_date"]]
if trades.index.size < 1:
return | pd.DataFrame([{"message":"no trades..."}]) | pandas.DataFrame |
import numpy as np
from graspologic.utils import largest_connected_component
import pandas as pd
def get_paired_inds(meta, check_in=True, pair_key="pair", pair_id_key="pair_id"):
pair_meta = meta.copy()
pair_meta["_inds"] = range(len(pair_meta))
# remove any center neurons
pair_meta = pair_meta[pair_meta["hemisphere"].isin(["L", "R"])]
# remove any neurons for which the other in the pair is not in the metadata
if check_in:
pair_meta = pair_meta[pair_meta[pair_key].isin(pair_meta.index)]
# remove any pairs for which there is only one neuron
pair_group_size = pair_meta.groupby(pair_id_key).size()
remove_pairs = pair_group_size[pair_group_size == 1].index
pair_meta = pair_meta[~pair_meta[pair_id_key].isin(remove_pairs)]
# make sure each pair is "valid" now
assert pair_meta.groupby(pair_id_key).size().min() == 2
assert pair_meta.groupby(pair_id_key).size().max() == 2
# sort into pairs interleaved
pair_meta.sort_values([pair_id_key, "hemisphere"], inplace=True)
lp_inds = pair_meta[pair_meta["hemisphere"] == "L"]["_inds"]
rp_inds = pair_meta[pair_meta["hemisphere"] == "R"]["_inds"]
# double check that everything worked
assert (
meta.iloc[lp_inds][pair_id_key].values == meta.iloc[rp_inds][pair_id_key].values
).all()
return lp_inds, rp_inds
def get_paired_subgraphs(adj, lp_inds, rp_inds):
ll_adj = adj[np.ix_(lp_inds, lp_inds)]
rr_adj = adj[np.ix_(rp_inds, rp_inds)]
lr_adj = adj[np.ix_(lp_inds, rp_inds)]
rl_adj = adj[np.ix_(rp_inds, lp_inds)]
return (ll_adj, rr_adj, lr_adj, rl_adj)
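# Sketch of how the two helpers above compose (no new API assumed):
#   lp_inds, rp_inds = get_paired_inds(meta)
#   ll_adj, rr_adj, lr_adj, rl_adj = get_paired_subgraphs(adj, lp_inds, rp_inds)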
def to_largest_connected_component(adj, meta=None):
adj, lcc_inds = largest_connected_component(adj, return_inds=True)
if meta is not None:
return adj, meta.iloc[lcc_inds]
else:
return adj
def to_pandas_edgelist(g):
    """Works for multigraphs; the networkx one wasn't returning edge keys."""
rows = []
for u, v, k in g.edges(keys=True):
data = g.edges[u, v, k]
data["source"] = u
data["target"] = v
data["key"] = k
rows.append(data)
edges = pd.DataFrame(rows)
edges["edge"] = list(zip(edges["source"], edges["target"], edges["key"]))
edges.set_index("edge", inplace=True)
return edges
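# Minimal hypothetical example for the multigraph edgelist above (assumes networkx
# imported as nx, which this module does not do itself):
#   g = nx.MultiDiGraph()
#   g.add_edge("a", "b", weight=1.0)
#   g.add_edge("a", "b", weight=2.0)
#   to_pandas_edgelist(g)  # index holds ("a", "b", 0) and ("a", "b", 1)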
def get_paired_nodes(nodes):
paired_nodes = nodes[nodes["pair_id"] != -1]
pair_ids = paired_nodes["pair_id"]
pair_counts = pair_ids.value_counts()
pair_counts = pair_counts[pair_counts == 1]
pair_ids = pair_ids[pair_ids.isin(pair_counts.index)]
paired_nodes = paired_nodes[paired_nodes["pair_id"].isin(pair_ids)].copy()
return paired_nodes
def get_seeds(left_nodes, right_nodes):
left_paired_nodes = get_paired_nodes(left_nodes)
right_paired_nodes = get_paired_nodes(right_nodes)
pairs_in_both = np.intersect1d(
left_paired_nodes["pair_id"], right_paired_nodes["pair_id"]
)
left_paired_nodes = left_paired_nodes[
left_paired_nodes["pair_id"].isin(pairs_in_both)
]
right_paired_nodes = right_paired_nodes[
right_paired_nodes["pair_id"].isin(pairs_in_both)
]
left_seeds = left_paired_nodes.sort_values("pair_id")["inds"]
right_seeds = right_paired_nodes.sort_values("pair_id")["inds"]
assert (
left_nodes.iloc[left_seeds]["pair_id"].values
== right_nodes.iloc[right_seeds]["pair_id"].values
).all()
return (left_seeds, right_seeds)
def remove_group(
left_adj, right_adj, left_nodes, right_nodes, group, group_key="simple_group"
):
left_nodes["inds"] = range(len(left_nodes))
sub_left_nodes = left_nodes[left_nodes[group_key] != group]
sub_left_inds = sub_left_nodes["inds"].values
right_nodes["inds"] = range(len(right_nodes))
sub_right_nodes = right_nodes[right_nodes[group_key] != group]
sub_right_inds = sub_right_nodes["inds"].values
sub_left_adj = left_adj[np.ix_(sub_left_inds, sub_left_inds)]
sub_right_adj = right_adj[np.ix_(sub_right_inds, sub_right_inds)]
return sub_left_adj, sub_right_adj, sub_left_nodes, sub_right_nodes
def select_lateral_nodes(adj_df, nodes):
counts = nodes.groupby("pair").size()
singleton_classes = counts[counts != 2].index
removed = nodes[nodes["pair"].isin(singleton_classes)]
nodes = nodes[~nodes["pair"].isin(singleton_classes)]
nodes = nodes.sort_values(["hemisphere", "pair"])
left_nodes = nodes[nodes["hemisphere"] == "L"]
right_nodes = nodes[nodes["hemisphere"] == "R"]
assert (left_nodes["pair"].values == right_nodes["pair"].values).all()
adj_df = adj_df.reindex(index=nodes.index, columns=nodes.index)
return adj_df, nodes, removed
def ensure_connected(adj_df, nodes):
adj_df = adj_df.reindex(index=nodes.index, columns=nodes.index)
adj = adj_df.values
adj_lcc, inds = largest_connected_component(adj, return_inds=True)
removed = nodes[~nodes.index.isin(nodes.index[inds])]
nodes = nodes.iloc[inds]
adj_df = pd.DataFrame(data=adj_lcc, index=nodes.index, columns=nodes.index)
return adj_df, nodes, removed
def split_nodes(nodes):
nodes = nodes.sort_values(["hemisphere", "pair"])
left_nodes = nodes[nodes["hemisphere"] == "L"]
right_nodes = nodes[nodes["hemisphere"] == "R"]
assert (left_nodes["pair"].values == right_nodes["pair"].values).all()
return left_nodes, right_nodes
def create_node_data(node_ids, exceptions=[]):
node_rows = []
for node_id in node_ids:
is_sided = True
if not ((node_id[-1] == "L") or (node_id[-1] == "R")):
is_exception = False
for exception in exceptions:
if exception in node_id:
is_exception = True
if not is_exception:
is_sided = False
if is_sided:
# node_id_no_side = node_id.strip("0123456789")
left_pos = node_id.rfind("L")
right_pos = node_id.rfind("R")
is_right = bool(np.argmax((left_pos, right_pos)))
side_indicator_loc = right_pos if is_right else left_pos
node_pair = node_id[:side_indicator_loc] + node_id[side_indicator_loc + 1 :]
hemisphere = "R" if is_right else "L"
node_rows.append(
{"node_id": node_id, "pair": node_pair, "hemisphere": hemisphere}
)
nodes = | pd.DataFrame(node_rows) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
from folium import Map, FeatureGroup, Marker, Popup
from folium.utilities import (
validate_location,
validate_locations,
if_pandas_df_convert_to_numpy,
camelize,
deep_copy,
get_obj_in_upper_tree,
parse_options,
)
@pytest.mark.parametrize('location', [
(5, 3),
[5., 3.],
np.array([5, 3]),
np.array([[5, 3]]),
| pd.Series([5, 3]) | pandas.Series |
from sys import path
from os.path import expanduser
#path.append('/home/ubuntu/StatisticalClearSky/')
path.append('/Users/bennetmeyers/Documents/ClearSky/StatisticalClearSky/')
from statistical_clear_sky.algorithm.iterative_fitting import IterativeFitting
from solardatatools import standardize_time_axis, make_2d, fix_time_shifts
import pp
import s3fs
import pandas
import numpy
import sys
from statistical_clear_sky.algorithm.time_shift.clustering\
import ClusteringTimeShift
from\
statistical_clear_sky.algorithm.initialization.singular_value_decomposition\
import SingularValueDecomposition
from statistical_clear_sky.algorithm.initialization.linearization_helper\
import LinearizationHelper
from statistical_clear_sky.algorithm.initialization.weight_setting\
import WeightSetting
from statistical_clear_sky.algorithm.exception import ProblemStatusError
from statistical_clear_sky.algorithm.minimization.left_matrix\
import LeftMatrixMinimization
from statistical_clear_sky.algorithm.minimization.right_matrix\
import RightMatrixMinimization
from statistical_clear_sky.algorithm.serialization.state_data import StateData
from statistical_clear_sky.algorithm.serialization.serialization_mixin\
import SerializationMixin
from statistical_clear_sky.algorithm.plot.plot_mixin import PlotMixin
TZ_LOOKUP = {
'America/Anchorage': 9,
'America/Chicago': 6,
'America/Denver': 7,
'America/Los_Angeles': 8,
'America/New_York': 5,
'America/Phoenix': 7,
'Pacific/Honolulu': 10
}
def load_sys(n, fp=None, verbose=False):
if fp is not None:
base = fp
else:
base = 's3://pvinsight.nrel/PVO/'
    # Weird quirk of pp is that some packages such as pandas and numpy need to use the full name for the import
meta = | pandas.read_csv(base + 'sys_meta.csv') | pandas.read_csv |
import os
import pathlib
import sys
import warnings
from functools import partial
from io import StringIO
from typing import Optional, TextIO
import click
import numpy as np # type: ignore
import pandas # type: ignore
import tomlkit as toml # type: ignore
from .compaction import compact as _compact
out = partial(click.secho, bold=True, err=True)
err = partial(click.secho, fg="red", err=True)
def _tomlkit_to_popo(d):
"""Convert a tomlkit doc to plain-old-python objects.
Examples
--------
>>> import tomlkit
>>> from compaction.cli import _tomlkit_to_popo
>>> contents = \"\"\"
... [[test]]
... int_value = 3
... float_value = 3.14
... str_value = "pi"
... bool_value = true
... \"\"\"
>>> doc = tomlkit.parse(contents)
>>> doc
{'test': [{'int_value': 3, 'float_value': 3.14, 'str_value': 'pi', 'bool_value': True}]}
>>> isinstance(doc["test"][0]["int_value"], tomlkit.items.Item)
True
>>> isinstance(doc["test"][0]["float_value"], tomlkit.items.Item)
True
>>> isinstance(doc["test"][0]["str_value"], tomlkit.items.Item)
True
>>> popo = _tomlkit_to_popo(doc)
>>> popo
{'test': [{'int_value': 3, 'float_value': 3.14, 'str_value': 'pi', 'bool_value': True}]}
>>> isinstance(popo["test"][0]["int_value"], tomlkit.items.Item)
False
>>> isinstance(popo["test"][0]["float_value"], tomlkit.items.Item)
False
>>> isinstance(popo["test"][0]["str_value"], tomlkit.items.Item)
False
>>> isinstance(popo["test"][0]["bool_value"], tomlkit.items.Item)
False
"""
try:
result = getattr(d, "value")
except AttributeError:
result = d
if isinstance(result, list):
result = [_tomlkit_to_popo(x) for x in result]
elif isinstance(result, dict):
result = {
_tomlkit_to_popo(key): _tomlkit_to_popo(val) for key, val in result.items()
}
elif isinstance(result, toml.items.Integer):
result = int(result)
elif isinstance(result, toml.items.Float):
result = float(result)
elif isinstance(result, (toml.items.String, str)):
result = str(result)
elif isinstance(result, (toml.items.Bool, bool)):
result = bool(result)
else:
if not isinstance(result, (int, float, str, bool)):
warnings.warn( # pragma: no cover
"unexpected type ({0!r}) encountered when converting toml to a dict".format(
result.__class__.__name__
)
)
return result
def load_config(stream: Optional[TextIO] = None):
"""Load compaction config file.
Parameters
----------
stream : file-like, optional
Opened config file or ``None``. If ``None``, return default
values.
Returns
-------
dict
Config parameters.
"""
conf = {
"compaction": {
"constants": {
"c": 5e-8,
"porosity_min": 0.0,
"porosity_max": 0.5,
"rho_grain": 2650.0,
"rho_void": 1000.0,
}
}
}
if stream is not None:
try:
local_params = toml.parse(stream.read())["compaction"]
except KeyError:
local_params = {"constants": {}}
try:
local_constants = local_params["constants"]
except KeyError:
local_constants = {}
conf["compaction"]["constants"].update(local_constants)
return _tomlkit_to_popo(conf).pop("compaction")
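# Illustrative override (a sketch; the values are made up):
#   from io import StringIO
#   params = load_config(StringIO("[compaction.constants]\nc = 1e-9\n"))
#   params["constants"]["c"]   # -> 1e-09, other defaults unchanged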
def _contents_of_input_file(infile: str) -> str:
params = load_config()
def as_csv(data, header=None):
with StringIO() as fp:
np.savetxt(fp, data, header=header, delimiter=",", fmt="%.1f")
contents = fp.getvalue()
return contents
    contents = {
        "compaction.toml": toml.dumps(dict(compaction=params)),
"porosity.csv": as_csv(
[[100.0, 0.5], [100.0, 0.5], [100.0, 0.5]],
header="Layer Thickness [m], Porosity [-]",
),
}
return contents[infile]
def run_compaction(src: str, dest: str, **kwds) -> None:
init = | pandas.read_csv(src, names=("dz", "porosity"), dtype=float, comment="#") | pandas.read_csv |
#!/home/mario/anaconda3/envs/project2_venv/bin python
"""
DESCRIPTION:
A script to retrieve the information generated during
the resquiggling from the fast5 files.
"""
import h5py
import os
import pandas as pd
import csv
import numpy as np
from pytictoc import TicToc
from tombo import tombo_helper, tombo_stats, resquiggle
import mappy
from torch import mean
if __name__ == "__main__":
# Process the 3xr6 dataset
t = TicToc()
t.tic()
dataset_folder = "/home/mario/Projects/project_2/databases/working_3xr6/reads"
reference_file = dataset_folder + "/" + "reference.fasta"
reads_data = []
for flowcell in ['flowcell1', 'flowcell2', 'flowcell3']:
flowcell_folder = dataset_folder + '/' + flowcell + '/' + 'single'
for subfolder in os.listdir(flowcell_folder):
if subfolder.endswith('txt') or subfolder.endswith('index'):
continue
subfolder = flowcell_folder + '/' + subfolder
for read_file in os.listdir(subfolder):
read_name = read_file
if not read_file.endswith('fast5'):
continue
read_file = subfolder + '/' + read_file
try:
fast5_data = h5py.File(read_file, 'r')
template = fast5_data['Analyses']['RawGenomeCorrected_000']['BaseCalled_template']
except:
# Parsing: WRONG
# Alignment: WRONG
clipped_start = -1
clipped_end = -1
mapped_start = -1
mapped_end = -1
num_deletions = -1
num_insertions = -1
num_matches = -1
num_events = -1
num_mismatches = -1
signal_matching_score = -1
raw_data = -1 # fast5_data['Raw']['Reads'][list(fast5_data['Raw']['Reads'].keys())[0]]
read_id = -1 # raw_data.attrs['read_id'].decode('UTF-8')
raw_signal_length = -1 # raw_data['Signal'].value.shape[0]
fastq = -1 # fast5_data['Analyses']['Basecall_1D_000']['BaseCalled_template']['Fastq'].value
sequence = -1 # fastq.split('\n')[1]
at_content = -1 # sequence.count('A') + sequence.count('T')
gc_content = -1 # sequence.count('G') + sequence.count('C')
# at_content, gc_content = at_content / (at_content + gc_content), gc_content / (at_content + gc_content)
sequence_length = -1 # len(sequence)
aligned_section_length = -1
failed_parsing = True
failed_alignment = True
mean_q_score = -1
reads_data.append(
(read_id, raw_signal_length, sequence_length, clipped_start, clipped_end, mapped_start, mapped_end, num_deletions, num_insertions, num_matches, num_mismatches, num_events, signal_matching_score, failed_parsing, failed_alignment, aligned_section_length, at_content, gc_content, mean_q_score)
)
continue
status = template.attrs['status']
if status == 'Alignment not produced':
# Parsing: OK
# Alignment: WRONG
raw_data = fast5_data['Raw']['Reads'][list(fast5_data['Raw']['Reads'].keys())[0]]
read_id = raw_data.attrs['read_id'].decode('UTF-8')
raw_signal_length = raw_data['Signal'].value.shape[0]
signal_matching_score = -1
fastq = fast5_data['Analyses']['Basecall_1D_000']['BaseCalled_template']['Fastq'].value
sequence = fastq.split('\n')[1]
at_content = sequence.count('A') + sequence.count('T')
gc_content = sequence.count('G') + sequence.count('C')
at_content, gc_content = at_content / (at_content + gc_content), gc_content / (at_content + gc_content)
sequence_length = len(sequence)
clipped_start = -1
clipped_end = -1
mapped_start = -1
mapped_end = -1
num_deletions = -1
num_insertions = -1
num_matches = -1
num_mismatches = -1
num_events = -1
aligned_section_length = -1
failed_parsing = False
failed_alignment = True
mean_q_score = -1
else:
try:
alignment = template['Alignment']
except KeyError:
# Parsing: WRONG
# Alignment: WRONG
clipped_start = -1
clipped_end = -1
mapped_start = -1
mapped_end = -1
num_deletions = -1
num_insertions = -1
num_matches = -1
num_events = -1
num_mismatches = -1
signal_matching_score = -1
raw_data = -1 # fast5_data['Raw']['Reads'][list(fast5_data['Raw']['Reads'].keys())[0]]
read_id = -1 # raw_data.attrs['read_id'].decode('UTF-8')
raw_signal_length = -1 # raw_data['Signal'].value.shape[0]
fastq = -1 # fast5_data['Analyses']['Basecall_1D_000']['BaseCalled_template']['Fastq'].value
sequence = -1 # fastq.split('\n')[1]
at_content = -1 # sequence.count('A') + sequence.count('T')
gc_content = -1 # sequence.count('G') + sequence.count('C')
# at_content, gc_content = at_content / (at_content + gc_content), gc_content / (at_content + gc_content)
sequence_length = -1 # len(sequence)
aligned_section_length = -1
failed_parsing = True
failed_alignment = True
mean_q_score = -1
reads_data.append(
(read_id, raw_signal_length, sequence_length, clipped_start, clipped_end, mapped_start, mapped_end, num_deletions, num_insertions, num_matches, num_mismatches, num_events, signal_matching_score, failed_parsing, failed_alignment, aligned_section_length, at_content, gc_content, mean_q_score)
)
continue
# Parsing: OK
# Alignment: OK
alignment = template['Alignment']
clipped_start = alignment.attrs['clipped_bases_start']
clipped_end = alignment.attrs['clipped_bases_end']
mapped_start = alignment.attrs['mapped_start']
mapped_end = alignment.attrs['mapped_end']
num_deletions = alignment.attrs['num_deletions']
num_insertions = alignment.attrs['num_insertions']
num_matches = alignment.attrs['num_matches']
num_mismatches = alignment.attrs['num_mismatches']
num_events = mapped_end - mapped_start
signal_matching_score = template.attrs['signal_match_score']
raw_data = fast5_data['Raw']['Reads'][list(fast5_data['Raw']['Reads'].keys())[0]]
read_id = raw_data.attrs['read_id'].decode('UTF-8')
raw_signal_length = raw_data['Signal'].value.shape[0]
fastq = fast5_data['Analyses']['Basecall_1D_000']['BaseCalled_template']['Fastq'].value
sequence = fastq.split('\n')[1]
at_content = sequence.count('A') + sequence.count('T')
gc_content = sequence.count('G') + sequence.count('C')
at_content, gc_content = at_content / (at_content + gc_content), gc_content / (at_content + gc_content)
sequence_length = len(sequence)
events = fast5_data['Analyses']['RawGenomeCorrected_000']['BaseCalled_template']['Events']
last_event = events.value.tolist()[-1]
aligned_section_length = last_event[-3] + last_event[-2]
failed_parsing = False
failed_alignment = False
# Obtain mean q score
try:
aligner = mappy.Aligner(reference_file, preset=str('map-ont'), best_n=1)
seq_samp_type = tombo_helper.get_seq_sample_type(fast5_data)
std_ref = tombo_stats.TomboModel(seq_samp_type=seq_samp_type)
map_results = resquiggle.map_read(fast5_data, aligner, std_ref)
mean_q_score = map_results.mean_q_score
except tombo_helper.TomboError:
mean_q_score = -1
failed_alignment = True
reads_data.append(
(read_id, raw_signal_length, sequence_length, clipped_start, clipped_end, mapped_start, mapped_end, num_deletions, num_insertions, num_matches, num_mismatches, num_events, signal_matching_score, failed_parsing, failed_alignment, aligned_section_length, at_content, gc_content, mean_q_score)
)
columns = ['read_id', 'raw_signal_length', 'sequence_length', 'clipped_start', 'clipped_end', 'mapped_start', 'mapped_end', 'num_deletions', 'num_insertions', 'num_matches', 'num_mismatches', 'num_events', 'signal_matching_score', 'failed_parsing', 'failed_alignment', 'aligned_section_length', 'at_content', 'gc_content', 'mean_q_score']
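    # A value of -1 in any column is a sentinel meaning the corresponding field
    # could not be extracted because parsing or alignment failed for that read.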
metadata_3xr6 = | pd.DataFrame(reads_data, columns=columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
import json
from tqdm import tqdm
from scipy.optimize import minimize
from utils import get_next_gw, time_decay
from ranked_probability_score import ranked_probability_score, match_outcome
class Bradley_Terry:
""" Model game outcomes using logistic distribution """
def __init__(
self,
games,
threshold=0.1,
scale=1,
parameters=None,
decay=True):
"""
Args:
            games (pd.DataFrame): Finished games to be used for training.
threshold (float): Threshold to differentiate team performances
scale (float): Variance of strength ratings
parameters (array): Initial parameters to use
decay (boolean): Apply time decay
"""
self.games = games.loc[:, [
"score1", "score2", "team1", "team2", "date"]]
self.games = self.games.dropna()
self.games["date"] = pd.to_datetime(self.games["date"])
self.games["days_since"] = (
self.games["date"].max() - self.games["date"]).dt.days
self.games["weight"] = (
time_decay(0.0026, self.games["days_since"]) if decay else 1)
self.decay = decay
self.games["score1"] = self.games["score1"].astype(int)
self.games["score2"] = self.games["score2"].astype(int)
self.teams = np.sort(np.unique(self.games["team1"]))
self.league_size = len(self.teams)
self.threshold = threshold
self.scale = scale
# Initial parameters
if parameters is None:
self.parameters = np.concatenate((
np.random.uniform(0, 1, (self.league_size)), # Strength
[.1], # Home advantage
))
else:
self.parameters = parameters
def likelihood(self, parameters, games):
""" Perform sample prediction and compare with outcome
Args:
parameters (pd.DataFrame): Current estimate of the parameters
games (pd.DataFrame): Fixtures
Returns:
(float): Likelihood of the estimated parameters
"""
parameter_df = (
pd.DataFrame()
.assign(rating=parameters[:self.league_size])
.assign(team=self.teams)
)
fixtures_df = (
pd.merge(
games,
parameter_df,
left_on='team1',
right_on='team')
.rename(columns={"rating": "rating1"})
.merge(parameter_df, left_on='team2', right_on='team')
.rename(columns={"rating": "rating2"})
.drop("team_y", axis=1)
.drop("team_x", axis=1)
)
outcome = match_outcome(fixtures_df)
outcome_ma = np.ones((fixtures_df.shape[0], 3))
outcome_ma[np.arange(0, fixtures_df.shape[0]), outcome] = 0
odds = np.zeros((fixtures_df.shape[0], 3))
odds[:, 0] = (
1 / (1 + np.exp(
-(
fixtures_df["rating1"] + parameters[-1] -
fixtures_df["rating2"] - self.threshold
) / self.scale)
)
)
odds[:, 2] = (
1 / (1 + np.exp(
-(
fixtures_df["rating2"] - parameters[-1] -
fixtures_df["rating1"] - self.threshold
) / self.scale)
)
)
odds[:, 1] = 1 - odds[:, 0] - odds[:, 2]
return - np.power(
np.ma.masked_array(odds, outcome_ma),
np.repeat(
np.array(fixtures_df["weight"].values).reshape(-1, 1),
3,
axis=1)
).sum()
def maximum_likelihood_estimation(self):
"""
Maximum likelihood estimation of the model parameters for team
strengths and the home field advantage.
"""
        # Fix the overall scale of the ratings (they must sum to the number of
        # teams) so that the solution is identifiable and reproducible
constraints = [{
"type": "eq",
"fun": lambda x:
sum(x[: self.league_size]) - self.league_size
}]
# Set the maximum and minimum values the parameters can take
bounds = [(0, 3)] * self.league_size
bounds += [(0, 1)]
self.solution = minimize(
self.likelihood,
self.parameters,
args=self.games,
constraints=constraints,
bounds=bounds,
options={'disp': False, 'maxiter': 100})
self.parameters = self.solution["x"]
def predict(self, games):
""" Predict score for several fixtures
Args:
games (pd.DataFrame): Fixtures
Returns:
pd.DataFrame: Fixtures with appended odds
"""
parameter_df = (
pd.DataFrame()
.assign(rating=self.parameters[:self.league_size])
.assign(team=self.teams)
)
fixtures_df = (
pd.merge(games, parameter_df, left_on='team1', right_on='team')
.rename(columns={"rating": "rating1"})
.merge(parameter_df, left_on='team2', right_on='team')
.rename(columns={"rating": "rating2"})
.drop("team_y", axis=1)
.drop("team_x", axis=1)
.assign(home_adv=self.parameters[-1])
)
def synthesize_odds(row):
""" Lambda function that parses row by row to compute score matrix
Args:
row (array): Fixture
Returns:
                (tuple): Home win, draw and away win probabilities
"""
home_win_p = (
1 / (
1 + np.exp(
-(
row["rating1"] + row["home_adv"] -
row["rating2"] - self.threshold) / self.scale
)
)
)
away_win_p = (
1 / (
1 + np.exp(
-(
row["rating2"] - row["home_adv"] -
row["rating1"] - self.threshold) / self.scale
)
)
)
draw_p = 1 - home_win_p - away_win_p
return home_win_p, draw_p, away_win_p
(
fixtures_df["home_win_p"],
fixtures_df["draw_p"],
fixtures_df["away_win_p"]
) = zip(*fixtures_df.apply(
lambda row: synthesize_odds(row), axis=1))
return fixtures_df
def evaluate(self, games):
""" Evaluate the model's prediction accuracy
Args:
            games (pd.DataFrame): Fixtures to evaluate on
Returns:
pd.DataFrame: df with appended metrics
"""
fixtures_df = self.predict(games)
fixtures_df["winner"] = match_outcome(fixtures_df)
fixtures_df["rps"] = fixtures_df.apply(
lambda row: ranked_probability_score(
[row["home_win_p"], row["draw_p"],
row["away_win_p"]], row["winner"]), axis=1)
return fixtures_df
def backtest(
self,
train_games,
test_season,
path='',
cold_start=False,
save=True):
""" Test the model's accuracy on past/finished games by iteratively
training and testing on parts of the data.
Args:
train_games (pd.DataFrame): All the training samples
test_season (int): Season to use a test set
path (string): Path extension to adjust to ipynb use
cold_start (boolean): Resume training with random parameters
save (boolean): Save predictions to disk
Returns:
(float): Evaluation metric
"""
# Get training data
self.train_games = train_games
# Initialize model
self.__init__(self.train_games[
self.train_games['season'] != test_season],
decay=self.decay)
# Initial train on past seasons
self.maximum_likelihood_estimation()
# Get test data
# Separate testing based on per GW intervals
fixtures = (
pd.read_csv(
f"{path}data/fpl_official/vaastav/data/2021-22/fixtures.csv")
.loc[:, ['event', 'kickoff_time']])
fixtures["kickoff_time"] = (
pd.to_datetime(fixtures["kickoff_time"]).dt.date)
# Get only EPL games from the test season
self.test_games = (
self.train_games
.loc[self.train_games['league_id'] == 2411]
.loc[self.train_games['season'] == test_season]
.dropna()
)
self.test_games["kickoff_time"] = (
pd.to_datetime(self.test_games["date"]).dt.date)
# Merge on date
self.test_games = pd.merge(
self.test_games,
fixtures,
left_on='kickoff_time',
right_on='kickoff_time')
# Add the home team and away team index for running inference
idx = (
pd.DataFrame()
.assign(team=self.teams)
.assign(team_index=np.arange(self.league_size)))
self.test_games = (
| pd.merge(self.test_games, idx, left_on="team1", right_on="team") | pandas.merge |
#!/usr/bin/env python
import os
import bisect
import sys
import logging
import math
import yaml
import numpy as np
import pandas as pd
import configparser
import shapefile
from collections import defaultdict
from shapely import geometry
from geopy.distance import geodesic
from scipy import stats
from wistl.constants import K_FACTOR, NO_CIRCUIT
OPTIONS = ['run_parallel', 'save_output', 'save_figure',
'run_analytical', 'run_simulation', 'use_random_seed',
'run_no_cascading_collapse', 'adjust_design_by_topography',
'apply_line_interaction', 'use_collapse_capacity']
DIRECTORIES = ['gis_data', 'wind_event_base', 'input', 'output']
GIS_DATA = ['shape_tower', 'shape_line']
FORMAT = ['wind_file', 'event_id']
INPUT_FILES = ['fragility_metadata', 'cond_prob_metadata',
'terrain_multiplier', 'topographic_multiplier',
'design_adjustment_factor_by_topography',
'cond_prob_interaction_metadata']
FIELDS_TOWER = ['name', 'type', 'latitude', 'longitude', 'function', 'devangle',
                'axisaz', 'height', 'lineroute', 'design_span', 'design_speed',
'terrain_cat', 'height_z', 'shape', 'design_level',
]
SHAPEFILE_TYPE = {'C': object, 'F': np.float64, 'N': np.int64}
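# SHAPEFILE_TYPE maps DBF field type codes from the shapefile attribute table
# ('C' character, 'F' float, 'N' numeric) to the dtypes used when the table is
# read into a DataFrame.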
# scenario -> damage scenario
# event -> wind event
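# Illustrative instantiation of the Config class below (the path is a
# placeholder, not a file shipped with this module):
#   cfg = Config(file_cfg='tests/test.cfg')
#   cfg.no_towers_by_line  # dict of line name -> number of towers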
class Config(object):
"""
class to hold all configuration variables.
"""
def __init__(self, file_cfg=None, logger=None):
self.file_cfg = os.path.abspath(file_cfg)
self.logger = logger or logging.getLogger(__name__)
self.options = {}
self.no_sims = None
self.strainer = []
self.selected_lines = []
self.atol = None
self.rtol = None
self.dmg_threshold = None
#for item in DIRECTORIES:
# setattr(self, f'path_{item}', None)
#for item in INPUT_FILES + GIS_DATA:
# setattr(self, f'file_{item}', None)
self.events = [] # list of tuples of event_name and scale
self.line_interaction = {}
self._topographic_multiplier = None
self._design_value_by_line = None
self._terrain_multiplier = None
self._drag_height_by_type = None
self._design_adjustment_factor_by_topography = None
# cond prob wrt line_interaction
self._cond_prob_interaction = None
self._cond_prob_interaction_metadata = None
self._fragility_metadata = None
self._fragility = None # pandas.DataFrame
self._damage_states = None
self._no_damage_states = None
self._non_collapse = None
self._cond_prob_metadata = None
self._cond_prob = None # dict of pd.DataFrame
self._towers_by_line = None
self._lines = None
self._no_towers_by_line = None
if not os.path.isfile(file_cfg):
self.logger.error(f'{file_cfg} not found')
else:
self.read_config()
self.process_config()
#def __getstate__(self):
# d = self.__dict__.copy()
# if 'logger' in d:
# d['logger'] = d['logger'].name
# return d
#def __setstate__(self, d):
# if 'logger' in d:
# d['logger'] = logging.getLogger(d['logger'])
# self.__dict__.update(d)
@property
def drag_height_by_type(self):
"""read typical drag height by tower type
:returns: drag height
:rtype: dict
"""
if self._drag_height_by_type is None:
if os.path.exists(self.file_drag_height_by_type):
self._drag_height_by_type = h_drag_height_by_type(self.file_drag_height_by_type)
else:
msg = f'{self.file_drag_height_by_type} not found'
self.logger.critical(msg)
return self._drag_height_by_type
@property
def topographic_multiplier(self):
"""read topographic multiplier value from the input file
:returns: topography value at each site
:rtype: dict
"""
if self._topographic_multiplier is None and self.options['adjust_design_by_topography']:
if os.path.exists(self.file_topographic_multiplier):
self._topographic_multiplier = h_topographic_multiplier(
self.file_topographic_multiplier)
else:
msg = f'{self.file_topographic_multiplier} not found'
self.logger.critical(msg)
return self._topographic_multiplier
@property
def design_adjustment_factor_by_topography(self):
"""read design wind speed adjustment based on topographic multiplier
:returns: design speed adjustment factor
:rtype: dict
"""
if self._design_adjustment_factor_by_topography is None and self.options['adjust_design_by_topography']:
if os.path.exists(self.file_design_adjustment_factor_by_topography):
self._design_adjustment_factor_by_topography = \
h_design_adjustment_factor_by_topography(
self.file_design_adjustment_factor_by_topography)
else:
msg = f'{self.file_design_adjustment_factor_by_topography} not found'
self.logger.critical(msg)
return self._design_adjustment_factor_by_topography
@property
def terrain_multiplier(self):
"""
read terrain multiplier (AS/NZS 1170.2:2011 Table 4.1)
"""
if self._terrain_multiplier is None:
if os.path.exists(self.file_terrain_multiplier):
self._terrain_multiplier = h_terrain_multiplier(
self.file_terrain_multiplier)
else:
msg = f'{self.file_terrain_multiplier} not found'
self.logger.critical(msg)
return self._terrain_multiplier
@property
def design_value_by_line(self):
"""read design values by line
"""
if self._design_value_by_line is None:
if os.path.exists(self.file_design_value_by_line):
self._design_value_by_line = h_design_value_by_line(self.file_design_value_by_line)
else:
msg = f'{self.file_design_value_by_line} not found'
self.logger.critical(msg)
return self._design_value_by_line
@property
def fragility_metadata(self):
"""
read collapse fragility parameter values
"""
if self._fragility_metadata is None:
try:
with open(self.file_fragility_metadata, 'r') as ymlfile:
tmp = yaml.load(ymlfile, Loader=yaml.FullLoader)
except IOError:
msg = f'{self.file_fragility_metadata} not found'
self.logger.critical(msg)
else:
self._fragility_metadata = nested_dic(tmp)
return self._fragility_metadata
@property
def fragility(self):
if self._fragility is None:
path_metadata = os.path.dirname(
os.path.realpath(self.file_fragility_metadata))
_file = os.path.join(path_metadata, self.fragility_metadata['main']['file'])
if os.path.exists(_file):
self._fragility = h_fragility(_file)
else:
self.logger.critical(f'{_file} not found')
return self._fragility
@property
def damage_states(self):
if self._damage_states is None:
self._damage_states = self.fragility_metadata['main']['limit_states']
return self._damage_states
@property
def no_damage_states(self):
if self._no_damage_states is None:
self._no_damage_states = len(self.damage_states)
return self._no_damage_states
@property
def non_collapse(self):
if self._non_collapse is None:
self._non_collapse = self.damage_states[:]
self._non_collapse.remove('collapse')
return self._non_collapse
@property
def cond_prob_metadata(self):
"""
read condition collapse probability defined by tower function
"""
if self._cond_prob_metadata is None:
if os.path.exists(self.file_cond_prob_metadata):
self._cond_prob_metadata = read_yml_file(
self.file_cond_prob_metadata)
else:
msg = f'{self.file_cond_prob_metadata} not found'
self.logger.critical(msg)
return self._cond_prob_metadata
@property
def cond_prob(self):
if self._cond_prob is None:
_file = os.path.join(self.cond_prob_metadata['path'],
self.cond_prob_metadata['file'])
self._cond_prob = h_cond_prob(_file)
return self._cond_prob
@property
def cond_prob_interaction_metadata(self):
"""
read conditional line interaction probability
"""
if self.options['apply_line_interaction'] and self._cond_prob_interaction_metadata is None:
if not os.path.exists(self.file_cond_prob_interaction_metadata):
msg = f'{self.file_cond_prob_interaction_metadata} not found'
self.logger.critical(msg)
else:
self._cond_prob_interaction_metadata = read_yml_file(
self.file_cond_prob_interaction_metadata)
return self._cond_prob_interaction_metadata
@property
def cond_prob_interaction(self):
if self._cond_prob_interaction is None and self.cond_prob_interaction_metadata:
_file = os.path.join(self.cond_prob_interaction_metadata['path'],
self.cond_prob_interaction_metadata['file'])
with open(_file, 'r') as ymlfile:
self._cond_prob_interaction = yaml.load(ymlfile, Loader=yaml.FullLoader)
return self._cond_prob_interaction
@property
def no_towers_by_line(self):
if self._no_towers_by_line is None:
self._no_towers_by_line = {k: v['no_towers']
for k, v in self.lines.items()}
return self._no_towers_by_line
@property
def towers_by_line(self):
if self._towers_by_line is None:
df = pd.DataFrame(None)
for _file in self.file_shape_tower:
if '.shp' in _file:
df = df.append(read_shape_file(_file))
else:
tmp = pd.read_csv(_file, skipinitialspace=True, usecols=FIELDS_TOWER)
df = df.append(tmp)
df.set_index('name', inplace=True, drop=False)
# set dtype of lineroute chr
df['lineroute'] = df['lineroute'].astype(str)
# only selected lines
df = df.loc[df['lineroute'].isin(self.selected_lines)]
# coord, coord_lat_lon, point
df = df.merge(df.apply(assign_shapely_point, axis=1),
left_index=True, right_index=True)
# design_span, design_level, design_speed, terrain_cat
#df = df.merge(df.apply(self.assign_design_values, axis=1),
# left_index=True, right_index=True)
# frag_dic
df = df.merge(df.apply(self.assign_fragility_parameters, axis=1),
left_index=True, right_index=True)
df['file_wind_base_name'] = df['name'].apply(
lambda x: self.wind_file_format.format(tower_name=x))
#df['height_z'] = df['function'].apply(lambda x: self.drag_height_by_type[x])
df['ratio_z_to_10'] = df.apply(self.ratio_z_to_10, axis=1)
self._towers_by_line = {}
for name, grp in df.groupby('lineroute'):
self._towers_by_line[name] = grp.to_dict('index')
return self._towers_by_line
@property
def lines(self):
if self._lines is None:
df = | pd.DataFrame(None) | pandas.DataFrame |
import pandas as pd
from utils.save_data import write_csv
def filter_runs_not_us(data_subject):
data_subject['residence'] = data_subject['Current Country of Residence']
runs_not_us = data_subject.loc[
data_subject['residence'] != 'United States', 'run_id']
print(f"""{len(runs_not_us)} runs do not reside inside the us. """)
return runs_not_us
def filter_runs_precision(data_subject, max_precision=0.15):
runs_low_precision = data_subject.loc[
data_subject['precision'] > max_precision, 'run_id']
return runs_low_precision
def filter_runs_offset(data_subject, max_offset=0.5):
runs_high_offset = data_subject.loc[
data_subject['offset'] > max_offset, 'run_id']
return runs_high_offset
def filter_hit_ratio(data_subject, min_hit_ratio=0.8):
freq_table = pd.crosstab(
index=data_subject['n_valid_dots'],
columns="count")
print(f"""How many dots are valid per subject. Dots during """
f"""chin-rest validation: \n"""
f"""{data_subject['n_valid_dots'].describe()} \n\n"""
f"""{freq_table} \n\n"""
f"""{data_subject[['run_id', 'fps', 'n_valid_dots']]}""")
runs_low_hit_ratio = data_subject.loc[
data_subject['hit_ratio'] < min_hit_ratio,
'run_id']
return runs_low_hit_ratio
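# Illustrative combination of the filters above (column names are assumed to
# match the data_subject frame used elsewhere in this project):
#   excluded_runs = pd.concat([
#       filter_runs_not_us(data_subject),
#       filter_runs_precision(data_subject, max_precision=0.15),
#       filter_runs_offset(data_subject, max_offset=0.5),
#       filter_hit_ratio(data_subject, min_hit_ratio=0.8),
#   ]).unique()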
def filter_bad_log_k(data_subject, max_noise=40):
runs_missing_log_k = data_subject.loc[
| pd.isna(data_subject['logK']) | pandas.isna |
# Charting OSeMOSYS transformation data
# These charts won't necessarily need to be mapped back to EGEDA historical.
# Will effectively be base year and out
# But will be good to incorporate some historical generation before the base year eventually
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from openpyxl import Workbook
import xlsxwriter
import pandas.io.formats.excel
import glob
import re
# Path for OSeMOSYS output
path_output = './data/3_OSeMOSYS_output'
# Path for OSeMOSYS to EGEDA mapping
path_mapping = './data/2_Mapping_and_other'
# They're csv files so use a wild card (*) to grab the filenames
OSeMOSYS_filenames = glob.glob(path_output + "/*.xlsx")
# Reference filenames and net zero filenames
reference_filenames = list(filter(lambda k: 'reference' in k, OSeMOSYS_filenames))
netzero_filenames = list(filter(lambda y: 'net-zero' in y, OSeMOSYS_filenames))
# Read in mapping file
# New 2018 data variable names
Mapping_sheets = list(pd.read_excel(path_mapping + '/OSeMOSYS_mapping_2021.xlsx', sheet_name = None).keys())[1:]
Mapping_file = pd.DataFrame()
for sheet in Mapping_sheets:
interim_map = pd.read_excel(path_mapping + '/OSeMOSYS_mapping_2021.xlsx', sheet_name = sheet, skiprows = 1)
Mapping_file = Mapping_file.append(interim_map).reset_index(drop = True)
# Subset the mapping file so that it's just transformation
Map_trans = Mapping_file[Mapping_file['Balance'] == 'TRANS'].reset_index(drop = True)
# Define unique workbook and sheet combinations
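# (groupby + size is used here purely to de-duplicate the Workbook/Sheet_energy
# pairs; the count column is dropped again by the .loc selection)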
Unique_trans = Map_trans.groupby(['Workbook', 'Sheet_energy']).size().reset_index().loc[:, ['Workbook', 'Sheet_energy']]
########################################################################################################################
########################### Create historical electricity generation dataframe for use later ###########################
required_fuels_elec = ['1_coal', '1_5_lignite', '2_coal_products', '6_crude_oil_and_ngl', '7_petroleum_products',
'8_gas', '9_nuclear', '10_hydro', '11_geothermal', '12_solar', '13_tide_wave_ocean', '14_wind',
'15_solid_biomass', '16_others', '18_heat']
EGEDA_hist_gen = pd.read_csv('./data/1_EGEDA/EGEDA_2018_years.csv',
names = ['economy', 'fuel_code', 'item_code_new'] + list(range(1980, 2019)),
header = 0)
EGEDA_hist_gen_1 = EGEDA_hist_gen[(EGEDA_hist_gen['item_code_new'] == '18_electricity_output_in_pj') &
(EGEDA_hist_gen['fuel_code'].isin(required_fuels_elec))].reset_index(drop = True)
EGEDA_hist_gen_2 = EGEDA_hist_gen[(EGEDA_hist_gen['fuel_code'] == '17_electricity') &
(EGEDA_hist_gen['item_code_new'] == '2_imports')].reset_index(drop = True)
EGEDA_hist_gen = EGEDA_hist_gen_1.append(EGEDA_hist_gen_2).reset_index(drop = True)
# China only having data for 1_coal requires workaround to keep lignite data
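# Lignite generation is negated and summed back into the '1_coal' rows below,
# so '1_coal' ends up holding thermal coal excluding lignite, while the
# separate '1_5_lignite' rows are kept intact.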
lignite_alt = EGEDA_hist_gen[EGEDA_hist_gen['fuel_code'] == '1_5_lignite'].copy()\
.set_index(['economy', 'fuel_code', 'item_code_new']) * -1
lignite_alt = lignite_alt.reset_index()
new_coal = EGEDA_hist_gen[EGEDA_hist_gen['fuel_code'] == '1_coal'].copy().reset_index(drop = True)
lig_coal = new_coal.append(lignite_alt).reset_index(drop = True).groupby(['economy', 'item_code_new']).sum().reset_index()
lig_coal['fuel_code'] = '1_coal'
no_coal = EGEDA_hist_gen[EGEDA_hist_gen['fuel_code'] != '1_coal'].copy().reset_index(drop = True)
EGEDA_hist_gen = no_coal.append(lig_coal).reset_index(drop = True)
EGEDA_hist_gen['TECHNOLOGY'] = EGEDA_hist_gen['fuel_code'].map({'1_coal': 'Coal',
'1_5_lignite': 'Lignite',
'2_coal_products': 'Coal',
'6_crude_oil_and_ngl': 'Oil',
'7_petroleum_products': 'Oil',
'8_gas': 'Gas',
'9_nuclear': 'Nuclear',
'10_hydro': 'Hydro',
'11_geothermal': 'Geothermal',
'12_solar': 'Solar',
'13_tide_wave_ocean': 'Hydro',
'14_wind': 'Wind',
'15_solid_biomass': 'Biomass',
'16_others': 'Other',
'17_electricity': 'Imports',
'18_heat': 'Other'})
EGEDA_hist_gen['Generation'] = 'Electricity'
EGEDA_hist_gen = EGEDA_hist_gen[['economy', 'TECHNOLOGY', 'Generation'] + list(range(2000, 2019))].\
groupby(['economy', 'TECHNOLOGY', 'Generation']).sum().reset_index()
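# EGEDA_hist_gen now holds historical electricity output in PJ by economy and
# aggregated technology for 2000-2018; it is converted to TWh later, together
# with the modelled generation.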
########################################################################################################################
# Determine list of files to read based on the workbooks identified in the mapping file
# REFERENCE
ref_file_trans = pd.DataFrame()
for i in range(len(Unique_trans['Workbook'].unique())):
_file = pd.DataFrame({'File': [entry for entry in reference_filenames if Unique_trans['Workbook'].unique()[i] in entry],
'Workbook': Unique_trans['Workbook'].unique()[i]})
ref_file_trans = ref_file_trans.append(_file)
ref_file_trans = ref_file_trans.merge(Unique_trans, how = 'outer', on = 'Workbook')
# NET ZERO
netz_file_trans = pd.DataFrame()
for i in range(len(Unique_trans['Workbook'].unique())):
_file = pd.DataFrame({'File': [entry for entry in netzero_filenames if Unique_trans['Workbook'].unique()[i] in entry],
'Workbook': Unique_trans['Workbook'].unique()[i]})
netz_file_trans = netz_file_trans.append(_file)
netz_file_trans = netz_file_trans.merge(Unique_trans, how = 'outer', on = 'Workbook')
# Create empty dataframe to store aggregated results
# REFERENCE
ref_aggregate_df1 = pd.DataFrame()
# Now read in the OSeMOSYS output files so that that they're all in one data frame (aggregate_df1)
for i in range(ref_file_trans.shape[0]):
_df = pd.read_excel(ref_file_trans.iloc[i, 0], sheet_name = ref_file_trans.iloc[i, 2])
_df['Workbook'] = ref_file_trans.iloc[i, 1]
_df['Sheet_energy'] = ref_file_trans.iloc[i, 2]
ref_aggregate_df1 = ref_aggregate_df1.append(_df)
ref_aggregate_df1 = ref_aggregate_df1.groupby(['TECHNOLOGY', 'FUEL', 'REGION']).sum().reset_index()
# NET ZERO
netz_aggregate_df1 = pd.DataFrame()
# Now read in the OSeMOSYS output files so that that they're all in one data frame (aggregate_df1)
for i in range(netz_file_trans.shape[0]):
_df = pd.read_excel(netz_file_trans.iloc[i, 0], sheet_name = netz_file_trans.iloc[i, 2])
_df['Workbook'] = netz_file_trans.iloc[i, 1]
_df['Sheet_energy'] = netz_file_trans.iloc[i, 2]
netz_aggregate_df1 = netz_aggregate_df1.append(_df)
netz_aggregate_df1 = netz_aggregate_df1.groupby(['TECHNOLOGY', 'FUEL', 'REGION']).sum().reset_index()
# Read in capacity data
# REFERENCE
ref_capacity_df1 = pd.DataFrame()
# Populate the above blank dataframe with capacity data from the results workbook
for i in range(len(reference_filenames)):
_df = pd.read_excel(reference_filenames[i], sheet_name = 'TotalCapacityAnnual')
ref_capacity_df1 = ref_capacity_df1.append(_df)
# Now just extract the power capacity
ref_pow_capacity_df1 = ref_capacity_df1[ref_capacity_df1['TECHNOLOGY'].str.startswith('POW')].reset_index(drop = True)
# NET ZERO
netz_capacity_df1 = pd.DataFrame()
# Populate the above blank dataframe with capacity data from the results workbook
for i in range(len(netzero_filenames)):
_df = pd.read_excel(netzero_filenames[i], sheet_name = 'TotalCapacityAnnual')
netz_capacity_df1 = netz_capacity_df1.append(_df)
# Now just extract the power capacity
netz_pow_capacity_df1 = netz_capacity_df1[netz_capacity_df1['TECHNOLOGY'].str.startswith('POW')].reset_index(drop = True)
# Get maximum year column to build data frame below
# REFERENCE
ref_year_columns = []
for item in list(ref_aggregate_df1.columns):
try:
ref_year_columns.append(int(item))
except ValueError:
pass
max_year_ref = min(2050, max(ref_year_columns))
OSeMOSYS_years_ref = list(range(2017, max_year_ref + 1))
# NET ZERO
netz_year_columns = []
for item in list(netz_aggregate_df1.columns):
try:
netz_year_columns.append(int(item))
except ValueError:
pass
max_year_netz = min(2050, max(netz_year_columns))
OSeMOSYS_years_netz = list(range(2017, max_year_netz + 1))
# Colours for charting (to be amended later)
colours = pd.read_excel('./data/2_Mapping_and_other/colour_template_7th.xlsx')
colours_hex = colours['hex']
# Colour dictionary
colours_dict = {
'Coal': '#323232',
'Oil': '#be280a',
'Gas': '#f59300',
'Modern renewables': '#3c7896',
'Traditional biomass': '#828282',
'Hydrogen': '#28825a',
'Electricity': '#a5cdf0',
'Heat': '#cd6477',
'Others': '#bebebe',
'Industry': '#ffc305',
'Transport': '#bebebe',
'Buildings': '#3c7896',
'Agriculture': '#323232',
'Non-energy': '#cd6477',
'Non-specified': '#872355',
'Services': '#a5cdf0',
'Residential': '#28825a',
'Iron & steel': '#8c0000',
'Chemicals': '#a5cdf0',
'Aluminium': '#bebebe',
'Non-metallic minerals': '#1e465a',
'Mining': '#f59300',
'Pulp & paper': '#28825a',
'Other': '#cd6477',
'Biomass': '#828282',
'Jet fuel': '#323232',
'LPG': '#ffdc96',
'Gasoline': '#be280a',
'Diesel': '#3c7896',
'Renewables': '#1e465a',
'Aviation': '#ffc305',
'Road': '#1e465a',
'Rail': '#be280a',
'Marine': '#28825a',
'Pipeline': '#bebebe',
# Transformation unique
'Geothermal': '#3c7896',
'Hydro': '#a5cdf0',
'Lignite': '#833C0C',
'Nuclear': '#872355',
'Other renewables': '#1e465a',
'Solar': '#ffc305',
'Wind': '#28825a',
'Storage': '#ffdc96',
'Imports': '#641964',
'Crude oil': '#be280a',
'NGLs': '#3c7896',
'Motor gasoline': '#1e465a',
'Aviation gasoline': '#3c7896',
'Naphtha': '#a5cdf0',
'Other kerosene': '#8c0000',
'Gas diesel oil': '#be280a',
'Fuel oil': '#f59300',
'Refinery gas': '#ffc305',
'Ethane': '#872355',
'Power': '#1e465a',
'Refining': '#3c7896'
}
Map_power = Map_trans[Map_trans['Sector'] == 'POW'].reset_index(drop = True)
################################ POWER SECTOR ###############################
# Aggregate data based on the Map_power mapping
# That is group by REGION, TECHNOLOGY and FUEL
# First create empty dataframe
# REFERENCE
ref_power_df1 = pd.DataFrame()
# Then loop through based on different regions/economies and stitch back together
for region in ref_aggregate_df1['REGION'].unique():
interim_df1 = ref_aggregate_df1[ref_aggregate_df1['REGION'] == region]
interim_df1 = interim_df1.merge(Map_power, how = 'right', on = ['TECHNOLOGY', 'FUEL'])
interim_df1 = interim_df1.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
# Now add in economy reference
interim_df1['economy'] = region
# Now append economy dataframe to communal data frame
ref_power_df1 = ref_power_df1.append(interim_df1)
ref_power_df1 = ref_power_df1[['economy', 'TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector'] + OSeMOSYS_years_ref]
# NET ZERO
netz_power_df1 = pd.DataFrame()
# Then loop through based on different regions/economies and stitch back together
for region in netz_aggregate_df1['REGION'].unique():
interim_df1 = netz_aggregate_df1[netz_aggregate_df1['REGION'] == region]
interim_df1 = interim_df1.merge(Map_power, how = 'right', on = ['TECHNOLOGY', 'FUEL'])
interim_df1 = interim_df1.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
# Now add in economy reference
interim_df1['economy'] = region
# Now append economy dataframe to communal data frame
netz_power_df1 = netz_power_df1.append(interim_df1)
netz_power_df1 = netz_power_df1[['economy', 'TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector'] + OSeMOSYS_years_netz]
################################ REFINERY, OWN USE and SUPPLY TRANSFORMATION SECTOR ###############################
Map_refownsup = Map_trans[Map_trans['Sector'].isin(['REF', 'SUP', 'OWN'])].reset_index(drop = True)
# Aggregate data based on the Map_power mapping
# That is group by REGION, TECHNOLOGY and FUEL
# First create empty dataframe
# REFERENCE
ref_refownsup_df1 = pd.DataFrame()
# Then loop through based on different regions/economies and stitch back together
for region in ref_aggregate_df1['REGION'].unique():
interim_df1 = ref_aggregate_df1[ref_aggregate_df1['REGION'] == region]
interim_df1 = interim_df1.merge(Map_refownsup, how = 'right', on = ['TECHNOLOGY', 'FUEL'])
interim_df1 = interim_df1.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
# Now add in economy reference
interim_df1['economy'] = region
# Now append economy dataframe to communal data frame
ref_refownsup_df1 = ref_refownsup_df1.append(interim_df1)
ref_refownsup_df1 = ref_refownsup_df1[['economy', 'TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector'] + OSeMOSYS_years_ref]
# REFERENCE
netz_refownsup_df1 = pd.DataFrame()
# Then loop through based on different regions/economies and stitch back together
for region in netz_aggregate_df1['REGION'].unique():
interim_df1 = netz_aggregate_df1[netz_aggregate_df1['REGION'] == region]
interim_df1 = interim_df1.merge(Map_refownsup, how = 'right', on = ['TECHNOLOGY', 'FUEL'])
interim_df1 = interim_df1.groupby(['TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector']).sum().reset_index()
# Now add in economy reference
interim_df1['economy'] = region
# Now append economy dataframe to communal data frame
netz_refownsup_df1 = netz_refownsup_df1.append(interim_df1)
netz_refownsup_df1 = netz_refownsup_df1[['economy', 'TECHNOLOGY', 'FUEL', 'Sheet_energy', 'Sector'] + OSeMOSYS_years_netz]
# Refinery, own-use, supply and power
ref_trans_df1 = ref_power_df1.append(ref_refownsup_df1)
netz_trans_df1 = netz_power_df1.append(netz_refownsup_df1)
# FUEL aggregations for UseByTechnology
# First aggregation (13 fuels)
coal_fuel_1 = ['1_x_coal_thermal', '2_coal_products']
lignite_fuel_1 = ['1_5_lignite']
oil_fuel_1 = ['7_7_gas_diesel_oil','7_3_naphtha', '7_8_fuel_oil', '6_1_crude_oil', '7_9_lpg', '7_10_refinery_gas_not_liquefied', '7_x_other_petroleum_products']
gas_fuel_1 = ['8_1_natural_gas']
nuclear_fuel_1 = ['9_nuclear']
hydro_fuel_1 = ['10_hydro']
solar_fuel_1 = ['12_1_of_which_photovoltaics']
wind_fuel_1 = ['14_wind']
biomass_fuel_1 = ['15_1_fuelwood_and_woodwaste', '15_2_bagasse', '15_4_black_liquor', '15_5_other_biomass']
geothermal_fuel_1 = ['11_geothermal']
other_renew_fuel_1 = ['13_tide_wave_ocean', '16_3_municipal_solid_waste_renewable', '16_1_biogas']
other_fuel_1 = ['16_4_municipal_solid_waste_nonrenewable', '17_electricity', '18_heat', '16_x_hydrogen', '16_2_industrial_waste']
imports_fuel_1 = ['17_electricity_import']
# Second aggregation: Oil, Gas, Nuclear, Imports and Other from above, plus the two new aggregations below (7 fuels)
coal_fuel_2 = ['1_x_coal_thermal', '1_5_lignite', '2_coal_products']
renewables_fuel_2 = ['10_hydro', '11_geothermal', '12_1_of_which_photovoltaics', '13_tide_wave_ocean', '14_wind', '15_1_fuelwood_and_woodwaste',
'15_2_bagasse', '15_4_black_liquor', '15_5_other_biomass', '16_1_biogas', '16_3_municipal_solid_waste_renewable']
# Own use fuels
coal_ou = ['1_x_coal_thermal', '1_5_lignite', '2_coal_products', '1_1_coking_coal']
oil_ou = ['6_1_crude_oil', '6_x_ngls', '7_1_motor_gasoline', '7_2_aviation_gasoline', '7_3_naphtha', '7_6_kerosene',
'7_7_gas_diesel_oil', '7_8_fuel_oil', '7_9_lpg', '7_10_refinery_gas_not_liquefied', '7_11_ethane',
'7_x_jet_fuel', '7_x_other_petroleum_products']
gas_ou = ['8_1_natural_gas']
renew_ou = ['15_1_fuelwood_and_woodwaste', '15_2_bagasse', '15_3_charcoal', '15_4_black_liquor', '15_5_other_biomass',
'16_1_biogas', '16_3_municipal_solid_waste_renewable', '16_5_biogasoline', '16_6_biodiesel',
'16_8_other_liquid_biofuels']
elec_ou = ['17_electricity']
heat_ou = ['18_heat']
other_ou = ['16_2_industrial_waste', '16_4_municipal_solid_waste_nonrenewable']
own_use_fuels = ['Coal', 'Oil', 'Gas', 'Renewables', 'Electricity', 'Heat', 'Other']
# Note: 12_1_of_which_photovoltaics is a subset of 12_solar, so including both would lead to double counting
use_agg_fuels_1 = ['Coal', 'Lignite', 'Oil', 'Gas', 'Nuclear', 'Hydro', 'Solar', 'Wind',
'Biomass', 'Geothermal', 'Other renewables', 'Other', 'Imports']
use_agg_fuels_2 = ['Coal', 'Oil', 'Gas', 'Nuclear', 'Renewables', 'Other', 'Imports']
# TECHNOLOGY aggregations for ProductionByTechnology
coal_tech = ['POW_Black_Coal_PP', 'POW_Other_Coal_PP', 'POW_Sub_BituCoal_PP', 'POW_Sub_Brown_PP', 'POW_Ultra_BituCoal_PP', 'POW_CHP_COAL_PP', 'POW_Ultra_CHP_PP']
oil_tech = ['POW_Diesel_PP', 'POW_FuelOil_PP', 'POW_OilProducts_PP', 'POW_PetCoke_PP']
gas_tech = ['POW_CCGT_PP', 'POW_OCGT_PP', 'POW_CHP_GAS_PP', 'POW_CCGT_CCS_PP']
nuclear_tech = ['POW_Nuclear_PP', 'POW_IMP_Nuclear_PP']
hydro_tech = ['POW_Hydro_PP', 'POW_Pumped_Hydro', 'POW_Storage_Hydro_PP', 'POW_IMP_Hydro_PP']
solar_tech = ['POW_SolarCSP_PP', 'POW_SolarFloatPV_PP', 'POW_SolarPV_PP', 'POW_SolarRoofPV_PP']
wind_tech = ['POW_WindOff_PP', 'POW_Wind_PP']
bio_tech = ['POW_Solid_Biomass_PP', 'POW_CHP_BIO_PP', 'POW_Biogas_PP']
geo_tech = ['POW_Geothermal_PP']
storage_tech = ['POW_AggregatedEnergy_Storage_VPP', 'POW_EmbeddedBattery_Storage']
other_tech = ['POW_IPP_PP', 'POW_TIDAL_PP', 'POW_WasteToEnergy_PP', 'POW_CHP_PP']
# chp_tech = ['POW_CHP_PP']
im_tech = ['POW_IMPORTS_PP', 'POW_IMPORT_ELEC_PP']
lignite_tech = ['POW_Sub_Brown_PP']
thermal_coal_tech = ['POW_Black_Coal_PP', 'POW_Other_Coal_PP', 'POW_Sub_BituCoal_PP', 'POW_Ultra_BituCoal_PP', 'POW_CHP_COAL_PP', 'POW_Ultra_CHP_PP']
solar_roof_tech = ['POW_SolarRoofPV_PP']
solar_nr_tech = ['POW_SolarCSP_PP', 'POW_SolarFloatPV_PP', 'POW_SolarPV_PP']
# TODO: POW_EXPORT_ELEC_PP still needs to be worked in
prod_agg_tech = ['Coal', 'Oil', 'Gas', 'Hydro', 'Nuclear', 'Wind', 'Solar', 'Biomass', 'Geothermal', 'Storage', 'Other', 'Imports']
prod_agg_tech2 = ['Coal', 'Lignite', 'Oil', 'Gas', 'Hydro', 'Nuclear', 'Wind', 'Solar',
'Biomass', 'Geothermal', 'Storage', 'Other', 'Imports']
# Refinery vectors
refinery_input = ['6_1_crude_oil', '6_x_ngls']
refinery_output = ['7_1_motor_gasoline', '7_2_aviation_gasoline', '7_3_naphtha', '7_x_jet_fuel', '7_6_kerosene', '7_7_gas_diesel_oil', '7_8_fuel_oil',
'7_9_lpg', '7_10_refinery_gas_not_liquefied', '7_11_ethane', '7_x_other_petroleum_products']
refinery_new_output = ['7_1_from_ref', '7_2_from_ref', '7_3_from_ref', '7_jet_from_ref', '7_6_from_ref', '7_7_from_ref',
'7_8_from_ref', '7_9_from_ref', '7_10_from_ref', '7_11_from_ref', '7_other_from_ref']
# Capacity vectors
coal_cap = ['POW_Black_Coal_PP', 'POW_Sub_BituCoal_PP', 'POW_Sub_Brown_PP', 'POW_CHP_COAL_PP', 'POW_Other_Coal_PP', 'POW_Ultra_BituCoal_PP', 'POW_Ultra_CHP_PP']
gas_cap = ['POW_CCGT_PP', 'POW_OCGT_PP', 'POW_CHP_GAS_PP', 'POW_CCGT_CCS_PP']
oil_cap = ['POW_Diesel_PP', 'POW_FuelOil_PP', 'POW_OilProducts_PP', 'POW_PetCoke_PP']
nuclear_cap = ['POW_Nuclear_PP', 'POW_IMP_Nuclear_PP']
hydro_cap = ['POW_Hydro_PP', 'POW_Pumped_Hydro', 'POW_Storage_Hydro_PP', 'POW_IMP_Hydro_PP']
bio_cap = ['POW_Solid_Biomass_PP', 'POW_CHP_BIO_PP', 'POW_Biogas_PP']
wind_cap = ['POW_Wind_PP', 'POW_WindOff_PP']
solar_cap = ['POW_SolarCSP_PP', 'POW_SolarFloatPV_PP', 'POW_SolarPV_PP', 'POW_SolarRoofPV_PP']
geo_cap = ['POW_Geothermal_PP']
storage_cap = ['POW_AggregatedEnergy_Storage_VPP', 'POW_EmbeddedBattery_Storage']
other_cap = ['POW_WasteToEnergy_PP', 'POW_IPP_PP', 'POW_TIDAL_PP', 'POW_CHP_PP']
# chp_cap = ['POW_CHP_PP']
# 'POW_HEAT_HP' not in electricity capacity
transmission_cap = ['POW_Transmission']
lignite_cap = ['POW_Sub_Brown_PP']
thermal_coal_cap = ['POW_Black_Coal_PP', 'POW_Other_Coal_PP', 'POW_Sub_BituCoal_PP', 'POW_Ultra_BituCoal_PP', 'POW_CHP_COAL_PP', 'POW_Ultra_CHP_PP']
pow_capacity_agg = ['Coal', 'Gas', 'Oil', 'Nuclear', 'Hydro', 'Biomass', 'Wind', 'Solar', 'Geothermal', 'Storage', 'Other']
pow_capacity_agg2 = ['Coal', 'Lignite', 'Gas', 'Oil', 'Nuclear', 'Hydro', 'Biomass', 'Wind',
'Solar', 'Geothermal', 'Storage', 'Other']
# Chart years for column charts
col_chart_years = [2018, 2020, 2030, 2040, 2050]
gen_col_chart_years = [2000, 2010, 2018, 2020, 2030, 2040, 2050]
# Make space for charts (before data/tables)
chart_height = 18 # number of excel rows before the data is written
# TRANSFORMATION SECTOR: Build use, capacity and production dataframes with appropriate aggregations to chart
for economy in ref_power_df1['economy'].unique():
ref_use_df1 = ref_power_df1[(ref_power_df1['economy'] == economy) &
(ref_power_df1['Sheet_energy'] == 'UseByTechnology') &
(ref_power_df1['TECHNOLOGY'] != 'POW_Transmission')].reset_index(drop = True)
# Now build aggregate variables of the FUELS
# First level aggregations
coal = ref_use_df1[ref_use_df1['FUEL'].isin(coal_fuel_1)].groupby(['economy']).sum().assign(FUEL = 'Coal',
TECHNOLOGY = 'Coal power')
lignite = ref_use_df1[ref_use_df1['FUEL'].isin(lignite_fuel_1)].groupby(['economy']).sum().assign(FUEL = 'Lignite',
TECHNOLOGY = 'Lignite power')
oil = ref_use_df1[ref_use_df1['FUEL'].isin(oil_fuel_1)].groupby(['economy']).sum().assign(FUEL = 'Oil',
TECHNOLOGY = 'Oil power')
gas = ref_use_df1[ref_use_df1['FUEL'].isin(gas_fuel_1)].groupby(['economy']).sum().assign(FUEL = 'Gas',
TECHNOLOGY = 'Gas power')
nuclear = ref_use_df1[ref_use_df1['FUEL'].isin(nuclear_fuel_1)].groupby(['economy']).sum().assign(FUEL = 'Nuclear',
TECHNOLOGY = 'Nuclear power')
hydro = ref_use_df1[ref_use_df1['FUEL'].isin(hydro_fuel_1)].groupby(['economy']).sum().assign(FUEL = 'Hydro',
TECHNOLOGY = 'Hydro power')
solar = ref_use_df1[ref_use_df1['FUEL'].isin(solar_fuel_1)].groupby(['economy']).sum().assign(FUEL = 'Solar',
TECHNOLOGY = 'Solar power')
wind = ref_use_df1[ref_use_df1['FUEL'].isin(wind_fuel_1)].groupby(['economy']).sum().assign(FUEL = 'Wind',
TECHNOLOGY = 'Wind power')
geothermal = ref_use_df1[ref_use_df1['FUEL'].isin(geothermal_fuel_1)].groupby(['economy']).sum().assign(FUEL = 'Geothermal',
TECHNOLOGY = 'Geothermal power')
biomass = ref_use_df1[ref_use_df1['FUEL'].isin(biomass_fuel_1)].groupby(['economy']).sum().assign(FUEL = 'Biomass',
TECHNOLOGY = 'Biomass power')
other_renew = ref_use_df1[ref_use_df1['FUEL'].isin(other_renew_fuel_1)].groupby(['economy']).sum().assign(FUEL = 'Other renewables',
TECHNOLOGY = 'Other renewable power')
other = ref_use_df1[ref_use_df1['FUEL'].isin(other_fuel_1)].groupby(['economy']).sum().assign(FUEL = 'Other',
TECHNOLOGY = 'Other power')
imports = ref_use_df1[ref_use_df1['FUEL'].isin(imports_fuel_1)].groupby(['economy']).sum().assign(FUEL = 'Imports',
TECHNOLOGY = 'Electricity imports')
# Second level aggregations
coal2 = ref_use_df1[ref_use_df1['FUEL'].isin(coal_fuel_2)].groupby(['economy']).sum().assign(FUEL = 'Coal',
TECHNOLOGY = 'Coal power')
renew2 = ref_use_df1[ref_use_df1['FUEL'].isin(renewables_fuel_2)].groupby(['economy']).sum().assign(FUEL = 'Renewables',
TECHNOLOGY = 'Renewables power')
# Use by fuel data frame number 1
ref_usefuel_df1 = ref_use_df1.append([coal, lignite, oil, gas, nuclear, hydro, solar, wind, geothermal, biomass, other_renew, other, imports])\
[['FUEL', 'TECHNOLOGY'] + OSeMOSYS_years_ref].reset_index(drop = True)
ref_usefuel_df1 = ref_usefuel_df1[ref_usefuel_df1['FUEL'].isin(use_agg_fuels_1)].copy().set_index('FUEL').reset_index()
ref_usefuel_df1 = ref_usefuel_df1.groupby('FUEL').sum().reset_index()
ref_usefuel_df1['Transformation'] = 'Input fuel'
ref_usefuel_df1['FUEL'] = pd.Categorical(ref_usefuel_df1['FUEL'], use_agg_fuels_1)
ref_usefuel_df1 = ref_usefuel_df1.sort_values('FUEL').reset_index(drop = True)
ref_usefuel_df1 = ref_usefuel_df1[['FUEL', 'Transformation'] + OSeMOSYS_years_ref]
nrows1 = ref_usefuel_df1.shape[0]
ncols1 = ref_usefuel_df1.shape[1]
ref_usefuel_df2 = ref_usefuel_df1[['FUEL', 'Transformation'] + col_chart_years]
nrows2 = ref_usefuel_df2.shape[0]
ncols2 = ref_usefuel_df2.shape[1]
    # Use by fuel data frame number 2 (second-level fuel aggregation)
ref_usefuel_df3 = ref_use_df1.append([coal2, oil, gas, nuclear, renew2, other, imports])\
[['FUEL', 'TECHNOLOGY'] + OSeMOSYS_years_ref].reset_index(drop = True)
ref_usefuel_df3 = ref_usefuel_df3[ref_usefuel_df3['FUEL'].isin(use_agg_fuels_2)].copy().set_index('FUEL').reset_index()
ref_usefuel_df3 = ref_usefuel_df3.groupby('FUEL').sum().reset_index()
ref_usefuel_df3['Transformation'] = 'Input fuel'
ref_usefuel_df3 = ref_usefuel_df3[['FUEL', 'Transformation'] + OSeMOSYS_years_ref]
nrows10 = ref_usefuel_df3.shape[0]
ncols10 = ref_usefuel_df3.shape[1]
ref_usefuel_df4 = ref_usefuel_df3[['FUEL', 'Transformation'] + col_chart_years]
nrows11 = ref_usefuel_df4.shape[0]
ncols11 = ref_usefuel_df4.shape[1]
# Now build production dataframe
ref_prodelec_df1 = ref_power_df1[(ref_power_df1['economy'] == economy) &
(ref_power_df1['Sheet_energy'] == 'ProductionByTechnology') &
(ref_power_df1['FUEL'].isin(['17_electricity', '17_electricity_Dx']))].reset_index(drop = True)
# Now build the aggregations of technology (power plants)
coal_pp = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(coal_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Coal')
oil_pp = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(oil_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Oil')
gas_pp = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(gas_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Gas')
storage_pp = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(storage_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Storage')
# chp_pp = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(chp_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Cogeneration')
nuclear_pp = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(nuclear_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Nuclear')
bio_pp = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(bio_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Biomass')
other_pp = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(other_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Other')
hydro_pp = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(hydro_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Hydro')
geo_pp = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(geo_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Geothermal')
misc = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(im_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Imports')
solar_pp = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(solar_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Solar')
wind_pp = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(wind_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Wind')
coal_pp2 = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(thermal_coal_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Coal')
lignite_pp2 = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(lignite_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Lignite')
roof_pp2 = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(solar_roof_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Solar roof')
nonroof_pp = ref_prodelec_df1[ref_prodelec_df1['TECHNOLOGY'].isin(solar_nr_tech)].groupby(['economy']).sum().assign(TECHNOLOGY = 'Solar')
# Generation of electricity by tech dataframe (with the above aggregations added)
ref_prodelec_bytech_df1 = ref_prodelec_df1.append([coal_pp2, lignite_pp2, oil_pp, gas_pp, storage_pp, nuclear_pp,\
bio_pp, geo_pp, other_pp, hydro_pp, misc, solar_pp, wind_pp])\
[['TECHNOLOGY'] + OSeMOSYS_years_ref].reset_index(drop = True)
ref_prodelec_bytech_df1['Generation'] = 'Electricity'
ref_prodelec_bytech_df1 = ref_prodelec_bytech_df1[['TECHNOLOGY', 'Generation'] + OSeMOSYS_years_ref]
ref_prodelec_bytech_df1 = ref_prodelec_bytech_df1[ref_prodelec_bytech_df1['TECHNOLOGY'].isin(prod_agg_tech2)].\
set_index('TECHNOLOGY')
ref_prodelec_bytech_df1 = ref_prodelec_bytech_df1.loc[ref_prodelec_bytech_df1.index.intersection(prod_agg_tech2)].reset_index()\
.rename(columns = {'index': 'TECHNOLOGY'})
#################################################################################
historical_gen = EGEDA_hist_gen[EGEDA_hist_gen['economy'] == economy].copy().\
iloc[:,:-2][['TECHNOLOGY', 'Generation'] + list(range(2000, 2017))]
ref_prodelec_bytech_df1 = historical_gen.merge(ref_prodelec_bytech_df1, how = 'right', on = ['TECHNOLOGY', 'Generation']).replace(np.nan, 0)
ref_prodelec_bytech_df1['TECHNOLOGY'] = pd.Categorical(ref_prodelec_bytech_df1['TECHNOLOGY'], prod_agg_tech2)
ref_prodelec_bytech_df1 = ref_prodelec_bytech_df1.sort_values('TECHNOLOGY').reset_index(drop = True)
    # Change from petajoules to TWh (divide by 3.6)
s = ref_prodelec_bytech_df1.select_dtypes(include=[np.number]) / 3.6
ref_prodelec_bytech_df1[s.columns] = s
nrows3 = ref_prodelec_bytech_df1.shape[0]
ncols3 = ref_prodelec_bytech_df1.shape[1]
ref_prodelec_bytech_df2 = ref_prodelec_bytech_df1[['TECHNOLOGY', 'Generation'] + gen_col_chart_years]
nrows4 = ref_prodelec_bytech_df2.shape[0]
ncols4 = ref_prodelec_bytech_df2.shape[1]
##################################################################################################################################################################
# Now create some refinery dataframes
ref_refinery_df1 = ref_refownsup_df1[(ref_refownsup_df1['economy'] == economy) &
(ref_refownsup_df1['Sector'] == 'REF') &
(ref_refownsup_df1['FUEL'].isin(refinery_input))].copy()
ref_refinery_df1['Transformation'] = 'Input to refinery'
ref_refinery_df1 = ref_refinery_df1[['FUEL', 'Transformation'] + OSeMOSYS_years_ref].reset_index(drop = True)
ref_refinery_df1.loc[ref_refinery_df1['FUEL'] == '6_1_crude_oil', 'FUEL'] = 'Crude oil'
ref_refinery_df1.loc[ref_refinery_df1['FUEL'] == '6_x_ngls', 'FUEL'] = 'NGLs'
nrows5 = ref_refinery_df1.shape[0]
ncols5 = ref_refinery_df1.shape[1]
ref_refinery_df2 = ref_refownsup_df1[(ref_refownsup_df1['economy'] == economy) &
(ref_refownsup_df1['Sector'] == 'REF') &
(ref_refownsup_df1['FUEL'].isin(refinery_new_output))].copy()
ref_refinery_df2['Transformation'] = 'Output from refinery'
ref_refinery_df2 = ref_refinery_df2[['FUEL', 'Transformation'] + OSeMOSYS_years_ref].reset_index(drop = True)
ref_refinery_df2.loc[ref_refinery_df2['FUEL'] == '7_1_from_ref', 'FUEL'] = 'Motor gasoline'
ref_refinery_df2.loc[ref_refinery_df2['FUEL'] == '7_2_from_ref', 'FUEL'] = 'Aviation gasoline'
ref_refinery_df2.loc[ref_refinery_df2['FUEL'] == '7_3_from_ref', 'FUEL'] = 'Naphtha'
ref_refinery_df2.loc[ref_refinery_df2['FUEL'] == '7_jet_from_ref', 'FUEL'] = 'Jet fuel'
ref_refinery_df2.loc[ref_refinery_df2['FUEL'] == '7_6_from_ref', 'FUEL'] = 'Other kerosene'
ref_refinery_df2.loc[ref_refinery_df2['FUEL'] == '7_7_from_ref', 'FUEL'] = 'Gas diesel oil'
ref_refinery_df2.loc[ref_refinery_df2['FUEL'] == '7_8_from_ref', 'FUEL'] = 'Fuel oil'
ref_refinery_df2.loc[ref_refinery_df2['FUEL'] == '7_9_from_ref', 'FUEL'] = 'LPG'
ref_refinery_df2.loc[ref_refinery_df2['FUEL'] == '7_10_from_ref', 'FUEL'] = 'Refinery gas'
ref_refinery_df2.loc[ref_refinery_df2['FUEL'] == '7_11_from_ref', 'FUEL'] = 'Ethane'
ref_refinery_df2.loc[ref_refinery_df2['FUEL'] == '7_other_from_ref', 'FUEL'] = 'Other'
ref_refinery_df2['FUEL'] = pd.Categorical(
ref_refinery_df2['FUEL'],
categories = ['Motor gasoline', 'Aviation gasoline', 'Naphtha', 'Jet fuel', 'Other kerosene', 'Gas diesel oil', 'Fuel oil', 'LPG', 'Refinery gas', 'Ethane', 'Other'],
ordered = True)
ref_refinery_df2 = ref_refinery_df2.sort_values('FUEL')
nrows6 = ref_refinery_df2.shape[0]
ncols6 = ref_refinery_df2.shape[1]
ref_refinery_df3 = ref_refinery_df2[['FUEL', 'Transformation'] + col_chart_years]
nrows7 = ref_refinery_df3.shape[0]
ncols7 = ref_refinery_df3.shape[1]
#####################################################################################################################################################################
# Create some power capacity dataframes
ref_powcap_df1 = ref_pow_capacity_df1[ref_pow_capacity_df1['REGION'] == economy]
coal_capacity = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(coal_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Coal')
oil_capacity = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(oil_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Oil')
wind_capacity = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(wind_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Wind')
storage_capacity = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(storage_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Storage')
gas_capacity = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(gas_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Gas')
hydro_capacity = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(hydro_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Hydro')
solar_capacity = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(solar_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Solar')
nuclear_capacity = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(nuclear_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Nuclear')
bio_capacity = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(bio_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Biomass')
geo_capacity = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(geo_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Geothermal')
#chp_capacity = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(chp_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Cogeneration')
other_capacity = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(other_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Other')
transmission = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(transmission_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Transmission')
lignite_capacity = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(lignite_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Lignite')
thermal_capacity = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(thermal_coal_cap)].groupby(['REGION']).sum().assign(TECHNOLOGY = 'Coal')
# Capacity by tech dataframe (with the above aggregations added)
ref_powcap_df1 = ref_powcap_df1.append([coal_capacity, gas_capacity, oil_capacity, nuclear_capacity,
hydro_capacity, bio_capacity, wind_capacity, solar_capacity,
storage_capacity, geo_capacity, other_capacity])\
[['TECHNOLOGY'] + OSeMOSYS_years_ref].reset_index(drop = True)
ref_powcap_df1 = ref_powcap_df1[ref_powcap_df1['TECHNOLOGY'].isin(pow_capacity_agg)].reset_index(drop = True)
ref_powcap_df1['TECHNOLOGY'] = | pd.Categorical(ref_powcap_df1['TECHNOLOGY'], prod_agg_tech[:-1]) | pandas.Categorical |
# Copyright 2021 The ProLoaF Authors. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ==============================================================================
"""
Preprocesses your input data for use with ProLoaF
Transforms the data to a common format (pandas.DataFrame as csv) for all stations.
Notes
-----
- This script can load xlsx or csv files.
- If your data does not match the criteria, you can use a custom script that saves your
data as a pandas.DataFrame with datetimeindex to a csv file with a “;” as separator to
accomplish the same thing.
"""
import pandas as pd
import numpy as np
import sys
import json
import os
MAIN_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(MAIN_PATH)
from utils.config_util import read_config, parse_basic
#Import customized functions below this point
import utils.datatuner as dt
def load_raw_data_xlsx(files):
"""
Load data from an xlsx file
After loading, the date column in the raw data is converted to a UTC datetime
Parameters
----------
files : list
A list of files to read. See the Notes section for more information
Returns
-------
list
A list containing a DataFrame for each file that was read
Notes
-----
- Files is an array of maps containing the following data with the keyword (keyword)
+ ('file_name') the name of the xlsx file
+ ('date_column') the name of the date_column in the raw_data
+ ('time_zone') specifier for the timezone the raw data is recorded in
+ ('sheet_name') name or list of names of the sheets that are to be read
+ ('combine') boolean, all datasheets with true are combined into one, all others are read individually
+ ('start_column') Columns between this and ('end_column') are loaded
+ ('end_column')
+ ('dayfirst') boolean, whether the day comes first in the raw date format (used when the data is already in UTC)
+ ('data_abs') boolean, if true the loaded data columns are converted to absolute values
"""
print('Importing XLSX Data...')
combined_files = []
individual_files = []
for xlsx_file in files:
print('importing ' + xlsx_file['file_name'])
# if isinstance(file_name, str):
# file_name = [file_name,'UTC']
date_column = xlsx_file['date_column']
raw_data = pd.read_excel(INPATH + xlsx_file['file_name'], xlsx_file['sheet_name'],
parse_dates=[date_column])
# convert load data to UTC
if(xlsx_file['time_zone'] != 'UTC'):
raw_data[date_column] = pd.to_datetime(raw_data[date_column]).dt.tz_localize(xlsx_file['time_zone'], ambiguous="infer").dt.tz_convert('UTC').dt.strftime('%Y-%m-%d %H:%M:%S')
else:
if (xlsx_file['dayfirst']):
raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%d-%m-%Y %H:%M:%S').dt.tz_localize(None)
else:
raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%Y-%m-%d %H:%M:%S').dt.tz_localize(None)
if(xlsx_file['data_abs']):
raw_data.loc[:, xlsx_file['start_column']:xlsx_file['end_column']] = raw_data.loc[:, xlsx_file['start_column']:xlsx_file['end_column']].abs()
# rename column IDs, specifically Time, this will be used later as the df index
raw_data.rename(columns={date_column: 'Time'}, inplace=True)
raw_data.head() # now the data is positive and set to UTC
raw_data.info()
# interpolating for missing entries created by asfreq and original missing values if any
raw_data.interpolate(method='time', inplace=True)
if(xlsx_file['combine']):
combined_files.append(raw_data)
else:
individual_files.append(raw_data)
if(len(combined_files) > 0):
individual_files.append(pd.concat(combined_files))
return individual_files
def load_raw_data_csv(files):
"""
Load data from a csv file
After loading, the date column in the raw data is converted to a UTC datetime
Parameters
----------
files : list
A list of files to read. See the Notes section for more information
Returns
-------
list
A list containing a DataFrame for each file that was read
Notes
-----
- Files is an array of maps containing the following data with the keyword (keyword)
+ ('file_name') the name of the load_file
+ ('date_column') the name of the date_column in the raw_data
+ ('dayfirst') specifier for the formatting of the read time
+ ('sep') separator used in this file
+ ('combine') boolean, all datasheets with true are combined into one, all others are read individually
+ ('use_columns') list of columns that are loaded
+ ('time_zone') specifier for the timezone the raw data is recorded in
"""
print('Importing CSV Data...')
combined_files = []
individual_files = []
for csv_file in files:
print('Importing ' + csv_file['file_name'] + ' ...')
date_column = csv_file['date_column']
raw_data = pd.read_csv(INPATH + csv_file['file_name'], sep=csv_file['sep'], usecols=csv_file['use_columns'], parse_dates=[date_column] , dayfirst=csv_file['dayfirst'])
# pd.read_csv(INPATH + name, sep=sep, usecols=cols, parse_dates=[date_column] , dayfirst=dayfirst)
if (csv_file['time_zone'] != 'UTC'):
raw_data[date_column] = pd.to_datetime(raw_data[date_column]).dt.tz_localize(csv_file['time_zone'], ambiguous="infer").dt.tz_convert('UTC').dt.strftime('%Y-%m-%d %H:%M:%S')
else:
if (csv_file['dayfirst']):
raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%d-%m-%Y %H:%M:%S').dt.tz_localize(None)
else:
raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%Y-%m-%d %H:%M:%S').dt.tz_localize(None)
print('...Importing finished. ')
raw_data.rename(columns={date_column: 'Time'}, inplace=True)
if(csv_file['combine']):
combined_files.append(raw_data)
else:
individual_files.append(raw_data)
if(len(combined_files) > 0):
individual_files.append(pd.concat(combined_files, sort = False))
#for frame in individual_files:
# frame.rename(columns={date_column: 'Time'}, inplace=True)
return individual_files
def set_to_hours(df):
"""
Sets the index of the DataFrame to 'Time' and the frequency to hours.
Parameters
----------
df : pandas.DataFrame
The DataFrame whose index and frequency are to be changed
Returns
-------
df
The modified DataFrame
"""
df['Time'] = pd.to_datetime(df['Time'])
df = df.set_index('Time')
df = df.asfreq(freq='H')
return df
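# Example of the intended flow (hypothetical input): a frame with a 'Time' column of timestamps
# is re-indexed to a continuous hourly DatetimeIndex; hours without a measurement become NaN
# rows, which dt.fill_if_missing() handles right after this call in the main block below.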
if __name__ == '__main__':
ARGS = parse_basic()
config_file = os.path.join(MAIN_PATH, 'targets', ARGS.station, 'preprocessing.json')
PAR = read_config(config_path=config_file)
# DEFINES
if PAR['local'] == True:
INPATH = os.path.join(MAIN_PATH, PAR['raw_path'])
else:
INPATH = PAR['raw_path']
if ('xlsx_files' in PAR):
XLSX_FILES = PAR['xlsx_files']
if ('csv_files' in PAR):
CSV_FILES = PAR['csv_files']
OUTFILE = os.path.join(MAIN_PATH, PAR['data_path'])
# Prepare Load Data
df_list = []
if ('xlsx_files' in PAR):
xlsx_data = load_raw_data_xlsx(XLSX_FILES)
for data in xlsx_data:
hourly_data = set_to_hours(df=data)
dt.fill_if_missing(hourly_data)
df_list.append(hourly_data)
if ('csv_files' in PAR):
csv_data = load_raw_data_csv(CSV_FILES)
for data in csv_data:
hourly_data = set_to_hours(df=data)
dt.fill_if_missing(hourly_data)
print(hourly_data)
df_list.append(hourly_data)
print(df_list)
# When concatenating, the arrays are filled with NaNs if the index is not available.
# Since the DataFrames were already interpolated there are no "natural" NaNs left, so
# dropping all rows with NaNs finds the maximum overlap in indices
# # Merge load and weather data to one df
df = pd.concat(df_list, axis = 1)
df.dropna(inplace = True)
if not df.index.equals(pd.date_range(min(df.index),max(df.index),freq = df.index.freq)):
raise ValueError("DateTime index is not continuous")
if not df.isnull().values.any():
print('No missing data \n')
df.head()
## http://blog.davidkaleko.com/feature-engineering-cyclical-features.html
df['hour_sin'] = np.sin(df.index.hour * (2. * np.pi / 24))
df['hour_cos'] = np.cos(df.index.hour * (2. * np.pi / 24))
df['mnth_sin'] = np.sin((df.index.month - 1) * (2. * np.pi / 12))
df['mnth_cos'] = np.cos((df.index.month - 1) * (2. * np.pi / 12))
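# The sine/cosine pairs encode the cyclical nature of time: hour 23 and hour 0 end up as
# neighbouring points on the unit circle instead of 23 "units" apart, e.g.
# (sin, cos) of hour 23 is roughly (-0.26, 0.97) while hour 0 gives (0.0, 1.0).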
# fetch back the datetime again
# add one-hot encoding for Hour & Month
hours = pd.get_dummies(df.index.hour, prefix='hour').set_index(df.index) # one-hot encoding of hours
month = | pd.get_dummies(df.index.month, prefix='month') | pandas.get_dummies |
# -*- coding: utf-8 -*-
"""
Created on Tue May 25 14:13:52 2021
@author: <NAME>
"""
import solarenergy as se
import geocoder
from datetime import datetime
from dateutil.relativedelta import relativedelta
import numpy as np
import timezonefinder
import pandas as pd
import streamlit as st
def getDaysList(start_date, td):
dates = []
dt = datetime.strptime(start_date, "%Y-%m-%d")
for n in range(0, td):
dates.append(
(dt + relativedelta(days=n)).strftime('%Y-%m-%d'))
return dates
def user_input():
data = {'Location': loc
,'No. of days': dt}
inputs = | pd.DataFrame(data, index=[0]) | pandas.DataFrame |
import os, glob
import pandas as pd
import sys
import datetime  # datetime.timedelta and sys.exit are used further down, so import the full modules as well
from datetime import datetime as dt
from pathlib import Path
from emotion_recognition import EmotionRecognizer
from pylab import *
import numpy as np
import seaborn as sn
from progressbar import *
import pickle
import ntpath
from pathlib import Path
import shutil
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
class CordioESP_ToolBox:
"""
Expressive Speech Processing
This class manages emotion detection from speech.
* Supported emotion classes:
'neutral', 'calm', 'happy', 'sad', 'angry', 'fear', 'disgust', 'ps', 'boredom'
* CordioESP_ToolBox supports all scikit-learn models, for example:
'SVC', 'AdaBoostClassifier', 'RandomForestClassifier', 'GradientBoostingClassifier', 'DecisionTreeClassifier'
* Usage examples can be found in:
Main_test.py or in Main_create_emotion_label_table_and_emotion_labaled_scalarDist_table.py
Cordio Medical - Confidential
Version: 0.1 2020-04-27
Revision History:
| Ver | Author | Date | Change Description
|----------|-----------|----------------|--------------------
| 0.1 | Or | 2020-04-27 | Initial
| x.x | xxxx | xxxx-xx-xx | x
"""
def __init__(self):
self.version = 0.1
self.suported_emotions = ['neutral', 'calm', 'happy', 'sad', 'angry', 'fear', 'disgust', 'ps', 'boredom']
self.supported_models = ['SVC', 'AdaBoostClassifier', 'RandomForestClassifier', 'GradientBoostingClassifier',
'DecisionTreeClassifier', 'KNeighborsClassifier', 'MLPClassifier']
self.model_emotion_dict = {'SVC': ['angry', 'sad', 'neutral'],
'AdaBoostClassifier': ['sad', 'fear', 'boredom', 'neutral'],
'RandomForestClassifier': ['sad', 'fear', 'boredom', 'neutral'],
'KNeighborsClassifier': ['sad', 'fear', 'boredom', 'neutral']}
self.model_list = [SVC(probability=True), AdaBoostClassifier(), RandomForestClassifier(), KNeighborsClassifier()]
def save_object(self, obj, save_url_path, filename):
# create folders in path if not exist:
Path(save_url_path).mkdir(parents=True, exist_ok=True)
with open(save_url_path+'\\'+filename, 'wb') as output: # Overwrites any existing file.
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
def modelTrain(self, model, emotions_list):
# my_model probability attribute needs to be True!
save_url_path = 'trained_models'
filename = 'trained_' + type(model).__name__ + '_ESPVer' + str(self.version) + '.pkl'
# check if model exist:
if os.path.exists(save_url_path+'\\'+filename):
with open(save_url_path+'\\'+filename, 'rb') as input:
rec = pickle.load(input)
else:
# train the model
# pass my model to EmotionRecognizer instance
# and balance the dataset
rec = EmotionRecognizer(model=model, emotions=emotions_list, balance=True, verbose=0, probability=True)
rec.train()
self.save_object(rec, save_url_path, filename)
return rec
def modelPredict(self, rec, wav_url):
try:
out = rec.predict_proba(wav_url)
# wav_url == '\\\\192.168.55.210\\f$\\db\\BSV\\BSV-0009\\BSV-0009_200403_105407_S0007_he_1.54_SMJ400F_Android26.wav'
except ValueError:
wavPath = Path(wav_url)
print('\nempty file skipped: '+wavPath.name)
out = 'empty file'
except RuntimeError:
wavPath = Path(wav_url)
print("\nFixing header: "+wavPath.name)
LOCAL_PATH = os.getcwd()
shutil.copyfile(wav_url, LOCAL_PATH+'\\'+wavPath.name)
os.system("ffmpeg -nostats -loglevel 0 -i " + LOCAL_PATH+'\\'+wavPath.name + " -f s16le -acodec pcm_s16le -y temp.pcm")
os.system("ffmpeg -nostats -loglevel 0 -f s16le -ar 48.0k -ac 1 -i temp.pcm " + LOCAL_PATH+'\\'+wavPath.name + " -y")
print("\nDone fixing the header")
out = rec.predict_proba( LOCAL_PATH+'\\'+wavPath.name )
# remove temp files:
os.remove(LOCAL_PATH+'\\'+wavPath.name)
os.remove(LOCAL_PATH + '\\temp.pcm')
return out
def predict_all_proba_for_patientNmodel(self, model, fileHandle, clinicalInformation, patient_info_column_names,
emotions_list, all_wavs):
df_len = len(all_wavs)
patientNmodel_df = pd.DataFrame(index=np.arange(df_len),
columns=patient_info_column_names + ['Model'] + emotions_list)
model_name = model if type(model)==str else type(model).__name__
rec = self.modelTrain(model, self.model_emotion_dict[model_name])
# progress bar initialization:
p = Path(str(all_wavs[0]))
# fileHandle = CordioFile
patient_ID = fileHandle.CordioExtractPatient(p)
patient_model = type(model).__name__
widgets = [FormatLabel('<patient: ' + patient_ID + '; model: ' + patient_model + '>'), ' ', Percentage(), ' ',
Bar('#'), ' ', RotatingMarker()]
progressbar = ProgressBar(widgets=widgets, maxval=df_len)
progressbar.start()
# fill df:
for (i, wav) in zip(range(df_len), all_wavs):
# progress bar update:
widgets[0] = FormatLabel('<filename-{0}>'.format(i))
progressbar.update(i)
# add soft decision score for each emotion
patientNmodel_df.loc[i] = self.modelPredict(rec, wav)
# insert basic information:
p = Path(str(wav))
# fileHandle = CordioFile
patientNmodel_df.at[i, "PatientName"] = fileHandle.CordioExtractPatient(p)
patientNmodel_df.at[i, "Date"] = fileHandle.CordioExtractRecordingDateTime(p).strftime("%d/%m/%Y")
patientNmodel_df.at[i, "Time"] = fileHandle.CordioExtractRecordingDateTime(p).strftime("%H:%M:%S")
patientNmodel_df.at[i, "sentence"] = fileHandle.CordioExtractSentence(p)
patientNmodel_df.at[i, "Language"] = fileHandle.CordioExtractLanguage(p)
# TODO: add App version, Device identifier and OS version columns
# setting clinical status:
clinicalStatus = self.get_clinical_info(clinicalInformation, fileHandle.CordioExtractRecordingDateTime(p),
patientNmodel_df.at[i, "PatientName"])
patientNmodel_df.at[i, "ClinicalStatus"] = clinicalStatus
# setting model:
patientNmodel_df.at[i, "Model"] = type(model).__name__
progressbar.finish()
return patientNmodel_df
# def predict_all_proba_for_patient(self, patientDir_path, clinicalInformation, fileHandle, model_list,
# emotions_list):
# # get all wavs:
# all_wavs = glob.glob(os.path.join(patientDir_path, '*.wav'))
# num_of_wav = len(all_wavs) * len(model_list)
#
# # create basic information table for patient:
# patient_info_column_names = ["PatientName", "Date", "Time", "sentence", "Language", "ClinicalStatus"]
# patient_df = pd.DataFrame(columns=patient_info_column_names + ['Model'] + emotions_list)
#
# # progress bar initialization:
# p = Path(str(all_wavs[0]))
# # fileHandle = CordioFile
# patient_ID = fileHandle.CordioExtractPatient(p)
# widgets = [FormatLabel('<<patient: ' + patient_ID + '; all models process>>'), ' ', Percentage(), ' ',
# Bar('#'), ' ', RotatingMarker()]
# progressbar = ProgressBar(widgets=widgets, maxval=len(model_list))
# progressbar.start()
#
# # calculating for all models:
# for i, model in zip(range(len(model_list)), model_list):
# # progress bar update:
# widgets[0] = FormatLabel('<filename-{0}>'.format(i))
# progressbar.update(i)
#
# # --for debug:
# sentence = 'S0007'
# tmp = self.create_ESP_labeled_table(patientDir_path, model, sentence, emotions_list, clinicalInformation,
# fileHandle)
# # --
# tmp = self.predict_all_proba_for_patientNmodel(model, fileHandle, clinicalInformation,
# patient_info_column_names, all_wavs)
#
# patient_df = patient_df.append(tmp)
# progressbar.finish()
#
# return patient_df
def get_clinical_info(self, clinicalInformation, recording_datetime, patient_id):
clinicalInfo = clinicalInformation(patient_id, '')
clinicalStatusCode = clinicalInfo(recording_datetime)
clinicalStatus = "dry"
if clinicalStatusCode == -1:
# recording is not valid (before patient registration)
clinicalStatus = 'recording is not valid (before patient registration)'
elif clinicalStatusCode == clinicalInfo.CLINICAL_STATUS_UNKNOWN:
clinicalStatus = "unknown"
elif clinicalStatusCode == clinicalInfo.CLINICAL_STATUS_WET:
clinicalStatus = "wet"
return clinicalStatus
def SaveFig(self, fig, save_url_path, save_file_name, add_datetime, close_fig):
# from pathlib import Path
# create folders in path if not exist:
Path(save_url_path).mkdir(parents=True, exist_ok=True)
# remove old file with the same name if exist:
if os.path.isfile(save_url_path + "\\" + save_file_name + ".png"):
os.remove(save_url_path + "\\" + save_file_name + ".png")
plt.ioff()
# save file:
# datetime object containing current date and time
now = dt.now()
if (add_datetime == []) or (add_datetime == True):
dt_string = now.strftime("%d%m%y_%H%M%S")
fig.savefig(save_url_path + "\\" + save_file_name + dt_string + ".png", bbox_inches='tight')
else:
fig.savefig(save_url_path + "\\" + save_file_name + ".png", bbox_inches='tight')
if close_fig:
plt.close(fig)
def SaveTable(self, table, save_url_path, save_file_name, add_datetime, is_index_col=True):
# from pathlib import Path
# create folders in path if not exist:
Path(save_url_path).mkdir(parents=True, exist_ok=True)
# remove old file with the same name if exist:
if os.path.isfile(save_url_path + "\\" + save_file_name + ".png"):
os.remove(save_url_path + "\\" + save_file_name + ".png")
plt.ioff()
# save file:
# datetime object containing current date and time
now = dt.now()
if (add_datetime == []) or (add_datetime == True):
dt_string = now.strftime("%d%m%y_%H%M%S")
table.to_csv(save_url_path + "\\" + save_file_name + "_" + dt_string + '.csv', index=is_index_col)
else:
table.to_csv(save_url_path + "\\" + save_file_name + '.csv', index=is_index_col)
def get_table_by_session(self, prob_table, session_hour_range, session_action, emotions_list):
"""
Groups the per-recording probability table into sessions and aggregates each session.
Recordings that share a model and a date are grouped into sessions that start at the first
recording time and span `session_hour_range` hours; `session_action` ('mean' or 'std') is
applied to the emotion probability columns of every session. Returns one row per session
with patient id, session index, date, first/last recording time, model, an IsWet flag and
the aggregated emotion probabilities.
"""
# prob_table check: check necessary columns existence
prob_table_col_names = list(prob_table.columns)
if 'Unnamed: 0' in prob_table_col_names:
prob_table = prob_table.drop('Unnamed: 0', axis=1)
prob_table['Date'] = pd.to_datetime(prob_table['Date'], format="%d/%m/%Y")
prob_table['Time'] = pd.to_datetime(prob_table['Time'], format="%H:%M:%S")
# initial graphs df:
emotions_in_prob_table_idx = [idx for idx, val in enumerate(self.suported_emotions) if val in prob_table_col_names]
emotions_in_prob_table = [self.suported_emotions[i] for i in emotions_in_prob_table_idx]
graphs_df_col_names = ['Patient_id', 'SessionIdx', 'Date', 'FirstSessionRecTime',
'LastSessionRecTime', 'Model', 'IsWet'] + emotions_in_prob_table
graphs_df = pd.DataFrame(columns=graphs_df_col_names)
# fill graphs_df:
unique_dates = prob_table.Date.dt.strftime("%d/%m/%Y").unique()
unique_dates = [x for x in unique_dates if str(x) != 'nan'] # remove nans
prob_table = prob_table.sort_values(['Date', 'Time'], ascending=[True, True])
session_idx = 0
for date in unique_dates:
# get current date sub-df
dt_date = dt.strptime(date, "%d/%m/%Y")
# mean probabilities for each model type:
unique_model_types = prob_table.Model.unique()
# remove unsapported models:
unique_model_types = [val for idx, val in enumerate(self.supported_models) if val in unique_model_types]
for model in unique_model_types:
prob_table_dateNmodel_sub_df = prob_table[(prob_table['Model'] == model) & (prob_table['Date'] == dt_date)]
curr_time_idx = prob_table_dateNmodel_sub_df.index.values[0] # first index of prob_table_dateNmodel_sub_df
curr_time = pd.to_datetime(prob_table_dateNmodel_sub_df['Time'].loc[curr_time_idx], format="%H:%M:%S")
last_dateNmodel_idx = prob_table_dateNmodel_sub_df.index[-1]
while curr_time_idx <= last_dateNmodel_idx:
session_mask = (prob_table_dateNmodel_sub_df['Time'] >= curr_time) & (prob_table_dateNmodel_sub_df['Time'] < curr_time + datetime.timedelta(hours=session_hour_range))
prob_table_dateNmodel_seassion_sub_df = prob_table_dateNmodel_sub_df[session_mask]
mean_prob_row = getattr(prob_table_dateNmodel_seassion_sub_df[emotions_list], session_action)(axis=0, numeric_only=True, skipna=True)
# if session_action = 'std' and prob_table_dateNmodel_seassion_sub_df is one line mean_prob_row will be nan
basic_info_dict = {'Patient_id': [prob_table_dateNmodel_seassion_sub_df.iloc[0]['PatientName']],
'SessionIdx': [session_idx],
'Date': [prob_table_dateNmodel_seassion_sub_df.iloc[0]['Date']],
'FirstSessionRecTime': [prob_table_dateNmodel_seassion_sub_df.iloc[0]['Time'].strftime("%H:%M:%S")],
'LastSessionRecTime': [prob_table_dateNmodel_seassion_sub_df.iloc[-1]['Time'].strftime("%H:%M:%S")],
'Model': model,
'IsWet': (prob_table_dateNmodel_seassion_sub_df['ClinicalStatus'] == 'wet').any()}
basic_info_dict.update(mean_prob_row.to_dict())
full_info_dict = basic_info_dict
# remove bad entries:
full_info_dict = {k: full_info_dict[k] for k in graphs_df_col_names}
# insert new row
graphs_df = graphs_df.append(pd.DataFrame(full_info_dict))
session_idx = session_idx + 1
# iterate to next time value:
last_true_value_idx = session_mask[::-1].idxmax()
curr_time_idx = last_true_value_idx + 1
if curr_time_idx <= last_dateNmodel_idx:
try:
curr_time = pd.to_datetime(prob_table_dateNmodel_sub_df['Time'].loc[curr_time_idx], format="%H:%M:%S")
except:
print(str(date)+' '+model+' '+str(curr_time_idx))
return graphs_df
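# Hypothetical call: group a per-recording probability table into 2-hour sessions and average
# the class probabilities of every session:
# sessions = esp.get_table_by_session(prob_table, session_hour_range=2,
#                                     session_action='mean', emotions_list=['sad', 'neutral'])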
def create_ESP_patient_model_sentence_labeled_table(self, all_wavs, model, sentence, clinicalInformation, fileHandle):
"""
creates a table with the following columns and data:
Patient name | Date | Time | clinical status | emotional hard decision label | -> current emotional
classes probabilities
Input:
all_wavs - a list of paths to the patient's audio (.wav) files
model - scikit-learn library model object
sentence - sentence id string ("S0007")
(the emotion class list is not passed in; it is derived internally from self.model_emotion_dict)
clinicalInformation - clinicalInformation object
fileHandle - fileHandle object
Output:
patientNmodelNsentence_df - data frame containing audio files names, their details and their emotion
labeling details by model, sentence and patient
"""
# set emotion list:
model_name = model if type(model)==str else type(model).__name__
emotions_list = self.model_emotion_dict[model_name]
# filter wav by sentence:
all_wavs = [x for x in all_wavs if sentence in x]
# collecting constant data:
df_len = len(all_wavs)
patient_info_column_names = ["Filename", "PatientId", "Date", "Time", "Sentence", "Model", "Language", "ClinicalStatus", "EmotionHardDecision"]
patientNmodelNsentence_df = pd.DataFrame(index=np.arange(df_len),
columns=patient_info_column_names + emotions_list)
rec = self.modelTrain(model, emotions_list)
# progress bar initialization:
p = Path(str(all_wavs[0]))
# fileHandle = CordioFile
patient_ID = fileHandle.CordioExtractPatient(p)
patient_model = type(model).__name__
widgets = [FormatLabel('<patient: ' + patient_ID + '; model: ' + patient_model + '>'), ' ', Percentage(), ' ',
Bar('#'), ' ', RotatingMarker()]
progressbar = ProgressBar(widgets=widgets, maxval=df_len)
progressbar.start()
# fill df:
for (i, wav) in zip(range(df_len), all_wavs):
# progress bar update:
widgets[0] = FormatLabel('<filename-{0}>'.format(i))
progressbar.update(i)
# predict current wav
prediction = self.modelPredict(rec, wav)
# add soft decision probabilities for each emotion
patientNmodelNsentence_df.loc[i] = prediction
# add hard decision probabilities for each emotion
if(type(prediction) == dict ):
patientNmodelNsentence_df.at[i, "EmotionHardDecision"] = max(prediction, key=prediction.get)
else:
patientNmodelNsentence_df.at[i, "EmotionHardDecision"] = prediction
# insert basic information:
p = Path(str(wav))
patientNmodelNsentence_df.at[i, "Filename"] = os.path.basename(p)
patientNmodelNsentence_df.at[i, "PatientId"] = fileHandle.CordioExtractPatient(p)
patientNmodelNsentence_df.at[i, "Date"] = fileHandle.CordioExtractRecordingDateTime(p).strftime("%d/%m/%Y")
patientNmodelNsentence_df.at[i, "Time"] = fileHandle.CordioExtractRecordingDateTime(p).strftime("%H:%M:%S")
patientNmodelNsentence_df.at[i, "Sentence"] = fileHandle.CordioExtractSentence(p)
patientNmodelNsentence_df.at[i, "Language"] = fileHandle.CordioExtractLanguage(p)
# TODO: add App version, Device identifier and OS version columns
# setting clinical status:
clinicalStatus = self.get_clinical_info(clinicalInformation, fileHandle.CordioExtractRecordingDateTime(p),
patientNmodelNsentence_df.at[i, "PatientId"])
patientNmodelNsentence_df.at[i, "ClinicalStatus"] = clinicalStatus
# setting model:
patientNmodelNsentence_df.at[i, "Model"] = type(model).__name__
progressbar.finish()
return patientNmodelNsentence_df
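# Hypothetical call (patient_dir, clinicalInformation and the CordioFile handle come from the
# surrounding Cordio code base):
# wavs = glob.glob(os.path.join(patient_dir, '*.wav'))
# table = esp.create_ESP_patient_model_sentence_labeled_table(wavs, SVC(probability=True),
#                                                             'S0007', clinicalInformation, CordioFile)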
def append_ESP_patient_model_sentence_labeled_table(self, patient_dir_url, table, model, sentence, clinicalInformation, fileHandle):
"""
update a table with the following columns and data:
Patient name | Date | Time | clinical status | emotional hard decision label | -> current emotional
classes probabilities
Input:
patient_dir_url - a string representing the path to the folder containing a patient's audio files
table - a table to update
model - scikit-learn library model object
sentence - sentence id string ("S0007")
(the emotion class list is not passed in; it is derived internally from self.model_emotion_dict)
clinicalInformation - clinicalInformation object
fileHandle - fileHandle object
Output:
patientNmodelNsentence_df - data frame containing audio files names, their details and their emotion
labeling details by model, sentence and patient
"""
patient_info_column_names = ["FileIdx", "Filename", "PatientId", "Date", "Time", "Sentence", "Model", "Language", "ClinicalStatus", "EmotionHardDecision"]
# fix table if needed:
if 'Unnamed: 0' in list(table.columns)[0:10]:
table = table.drop('Unnamed: 0', axis=1)
# check table:
if list(table.columns)[0:10] != patient_info_column_names:
sys.exit('\ngiven table columns format error')
# filtering new audio files to process:
processed_wavs = table["Filename"]
all_wav_urls = glob.glob(os.path.join(patient_dir_url, '*.wav'))
all_wav_urls = [x for x in all_wav_urls if sentence in x] # filter wav by sentence
wav_url_to_process = [wav_url for wav_url in all_wav_urls if not self.isValInList(ntpath.basename(wav_url), processed_wavs)]
if wav_url_to_process != []:
new_table = self.create_ESP_patient_model_sentence_labeled_table(wav_url_to_process, model, sentence, clinicalInformation, fileHandle)
return table.append(new_table)
else:
return table
def isValInList(self, val, lst):
# check `in` for each element in the list
return any([val in x for x in lst])
def manipulate_scalar_distance_table(self, scalar_dist_table_url, emo_labeled_data_table_url):
'''
the function uses the data from emo_labeled_data_table_url to filter the data in scalar_dist_table_url
so that only rows whose test and ref audio files carry the same emotion class label are kept
Input:
scalar_dist_table_url - table that summarizes all available distances between all patient audio files for a
specific sentence
emo_labeled_data_table_url - table containing all patient audio files for a specific sentence with their
emotion class prediction
Output:
scalar_dist_df - filtered scalar_dist_table_url with an emotion label for each audio file
'''
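# The integrity check below only compares underscore-separated tokens of the two file names:
# the patient id and sentence id are expected at positions 2-3 of the scalar-distance file
# name and at positions 3-4 of the emotion-labeled file name (the exact naming convention is
# an assumption read off these indices).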
# input integrity check:
scalar_dist_table_url_name_array = os.path.basename(Path(scalar_dist_table_url)).split("_")
emo_labeled_data_table_name_array = os.path.basename(Path(emo_labeled_data_table_url)).split("_")
if scalar_dist_table_url_name_array[2] != emo_labeled_data_table_name_array[3]:
raise Exception("Input Error: tables given are of different patients"); return
if scalar_dist_table_url_name_array[3] != emo_labeled_data_table_name_array[4]:
raise Exception("Input Error: tables given are of different sentences"); return
# load data into df:
scalar_dist_df = pd.read_csv(scalar_dist_table_url)
emo_labeled_df = pd.read_csv(Path(emo_labeled_data_table_url))
# adding emo labeling to scalar_dist_df:
for audioFile, label in zip(emo_labeled_df['Filename'], emo_labeled_df['EmotionHardDecision']):
# adding testFile data:
tmpFile_idx = scalar_dist_df['testFilename'] == audioFile
scalar_dist_df.loc[tmpFile_idx, 'testFilenameEmoLabel'] = label
# adding refFile data:
tmpFile_idx = scalar_dist_df['refFilename'] == audioFile
scalar_dist_df.loc[tmpFile_idx, 'refFilenameEmoLabel'] = label
# filter scalar_dist_df to only rows with the same emotion label in test & ref file name:
scalar_dist_df = scalar_dist_df[scalar_dist_df['testFilenameEmoLabel'] == scalar_dist_df['refFilenameEmoLabel']]
return scalar_dist_df
# plot and save methods:
def append_manipulate_scalar_distance_table(self, patient_dir_url, table, model, sentence, clinicalInformation, fileHandle):
patient_column_names = ['testFileIdx', 'testFilename', 'testDate', 'testTime',
'testClinicalStatus', 'testPhn', 'testPhn2', 'testPhn3', 'testTriphone',
'testExpectedTriphone', 'testPhnQuality', 'refFileIdx', 'refFilename',
'refDate', 'refTime', 'refClinicalStatus', 'refPhn', 'refPhn2',
'refPhn3', 'refTriphone', 'refExpectedTriphone', 'refPhnQuality',
'sentence', 'relaxationLen', 'phnDtwFrameNum', 'distItar',
'distItar_longFrame', 'distItpf_300_1500',
'distItpf_longFrame_300_1500', 'distIsar', 'distIsar_longFrame',
'distIspf_300_1500', 'distIspf_longFrame_300_1500', 'distChar',
'distChar_longFrame', 'distChpf_300_1500',
'distChpf_longFrame_300_1500', 'distLs', 'distLs_longFrame', 'distLpcc',
'distLpcc_longFrame', 'distMfcc', 'distMfcc_longFrame', 'distPitch',
'distPitch_RefTestError', 'distPitch_RefTestClipError',
'distPitch_TestRefError', 'distPitch_TestRefClipError', 'distPitchFix',
'distPitchFix_RefTestError', 'distPitchFix_RefTestClipError',
'distPitchFix_TestRefError', 'distPitchFix_TestRefClipError', 'distFF1',
'distFF1_RefTestError', 'distFF1_RefTestClipError',
'distFF1_TestRefError', 'distFF1_TestRefClipError', 'distFF2',
'distFF2_RefTestError', 'distFF2_RefTestClipError',
'distFF2_TestRefError', 'distFF2_TestRefClipError', 'distPE',
'testFilenameEmoLabel', 'refFilenameEmoLabel']
# fix table if needed:
if 'Unnamed: 0' in list(table.columns)[0:10]:
table = table.drop('Unnamed: 0', axis=1)
# check table:
if list(table.columns) != patient_column_names:
sys.exit('\ngiven table columns format error')
# filtering new audio files to process:
processed_wavs = table["Filename"]
all_wav_urls = glob.glob(os.path.join(patient_dir_url, '*.wav'))
all_wav_urls = [x for x in all_wav_urls if sentence in x] # filter wav by sentence
wav_url_to_process = [wav_url for wav_url in all_wav_urls if
not self.isValInList(ntpath.basename(wav_url), processed_wavs)]
if wav_url_to_process != []:
new_table = self.create_ESP_patient_model_sentence_labeled_table(wav_url_to_process, model, sentence,
clinicalInformation, fileHandle)
return table.append(new_table)
else:
return table
# ---------------------
def patient_plotNsave_emotion_over_time(self, patient_prob_tables_urls, model_list, emotion_list, session_hour_range, setup_name):
"""
For every patient probability table: group the recordings into sessions, then for each model
and emotion plot the mean session probability over time (wet sessions shaded) and save the
figure under results_tablesOfProb/<setup_name>/<patient_id>/<model>.
"""
for patient_prob_table_url in patient_prob_tables_urls:
# loading data if available:
try:
prob_table = pd.read_csv(patient_prob_table_url)
except:
print("File not avalble in: "+patient_prob_table_url)
# fix numbers loaded as str:
for emotion in emotion_list:
if(type(prob_table[emotion][0]) == str):
prob_table[emotion] = prob_table[emotion].apply(pd.to_numeric, errors='coerce')
# get graph_df:
if (session_hour_range == []) or (type(session_hour_range) != int):
session_hour_range = 1
print("set session_hour_range to default value of 1 hour")
graph_df = self.get_table_by_session(prob_table, session_hour_range, session_action='mean')
patient_id = graph_df["Patient_id"].iloc[0]
# ensure data in the right format:
if (model_list == []) or (type(model_list[0]) != str):
model_list = graph_df.Model.unique()
# sys.warning("using all available models")
# remove unsupported models:
model_list = [val for idx, val in enumerate(self.supported_models) if val in model_list]
emotion_list = [val for idx, val in enumerate(self.suported_emotions) if val in emotion_list]
for model in model_list:
model_graphs_df = graph_df[graph_df['Model'] == model]
for emotion in emotion_list:
# plot:
fig, ax = plt.subplots(num=None, figsize=(20, 10), dpi=200, facecolor='w', edgecolor='k')
x = model_graphs_df['Date']
y = model_graphs_df[emotion]
ax.plot(x, y, linestyle='--', marker='o', color='black')
ax.fill_between(x, 0, 1, where=model_graphs_df['IsWet'],
color='aqua', alpha=0.4, transform=ax.get_xaxis_transform())
ax.legend(["mean sessions probabilty for emotion", "wet sessions"])
fig.suptitle('Mean Sessions Probability\n'+'trained with '+str(len(emotion_list))+' classes\n'+'Patient: '+patient_id+', Model: '+model+', Emotion: '+emotion, fontsize=16)
plt.xlabel('Date\n(may be multiple sessions in one dates - different hours)')
ax.xaxis.set_major_locator(plt.MaxNLocator(30)) # reducing number of plot ticks
plt.setp(ax.xaxis.get_majorticklabels(), rotation=30) # rotate plot tics
plt.grid()
plt.ylabel(emotion+' mean session probability')
# save fig:
save_url_path = "results_tablesOfProb\\"+setup_name+"\\"+patient_id+"\\"+model
Path(save_url_path).mkdir(parents=True, exist_ok=True)
save_file_name = 'MeanSessionsProbability'+'_trainedWith'+str(len(emotion_list))+'Classes'+'_'+patient_id+'_'+model+'_'+emotion
# manager = plt.get_current_fig_manager()
# manager.window.showMaximized()
if os.path.isfile(save_url_path+"\\"+save_file_name+".png"):
os.remove(save_url_path+"\\"+save_file_name+".png")
plt.ioff()
fig.savefig(save_url_path+"\\"+save_file_name+".png", bbox_inches='tight')
plt.close(fig)
def patient_plotNsave_sum_histogram(self, patient_prob_tables_urls, model_list, emotion_list, session_hour_range,setup_name):
#TODO: add a description
for patient_prob_table_url in patient_prob_tables_urls:
# loading data if available:
try:
prob_table = pd.read_csv(patient_prob_table_url)
except:
print("File not avalble in: " + patient_prob_table_url)
# fix numbers loaded as str:
for emotion in emotion_list:
if (type(prob_table[emotion][0]) == str):
prob_table[emotion] = prob_table[emotion].apply(pd.to_numeric, errors='coerce')
# get graph_df:
if (session_hour_range == []) or (type(session_hour_range) != int):
session_hour_range = 1
print("set session_hour_range to default value of 1 hour")
graph_df = self.get_table_by_session(prob_table, session_hour_range, session_action='mean', emotions_list=emotion_list)
patient_id = graph_df["Patient_id"].iloc[0]
# ensure data in the right format:
if (model_list == []) or (type(model_list[0]) != str):
model_list = graph_df.Model.unique()
# print("using all available models")
# remove unsupported models:
model_list = [val for idx, val in enumerate(self.supported_models) if val in model_list]
emotion_list = [val for idx, val in enumerate(self.suported_emotions) if val in emotion_list]
for model in model_list:
model_graphs_df = graph_df[graph_df['Model'] == model]
model_graphs_df_summed_row = model_graphs_df.sum(axis=0, numeric_only=True, skipna=True)
model_graphs_df_std_row = model_graphs_df.std(axis=0, numeric_only=True, skipna=True)
#plot:
model_graphs_df_summed_row.hist()
fig, ax = plt.subplots(num=None, figsize=(20, 10), dpi=200, facecolor='w', edgecolor='k')
x = model_graphs_df[emotion_list].columns
y = model_graphs_df_summed_row
ax.bar(x, y, yerr=model_graphs_df_std_row)
# ax.legend(["mean sessions probabilty for emotion", "wet sessions"])
fig.suptitle('Sum of all soft decision scores for all recordings per emotion\n'+'trained with ' + str(len(emotion_list)) + ' classes\n'+ 'Patient: ' + patient_id + ', Model: ' + model,fontsize=16)
plt.xlabel('Emotions/Classes')
plt.grid()
plt.ylabel('Mean probability for all emotions')
# save fig:
save_url_path = "results_tablesOfProb\\" + setup_name + "\\" + patient_id + "\\" + model
Path(save_url_path).mkdir(parents=True, exist_ok=True)
save_file_name = 'SumOfAllSoftDecisionScoresForAllRecordingsPerEmotion' + '_trainedWith' + str(
len(emotion_list)) + 'Classes' + '_' + patient_id + '_' + model
# manager = plt.get_current_fig_manager()
# manager.window.showMaximized()
if os.path.isfile(save_url_path + "\\" + save_file_name + ".png"):
os.remove(save_url_path + "\\" + save_file_name + ".png")
plt.ioff()
fig.savefig(save_url_path + "\\" + save_file_name + ".png", bbox_inches='tight')
plt.close(fig)
def patient_plotNsave_count_hard_decision_histogram(self, patient_prob_tables_urls, model_list, emotion_list, session_hour_range,setup_name):
#TODO: add a description
for patient_prob_table_url in patient_prob_tables_urls:
# loading data if available:
try:
prob_table = pd.read_csv(patient_prob_table_url)
except:
print("File not avalble in: " + patient_prob_table_url)
# fix numbers loaded as str:
for emotion in emotion_list:
if (type(prob_table[emotion][0]) == str):
prob_table[emotion] = prob_table[emotion].apply(pd.to_numeric, errors='coerce')
# get graph_df:
if (session_hour_range == []) or (type(session_hour_range) != int):
session_hour_range = 1
print("set session_hour_range to default value of 1 hour")
graph_df = self.get_table_by_session(prob_table, session_hour_range, session_action='mean', emotions_list=emotion_list)
patient_id = graph_df["Patient_id"].iloc[0]
# ensure data in the right format:
if (model_list == []) or (type(model_list[0]) != str):
model_list = graph_df.Model.unique()
# print("using all available models")
# remove unsupported models:
model_list = [val for idx, val in enumerate(self.supported_models) if val in model_list]
emotion_list = [val for idx, val in enumerate(self.suported_emotions) if val in emotion_list]
for model in model_list:
model_graphs_df = graph_df[graph_df['Model'] == model]
model_graphs_df_hard_decisions = model_graphs_df[emotion_list].idxmax(axis=1, skipna=True)
from collections import Counter
histogram_dict = Counter(model_graphs_df_hard_decisions)
#plot:
fig, ax = plt.subplots(num=None, figsize=(20, 10), dpi=200, facecolor='w', edgecolor='k')
x = emotion_list
y=list()
for emo in emotion_list: y.append(histogram_dict[emo])
ax.bar(x, y)
# ax.legend(["mean sessions probabilty for emotion", "wet sessions"])
fig.suptitle('Count of hard decision for all recordings per emotion\n'+'trained with ' + str(len(emotion_list)) + ' classes\n'+ 'Patient: ' + patient_id + ', Model: ' + model,fontsize=16)
plt.xlabel('Emotions/Classes')
plt.grid()
plt.ylabel('Count of hard decision for each emotions')
# save fig:
save_url_path = "results_tablesOfProb\\" + setup_name + "\\" + patient_id + "\\" + model
Path(save_url_path).mkdir(parents=True, exist_ok=True)
save_file_name = 'CountOfHardDecisionForAllRecordingsPerEmotion' + '_trainedWith' + str(
len(emotion_list)) + 'Classes' + '_' + patient_id + '_' + model
# manager = plt.get_current_fig_manager()
# manager.window.showMaximized()
if os.path.isfile(save_url_path + "\\" + save_file_name + ".png"):
os.remove(save_url_path + "\\" + save_file_name + ".png")
plt.ioff()
fig.savefig(save_url_path + "\\" + save_file_name + ".png", bbox_inches='tight')
plt.close(fig)
def patient_plotNsave_mean_prob_session_emotion_3d(self, patient_prob_tables_urls, model_list, emotion_list, session_hour_range, setup_name):
#TODO: add description
for patient_prob_table_url in patient_prob_tables_urls:
# loading data if available:
try:
prob_table = pd.read_csv(patient_prob_table_url)
except:
print("File not avalble in: "+patient_prob_table_url)
# fix numbers loaded as str:
for emotion in emotion_list:
if(type(prob_table[emotion][0]) == str):
prob_table[emotion] = prob_table[emotion].apply(pd.to_numeric, errors='coerce')
# get graph_df:
if (session_hour_range == []) or (type(session_hour_range) != int):
session_hour_range = 1
print("set session_hour_range to default value of 1 hour")
# graph_df = self.plot_emotion_over_time(prob_table, session_hour_range)
patient_id = prob_table["PatientName"].iloc[0]
# ensure data in the right format:
if (model_list == []) or (type(model_list[0]) != str):
model_list = prob_table.Model.unique()
# print("using all available models")
# remove unsupported models:
model_list = [val for idx, val in enumerate(self.supported_models) if val in model_list]
emotion_list = [val for idx, val in enumerate(self.suported_emotions) if val in emotion_list]
for model in model_list:
model_graphs_df = prob_table[prob_table['Model'] == model]
model_graphs_df['Date'] = pd.to_datetime(model_graphs_df['Date'], format="%d/%m/%Y")
model_graphs_mean_by_date_df = model_graphs_df.resample('d', on='Date').mean().dropna(how='all')
# add IsWet to model_graphs_mean_by_date_df
model_graphs_mean_by_date_df['IsWet'] = ""
for date in model_graphs_mean_by_date_df.index.values:
model_graphs_mean_by_date_df['IsWet'][date] = (
model_graphs_df['ClinicalStatus'][model_graphs_df['Date'] == date] == 'wet').any()
emotion_list_with_clinical_status = emotion_list.copy()
emotion_list_with_clinical_status.append('IsWet')
model_graphs_mean_by_date_df_only_emotions = model_graphs_mean_by_date_df[emotion_list_with_clinical_status]
model_graphs_mean_by_date_df_only_emotions['IsWet'] = model_graphs_mean_by_date_df_only_emotions['IsWet']*model_graphs_mean_by_date_df[emotion_list].values.max()
model_graphs_mean_by_date_df_only_emotions = model_graphs_mean_by_date_df_only_emotions.astype(float)
model_graphs_mean_by_date_df_only_emotions = model_graphs_mean_by_date_df_only_emotions.transpose()
# plot
fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True, sharey=False, figsize=(20, 10), dpi=200,
facecolor='w', edgecolor='k')
ax = sn.heatmap(model_graphs_mean_by_date_df_only_emotions, annot=False)
ax.xaxis.set_major_locator(plt.MaxNLocator(30)) # reducing number of plot ticks
plt.setp(ax.xaxis.get_majorticklabels(), rotation=30) # rotate plot tics
# rewrite x labels text:
labels = [item.get_text() for item in ax.get_xticklabels()]
for i in range(len(labels)):
labels[i] = labels[i][0:10]
ax.set_xticklabels(labels)
# color set wet index
# TODO: color set wet index
# for i, date in zip(range(len(ax.get_xticklabels())), ax.get_xticklabels()):
# # date_dt =
# if model_graphs_mean_by_date_df['IsWet'][date]:
# print(ax.get_xticklabels()[i])
# ax.get_xticklabels()[i].set_color("aqua")
# ax.fill_between(model_graphs_mean_by_date_df_only_emotions.columns.values, 0, 1, where=model_graphs_mean_by_date_df['IsWet'],
# color='aqua', alpha=0.9, transform=ax.get_xaxis_transform())
ax.yaxis.set_major_locator(plt.MaxNLocator(len(emotion_list_with_clinical_status)))
fig.suptitle('Mean Date Probability\n' + 'trained with ' + str(len(
emotion_list)) + ' classes\n' + 'Patient: ' + patient_id + ', Model: ' + model)
plt.xlabel('Date\n(may be multiple sessions in one dates - different hours)')
ax.set_ylabel('Mean Date Probability')
# save fig:
save_url_path = "results_tablesOfProb\\" + setup_name + "\\" + patient_id + "\\" + model
Path(save_url_path).mkdir(parents=True, exist_ok=True)
save_file_name = 'MeanSessionsProbabilityAllEmotionsInOneGraphHeatMap' + '_trainedWith' + str(
len(emotion_list)) + 'Classes' + '_' + patient_id + '_' + model
# manager = plt.get_current_fig_manager()
# manager.window.showMaximized()
if os.path.isfile(save_url_path + "\\" + save_file_name + ".png"):
os.remove(save_url_path + "\\" + save_file_name + ".png")
plt.ioff()
fig.savefig(save_url_path + "\\" + save_file_name + ".png", bbox_inches='tight')
plt.close(fig)
def patient_plotNsave_emotion_over_time_summerize_for_model_one_plot(self, patient_prob_tables_urls, model_list, emotion_list, session_hour_range, setup_name):
# TODO: add documentation
for patient_prob_table_url in patient_prob_tables_urls:
# loading data if available:
try:
prob_table = pd.read_csv(patient_prob_table_url)
except:
print("File not avalble in: "+patient_prob_table_url)
# fix numbers loaded as str:
for emotion in emotion_list:
if(type(prob_table[emotion][0]) == str):
prob_table[emotion] = prob_table[emotion].apply(pd.to_numeric, errors='coerce')
patient_id = prob_table["PatientName"].iloc[0]
# ensure data in the right format:
if (model_list == []) or (type(model_list[0]) != str):
model_list = prob_table.Model.unique()
# print("using all available models")
# remove unsupported models:
model_list = [val for idx, val in enumerate(self.supported_models) if val in model_list]
emotion_list = [val for idx, val in enumerate(self.suported_emotions) if val in emotion_list]
# add IsWet column:
for model in model_list:
model_graphs_df = prob_table[prob_table['Model'] == model] # filter by emotion
fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True, sharey=False, figsize=(20, 10), dpi=200, facecolor='w',
edgecolor='k')
for emotion in emotion_list:
model_graphs_df['Date'] = pd.to_datetime(model_graphs_df['Date'], format="%d/%m/%Y")
model_graphs_mean_by_date_df = model_graphs_df.resample('d', on='Date').mean().dropna(how='all')
# add IsWet to model_graphs_mean_by_date_df
model_graphs_mean_by_date_df['IsWet'] = ""
for date in model_graphs_mean_by_date_df.index.values:
model_graphs_mean_by_date_df['IsWet'][date] = (model_graphs_df['ClinicalStatus'][model_graphs_df['Date']==date]=='wet').any()
# plot:
x = model_graphs_mean_by_date_df.index.values
y = model_graphs_mean_by_date_df[emotion]
ax.plot(x, y, linestyle='--', marker='o', label=emotion)
ax.fill_between(x, 0, 1, where=model_graphs_mean_by_date_df['IsWet'],
color='aqua', alpha=0.4, transform=ax.get_xaxis_transform())
# plt.xlabel('Date\n(may be multiple sessions in one dates - different hours)')
ax.yaxis.set_major_locator(plt.MaxNLocator(4))
ax.xaxis.set_major_locator(plt.MaxNLocator(30)) # reducing number of plot ticks
plt.setp(ax.xaxis.get_majorticklabels(), rotation=30) # rotate plot tics
# plt.ylabel(emotion+' mean session probability')
ax.grid()
fig.legend(loc='best')
fig.suptitle('Mean Sessions Probability\n' + 'trained with ' + str(len(
emotion_list)) + ' classes\n' + 'Patient: ' + patient_id + ', Model: ' + model)
plt.xlabel('Date\n(may be multiple sessions in one dates - different hours)')
ax.set_ylabel('Mean Date Probability')
# save fig:
save_url_path = "results_tablesOfProb\\"+setup_name+"\\"+patient_id+"\\"+model
Path(save_url_path).mkdir(parents=True, exist_ok=True)
save_file_name = 'MeanSessionsProbabilityAllEmotionsInOneGraph'+'_trainedWith'+str(len(emotion_list))+'Classes'+'_'+patient_id+'_'+model
# manager = plt.get_current_fig_manager()
# manager.window.showMaximized()
if os.path.isfile(save_url_path+"\\"+save_file_name+".png"):
os.remove(save_url_path+"\\"+save_file_name+".png")
plt.ioff()
fig.savefig(save_url_path+"\\"+save_file_name+".png", bbox_inches='tight')
plt.close(fig)
def get_model_variance_per_patientNmodel_heatmap(self, patient_prob_tables_urls, model_list, emotion_list, session_hour_range, setup_name):
"""
Description: the function receives per-patient probability tables together with model and emotion class lists,
calculates the per-session variability (standard deviation) of the class probabilities and plots it over time (date).
Input:
:param patient_prob_tables_urls: list of urls, one per patient probability table
:type patient_prob_tables_urls:
:param model_list: list of scikit-learn models. should be set to output probabilities
:type model_list:
:param emotion_list: list of emotion classes
:type emotion_list:
:param setup_name: string that describes the setup under which the models were trained
:type setup_name:
"""
# TODO: complete documentation
for patient_prob_table_url in patient_prob_tables_urls:
# loading data if available:
try:
prob_table = pd.read_csv(patient_prob_table_url)
# remove rows with nan:
prob_table = prob_table[prob_table[emotion_list[0]].notna()]
# remap df index:
prob_table = prob_table.reset_index()
except:
print("File not avalble in: " + patient_prob_table_url)
# fix numbers loaded as str:
for emotion in emotion_list:
if (type(prob_table[emotion][0]) == str):
prob_table[emotion] = prob_table[emotion].apply(pd.to_numeric, errors='coerce')
# remove rows with nan:
prob_table = prob_table[prob_table[emotion_list[0]].notna()]
# remap df index:
prob_table = prob_table.reset_index()
# get patient id
patient_id = prob_table["PatientName"].iloc[0]
# ensure data in the right format:
if (model_list == []) or (type(model_list[0]) != str):
model_list = prob_table.Model.unique()
# print("using all available models")
# remove unsupported models:
model_list = [val for idx, val in enumerate(self.supported_models) if val in model_list]
emotion_list = [val for idx, val in enumerate(self.suported_emotions) if val in emotion_list]
# add IsWet column:
# get graph_df:
if (session_hour_range == []) or (type(session_hour_range) != int):
session_hour_range = 1
print("set session_hour_range to default value of 1 hour")
prob_std_table_by_session_df = self.get_table_by_session(prob_table, session_hour_range, session_action='std', emotions_list=emotion_list)
for model in model_list:
# cut model graph:
prob_table_by_sessionNmodel_df = prob_std_table_by_session_df[prob_std_table_by_session_df['Model']==model]
prob_table_by_sessionNmodel_df = prob_table_by_sessionNmodel_df.reset_index()
prob_table_by_sessionNmodel_df['Date'] = | pd.to_datetime(prob_table_by_sessionNmodel_df['Date'], format="%d/%m/%Y") | pandas.to_datetime |
from unittest import TestCase
import pandas as pd
from datamatch.filters import DissimilarFilter, NonOverlappingFilter
class DissimilarFilterTestCase(TestCase):
def test_valid(self):
f = DissimilarFilter('agency')
index = ['agency', 'uid']
self.assertFalse(f.valid(
pd.Series(['slidell pd', '123'], index=index),
pd.Series(['slidell pd', '456'], index=index)
))
self.assertTrue(f.valid(
pd.Series(['gretna pd', '123'], index=index),
pd.Series(['slidell pd', '456'], index=index)
))
def test_ignore_key_error(self):
index = ['agency', 'uid']
series_a = pd.Series(['slidell pd', '123'], index=index)
series_b = pd.Series(['slidell pd', '456'], index=index)
self.assertRaises(
KeyError,
lambda: DissimilarFilter('first').valid(series_a, series_b)
)
self.assertTrue(DissimilarFilter(
'first', ignore_key_error=True
).valid(series_a, series_b))
class NonOverlappingFilterTestCase(TestCase):
def test_valid(self):
f = NonOverlappingFilter('start', 'end')
index = ['uid', 'start', 'end']
self.assertFalse(f.valid(
pd.Series(['123', 0, 4], index=index),
pd.Series(['456', 3, 6], index=index)
))
self.assertFalse(f.valid(
pd.Series(['123', 10, 14], index=index),
pd.Series(['456', 3, 16], index=index)
))
self.assertFalse(f.valid(
| pd.Series(['123', 0, 4], index=index) | pandas.Series |
import numpy as np
from numpy.core.numeric import zeros_like
import pandas as pd
# [TODO] This code was made in a hurry.
# It can be improved, someday I will. Please excuse me
data = {
"a3": [1.0, 6.0, 5.0, 4.0, 7.0, 3.0,8.0,7.0,5.0],
"class": ["CP", "CP", "CN", "CP", "CN", "CN", "CN", "CP", "CN"]
}
division = np.array([2.0, 3.5, 4.5, 5.5, 6.5, 7.5])
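# 'division' holds the candidate split thresholds, placed midway between consecutive distinct
# sorted a3 values. For each threshold the crosstab below gives the class counts of the two
# children; the information gain is E_father minus the size-weighted entropies of the children.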
df = pd.DataFrame(data)
df.sort_values(by=["a3"], inplace=True)
print(df)
E_father = 0.9911
for i in division:
print("------------------------------------------------------")
print("Split in ", str(i),"\n")
dfi = df.copy()
dfi["a3"] = dfi["a3"].apply(lambda x: "C0" if x <= i else "C1")
confusion = | pd.crosstab(dfi["a3"], dfi["class"], margins=True, margins_name="Total") | pandas.crosstab |
import pandas as pd
confirmed = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data' \
'/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv '
recovered = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data' \
'/csse_covid_19_time_series/time_series_covid19_recovered_global.csv '
deaths = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data' \
'/csse_covid_19_time_series/time_series_covid19_deaths_global.csv '
deaths = pd.read_csv(deaths)
recovered = pd.read_csv(recovered)
confirmed = pd.read_csv(confirmed)
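# In the JHU CSSE time-series files the first four columns are location metadata
# (Province/State, Country/Region, Lat, Long); the daily cumulative counts start at column
# index 4, which is why the aggregations below use .iloc[:, 4:].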
def total_confirmed():
df = confirmed.iloc[:, 4:].sum().max()
return df
def total_deaths():
df = deaths.iloc[:, 4:].sum().max()
return df
def total_recovered():
df = recovered.iloc[:, 4:].sum().max()
return df
def complete_world_df():
df = pd.DataFrame({
'Confirmed': [total_confirmed()],
'Deaths': [total_deaths()],
'Recovered': [total_recovered()],
'Active': [total_confirmed() - total_deaths() - total_recovered()]
})
return df
def world_perc_df():
df = pd.DataFrame({
'Death Rate': [total_deaths() / total_confirmed() * 100],
'Recovery Rate': [total_recovered() / total_confirmed() * 100],
# 'Active': [total_confirmed() - total_deaths() - total_recovered()]
})
df
return df
def daily_confirmed():
df = confirmed.iloc[:, 4:].sum(axis=0)
df.index = pd.to_datetime(df.index)
# df /= 1_000_000
return df
def daily_deaths():
df = deaths.iloc[:, 4:].sum(axis=0)
df.index = pd.to_datetime(df.index)
return df
def daily_recovered():
df = recovered.iloc[:, 4:].sum(axis=0)
df.index = | pd.to_datetime(df.index) | pandas.to_datetime |
import pandas as pd
import numpy as np
from sklearn.ensemble import IsolationForest
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.preprocessing import MinMaxScaler
#read data
dataset = | pd.read_csv('raw_dataset.csv',engine='python') | pandas.read_csv |
from datetime import date as dt
import numpy as np
import pandas as pd
import pytest
import talib
import os
from finance_tools_py.simulation import Simulation
from finance_tools_py.simulation.callbacks import talib as cb_talib
from finance_tools_py.simulation import callbacks
@pytest.fixture
def init_global_data():
pytest.global_code = '000001'
pytest.global_data = pd.DataFrame({
'code': [pytest.global_code for x in range(1998, 2020)],
'date': [dt(y, 1, 1) for y in range(1998, 2020)],
'close':
np.random.random((len(list(range(1998, 2020))), )),
'high':
np.random.random((len(list(range(1998, 2020))), )),
'low':
np.random.random((len(list(range(1998, 2020))), )),
})
@pytest.fixture
def mock_data():
pytest.mock_code = '600036'
if "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true":
pytest.mock_data = pd.read_csv('tests/data/600036.csv', index_col=None)
else:
pytest.mock_data = pd.read_csv('data/600036.csv', index_col=None)
pytest.mock_data['date'] = pd.to_datetime(pytest.mock_data['date'])
def _mock_data(size):
return pd.DataFrame({
'close': np.random.random((len(list(range(size))), )),
'high': np.random.random((len(list(range(size))), )),
'low': np.random.random((len(list(range(size))), )),
})
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_BBANDS():
print('>>> from finance_tools_py.simulation.callbacks.talib import BBANDS')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import BBANDS
print(">>> data = pd.DataFrame({'close': [y for y in range(0, 8)]})")
data = pd.DataFrame({'close': [y for y in np.arange(0.0, 8.0)]})
print(">>> print(data['close'].values)")
print(data['close'].values)
t = 3
u = 2.4
d = 2.7
print('>>> t = {}'.format(t))
print('>>> u = {}'.format(u))
print('>>> d = {}'.format(d))
print(">>> s = Simulation(data,'',callbacks=[BBANDS(t, u, d)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[BBANDS(t, u, d)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'bbands' in col]")
cols = [col for col in s.data.columns if 'bbands' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_WILLR(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import WILLR')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import WILLR
print(">>> data = pd.DataFrame({'close': [y for y in range(0, 8)],\n\
'high': [y for y in range(0.1, 8.2)],\n\
'low': [y for y in range(0.2, 8.2)]})")
data = pd.DataFrame({
'close': [y for y in np.arange(0.0, 8.0)],
'high': [y for y in np.arange(0.1, 8.1)],
'low': [y for y in np.arange(0.2, 8.2)]
})
print(">>> print(data)")
print(data)
t = 3
print('>>> t={}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[WILLR(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[WILLR(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'willr' in col]")
cols = [col for col in s.data.columns if 'willr' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_CCI(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import CCI')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import CCI
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)],\n\
'high': [y for y in range(10.1,15.0)],\n\
'low': [y for y in range(0.0, 4.9)]})")
data = pd.DataFrame({
'close': [y for y in np.arange(5.0, 10.0)],
'high': [y for y in np.arange(10.1, 15.0)],
'low': [y for y in np.arange(0.0, 4.9)]
})
print(">>> print(data)")
print(data)
t = 3
print('>>> t = {}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[CCI(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[CCI(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'cci' in col]")
cols = [col for col in s.data.columns if 'cci' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_ATR(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import ATR')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import ATR
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)],\n\
'high': [y for y in range(10.1,15.0)],\n\
'low': [y for y in range(0.0, 4.9)]})")
data = pd.DataFrame({
'close': [y for y in np.arange(5.0, 10.0)],
'high': [y for y in np.arange(10.1, 15.0)],
'low': [y for y in np.arange(0.0, 4.9)]
})
print(">>> print(data)")
print(data)
t = 3
print('>>> t = {}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[ATR(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[ATR(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'atr' in col]")
cols = [col for col in s.data.columns if 'atr' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_LINEARREG_SLOPE(init_global_data):
    print('>>> from finance_tools_py.simulation.callbacks.talib import LINEARREG_SLOPE')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import LINEARREG_SLOPE
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)]})")
data = pd.DataFrame({
'close': [y for y in np.arange(5.0, 10.0)]
})
print(">>> print(data)")
print(data)
t = 3
print('>>> t = {}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[LINEARREG_SLOPE('close',t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[LINEARREG_SLOPE('close',t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'lineSlope' in col]")
cols = [col for col in s.data.columns if 'lineSlope' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_TRANGE(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import TRANGE')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import TRANGE
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)],\n\
'high': [y for y in range(10.1,15.0)],\n\
'low': [y for y in range(0.0, 4.9)]})")
data = pd.DataFrame({
'close': [y for y in np.arange(5.0, 10.0)],
'high': [y for y in np.arange(10.1, 15.0)],
'low': [y for y in np.arange(0.0, 4.9)]
})
print(">>> print(data)")
print(data)
print(">>> s = Simulation(data,'',callbacks=[TRANGE()])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[TRANGE()])
s.simulate()
print(">>> cols = [col for col in data.columns if 'trange' in col]")
cols = [col for col in s.data.columns if 'trange' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_NATR(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import NATR')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import NATR
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)],\n\
'high': [y for y in range(10.1,15.0)],\n\
'low': [y for y in range(0.0, 4.9)]})")
data = pd.DataFrame({
'close': [y for y in np.arange(5.0, 10.0)],
'high': [y for y in np.arange(10.1, 15.0)],
'low': [y for y in np.arange(0.0, 4.9)]
})
print(">>> print(data)")
print(data)
t = 3
print('>>> t = {}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[NATR(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[NATR(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'atr' in col]")
cols = [col for col in s.data.columns if 'natr' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_MFI(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import MFI')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import MFI
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)],\n\
'high': [y for y in range(10.1, 15.0)],\n\
'low': [y for y in range(0.0, 4.9)],\n\
'volume': [y for y in range(50.0, 100.0, 10.0)]})")
data = pd.DataFrame({
'close': [y for y in np.arange(5.0, 10.0)],
'high': [y for y in np.arange(10.1, 15.0)],
'low': [y for y in np.arange(0.0, 4.9)],
'volume': [y for y in np.arange(50.0, 100.0, 10.0)]
})
print(">>> print(data)")
print(data)
t = 3
print('>>> t = {}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[MFI(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[MFI(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'mfi' in col]")
cols = [col for col in s.data.columns if 'mfi' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_SMA(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import SMA')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import SMA
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)]})")
data = pd.DataFrame({'close': [y for y in np.arange(5.0, 10.0)]})
print(">>> print(data)")
print(data)
t = 3
print('>>> t = {}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[SMA(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[SMA(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'sma' in col]")
cols = [col for col in s.data.columns if 'sma' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_EMA(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import EMA')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import EMA
print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)]})")
data = pd.DataFrame({'close': [y for y in np.arange(5.0, 10.0)]})
print(">>> print(data)")
print(data)
t = 3
print('>>> t = {}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[EMA(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[EMA(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'ema' in col]")
cols = [col for col in s.data.columns if 'ema' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_DEMA(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import DEMA')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import DEMA
print(">>> data = pd.DataFrame({'close': [y for y in range(0, 8)]})")
data = pd.DataFrame({'close': [y for y in np.arange(0.0, 8.0)]})
print(">>> print(data['close'].values)")
print(data['close'].values)
t = 3
print('>>> t={}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[DEMA(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[DEMA(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'dema' in col]")
cols = [col for col in s.data.columns if 'dema' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_WMA(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import WMA')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import WMA
print(">>> data = pd.DataFrame({'close': [y for y in range(0, 8)]})")
data = pd.DataFrame({'close': [y for y in np.arange(0.0, 8.0)]})
print(">>> print(data['close'].values)")
print(data['close'].values)
t = 3
print('>>> t={}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[WMA(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[WMA(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'dema' in col]")
cols = [col for col in s.data.columns if 'wma' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_RSI(init_global_data):
print('>>> from finance_tools_py.simulation.callbacks.talib import RSI')
print('>>> from finance_tools_py.simulation import Simulation')
from finance_tools_py.simulation.callbacks.talib import RSI
print(">>> data = pd.DataFrame({'close': [y for y in range(0, 8)]})")
data = pd.DataFrame({'close': [y for y in np.arange(0.0, 8.0)]})
print(">>> print(data['close'].values)")
print(data['close'].values)
t = 3
print('>>> t={}'.format(t))
print(">>> s = Simulation(data,'',callbacks=[RSI(t)])")
print('>>> s.simulate()')
s = Simulation(data, '', callbacks=[RSI(t)])
s.simulate()
print(">>> cols = [col for col in data.columns if 'rsi' in col]")
cols = [col for col in s.data.columns if 'rsi' in col]
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(s.data[col].values, 2)))
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="Skipping this test on Travis CI. This is an example.")
def test_example_Rolling_Future(init_global_data):
print(">>> data = pd.DataFrame({'close': [y for y in range(0, 8)]})")
print(">>> print(data['close'].values)")
data = pd.DataFrame({'close': [y for y in range(0, 8)]})
print(data['close'].values)
t = 3
print('>>> t={}'.format(t))
print('>>> print(Rolling_Future(t).on_preparing_data(data))')
callbacks.Rolling_Future(t).on_preparing_data(data)
print(">>> cols=[col for col in data.columns if col!='close']")
cols = [col for col in data.columns if col != 'close']
print(">>> for col in cols:")
print(">>> print('{}:{}'.format(col,np.round(data[col].values,2)))")
for col in cols:
print('{}:{}'.format(col, np.round(data[col].values, 2)))
def test_BBANDS(init_global_data):
t = 5
u = 2
d = 2
b = cb_talib.BBANDS(t, u, d)
b.on_preparing_data(pytest.global_data)
print(pytest.global_data.info())
    col_up = 'bbands_{}_{}_{}_up'.format(t, u, d)  # Bollinger upper band
    col_mean = 'bbands_{}_{}_{}_mean'.format(t, u, d)  # Bollinger middle band
    col_low = 'bbands_{}_{}_{}_low'.format(t, u, d)  # Bollinger lower band
assert col_up in pytest.global_data.columns
assert col_mean in pytest.global_data.columns
assert col_low in pytest.global_data.columns
up, mean, low = talib.BBANDS(pytest.global_data['close'], t, u, d)
assert pd.Series.equals(up, pytest.global_data[col_up])
assert pd.Series.equals(mean, pytest.global_data[col_mean])
assert pd.Series.equals(low, pytest.global_data[col_low])
def test_SMA(init_global_data):
t = 5
b = cb_talib.SMA(t)
b.on_preparing_data(pytest.global_data)
print(pytest.global_data.info())
col = 'sma_close_{}'.format(t)
assert col in pytest.global_data.columns
real = talib.SMA(pytest.global_data['close'], t)
assert pd.Series.equals(real, pytest.global_data[col])
def test_WMA(init_global_data):
t = 5
b = cb_talib.WMA(t)
b.on_preparing_data(pytest.global_data)
print(pytest.global_data.info())
col = 'wma_close_{}'.format(t)
assert col in pytest.global_data.columns
real = talib.WMA(pytest.global_data['close'], t)
assert pd.Series.equals(real, pytest.global_data[col])
def test_EMA(init_global_data):
t = 5
b = cb_talib.EMA(t)
b.on_preparing_data(pytest.global_data)
print(pytest.global_data.info())
col = 'ema_close_{}'.format(t)
assert col in pytest.global_data.columns
real = talib.EMA(pytest.global_data['close'], t)
assert pd.Series.equals(real, pytest.global_data[col])
def test_Sim_BBANDS(init_global_data):
"""测试通过回测调用BBANDS,逻辑与`test_BBANDS`一致"""
t = 5
u = 2
d = 2
b = cb_talib.BBANDS(t, u, d)
s = Simulation(pytest.global_data, pytest.global_code, callbacks=[b])
s.simulate()
print(s.data.info())
    col_up = 'bbands_{}_{}_{}_up'.format(t, u, d)  # Bollinger upper band
    col_mean = 'bbands_{}_{}_{}_mean'.format(t, u, d)  # Bollinger middle band
    col_low = 'bbands_{}_{}_{}_low'.format(t, u, d)  # Bollinger lower band
assert col_up in s.data.columns
assert col_mean in s.data.columns
assert col_low in s.data.columns
up, mean, low = talib.BBANDS(s.data['close'], t, u, d)
assert pd.Series.equals(up, s.data[col_up])
    assert pd.Series.equals(mean, s.data[col_mean])
    assert pd.Series.equals(low, s.data[col_low])
import urllib.request
import json
import datetime
from datetime import date, timedelta
import time
import sys
import pymongo
import collections
from bson.json_util import loads
import pandas as pd
import math as m
import numpy as np
import talib
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def get_data(url):
req = urllib.request.Request(url)
response = urllib.request.urlopen(req)
file = response.read()
result = json.loads(file)
    result = pd.DataFrame.from_records(result)
return result
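# Example usage (hedged sketch): the URL below is a placeholder, not a real endpoint.
# get_data expects the response body to be a JSON array of records.
if __name__ == '__main__':
    SAMPLE_URL = 'https://example.com/api/records.json'  # hypothetical endpoint
    records = get_data(SAMPLE_URL)
    print(records.head())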
from utils import *
import time
import numpy as np
from mxnet import nd, autograd, gluon
from mxnet.gluon import nn, rnn
import mxnet as mx
import datetime
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import math
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
def parser(x):
return datetime.datetime.strptime(x,'%Y-%m-%d')
dataset_ex_df = pd.read_csv('data/panel_data_close.csv', header=0, parse_dates=[0], date_parser=parser)
dataset_ex_df[['Date', 'GS']].head(3)
print('There are {} number of days in the dataset.'.format(dataset_ex_df.shape[0]))
plt.figure(figsize=(14, 5), dpi=100)
plt.plot(dataset_ex_df['Date'], dataset_ex_df['GS'], label='Goldman Sachs stock')
plt.vlines(datetime.date(2016,4, 20), 0, 270, linestyles='--', colors='gray', label='Train/Test data cut-off')
plt.xlabel('Date')
plt.ylabel('USD')
plt.title('Figure 2: Goldman Sachs stock price')
plt.legend()
plt.show()
def get_technical_indicators(dataset):
# Create 7 and 21 days Moving Average
dataset['ma7'] = dataset['price'].rolling(window=7).mean()
dataset['ma21'] = dataset['price'].rolling(window=21).mean()
# Create MACD
    dataset['26ema'] = dataset['price'].ewm(span=26).mean()
    dataset['12ema'] = dataset['price'].ewm(span=12).mean()
# -*- coding: utf-8 -*-
"""
Classe for technical analysis of assets.
Created on Sat Oct 31 19:35:28 2020
@author: ryanar
"""
import math
import matplotlib.pyplot as plt
from stock_analysis import StockReader, StockVisualizer, Technical, AssetGroupVisualizer, StockAnalyzer, AssetGroupAnalyzer, StockModeler
from stock_analysis.utils import group_stocks, describe_group, make_portfolio
import numpy as np
import pandas as pd
import scipy.optimize as sco
class Asset:
def __init__(self,name, ticker, assetClass='Stock', data=None):
"""
Create a StockAnalyzer by passing in a pandas DataFrame of OHLC data.
Parameters
----------
df : Dataframe
Contains the stock open, high, low and close and index is the Date.
Returns
-------
None.
"""
self.name = name
self.ticker = ticker
self.assetClass = assetClass
if(data):
self.data = data()
else:
reader = StockReader('2010/01/01','2021/01/01')
if(assetClass=='Stock'):
self.data = reader.get_ticker_data(self.ticker)
else:
self.data = reader.get_bitcoin_data()
self.technical = Technical(self.data)
self.analyzer = StockAnalyzer(self.data)
self.visualizer = StockVisualizer(self.data)
def getName(self):
return self.name
def getTicker(self):
return self.ticker
def getAssetClass(self):
return self.assetClass
def getData(self):
return self.data
def getClose(self):
return self.data.close
def getLogReturn(self):
logReturn = np.log(self.data / self.data.shift(1))
return logReturn.dropna()
def getNormalityTests(self):
print("Normality Test for {}".format(self.name))
print(30 * '-')
return self.analyzer.normalityTests()
def getAnnualizedVolatility(self):
return self.analyzer.annualized_volatility()
def get52WeekHigh(self):
return self.analyzer.get52WeekHigh()
def get52WeekLow(self):
return self.analyzer.get52WeekLow()
def getCumulativeReturns(self):
return self.analyzer.cumulative_returns()
def getCV(self):
return self.analyzer.cv()
def getQCD(self):
return self.analyzer.qcd()
def getVHF(self):
return self.technical.VerticalHorizontalFilter()
def getADX(self):
return self.technical.AverageDirectionalIndex()
def getStopLoss(self):
return self.technical.AverageTrueRangeStopLoss().iloc[-1]
def plotTimeFrame(self,column='close'):
ax = self.visualizer.evolution_over_time(
column,
figsize=(10, 4),
legend=False,
title='%s closing price over time' % (self.name),
)
self.visualizer.add_reference_line(
ax,
x=self.data.high.idxmax(),
color='k',
linestyle=':',
alpha=0.5,
label=f'highest value ({self.data.high.idxmax():%b %d})'
)
ax.set_ylabel('Price ($)')
def plotMovingAverage(self,column='close', average=['60D','200D']):
ax = self.visualizer.moving_average(column, average, title='%s closing price over time' % (self.name))
ax.set_ylabel('Price ($)')
def plotRenko(self):
self.visualizer.renko()
def plotAverageTrueRange(self):
self.visualizer.AverageTrueRange()
def plotIchimokuCloud(self):
self.visualizer.IchimokuCloud(self.name)
def plotSTC(self):
self.visualizer.Schaff()
def plotVolume(self):
self.visualizer.trade_volume()
def plotAfterHours(self):
self.visualizer.after_hours_trades()
def plotCandleStick(self):
self.visualizer.candle_stick()
def plotQQPlot(self):
self.visualizer.qqplot()
def plotOnBalanceVolume(self,ewm):
self.visualizer.OnBalanceVolume(ewm)
def plotBollingerBands(self):
self.visualizer.bollingerbands()
def plotATRTrainingStops(self, timeframe=14):
self.visualizer.ATRTrainingStops(timeframe)
def plotRelativeStrengthIndex(self, timeframe=14):
self.visualizer.RelativeStrengthIndex(timeframe)
def plotMonthlyReturn(self):
percentChange = (self.data.close - self.data.close.shift(1)) / self.data.close
percentChange = percentChange * 100
monthlyPercentChange = percentChange.groupby([percentChange.index.year.rename('Year'), percentChange.index.month.rename('Month')]).sum()
self.visualizer.monthlyReturnsBoxplot(monthlyPercentChange, self.name)
#return monthlyPercentChange
def plotMACD(self):
self.visualizer.MACD()
def plotHistogram(self, column='close'):
self.visualizer.histogram(column)
def plotOpenToClose(self):
print("plotOpenToClose currently doesn't work.")
self.visualizer.open_to_close()
plt.show()
class Portfolio:
""" Class for providing analysis of a stock portfolio. """
def __init__(self):
"""
Create a StockAnalyzer by passing in a pandas DataFrame of OHLC data.
Parameters
----------
df : Dataframe
Contains the stock open, high, low and close and index is the Date.
Returns
-------
None.
"""
self.holdings = []
self.groupVisualizer=AssetGroupVisualizer(self.holdings)
self.logMeanReturn = 0
self.logAnnualCov = 0
def addAsset(self, name, ticker, assetClass='Stock', data=None):
self.holdings.append(Asset(name, ticker,assetClass, data))
def numberOfHoldings(self):
return len(self.holdings)
def getHoldings(self):
return self.holdings
def listHoldings(self):
for holding in self.holdings:
print("Name: %s, Ticker: %s, Asset Type: %s, 52 Week Low: %.2f, 52 Week High: %.2f" % (holding.getName(), holding.getTicker(), holding.getAssetClass(), holding.get52WeekLow(), holding.get52WeekHigh()))
def _getLogClose(self):
df = pd.DataFrame()
for holding in self.holdings:
df[holding.getName()] = holding.getClose()
logReturn = np.log(df / df.shift(1))
return logReturn.dropna()
def logReturnMean(self):
rets = self._getLogClose()
self.logMeanReturn = rets.mean()
def logAnnualVolatility(self):
rets = self._getLogClose()
self.logAnnualCov = rets.cov() * 252
# def min_func_sharpe(weights):
# return -port_ret(weights) / port_vol(weights)
def efficientFrontier(self):
cons = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
print(cons)
# Set the bounds for the 0-100%
bnds = tuple((0, 1) for x in range(self.numberOfHoldings()))
# Create a equal weight of all of the holdings
equalWeighting = np.array(self.numberOfHoldings() * [1. / self.numberOfHoldings(),])
opts = sco.minimize(min_func_sharpe, equalWeighting, method='SLSQP', bounds=bnds, constraints=cons)
# def volatility(self):
# return np.sqrt(np.dot(weights.T, np.dot(rets.cov() * 252, weights)))
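    # NOTE (assumption): `min_func_sharpe`, `port_ret` and `port_vol` referenced in
    # `efficientFrontier` are not defined in this snippet. A minimal sketch of the usual
    # helpers is given below as methods; `efficientFrontier` would then pass
    # `self._min_func_sharpe` to `sco.minimize`. Both rely on `logReturnMean()` and
    # `logAnnualVolatility()` having been called first.
    def _port_ret(self, weights):
        # Annualised portfolio log return for the given weights (252 trading days).
        return np.sum(self.logMeanReturn * weights) * 252

    def _port_vol(self, weights):
        # Annualised portfolio volatility from the annualised covariance of log returns.
        return np.sqrt(np.dot(weights.T, np.dot(self.logAnnualCov, weights)))

    def _min_func_sharpe(self, weights):
        # Negative Sharpe ratio (risk-free rate assumed to be 0), to be minimised.
        return -self._port_ret(weights) / self._port_vol(weights)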
def printStatistics(self):
for holding in self.holdings:
print("Name: %s, CV: %s, Annual Volatility: %.2f, qcd: %.2f" % (holding.getName(), holding.getCV(), holding.getAnnualizedVolatility(), holding.getQCD()))
def getStatistics(self):
df = pd.DataFrame()
for holding in self.holdings:
data = {
'Name': holding.getName(),
'CV':holding.getCV(),
'Annual Volatility':holding.getAnnualizedVolatility(),
'qcd':holding.getQCD()
}
            stock = pd.Series(data)
import pandas as pd
import sys
from datetime import datetime
from dotenv import load_dotenv
from os import getcwd, getenv, startfile
from tqdm import tqdm
from tweepy import API, Cursor, OAuthHandler, TweepyException
# Loads .env file
load_dotenv()
cwd = getcwd()
today = datetime.now()
# Gets API Credentials for Tweepy
auth = OAuthHandler(getenv("consumer_key"), getenv("consumer_secret"))
auth.set_access_token(getenv("access_key"), getenv("access_secret"))
auth_api = API(auth, wait_on_rate_limit=True)
# Dictionary to store tweets scraped
tweets_dict = {
"Date": [],
"Account Name": [],
"Handle": [],
"URL": [],
"Likes": [],
"Retweets": [],
"Text": [],
}
# Dictionary to store accounts follower count
accounts_dict = {
"Screen Name": [],
"Handle": [],
"URL": [],
"Followers": [],
"Verified": [],
}
""" Gets tweets from single handle """
def get_tweets_single(handle):
try:
# Scrapes for each handle
tweets = auth_api.user_timeline(
screen_name=handle, count=500, include_rts=True, tweet_mode="extended"
)
for tweet in tweets[:500]:
tweets_dict["Date"].append(tweet.created_at)
tweets_dict["Account Name"].append(tweet.user.name)
tweets_dict["Handle"].append(handle)
tweets_dict["URL"].append(f"https://twitter.com/{handle}/status/{tweet.id}")
tweets_dict["Likes"].append(tweet.favorite_count)
tweets_dict["Retweets"].append(tweet.retweet_count)
tweets_dict["Text"].append(tweet.full_text)
except Exception:
sys.exit("\nError. Account handle is suspended, private, or nonexistent.\n")
""" Gets tweets from a list of handles """
def get_tweets_multi(handle_list):
# Counts number of handles unable to scrape
except_count = 0
except_list = []
with tqdm(total=len(handle_list), file=sys.stdout) as pbar:
# Scrapes for each handle
for handle in handle_list:
try:
tweets = auth_api.user_timeline(
screen_name=handle,
count=500,
include_rts=True,
tweet_mode="extended",
)
for tweet in tweets[:500]:
tweets_dict["Date"].append(tweet.created_at)
tweets_dict["Account Name"].append(tweet.user.name)
tweets_dict["Handle"].append(handle)
tweets_dict["URL"].append(
f"https://twitter.com/{handle}/status/{tweet.id}"
)
tweets_dict["Likes"].append(tweet.favorite_count)
tweets_dict["Retweets"].append(tweet.retweet_count)
tweets_dict["Text"].append(tweet.full_text)
# Update progress bar
pbar.update(1)
except TweepyException:
except_count += 1
except_list.append(handle)
pass
if except_count >= 1:
print(
f"\nUnable to scrape from {except_count} accounts. "
"The following are suspended, private, or incorrect:"
)
print(f"{except_list}\n")
""" Load a text file as a list."""
def load_text(file):
try:
with open(file) as in_file:
loaded_txt = in_file.read().strip().split("\n")
return loaded_txt
except IOError as e:
print(
"{}\nError opening {}.".format(e, file),
file=sys.stderr,
)
sys.exit(1)
""" Formats the scraped tweets into an excel file """
def format_tweets(handle, tweets_dict):
file_name = f"Data/{handle} {today.month}-{today.day}.xlsx"
# Formats Tweets into Excel
df = pd.DataFrame(tweets_dict)
# Removes timezone from Date to prevent excel issues
df["Date"] = df["Date"].apply(lambda a: pd.to_datetime(a).date())
writer = pd.ExcelWriter(file_name, engine="xlsxwriter")
df.to_excel(
writer,
sheet_name=handle,
encoding="utf-8",
index=False,
)
workbook = writer.book
worksheet = writer.sheets[handle]
worksheet.freeze_panes(1, 0)
worksheet.autofilter("A1:G1")
top_row = workbook.add_format(
{"bg_color": "black", "font_color": "white"}
) # sets the top row colors
worksheet.set_column("A:A", 18)
worksheet.set_column("B:B", 20)
worksheet.set_column("C:C", 20)
worksheet.set_column("D:D", 10)
worksheet.set_column("E:E", 10)
worksheet.set_column("F:F", 10)
worksheet.set_column("G:G", 75)
# Sets the top row/header font and color
for col_num, value in enumerate(df.columns.values):
worksheet.write(0, col_num, value, top_row)
writer.save()
print(f"Data saved to {cwd}\{file_name}\n")
# Asks user if they want to open the file
opensheet = input("Press y to open the file:\n").lower()
if opensheet == "y":
startfile(f"{cwd}/{file_name}")
else:
pass
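# Example usage (hedged): "jack" is a placeholder handle; running this needs the Twitter
# credentials loaded from .env above, so it is guarded behind __main__.
if __name__ == "__main__":
    example_handle = "jack"  # hypothetical handle
    get_tweets_single(example_handle)
    format_tweets(example_handle, tweets_dict)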
""" Checks list and returns list and number of handles that are unavailable """
def check_handles(handle_list):
# Counts number of handles unable to scrape
except_count = 0
except_list = []
with tqdm(total=len(handle_list), file=sys.stdout) as pbar:
for handle in handle_list:
try:
auth_api.get_user(screen_name=handle)
# Update progress bar
pbar.update(1)
except TweepyException:
except_count += 1
except_list.append(handle)
pass
if except_count >= 1:
print(
f"\nUnable to connect with {except_count} handles. "
"The following are suspended, private, or incorrect:"
)
print(f"{except_list}\n")
else:
print(
"\nNone of the handles on the list are suspended, private, or incorrect.\n"
)
""" Get the list of followers for a single account """
def get_follower_list(handle, filename):
# Gets follower count of the user
user = auth_api.get_user(screen_name=handle)
num_followers = user.followers_count
print(f"Fetching names for {num_followers} followers...\n")
# Keeps count for periodic saving
count = 0
count_stop_points = [
int(num_followers / 10),
int((num_followers / 10) * 2),
int((num_followers / 10) * 3),
int((num_followers / 10) * 4),
int((num_followers / 10) * 5),
int((num_followers / 10) * 6),
int((num_followers / 10) * 7),
int((num_followers / 10) * 8),
int((num_followers / 10) * 9),
num_followers,
]
# Count number of suspended or banned accounts
dead_count = 0
# Adds # at the end of multiple sheets to prevent saving over
sheet_count = 0
# Progress bar
with tqdm(total=num_followers, file=sys.stdout) as pbar:
for i in range(1):
for follower in Cursor(auth_api.get_followers, screen_name=handle).items(
num_followers
):
try:
accounts_dict["Screen Name"].append(follower.name)
accounts_dict["Handle"].append(follower.screen_name)
accounts_dict["URL"].append(
f"https://twitter.com/{follower.screen_name}"
)
accounts_dict["Followers"].append(follower.followers_count)
accounts_dict["Verified"].append(follower.verified)
count += 1
pbar.update(1)
if count in count_stop_points:
sheet_count += 1
print("\nStop point reached - saving CSV file...\n")
follow_df = pd.DataFrame(accounts_dict)
follow_df.to_csv(
f"Data/Checkpoints/{handle}_Follower_List{sheet_count}.csv"
)
except TweepyException:
count += 1
dead_count += 1
pbar.update(1)
print(f"\nNumber of suspended/banned/deleted accounts: {dead_count}\n")
# Formats Results into Excel
df = pd.DataFrame(accounts_dict)
writer = pd.ExcelWriter(filename, engine="xlsxwriter")
df.to_excel(
writer,
sheet_name="Accounts",
encoding="utf-8",
index=False,
)
workbook = writer.book
worksheet = writer.sheets["Accounts"]
worksheet.freeze_panes(1, 0)
worksheet.autofilter("A1:C1")
top_row = workbook.add_format(
{"bg_color": "black", "font_color": "white"}
) # sets the top row colors
num_format = workbook.add_format({"num_format": "#,##0"})
worksheet.set_column("A:A", 18)
worksheet.set_column("B:B", 20)
worksheet.set_column("C:C", 10, num_format)
# Sets the top row/header font and color
for col_num, value in enumerate(df.columns.values):
worksheet.write(0, col_num, value, top_row)
writer.save()
print(f"Data saved to {cwd}\{filename}\n")
# Asks user if they want to open the file
opensheet = input("Do you want to open the excel file? (y or n): \n").lower()
if opensheet == "y":
startfile(f"{cwd}/{filename}")
else:
pass
return accounts_dict
""" Gets the follower count of a list of handles """
def get_follower_count(filename, account_list):
# Progress bar
with tqdm(total=len(account_list), file=sys.stdout) as pbar:
for i in range(1):
# Scrapes followers for each handle
for handle in account_list:
try:
users = auth_api.get_user(screen_name=handle)
accounts_dict["Screen Name"].append(users.name)
accounts_dict["Handle"].append(handle)
accounts_dict["URL"].append(f"https://twitter.com/{handle}")
accounts_dict["Followers"].append(users.followers_count)
accounts_dict["Verified"].append(users.verified)
pbar.update(1)
except TweepyException:
accounts_dict["Screen Name"].append(handle.name)
accounts_dict["Handle"].append(handle)
accounts_dict["URL"].append(f"https://twitter.com/{handle}")
accounts_dict["Followers"].append("N/A")
accounts_dict["Verified"].append("N/A")
pass
# Formats Results into Excel
    df = pd.DataFrame(accounts_dict)
# -*- coding: utf-8 -*-
'''
Created on Thu Jan 01 16:00:32 2015
@author: LukasHalim
Forked by @edridgedsouza
'''
import sqlite3
import os
import pandas as pd
from contextlib import closing
class Database():
def __init__(self, path='Godwin.db'):
self.path = os.path.abspath(path)
if not os.path.isfile(self.path):
self.initialize()
def initialize(self):
conn = sqlite3.connect(self.path) # or use :memory: to put it in RAM
cursor = conn.cursor()
        cursor.execute('DROP TABLE IF EXISTS failures')
cursor.execute('DROP TABLE IF EXISTS post')
cursor.execute('''
CREATE TABLE post
(post_id text,
failure_in_post integer,
subreddit text,
post_score integer,
num_comments integer);
''')
cursor.execute('''
CREATE TABLE failures
(post_id text,
comment_id text,
num_prev_comments integer,
FOREIGN KEY (post_id)
REFERENCES post (post_id)
ON DELETE CASCADE);
''')
conn.commit()
conn.close()
return self
def reset_db(self):
if os.path.exists(self.path):
os.remove(self.path)
try:
os.remove(f'{self.path}-journal')
except:
pass
self.initialize()
return self
def execute_sql(self, sql, params=()):
with closing(sqlite3.connect(self.path)) as conn:
with conn:
cur = conn.cursor()
cur.execute(sql, params)
res = cur.fetchall()
                if res:
                    df = pd.DataFrame(res)
                    return df
                return None
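# Example usage (hedged sketch): open (or create) the SQLite file and run an ad-hoc query;
# the table names come from initialize() above. Returns None while the tables are empty.
if __name__ == '__main__':
    db = Database('Godwin.db')
    posts = db.execute_sql('SELECT * FROM post LIMIT 10')
    print(posts)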
import pandas as pd
from statsmodels.stats.diagnostic import acorr_ljungbox
from statsmodels.stats.stattools import jarque_bera
from sklearn.metrics import (
mean_absolute_error,
r2_score,
median_absolute_error,
mean_squared_error,
mean_squared_log_error,
)
class AnnualTimeSeriesSplit():
"""
Instantiate with number of folds
split accepts a pandas dataframe indexed by datetime covering multiple years sorted ascending
Splits to the number of folds, with a single year returned as the validation set
Walks up the timeseries yielding the indices from each train, test split
"""
def __init__(self, n_splits):
self.n_splits = n_splits
def split(self, X, y=None, groups=None):
years = X.index.year.unique()
for ind, year in enumerate(years[0:self.n_splits]):
final_train_year = years[-1] - self.n_splits + ind
train_final_index = X.index.get_loc(str(final_train_year)).stop
test_final_index = X.index.get_loc(str(final_train_year + 1)).stop
train_indices = list(range(0, train_final_index))
test_indices = list(range(train_final_index, test_final_index))
yield train_indices, test_indices
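# Example usage (hedged sketch): three annual folds over a synthetic daily series; each
# test fold is exactly one calendar year.
if __name__ == '__main__':
    _idx = pd.date_range('2015-01-01', '2019-12-31', freq='D')
    _demo = pd.DataFrame({'y': range(len(_idx))}, index=_idx)
    for _train_idx, _test_idx in AnnualTimeSeriesSplit(n_splits=3).split(_demo):
        print(_demo.index[_train_idx[-1]].year, '->', _demo.index[_test_idx[0]].year)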
class RollingAnnualTimeSeriesSplit():
"""
Instantiate with number of folds
split accepts a pandas dataframe indexed by datetime covering multiple years sorted ascending
Splits to the number of folds, with a single year returned as the validation set
Walks up the timeseries yielding the indices from each train, test split
"""
def __init__(self, n_splits, goback_years=5):
self.n_splits = n_splits
self.goback_years = goback_years
def split(self, X, y=None, groups=None):
years = X.index.year.unique()
for ind, year in enumerate(years[0:self.n_splits]):
final_train_year = years[-1] - self.n_splits + ind
start_train_year = final_train_year - self.goback_years +1
print(f'{final_train_year+1}')
train_start_index = X.index.get_loc(str(start_train_year)).start
train_final_index = X.index.get_loc(str(final_train_year)).stop
test_final_index = X.index.get_loc(str(final_train_year + 1)).stop
train_indices = list(range(train_start_index, train_final_index))
test_indices = list(range(train_final_index, test_final_index))
yield train_indices, test_indices
def bound_precision(y_actual: pd.Series, y_predicted: pd.Series, n_to_check=5):
"""
Accepts two pandas series, and an integer n_to_check
Series are:
+ actual values
+ predicted values
Sorts each series by value from high to low, and cuts off each series at n_to_check
Determines how many hits - ie how many of the indices in the actual series are in the predicted series indices
Returns number of hits divided by n_to_check
"""
y_act = y_actual.copy(deep=True)
y_pred = y_predicted.copy(deep=True)
y_act.reset_index(drop=True, inplace=True)
y_pred.reset_index(drop=True, inplace=True)
act_dates =set( y_act.sort_values(ascending=False).head(n_to_check).index)
pred_dates = set(y_pred.sort_values(ascending=False).head(n_to_check).index)
bound_precision = len(act_dates.intersection(pred_dates))/ n_to_check
return bound_precision
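# Worked example (hedged): with y_actual = [5, 1, 4, 2, 3] and y_predicted = [4, 2, 5, 1, 3]
# and n_to_check=2, the top-2 actual positions are {0, 2} and the top-2 predicted positions
# are {2, 0}, so the hit rate is 2/2 = 1.0:
#   bound_precision(pd.Series([5, 1, 4, 2, 3]), pd.Series([4, 2, 5, 1, 3]), n_to_check=2)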
def run_cross_val(X, y, cv_splitter, pipeline, scoring=["bound_precision", "mae"]):
scoring_dict = {
"bound_precision": bound_precision,
"mae": mean_absolute_error,
"r2_score": r2_score,
"median_absolute_error": median_absolute_error,
"mean_squared_error": mean_squared_error,
"mean_squared_log_error": mean_squared_log_error,
}
scores_dicts = {}
scores_dicts["train"] = {}
scores_dicts["test"] = {}
for metric in scoring:
scores_dicts["train"][metric] = []
scores_dicts["test"][metric] = []
for train_indx, val_indx in cv_splitter.split(X):
X_train = X.iloc[train_indx]
y_train = y.iloc[train_indx]
X_test = X.iloc[val_indx]
y_test = y.iloc[val_indx]
pipeline.fit(X_train, y_train)
y_train_pred = pipeline.predict(X_train)
for metric in scoring:
score = scoring_dict[metric](y_train, y_train_pred)
scores_dicts["train"][metric].append(score)
y_test_pred = pipeline.predict(X_test)
for metric in scoring:
score = scoring_dict[metric](y_test, y_test_pred)
scores_dicts["test"][metric].append(score)
#print(f'test: {score}')
return scores_dicts
# data_split_dict
# traintest_split_dict
# metric_dict
# values
# {0:
# {'train':
# {'mae': [1.0, 1.1, 2.1, 3.2, 1.0, 2.1, 3.1, 2.1, 2,2, 2.3],
# 'mse': [1.0, 1.1, 2.1, 3.2, 1.0, 2.1, 3.1, 2.1, 2,2, 2.3]}
# 'test':
# {'mae': [1.0, 1.1, 2.1, 3.2, 1.0, 2.1, 3.1, 2.1, 2,2, 2.3],
# 'mse': [1.0, 1.1, 2.1, 3.2, 1.0, 2.1, 3.1, 2.1, 2,2, 2.3]}},
# 1:
# {'train':
# {'mae': [1.0, 1.1, 2.1, 3.2, 1.0, 2.1, 3.1, 2.1, 2,2, 2.3],
# 'mse': [1.0, 1.1, 2.1, 3.2, 1.0, 2.1, 3.1, 2.1, 2,2, 2.3]}
# 'test':
# {'mae': [1.0, 1.1, 2.1, 3.2, 1.0, 2.1, 3.1, 2.1, 2,2, 2.3],
# 'mse': [1.0, 1.1, 2.1, 3.2, 1.0, 2.1, 3.1, 2.1, 2,2, 2.3]}},
# 1:
# {'train':
# {'mae': [1.0, 1.1, 2.1, 3.2, 1.0, 2.1, 3.1, 2.1, 2,2, 2.3],
# 'mse': [1.0, 1.1, 2.1, 3.2, 1.0, 2.1, 3.1, 2.1, 2,2, 2.3]}
# 'test':
# {'mae': [1.0, 1.1, 2.1, 3.2, 1.0, 2.1, 3.1, 2.1, 2,2, 2.3],
# 'mse': [1.0, 1.1, 2.1, 3.2, 1.0, 2.1, 3.1, 2.1, 2,2, 2.3]}}
# }
def temporal_split(X, y, splitter_col="day_of_week"):
X_splits = []
y_splits = []
Xt = X.copy(deep=True)
yt = y.copy(deep=True)
split_flags = sorted(Xt[splitter_col].unique())
for split_flag in split_flags:
X_split = Xt[Xt[splitter_col] == split_flag]
X_splits.append(X_split)
y_splits.append(y.loc[X_split.index])
return X_splits, y_splits
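# Example (hedged): with a 'day_of_week' column holding values 0-6, temporal_split returns
# one (X_split, y_split) pair per weekday, aligned on the same index:
#   X_splits, y_splits = temporal_split(X, y, splitter_col='day_of_week')
#   assert len(X_splits) == X['day_of_week'].nunique()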
def run_data_split_cross_val(
X, y, data_splitter_col, cv_splitter, model, scoring=["mae"]
):
scores_dict = {}
X_splits, y_splits = temporal_split(X, y, data_splitter_col)
for (indx_splitter, X_split, y_split) in zip(
sorted(X[data_splitter_col].unique()), X_splits, y_splits
):
scores_dict[indx_splitter] = run_cross_val(
X_split, y_split, cv_splitter, model, scoring=scoring
)
return scores_dict
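# Example usage (hedged sketch): synthetic daily data split by day-of-week, then
# cross-validated per weekday with a rolling annual splitter and a plain linear model
# (sklearn's LinearRegression stands in for any fit/predict estimator).
if __name__ == '__main__':
    from sklearn.linear_model import LinearRegression
    _idx = pd.date_range('2012-01-01', '2019-12-31', freq='D')
    _X = pd.DataFrame({'day_of_week': _idx.dayofweek, 'x1': range(len(_idx))}, index=_idx)
    _y = pd.Series(range(len(_idx)), index=_idx, dtype=float)
    _scores = run_data_split_cross_val(
        _X, _y, 'day_of_week',
        RollingAnnualTimeSeriesSplit(n_splits=2, goback_years=3),
        LinearRegression(), scoring=['mae'])
    print({day: fold['test']['mae'] for day, fold in _scores.items()})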
def save_run_results(X, n_splits, model_str, some_dict, save_path):
"""
Accepts a dataset used for splitting into train and test,
number of splits in a Rolling Annual Cross Validation Scheme
A dictionary of validation results
A save path for a dataframe
Updates the file and saves
Returns the updated file as a DataFrame
"""
if save_path.exists():
orig_df = pd.read_csv(save_path, index_col=0, header=[0,1] )
if model_str in orig_df.columns:
return "Model name already used"
else:
orig_df = None
test_dict_name = 'test'
n_metrics = len(some_dict[test_dict_name].keys())
    df = pd.DataFrame.from_dict(some_dict[test_dict_name])
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = f"index >= '{beg_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = f"index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
        expected = tm.makeTimeDataFrame(100064, "S")
from unittest import TestCase
import pandas as pd
import numpy as np
from moonstone.analysis.statistical_test import (
statistical_test_groups_comparison,
_compute_best_bins_values
)
class TestStatisticalTestFunction(TestCase):
def setUp(self):
self.test_df = pd.Series({
'sample1': 0.72,
'sample2': 1.98,
'sample3': 0.00,
'sample4': 0.00,
'sample5': 4.99
})
def test_mann_whitney_u_groups(self):
metadata_df = pd.DataFrame(
[
['F', 2],
['M', 1],
['F', 1],
['M', 1],
['M', 2]
],
columns=['sex', 'group'],
index=['sample1', 'sample2', 'sample3', 'sample4', 'sample5'],
)
expected_df = pd.DataFrame(
[
[np.nan, 0.374259],
[0.374259, np.nan]
],
columns=[1, 2],
index=[1, 2]
)
matrix = statistical_test_groups_comparison(self.test_df, metadata_df['group'], stat_test='mann_whitney_u')
pd.testing.assert_frame_equal(matrix, expected_df, check_dtype=False)
def test_mann_whitney_u_groups_wrong_metadata(self):
metadata_df = pd.DataFrame(
[
['F', 2],
['M', 1]
],
columns=['sex', 'group'],
index=['sample6', 'sample7'],
)
with self.assertRaises(RuntimeError) as cm:
statistical_test_groups_comparison(self.test_df, metadata_df['group'], stat_test='mann_whitney_u')
the_exception = cm.exception
self.assertEqual(the_exception.__str__(), "All groups have been dropped: not enough observations by group.")
def test_mann_whitney_u_groups_nonsym(self):
metadata_df = pd.DataFrame(
[
['F', 2],
['M', 1],
['F', 1],
['M', 1],
['M', 2]
],
columns=['sex', 'group'],
index=['sample1', 'sample2', 'sample3', 'sample4', 'sample5'],
)
expected_df = pd.DataFrame(
[
[np.nan, 0.374259],
[np.nan, np.nan]
],
columns=[1, 2],
index=[1, 2]
)
matrix = statistical_test_groups_comparison(self.test_df, metadata_df['group'], stat_test='mann_whitney_u',
output='dataframe', sym=False)
        pd.testing.assert_frame_equal(matrix, expected_df, check_dtype=False)
import csv
import requests
import pandas as pd
FRED_UNEMPLOY = 'https://www.quandl.com/api/v3/datasets/UNEMPLOY/GDPDEF/data.csv?api_key=<KEY>'
with requests.Session() as s:
download = s.get(FRED_UNEMPLOY)
decoded_content = download.content.decode('utf-8')
cr = csv.reader(decoded_content.splitlines(), delimiter = ',')
UNEMPLOY_list = list(cr)
for row in UNEMPLOY_list:
print(row)
UNEMPLOY_list = pd.DataFrame(UNEMPLOY_list)
# Simplified and slightly modularized from:
# https://github.com/LeonardoL87/SARS-CoV-2-Model-with-and-without-temperature-dependence
import pickle
import datetime
import math as m
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
from typing import Callable
from functools import partial
from scipy import interpolate
from scipy.signal import savgol_filter
from scipy.interpolate import interp1d
from lmfit import minimize, Parameters, Parameter, report_fit, Model, Minimizer
N_POPS = {'Barcelona': 2.227e6,
'Catalunya': 7780479,
'Lombardia': 10060574,
'Thuringen': 2392000
}
dt = 1 / 24
def T_inv(T, date_T, ini: int):
T = pd.concat([T, T])
T_hat = savgol_filter(T, 51, 3) # window size 51, polynomial order 3
pos = date_T[ini].day + 30 * date_T[ini].month - 30
T_inv = 1 - T / np.mean(T) + 1.5
T_inv = T_inv[pos:len(T_inv) - 1]
T_inv_hat = 1 - T_hat / np.mean(T_hat) + 1.5
T_inv_hat = T_inv_hat[pos:len(T_inv_hat) - 1]
t = np.arange(len(T_inv))
T_inv = T_inv_hat
return [t, T_inv]
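# Example (hedged): with a one-year daily temperature Series `T` and the matching list of
# dates `date_T`, e.g.
#   t, T_forcing = T_inv(T, date_T, ini=60)
# returns the smoothed, inverse-normalised temperature forcing starting at date_T[60].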
def SEIQRDP(t, y, ps, beta_func: Callable, total_pop: int):
"""
Differential equation system. Can be formulated from the ODE system. Returns a column vector
with the system at time t
"""
delta = ps['delta'].value
alpha = ps['alpha0'].value * m.exp(-ps['alpha1'].value * t)
betQ = beta_func(ps['betaQ'].value, t)
bet = beta_func(ps['beta'].value, t)
gamma = ps['gamma'].value
Lambda = ps['lambda0'].value * (1. - m.exp(-ps['lambda1'].value * t))
kappa = ps['kappa0'].value * m.exp(-ps['kappa1'].value * t)
tau = ps['tau0'].value * (1. - m.exp(-ps['tau1'].value * t))
rho = 0
N = total_pop
S, E, I, Q, R, D, P, V = np.clip(y, a_min=0, a_max=None)
beta = (1 / N) * (bet * I + betQ * Q)
e_frac = (1.0 - m.exp(-beta * dt)) # |exposed prob
i_frac = (1.0 - m.exp(-gamma * dt)) # |infection prob
r_frac = (1.0 - m.exp(-Lambda * dt)) # |recovered prob
p_frac = (1.0 - m.exp(-alpha * dt)) # |protected prob
d_frac = (1.0 - m.exp(-kappa * dt)) # |death prob
rel_frac = (1.0 - m.exp(-tau * dt)) # |release prob
rep_frac = (1.0 - m.exp(-delta * dt)) # |detected prob
vac_frac = (1.0 - m.exp(-rho * dt)) # |vaccinated prob
exposed = np.random.binomial(S, e_frac)
protected = np.random.binomial(S, p_frac)
infection = np.random.binomial(E, i_frac)
detected = np.random.binomial(I, rep_frac)
recovery = np.random.binomial(Q, r_frac)
deaths = np.random.binomial(Q, d_frac)
released = np.random.binomial(P, rel_frac)
vaccinated = np.random.binomial(S, vac_frac)
S = S - exposed - protected + released - vaccinated # | Susceptible
E = E + exposed - infection # | Exposed
I = I + infection - detected # | Infected
Q = Q + detected - recovery - deaths # | Detected
R = R + recovery # | Recovered
D = D + deaths # | Deaths
P = P + protected - released # | Protected
V = V + vaccinated # | Total Cases
return [S, E, I, Q, R, D, P, V]
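# Illustrative sketch (added for clarity; not part of the original model code): the update above
# converts each continuous rate into a per-step probability via p = 1 - exp(-rate * dt) and then
# draws a binomial sample. The rate and compartment size below are hypothetical.
def _demo_rate_to_probability(rate=0.3, compartment_size=1000):
    p_step = 1.0 - m.exp(-rate * dt)  # probability that one individual transitions within a step of length dt
    expected_transitions = compartment_size * p_step
    sampled_transitions = np.random.binomial(compartment_size, p_step)
    return p_step, expected_transitions, sampled_transitions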
def simulate(t, u, ps, beta_func: Callable, total_pop: int):
"""Returns a matrix with the dynamic of each population"""
S = np.zeros(len(t))
E = np.zeros(len(t))
I = np.zeros(len(t))
Q = np.zeros(len(t))
R = np.zeros(len(t))
D = np.zeros(len(t))
P = np.zeros(len(t))
Y = np.zeros(len(t))
for j in range(len(t)):
u = SEIQRDP(t[j], u, ps, beta_func=beta_func, total_pop=total_pop)
S[j], E[j], I[j], Q[j], R[j], D[j], P[j], Y[j] = u
return {'t': t, 'S': S, 'E': E, 'I': I, 'Q': Q, 'R': R, 'D': D, 'P': P, 'V': Y}
def interpolation(y, t, ti):
"""Single interpolation of 1 vector"""
f = interpolate.interp1d(t, y, kind='nearest')
f2 = f(ti)
return f2
def sys_interpolation(Y, t, ti):
col = Y.columns
datcol = col[1:len(col)]
y_interp = {}
y_interp['t'] = ti
for i in datcol:
yi = Y[str(i)].to_numpy()
f2 = interpolation(yi, t, ti)
y_interp[str(i)] = f2
return y_interp
def COVID_SEIRC(y, t, ps, beta_func: Callable, total_pop: int):
"""Definition of the model in deterministic way for fitting purposes"""
alpha0 = ps['alpha0'].value
alpha1 = ps['alpha1'].value
bet = ps['beta'].value
betQ = ps['betaQ'].value
gam = ps['gamma'].value
delt = ps['delta'].value
lambda0 = ps['lambda0'].value
lambda1 = ps['lambda1'].value
kappa0 = ps['kappa0'].value
kappa1 = ps['kappa1'].value
tau0 = ps['tau0'].value
tau1 = ps['tau1'].value
rho = 0
S, E, I, Q, R, D, C, V = y
alpha = lambda x: alpha0 * m.exp(-alpha1 * x)
beta = partial(beta_func, b=bet)
betaQ = partial(beta_func, b=betQ)
gamma = gam
delta = delt
Lambda = lambda x: lambda0 * (1. - m.exp(-lambda1 * x))
kappa = lambda x: kappa0 * m.exp(-kappa1 * x)
tau = lambda x: tau0 * (1. - m.exp(-tau1 * x))
BETA = (beta(t) * I + betaQ(t) * Q) * 1 / total_pop
# ___________equations___________________________________
dS = tau(t) * C - alpha(t) * S - BETA * S - rho * S
dE = -gamma * E + BETA * S
dI = gamma * E - delta * I
dQ = delta * I - Lambda(t) * Q - kappa(t) * Q
dR = Lambda(t) * Q
dD = kappa(t) * Q
dC = alpha(t) * S - tau(t) * C
dV = rho * S
return dS, dE, dI, dQ, dR, dD, dC, dV
def run_model(beta_type: 'str', location: str, params_path, n_iter: int = 100, seed: int = 23,
keep_top: int = None):
keep_top = n_iter if keep_top is None else keep_top
np.random.seed(seed)
with open(params_path, 'rb') as fh:
ini, time, active, confirmed, recovered, deaths, _, T, date_T, params_set = pickle.load(fh)
tb, beta = T_inv(T, date_T, ini=ini)
total_pop = N_POPS[location]
f = interp1d(tb, beta, kind='cubic')
beta_terms = {'constant': lambda b, x: b,
'seasonal': lambda b, x: b * (1 + np.sin(2 * np.pi * x * 1 / 360)),
'temperature': lambda b, x: b * f(x)
}
beta_func = beta_terms[beta_type]
E0 = active[ini]
I0 = active[ini]
Q0 = active[ini]
R0 = recovered[ini]
D0 = deaths[ini]
P0 = 0
V0 = 0
S0 = total_pop - E0 - I0 - Q0 - R0 - D0 - P0 - V0
outputs = list()
squared_errors = []
for i in tqdm(np.arange(n_iter), desc='Iterations', leave=False):
# ===============SETTING THE ORIGINAL SET OF PARAMETERS===========================
dt = 1 / 24
y0 = [S0, E0, I0, Q0, R0, D0, P0, V0]
tf = len(time)
tl = int(tf / dt)
t = np.linspace(0, tf - 1, tl)
paropt = params_set[i]
params_set.append(paropt)
sir_out = pd.DataFrame(simulate(t, y0, paropt, beta_func, total_pop=total_pop))
ti = np.linspace(t[0], t[len(t) - 1], int(t[len(t) - 1] - t[0]) + 1)
sir_out = pd.DataFrame(sys_interpolation(sir_out, t, ti))
squared_error = (sir_out[['Q', 'R', 'D']].sum(axis=1) - confirmed).pow(2).sum()
squared_errors.append(squared_error)
outputs.append(sir_out)
squared_errors = np.array(squared_errors)
best_fits = np.argsort(squared_errors)[:keep_top]
results = np.array(outputs)[best_fits]
vector = results.mean(axis=0)[:, :-1]
std = results.std(axis=0)[:, :-1]
t, s, e, i, q, r, d, p = std.transpose()
t, S, E, I, Q, R, D, P = vector.transpose()
TC = Q + R + D
tc = q + r + d
results_df = (pd.concat([pd.DataFrame(vector),
| pd.Series(TC) | pandas.Series |
import numpy
import pandas
import scipy
import sklearn.metrics as metrics
from sklearn.model_selection import train_test_split
import statsmodels.api as stats
# The SWEEP Operator
def SWEEPOperator (pDim, inputM, tol):
# pDim: dimension of matrix inputM, positive integer
# inputM: a square and symmetric matrix, numpy array
# tol: singularity tolerance, positive real
aliasParam = []
nonAliasParam = []
    A = numpy.array(inputM, copy = True, dtype = float)
    diagA = numpy.diagonal(A).copy()  # snapshot of the original diagonal for the tolerance check
for k in range(pDim):
akk = A[k,k]
if (akk >= (tol * diagA[k])):
nonAliasParam.append(k)
for i in range(pDim):
if (i != k):
for j in range(pDim):
if (j != k):
A[i,j] = A[i,j] - A[i,k] * (A[k,j] / akk)
A[j,i] = A[i,j]
A[i,k] = A[i,k] / akk
A[k,i] = A[i,k]
A[k,k] = - 1.0 / akk
else:
aliasParam.append(k)
for i in range(pDim):
A[i,k] = 0.0
A[k,i] = 0.0
return A, aliasParam, nonAliasParam
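# Illustrative usage sketch (added; not part of the original script): with an exactly collinear
# design matrix, the redundant column is expected to be reported in aliasParam. The toy matrix
# below is hypothetical; only numpy (imported above) is used.
def _demo_sweep_operator():
    X = numpy.array([[1.0, 2.0, 4.0],
                     [1.0, 3.0, 6.0],
                     [1.0, 5.0, 10.0],
                     [1.0, 7.0, 14.0]])  # third column is exactly 2 x second column
    XtX = numpy.transpose(X).dot(X)
    invXtX, aliasParam, nonAliasParam = SWEEPOperator(pDim = 3, inputM = XtX, tol = 1e-7)
    return aliasParam, nonAliasParam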
# A function that finds the non-aliased columns, fits a logistic model, and returns the full parameter estimates
def build_mnlogit (fullX, y):
# Find the non-redundant columns in the design matrix fullX
nFullParam = fullX.shape[1]
XtX = numpy.transpose(fullX).dot(fullX)
invXtX, aliasParam, nonAliasParam = SWEEPOperator(pDim = nFullParam, inputM = XtX, tol = 1e-13)
# Build a multinomial logistic model
X = fullX.iloc[:, list(nonAliasParam)]
logit = stats.MNLogit(y, X)
thisFit = logit.fit(method='ncg', maxiter = 1000, xtol = 1e-8,
full_output = True, disp = True)
thisParameter = thisFit.params
thisLLK = thisFit.llf
# The number of free parameters
y_category = y.cat.categories
nYCat = len(y_category)
thisDF = len(nonAliasParam) * (nYCat - 1)
# Return model statistics
return (thisLLK, thisDF, thisParameter, thisFit)
inputData = pandas.read_csv('C:\\Users\\minlam\\Documents\\IIT\\Machine Learning\\Data\\policy_2001.csv',
delimiter=',',
usecols = ['CLAIM_FLAG', 'CREDIT_SCORE_BAND', 'BLUEBOOK_1000', 'CUST_LOYALTY', 'MVR_PTS', 'TIF', 'TRAVTIME'])
# Print number of missing values per variable
print('Number of Missing Values:')
print(pandas.Series.sort_index(inputData.isna().sum()))
# Specify CLAIM_FLAG as a categorical variable
inputData['CLAIM_FLAG'] = inputData['CLAIM_FLAG'].astype('category')
y_category = inputData['CLAIM_FLAG'].cat.categories
nYCat = len(y_category)
# Specify CREDIT_SCORE_BAND as a categorical variable
inputData['CREDIT_SCORE_BAND'] = inputData['CREDIT_SCORE_BAND'].astype('category')
# Create Training and Test partitions
policy_train, policy_test = train_test_split(inputData, test_size = 0.33, random_state = 20201014, stratify = inputData['CLAIM_FLAG'])
nObs_train = policy_train.shape[0]
nObs_test = policy_test.shape[0]
# Build the logistic model
y = policy_train['CLAIM_FLAG']
# Train a Logistic Regression model using the Forward Selection method
devianceTable = pandas.DataFrame()
u = pandas.DataFrame()
# Step 0: Intercept only model
u = y.isnull()
designX = pandas.DataFrame(u.where(u, 1)).rename(columns = {'CLAIM_FLAG': 'const'})
LLK0, DF0, fullParams0, thisFit = build_mnlogit (designX, y)
devianceTable = devianceTable.append([[0, 'Intercept', DF0, LLK0, None, None, None]])
# Consider Model 1 is CLAIM_FLAG = Intercept + <predictor>
predList = ['CREDIT_SCORE_BAND', 'BLUEBOOK_1000', 'CUST_LOYALTY', 'MVR_PTS', 'TIF', 'TRAVTIME']
step = 1.0
for pred in predList:
step += 0.1
thisVar = policy_train[pred]
dType = thisVar.dtypes.name
if (dType == 'category'):
designX = pandas.get_dummies(thisVar)
else:
designX = thisVar
designX = stats.add_constant(designX, prepend=True)
LLK1, DF1, fullParams1, thisFit = build_mnlogit (designX, y)
testDev = 2.0 * (LLK1 - LLK0)
testDF = DF1 - DF0
testPValue = scipy.stats.chi2.sf(testDev, testDF)
devianceTable = devianceTable.append([[step, 'Intercept + ' + pred,
DF1, LLK1, testDev, testDF, testPValue]])
# Step 1: Model is CLAIM_FLAG = Intercept + MVR_PTS
designX = policy_train[['MVR_PTS']]
designX = stats.add_constant(designX, prepend=True)
LLK0, DF0, fullParams0, thisFit = build_mnlogit (designX, y)
devianceTable = devianceTable.append([[1, 'Intercept + MVR_PTS',
DF0, LLK0, None, None, None]])
# Consider Model 2 is CLAIM_FLAG = Intercept + MVR_PTS + <predictor>
predList = ['CREDIT_SCORE_BAND', 'BLUEBOOK_1000', 'CUST_LOYALTY', 'TIF', 'TRAVTIME']
step = 2.0
for pred in predList:
step += 0.1
designX = policy_train[['MVR_PTS']]
thisVar = policy_train[pred]
dType = thisVar.dtypes.name
if (dType == 'category'):
designX = designX.join(pandas.get_dummies(thisVar))
else:
designX = designX.join(thisVar)
designX = stats.add_constant(designX, prepend=True)
LLK1, DF1, fullParams1, thisFit = build_mnlogit (designX, y)
testDev = 2.0 * (LLK1 - LLK0)
testDF = DF1 - DF0
testPValue = scipy.stats.chi2.sf(testDev, testDF)
devianceTable = devianceTable.append([[step, 'Intercept + MVR_PTS + ' + pred,
DF1, LLK1, testDev, testDF, testPValue]])
# Step 2: Model is CLAIM_FLAG = Intercept + MVR_PTS + BLUEBOOK_1000
designX = policy_train[['MVR_PTS','BLUEBOOK_1000']]
designX = stats.add_constant(designX, prepend=True)
LLK0, DF0, fullParams0, thisFit = build_mnlogit (designX, y)
devianceTable = devianceTable.append([[2, 'Intercept + MVR_PTS + BLUEBOOK_1000',
DF0, LLK0, None, None, None]])
# Consider Model 3 is CLAIM_FLAG = Intercept + MVR_PTS + BLUEBOOK_1000 + <predictor>
predList = ['CREDIT_SCORE_BAND', 'CUST_LOYALTY', 'TIF', 'TRAVTIME']
step = 3.0
for pred in predList:
step += 0.1
designX = policy_train[['MVR_PTS','BLUEBOOK_1000']]
thisVar = policy_train[pred]
dType = thisVar.dtypes.name
if (dType == 'category'):
designX = designX.join(pandas.get_dummies(thisVar))
else:
designX = designX.join(thisVar)
designX = stats.add_constant(designX, prepend=True)
LLK1, DF1, fullParams1, thisFit = build_mnlogit (designX, y)
testDev = 2.0 * (LLK1 - LLK0)
testDF = DF1 - DF0
testPValue = scipy.stats.chi2.sf(testDev, testDF)
devianceTable = devianceTable.append([[step, 'Intercept + MVR_PTS + BLUEBOOK_1000 + ' + pred,
DF1, LLK1, testDev, testDF, testPValue]])
# Step 3: Model is CLAIM_FLAG = Intercept + MVR_PTS + BLUEBOOK_1000 + TRAVTIME
designX = policy_train[['MVR_PTS','BLUEBOOK_1000','TRAVTIME']]
designX = stats.add_constant(designX, prepend=True)
LLK0, DF0, fullParams0, thisFit = build_mnlogit (designX, y)
devianceTable = devianceTable.append([[3, 'Intercept + MVR_PTS + BLUEBOOK_1000 + TRAVTIME',
DF0, LLK0, None, None, None]])
# Consider Model 4 is CLAIM_FLAG = Intercept + MVR_PTS + BLUEBOOK_1000 + TRAVTIME + <predictor>
predList = ['CREDIT_SCORE_BAND', 'CUST_LOYALTY', 'TIF']
step = 4.0
for pred in predList:
step += 0.1
designX = policy_train[['MVR_PTS','BLUEBOOK_1000','TRAVTIME']]
thisVar = policy_train[pred]
dType = thisVar.dtypes.name
if (dType == 'category'):
designX = designX.join( | pandas.get_dummies(thisVar) | pandas.get_dummies |
#%%
import argparse
import os
import tempfile
import mlflow
import mlflow.pytorch
import numpy as np
import optuna
import pandas as pd
import torch
import torch.optim as optim
import torch.utils.data as data
import yaml
from dlkit import models
from dlkit.criterions import Criterion
from estimator import Estimator
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from tqdm import tqdm, trange
from utils import geometry, modules
from utils.dataloader import NpLaparoDataset
from utils.transformer import ScaleTransformer
#%%
def config():
parser = argparse.ArgumentParser()
parser.add_argument("ds_num", type=int, help="d`ataset number")
parser.add_argument("model", help="model name")
parser.add_argument("-b", "--batch-size", default=16, type=int, help="batch size")
parser.add_argument("-e", "--n_epochs", default=100, type=int, help="epochs")
parser.add_argument("-d", "--device", default=0, type=int)
parser.add_argument("-try", "--n_trials", default=3, type=int)
parser.add_argument(
"--pretrained",
action="store_true",
help="Use the model that is pretrained by myself",
)
parser.add_argument("-w", "--n_workers", default=4, type=int)
parser.add_argument("-exp", "--exp_name", default="Debug")
parser.add_argument("--a1", default=1.0, type=float, help="Coefficient of l_2d")
parser.add_argument("--a2", default=1.0, type=float, help="Coefficient of l_orient")
parser.add_argument("--a3", default=1.0, type=float, help="Coefficient of l_phi")
parser.add_argument("--a4", default=1.0, type=float, help="Coefficient of l_gamma")
parser.add_argument(
"--optuna", action="store_true", help="Hyperparameter optimization [False]"
)
return parser
cfg = config().parse_args()
writer = None
class Trainer:
"""Train a model."""
def __init__(self, model, exp_name=cfg.exp_name):
"""
Args:
model (nn.Module): Model name.
exp_name (object, optional): Experiment name. Defaults to cfg.exp_name.
"""
self.device = torch.device(
"cuda:{}".format(cfg.device) if torch.cuda.is_available() else "cpu"
)
self.params = ["trans3d", "trans2d", "orient", "roll", "joint"]
self.scaler = ScaleTransformer(cfg.ds_num)
self.model = model
self.model.to(self.device)
self.loss_weights = {
"trans3d": 1.0,
"trans2d": cfg.a1,
"orient": cfg.a2,
"roll": cfg.a3,
"joint": cfg.a4,
}
# TODO: Read from model_config.yaml
# summary(self.model, (3, 224, 224))
torch.backends.cudnn.benchmark = True
mlflow.set_experiment(cfg.exp_name)
def run(self, trial):
"""
Return:
Best epoch loss through one trial
Execute learning with fixed hyper parameters.
"""
min_loss = np.Inf
with mlflow.start_run():
for key, value in vars(cfg).items():
mlflow.log_param(key, value)
lr = self._get_lr(trial)
optimizer = optim.Adam(self.model.parameters(), lr=lr)
criterions = self._get_criterion(trial)
with trange(cfg.n_epochs) as epoch_bar:
for epoch in epoch_bar:
for phase in ["train", "val"]:
epoch_bar.set_description(
"[{}] Epoch {}".format(phase.title().rjust(5), epoch + 1)
)
epoch_loss = self._train(epoch, phase, criterions, optimizer)
epoch_bar.set_postfix(loss=epoch_loss)
# Log weights when the minimum loss is updated
if phase == "val" and epoch_loss < min_loss:
min_loss = epoch_loss
mlflow.pytorch.log_model(self.model, "best_model")
mlflow.log_artifacts(output_dir, artifact_path="best_model")
mlflow.log_metric("best epoch", epoch + 1)
# Save weights
torch.save(model.state_dict(), output_dir + "/weight.pth")
mlflow.pytorch.log_model(self.model, "model")
self._test()
mlflow.log_artifacts(output_dir, artifact_path="model")
return min_loss
def _train(self, epoch, phase, criterions, optimizer=None):
if phase == "train":
self.model.train()
else:
self.model.eval()
sum_train_loss = torch.zeros(6).to(self.device)
sum_val_loss = torch.zeros(6).to(self.device)
sum_loss_dict = {"train": sum_train_loss, "val": sum_val_loss}
epoch_loss_dict = {"train": None, "val": None}
for inputs, targets in dataloader_dict[phase]:
inputs, targets = inputs.to(self.device), targets.to(self.device)
with torch.set_grad_enabled(phase == "train"):
optimizer.zero_grad()
outputs = self.model(inputs)
pred = self._split_param(outputs)
label = self._split_param(targets)
loss = 0.0
for param, criterion in criterions.items():
param_loss = criterion(pred[param], label[param])
loss += self.loss_weights[param] * param_loss
sum_loss_dict[phase][self.params.index(param) + 1] += param_loss
sum_loss_dict[phase][0] += loss
errors = calc_errors(pred, label, self.scaler)
# Update weights
if phase == "train":
loss.backward()
optimizer.step()
# Calculate the loss through one epoch
epoch_loss_dict[phase] = sum_loss_dict[
phase
].detach().cpu().numpy().copy() / len(dataloader_dict[phase])
logparam_list = ["all", *self.params]
for i, param_name in enumerate(logparam_list):
self._log_scalar(
"Loss_{}/{}".format(param_name.title(), phase),
epoch_loss_dict[phase][i],
epoch,
)
return epoch_loss_dict[phase][0]
def _test(self):
print("\n\nStart Testing...\n")
run_uri = mlflow.get_artifact_uri() + "/model"
estimator = Estimator(
mode=2, ds_num=cfg.ds_num, run_uri=run_uri, transform=transform
)
target_df = val_ds.dataframe.drop(columns=["alpha", "beta"])
target_df["z"] = -target_df["z"]
value_list = []
for i in trange(len(target_df)):
im_path = "./Database/ds_{:03d}/val/img_{:05d}.jpg".format(
cfg.ds_num, i + 1
)
im = Image.open(im_path)
value = estimator(im)
value_list.append(value)
columns = ["x", "y", "z", "x_2d", "y_2d", "nx", "ny", "nz", "gamma", "phi"]
pred_df = | pd.DataFrame(value_list, columns=columns) | pandas.DataFrame |
# coding: utf-8
# # Bike Sharing Dataset Linear Modeling
#
# + Based on Bike Sharing dataset from [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset)
# + This notebook is based upon the hourly data file, i.e. hour.csv
# + This notebook showcases linear modeling using linear regression
# ### Problem Statement
# Given the Bike Sharing dataset with hourly level information of bikes along with weather and other attributes, model a system which can predict the bike count.
# ## Import required packages
# In[1]:
get_ipython().magic('matplotlib inline')
# data manipulation
import numpy as np
import pandas as pd
# modeling utilities
import scipy.stats as stats
from sklearn import metrics
from sklearn import preprocessing
from sklearn import linear_model
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_predict
# plotting libraries
import matplotlib.pyplot as plt
import seaborn as sn
sn.set_style('whitegrid')
sn.set_context('talk')
params = {'legend.fontsize': 'x-large',
'figure.figsize': (30, 10),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
plt.rcParams.update(params)
# ## Load Dataset
# In[2]:
hour_df = pd.read_csv('hour.csv')
print("Shape of dataset::{}".format(hour_df.shape))
# ## Preprocessing
# + Standarize column names
# + Typecast attributes
# + Encode Categoricals using One Hot Encoding
# ### Standarize Column Names
# In[3]:
hour_df.rename(columns={'instant':'rec_id',
'dteday':'datetime',
'holiday':'is_holiday',
'workingday':'is_workingday',
'weathersit':'weather_condition',
'hum':'humidity',
'mnth':'month',
'cnt':'total_count',
'hr':'hour',
'yr':'year'},inplace=True)
# ### Typecast Attributes
# In[4]:
# date time conversion
hour_df['datetime'] = pd.to_datetime(hour_df.datetime)
# categorical variables
hour_df['season'] = hour_df.season.astype('category')
hour_df['is_holiday'] = hour_df.is_holiday.astype('category')
hour_df['weekday'] = hour_df.weekday.astype('category')
hour_df['weather_condition'] = hour_df.weather_condition.astype('category')
hour_df['is_workingday'] = hour_df.is_workingday.astype('category')
hour_df['month'] = hour_df.month.astype('category')
hour_df['year'] = hour_df.year.astype('category')
hour_df['hour'] = hour_df.hour.astype('category')
#
# ### Encode Categoricals (One Hot Encoding)
# In[5]:
def fit_transform_ohe(df,col_name):
"""This function performs one hot encoding for the specified
column.
Args:
df(pandas.DataFrame): the data frame containing the mentioned column name
col_name: the column to be one hot encoded
Returns:
        tuple: label_encoder, one_hot_encoder, one-hot encoded features as a pandas DataFrame
"""
# label encode the column
le = preprocessing.LabelEncoder()
le_labels = le.fit_transform(df[col_name])
df[col_name+'_label'] = le_labels
# one hot encoding
ohe = preprocessing.OneHotEncoder()
feature_arr = ohe.fit_transform(df[[col_name+'_label']]).toarray()
feature_labels = [col_name+'_'+str(cls_label) for cls_label in le.classes_]
features_df = pd.DataFrame(feature_arr, columns=feature_labels)
return le,ohe,features_df
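# Illustrative usage sketch (added; not part of the original notebook): fit the encoders on a
# training frame, then reuse them on new data with transform_ohe defined below. The tiny frames
# and the 'season' column are hypothetical.
def _demo_ohe_roundtrip():
    train = pd.DataFrame({'season': [1, 2, 3, 4, 1]})
    test = pd.DataFrame({'season': [2, 4]})
    le, ohe, train_ohe = fit_transform_ohe(train, 'season')
    test_ohe = transform_ohe(test, le, ohe, 'season')
    return train_ohe, test_ohe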
# given label encoder and one hot encoder objects,
# encode attribute to ohe
def transform_ohe(df,le,ohe,col_name):
"""This function performs one hot encoding for the specified
column using the specified encoder objects.
Args:
df(pandas.DataFrame): the data frame containing the mentioned column name
le(Label Encoder): the label encoder object used to fit label encoding
        ohe(One Hot Encoder): the one hot encoder object used to fit one hot encoding
col_name: the column to be one hot encoded
Returns:
        the one-hot encoded features as a pandas DataFrame
"""
# label encode
col_labels = le.transform(df[col_name])
df[col_name+'_label'] = col_labels
# ohe
    feature_arr = ohe.transform(df[[col_name+'_label']]).toarray()  # reuse the already-fitted encoder
feature_labels = [col_name+'_'+str(cls_label) for cls_label in le.classes_]
features_df = | pd.DataFrame(feature_arr, columns=feature_labels) | pandas.DataFrame |
import pandas as pd
def comparacao(a, b):
if (a == b):
return 'a linha E igual'
else:
return 'a linha NAO e igual'
df = | pd.read_csv('arq.csv', ';', header=0, usecols=["Titulo", "titulo"]) | pandas.read_csv |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
# Copyright 2020 The Many Constraints Neurips 2020 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Cross-group ranking fairness experiments with per-query constraints."""
import math
import random
import sys
from absl import app
from absl import flags
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_constrained_optimization as tfco
FLAGS = flags.FLAGS
flags.DEFINE_string('save_to_dir', 'tmp/', 'save the output to the path')
flags.DEFINE_string('input',
'https://www.microsoft.com/en-us/research/project/mslr/',
'path to unzipped MSLR-WEB10K input file')
flags.DEFINE_string('prefix', 'sim', 'identicating prefix of saved files')
flags.DEFINE_float('learning_rate', 0.05, 'learning rate')
flags.DEFINE_integer('loops', 100, 'num of loops')
flags.DEFINE_integer('train_size', 1000, 'num of training queries')
flags.DEFINE_integer('microloops', 1000, 'num of microloops within a loop')
flags.DEFINE_string('optimizer', 'adagrad', 'optimizer')
flags.DEFINE_string('constraint_type', 'cross_group_equal_opportunity',
'constraint_type')
flags.DEFINE_integer('id', 0, 'variable for manual parallelism')
flags.DEFINE_string('type', 'unc', 'unc, tfco or new')
def pair_pos_neg_docs(data, max_num_pairs=10000, max_query_bandwidth=20):
"""Returns pairs of positive-negative docs from given DataFrame."""
# Include a row number
data.insert(0, 'tmp_row_id', list(range(data.shape[0])))
# Separate pos and neg docs.
pos_docs = data[data.label == 1]
if pos_docs.empty:
return
neg_docs = data[data.label == 0]
if neg_docs.empty:
return
# Include a merge key.
pos_docs.insert(0, 'merge_key', 0)
neg_docs.insert(0, 'merge_key', 0)
# Merge docs and drop merge key column.
pairs = pos_docs.merge(
neg_docs, on='merge_key', how='outer', suffixes=('_pos', '_neg'))
pairs = pairs[np.abs(pairs['tmp_row_id_pos'] -
pairs['tmp_row_id_neg']) <= max_query_bandwidth]
pairs.drop(
columns=['merge_key', 'tmp_row_id_pos', 'tmp_row_id_neg'], inplace=True)
if pairs.shape[0] > max_num_pairs:
pairs = pairs.sample(n=max_num_pairs, axis=0, random_state=543210)
return pairs
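# Illustrative sketch (added; not part of the original experiment): pair each relevant doc
# (label 1) with nearby non-relevant docs (label 0) from the same query slice. The toy frame and
# its columns are hypothetical.
def _demo_pair_pos_neg_docs():
  toy = pd.DataFrame({'label': [1, 0, 0, 1], 'feature_1': [0.9, 0.1, 0.2, 0.8]})
  pairs = pair_pos_neg_docs(toy, max_num_pairs=10, max_query_bandwidth=2)
  # each row pairs one positive doc (columns suffixed _pos) with one negative doc (_neg)
  return pairs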
def convert_labeled_to_paired_data(data_dict,
index=None,
max_num_pairs=200,
max_query_bandwidth=200):
"""Convert data arrays to pandas DataFrame with required column names."""
features = data_dict['features']
labels = data_dict['labels']
groups = data_dict['groups']
queries = data_dict['queries']
if index is not None:
data_df = pd.DataFrame(features[queries == index, :])
data_df = data_df.assign(label=pd.DataFrame(labels[queries == index]))
data_df = data_df.assign(group=pd.DataFrame(groups[queries == index]))
data_df = data_df.assign(query_id=pd.DataFrame(queries[queries == index]))
else:
data_df = pd.DataFrame(features)
data_df = data_df.assign(label= | pd.DataFrame(labels) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
import itertools
def dados_teses():
diretorio = "/media/hdvm02/bd/007/002/007/002"
teses_anos = sorted(os.listdir(diretorio))
lista_dfs = []
for tese_ano in teses_anos:
csv = os.path.join(diretorio,tese_ano)
teses = pd.read_csv(csv, sep=";", encoding='latin-1', on_bad_lines='skip', low_memory=False)
lista_dfs.append(teses)
print(tese_ano)
#print(teses.columns.tolist())
#print(teses.shape)
print(teses.dtypes)
#print(teses.tail())
#print(teses.describe())
print("########")
print("########")
#dataset_teses = pd.concat(lista_dfs, ignore_index=True)
#dataset_teses.to_csv(f'{diretorio}/01_dados_1987_2012.csv', index=False)
#print(dataset_teses)
#return dataset_teses
def analisa_teses():
diretorio = "/media/hdvm02/bd/007/002/007/002"
#df_teses = dados_teses()
teses = pd.read_csv(f'{diretorio}/01_dados_1987_2012.csv')
busca = teses[teses["TituloTese"].str.contains("mercosul|mercosur", case=False, na=False)]
#print(busca)
agrupar_por_ano = busca["TituloTese"].groupby(busca["AnoBase"])
busca_por_ano = agrupar_por_ano.count()
#print(busca_por_ano)
quant_termos = busca_por_ano.values.tolist()
anos = busca_por_ano.index.tolist()
fig_pandas = busca_por_ano.plot(kind="line", x=anos, y=quant_termos)
print(fig_pandas)
def gera_dataframes():
diretorio = "/media/hdvm02/bd/007/002/007/002"
anos = sorted(os.listdir(diretorio))
anos_1987_2012 = anos[1:27]
anos_2013_2016 = anos[27:31]
anos_2017_2021 = anos[31:]
df_1987_2012 = []
df_2013_2016 = []
df_2017_2021 = []
df_geral = []
for ano_1987, ano_2013, ano_2017 in itertools.zip_longest(anos_1987_2012, anos_2013_2016, anos_2017_2021):
csv_1987 = os.path.join(diretorio, ano_1987)
teses = | pd.read_csv(csv_1987, sep=";", encoding='latin-1', on_bad_lines='skip', low_memory=False) | pandas.read_csv |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with | tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False) | pandas._testing.assert_produces_warning |
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([ | pd.offsets.QuarterEnd(n=1, startingMonth=12) | pandas.offsets.QuarterEnd |
import pandas as pd
from product.anaiproduct import AnAIProduct
from datetime import timedelta
import pytz
from tqdm import tqdm
pd.options.mode.chained_assignment = None
from modeler.modeler import Modeler as m
from datetime import datetime, timedelta, timezone
import numpy as np
import math
import pickle
from sklearn.preprocessing import OneHotEncoder
class StockCategory(AnAIProduct):
def __init__(self,params):
super().__init__("stock_category",
{"market":{"preload":True,"tables":{"prices":pd.DataFrame([{}]),"sp500": | pd.DataFrame([{}]) | pandas.DataFrame |
import os
import json
import pandas as pd
import numpy as np
from scipy.cluster.hierarchy import linkage, leaves_list
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.WorkspaceClient import Workspace
def get_statistics(df_metadata, result, wgs_dict, dist_col="containment_index",upa_name=None):
'''
    Inputs:
     - df_metadata: pandas DataFrame of assembly metadata, one row per assembly_id
     - result: iterable of per-assembly hit dicts containing at least 'assembly_id' and dist_col
     - wgs_dict: dict mapping wgs_accession ids to assembly_id ids
     - upa_name (optional): name of the input object, recorded with each stats entry
    Returns:
     - (stats, dist_dict): stats is a list of dicts merging each hit with its metadata columns,
       dist_dict maps assembly_id to the rounded dist_col value
'''
df_metadata.fillna("", inplace=True)
df_id_col = "assembly_id" #
row_id_col = "assembly_id" #
columns = df_metadata.columns
stats = []
dist_dict = {}
for row_i, row in enumerate(result):
curr = {}
row_id = row[row_id_col]
# if the id for the row is in the "wgs_accession" namespace, we map it to the "assembly_id" in wgs_dict
if row_id in wgs_dict:
row_id = wgs_dict[row_id]
df_row = df_metadata[df_metadata[df_id_col] == row_id]
df_row = df_row.reset_index()
if len(df_row) > 1:
raise ValueError("should only have 1 assembly id row in the dataframe.")
for key, value in row.items():
curr[key.strip().lower().replace(' ','_').replace('-', '_')] = value
for col in columns:
try:
curr[col.strip().lower().replace(' ','_').replace('-', '_')] = df_row.loc[0, col]
except KeyError as err:
raise ValueError(f"on iter {row_i} could not get col {col} from {df_row}. \nrow = {row}....\n row_id = {row[row_id_col]}")
dist_dict[df_row.loc[0,'assembly_id']] = round(row[dist_col], 3)
if upa_name:
curr['input_name'] = upa_name
stats.append(curr)
return stats, dist_dict
def create_tree(df, tree_cols, dist_dict, source_order=None):
'''
    Inputs:
     - df: metadata DataFrame for the selected assemblies
     - tree_cols: ordered list of category columns used to nest the tree (most general first)
     - dist_dict: mapping from assembly_id to distance, attached to leaf nodes
     - source_order: optional ordering of inputs used to tally per-source counts at each node
    Returns:
     - nested list of node dicts suitable for rendering the collapsible tree
'''
tree = []
if len(tree_cols) == 0:
return tree
col = tree_cols[0]
type_count = df[col].value_counts().to_dict()
for t in type_count:
count = "({})".format(type_count[t])
leaf = create_tree(df[df[col]==t], tree_cols[1:], dist_dict, source_order=source_order)
if leaf == []:
if len(tree_cols) == 1:
dist = dist_dict[t]
else:
dist = ""
truncated_name = df[df[col]==t].iloc[0]['assembly_id']
tree.append({
'truncated_name': str(truncated_name),
'name': t,
'count': "({})".format(1),
})
tree[-1]['dist'] = dist
else:
tree.append({
'truncated_name': t,
'count': count,
'children': leaf
})
if source_order!=None:
sources = []
# if leaf == []:
# d = df[df[col]==t][['upa','']]
# upas = d['upa'].tolist()
# for upa in upas:
# d[d['upa']==upa][''].tolist()
# else:
source_count = df[df[col]==t]['upa'].value_counts().to_dict()
for i, s in enumerate(source_order):
if s in source_count:
sources.append(source_count[s])
else:
sources.append(0)
tree[-1]['sources'] = sources
tree[-1]['count'] = "({})".format(sum(sources))
return tree
def get_locations(stats, markers, upa_name):
'''
    Inputs:
     - stats: list of per-hit dicts produced by get_statistics
     - markers: dict of map markers keyed by assembly_id, updated in place
     - upa_name: name of the input object appended to each marker's list of inputs
    Returns:
     - the updated markers dict; only samples with both latitude and longitude are added
'''
for s in stats:
id_ = s["assembly_id"]
if id_ in markers:
markers[id_]['inputs'].append(upa_name)
else:
# Must have a physical location to go on map
# so here we filter for latitude and longitude variables
if not pd.isnull(s['latitude']) and not pd.isnull(s['longitude']) \
and s['latitude'] != "" and s['longitude'] != "":
markers[id_] = {
'name': s['sample_name'],
'lat': s['latitude'],
'lng': s['longitude'],
'details': "Collection date: %s <br>Analysis completed: %s <br>Geo-location: %s"%(
s["collection_date"], s["analysis_completed"], s["geo_loc_name"]
),
'inputs': [upa_name]
}
return markers
def get_upa_name(ws_url, cb_url, upa, is_test):
    '''
    Return the object name for the workspace reference `upa`, using the Workspace
    service and falling back to DataFileUtil.
    '''
if is_test:
return "test_object"
ws = Workspace(ws_url)
objs = ws.get_object_info3({
'objects': [{'ref':upa}]
})
upa_names = [info[1] for info in objs['infos']]
if len(upa_names) > 0:
return upa_names[0]
dfu = DataFileUtil(cb_url)
objs = dfu.get_objects({'object_refs':[upa]})['data']
upa_names = [obj['info'][1] for obj in objs]
if len(upa_names) > 0:
return upa_names[0]
else:
raise ValueError("Could not find name of workspace object with id %s"%upa)
def remap_sources(sources, upa_order):
    '''
    Re-index the per-source tallies by upa_order, keeping only non-empty entries.
    '''
new_sources = {}
for j, i in enumerate(upa_order):
val = sources[i]
if val !=0 and val != []:
new_sources[j] = val
return new_sources
def rewind_tree(tree, upa_order):
    '''
    Apply remap_sources to every node of the tree so its source tallies follow upa_order.
    '''
for t_ix, t in enumerate(tree['children']):
t['sources'] = remap_sources(t['sources'], upa_order)
if t.get('children'):
t = rewind_tree(t, upa_order)
tree['children'][t_ix] = t
return tree
def unwind_tree(X, tree):
    '''
    Depth-first collect each node's per-source tallies into X and return it.
    '''
if tree.get('children'):
for t in tree['children']:
if 'compl' in t:
X.append(np.array([len(mag_ids) for mag_ids in t['sources']]))
else:
X.append(np.array(t['sources']))
X = unwind_tree(X, t)
return X
def get_source_order(tree, upa_names):
    '''
    Hierarchically cluster the inputs by their per-node tallies and return the dendrogram leaf order.
    '''
X = unwind_tree([tree['sources']], tree)
X = np.transpose(np.array(X))
z = linkage(X, 'ward')
return leaves_list(z)
def format_results(ws_url, cb_url, results, is_test=False):
'''
Inputs:
- results:
Returns:
-
'''
curr_dir = os.path.dirname(os.path.realpath(__file__))
metadata_path = os.path.join(curr_dir, 'data', 'ebi_samples_metadata_with_studies_final_with_cols.csv')
df_metadata = pd.read_csv(metadata_path)
tree_cols = ["category_"+str(i) for i in range(1,6)] + ["assembly_id"]
wgs_to_assembly_id_path = os.path.join(curr_dir, 'data', 'wgs_accession_to_assembly_id_accession.json')
with open(wgs_to_assembly_id_path) as fd:
wgs_dict = json.load(fd)
df_metadata = df_metadata.fillna({col:"Unknown" for col in tree_cols})
upas, upa_names, stats, all_df, markers, dist_dict = [], [], [], [], {}, {}
# biosample,sample-alias,sample-desc,biome_id,sample_id,assembly_id,study-name
id_col = "assembly_id" # put in the ID column
df_id_col = "assembly_id" #Id columns for dataframe
for upa, result in results.items():
upa_name = get_upa_name(ws_url, cb_url, upa, is_test)
upa_names.append(upa_name)
upas.append(upa)
curr_stats, curr_dist_dict = get_statistics(df_metadata, result, wgs_dict, upa_name=upa_name)
markers = get_locations(curr_stats, markers, upa_name)
stats+=curr_stats
for key, val in curr_dist_dict.items():
dist_dict[key]=val
curr_df = df_metadata[df_metadata[df_id_col].isin([s[id_col] for s in curr_stats])]
curr_df.loc[:,'upa'] = upa
all_df.append(curr_df)
markers = [value for key, value in markers.items()]
df = | pd.concat(all_df) | pandas.concat |
from scipy.spatial import distance
import numpy as np
import pandas as pd
import scipy.stats
from utils import *
def alignstrategy(str1,str2,flag):
str1t = strpreprocess(str1,'intlist')
str2t = strpreprocess(str2,'intlist')
str1b = strpreprocess(str1,'bytelist')
str2b = strpreprocess(str2,'bytelist')
if flag == 'backzero':
while(len(str1t)>len(str2t)):
str2t.append(0)
while(len(str2t)>len(str1t)):
str1t.append(0)
if flag == 'NWpure':
from NW import NW
nw = NW(formula = 'int(i==j)', punish = 0)
_,str1b,str2b = nw.execute(str1b,str2b,1)
str1t = [int(str1b[i],16) if str1b[i] != '_' else 0 for i in range(len(str1b))]
str2t = [int(str2b[i],16) if str2b[i] != '_' else 0 for i in range(len(str2b))]
if flag == 'NWamend':
from NW import NW
nw = NW()
_,str1b,str2b = nw.execute(str1b,str2b,1)
str1t = [int(str1b[i],16) if str1b[i] != '_' else 0 for i in range(len(str1b))]
str2t = [int(str2b[i],16) if str2b[i] != '_' else 0 for i in range(len(str2b))]
return str1t,str2t
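# Illustrative sketch (added; not part of the original module): with the default 'backzero'
# strategy the shorter sequence is right-padded with zeros so both vectors end up the same length.
# The hex strings below are hypothetical and assume strpreprocess (imported from utils above)
# accepts plain hex strings.
def _demo_alignstrategy():
    vec1, vec2 = alignstrategy('0a1b2c3d', '0a1b', flag='backzero')
    # both vectors now have the same length; the shorter one was padded with trailing zeros
    return vec1, vec2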
def DisBraycurtis(str1, str2, flag='backzero'):
##small is best
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.braycurtis(str1t,str2t)
return dis
def DisCanberra(str1,str2, flag='backzero'):
##small is best
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.canberra(str1t,str2t)
return dis
def DisMinkowski(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.wminkowski(str1t,str2t,2,np.ones(len(str1t)))
return dis
def DisChebyshev(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.chebyshev(str1t,str2t)
return dis
def DisCityblock(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.cityblock(str1t,str2t)
return dis
def DisSqeuclidean(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.sqeuclidean(str1t,str2t)
return dis
def DisCosine(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.cosine(str1t,str2t)
return dis
def DisCorrelation(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.correlation(str1t,str2t)
return dis
def DisDice(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.dice(str1t,str2t)
return dis
def DisJaccard(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.jaccard(str1t,str2t)
return dis
def DisKulsinski(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.kulsinski(str1t,str2t)
return dis
def DisSokalmichener(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.sokalmichener(str1t,str2t)
return dis
def DisSokalsneath(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.sokalsneath(str1t,str2t)
return dis
def DisYule(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.yule(str1t,str2t)
return dis
def DisRussellrao(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.russellrao(str1t,str2t)
return dis
def DisHamming(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
dis = distance.hamming(str1t,str2t)
return dis
def DisLevenshtein(str1,str2, flag='backzero'):
str1t = strpreprocess(str1,'str')
str2t = strpreprocess(str2,'str')
import Levenshtein
dis = Levenshtein.distance(str1t,str2t)
return dis
def DisKendall(str1,str2, flag='backzero'):
str1t,str2t = alignstrategy(str1,str2,flag)
str1t = pd.Series(str1t)
str2t = | pd.Series(str2t) | pandas.Series |
# -*- coding: utf-8 -*-
###########################################################################
# we have searched for keywords in the original news
# for stemmed keywords in the stemmed news
# for lemmatized keywords in the lemmatized news
# now, want to merge all the results to see whats happening
###########################################################################
import pandas as pd
import numpy as np
from functions import add_stem
newsid_synonyms_origin=pd.read_csv('output/file1_keywords_original_keywords.csv') #input: output of solr_indexing_data
print(len(newsid_synonyms_origin))
#287199
newsid_synonyms_stem=pd.read_csv('output/file1_keywords_stemmed_keywords.csv') #input: output of solr_indexing_data
print(len(newsid_synonyms_stem))
#639888
newsid_synonyms_lemma=pd.read_csv('output/file1_keywords_lemmatized_keywords.csv') # input: output of solr_indexing_data
print(len(newsid_synonyms_lemma))
#484864
newsid_synonyms=newsid_synonyms_origin.copy()
newsid_synonyms=newsid_synonyms.append(newsid_synonyms_stem)
newsid_synonyms=newsid_synonyms.append(newsid_synonyms_lemma)
newsid_synonyms=newsid_synonyms.drop_duplicates()
print(len(newsid_synonyms))
#514806
newsid_synonyms.rename(index=str, columns={"disease_query":"synonyms"}, inplace=True)
####################################################
selected_news_origin=pd.read_csv('output/file1output_origin_news.csv') #input: output of solr_indexing_data
print(len(selected_news_origin))
#81003
selected_news_stem=pd.read_csv('output/file1output_stemmed_news.csv') #input: output of solr_indexing_data
print(len(selected_news_stem))
#104489
selected_news_lemma=pd.read_csv('output/file1output_lemmatized_news.csv') #input: output of solr_indexing_data
selected_news_lemma.columns
print(len(selected_news_lemma))
#94512
selected_news=selected_news_origin.copy()
selected_news=selected_news.append(selected_news_stem, sort=True)
selected_news=selected_news.append(selected_news_lemma, sort=True)
selected_news.drop('text', axis=1, inplace=True)
selected_news=selected_news.drop_duplicates()
print(len(selected_news))
#104912
####
#add the text of the news from original text
# read the original news
news=pd.read_excel("output/selected_news_original_with_doc_idx.xlsx") # output of before solr program
print(len(news))
#199230
temp_selected_news= pd.merge(selected_news,news[["text", "doc_idx"]], how="left", on='doc_idx')
temp_selected_news.sort_values(by=['doc_idx'], inplace=True)
print(len(temp_selected_news))
#104912
selected_news=temp_selected_news.copy()
print(len(selected_news))
#104912
#Did you see any changes in the number of News selected before and after “lemmatizing and stemming” ?
origin_doc_idx=selected_news_origin[['doc_idx']].drop_duplicates()
stem_doc_idx=selected_news_stem[['doc_idx']].drop_duplicates()
lemma_doc_idx=selected_news_lemma[['doc_idx']].drop_duplicates()
print(len(origin_doc_idx))
#81003
print(len(stem_doc_idx))
#104489
print(len(lemma_doc_idx))
#94512
#########################################################################
## From selected news exclude those with the given tags
#tags_to_delete=pd.read_excel("input/list of health related tags_to_delete.xlsx")
#print(len(tags_to_delete))
##8
#selected_news_final=selected_news[~selected_news["trimmed_tag"].isin(tags_to_delete["Tags"])]
#print(len(selected_news_final))
##82091
selected_news_final=selected_news # tag-based exclusion above is disabled, so keep all selected news
##########################################################################
# Add category to each assigned synonyms
mesh_keywords1=pd.read_excel('input/public_health_mesh_keywords_with_synonyms_sep052018.xlsx') # input: given by maryam
mesh_keywords1.rename(index=str, columns={"mesh_synonym":"synonyms","mesh_keyword":"category"}, inplace=True)
mesh_keywords1.columns
mesh_keywords2=add_stem(mesh_keywords1, "synonyms")
mesh_keywords2.columns
mesh_keywords2.rename(index=str, columns={"mesh_synonym":"synonyms","mesh_keyword":"category"}, inplace=True)
temp_mesh_stem=mesh_keywords2[['category','stem_synonyms']]
temp_mesh_lemma=mesh_keywords2[['category','lemma_synonyms']]
temp_mesh_stem.rename(index=str, columns={"stem_synonyms":"synonyms"}, inplace=True)
temp_mesh_lemma.rename(index=str, columns={"lemma_synonyms":"synonyms"}, inplace=True)
mesh_keywords=mesh_keywords2[['synonyms','category']]
mesh_keywords=mesh_keywords.append(temp_mesh_stem, sort=True)
mesh_keywords=mesh_keywords.append(temp_mesh_lemma, sort=True)
mesh_keywords=mesh_keywords.drop_duplicates()
print(len(mesh_keywords))
#1900
####################################################################################
newsid_synonyms_categories=pd.merge(newsid_synonyms,mesh_keywords[['synonyms', 'category']],how='left',on=["synonyms"])
print(len(newsid_synonyms_categories))
#516309
# Merge some categories together and create new column (upcategory)
smoking=['smoking', 'smoking cessation','smoking prevention',
'tobacco industry', 'tobacco products', 'tobacco smoke pollution',
'tobacco use disorder','nicotine']
risk=['risk assessment', 'risk factors','risk-taking','risk']
health_behavior=['health behavior','health promotion','attitude to health', 'health knowledge, attitudes, practice']
occupational=['occupational diseases', 'occupational exposure']
mortality=['mortality','cause of death']
psychology=['stress, psychological','psychology']
Air_pollution=['air pollution', 'air pollution, indoor','environmental pollutants',
'air pollutants','isolation & purification']
newsid_synonyms_categories['upcategory']=newsid_synonyms_categories['category']
newsid_synonyms_categories['upcategory']=np.where(newsid_synonyms_categories['category'].isin(smoking),'Smoking_tobaccoproducts_industry_prevention',newsid_synonyms_categories['upcategory'])
b=newsid_synonyms_categories[newsid_synonyms_categories['upcategory']=='Smoking_tobaccoproducts_industry_prevention']
newsid_synonyms_categories['upcategory']=np.where(newsid_synonyms_categories['category'].isin(risk),'risk',newsid_synonyms_categories['upcategory'])
b=newsid_synonyms_categories[newsid_synonyms_categories['upcategory']=='risk']
newsid_synonyms_categories['upcategory']=np.where(newsid_synonyms_categories['category'].isin(health_behavior),'health behavior_attitude',newsid_synonyms_categories['upcategory'])
b=newsid_synonyms_categories[newsid_synonyms_categories['upcategory']=='health behavior_attitude']
newsid_synonyms_categories['upcategory']=np.where(newsid_synonyms_categories['category'].isin(occupational),'occupational',newsid_synonyms_categories['upcategory'])
newsid_synonyms_categories['upcategory']=np.where(newsid_synonyms_categories['category'].isin(mortality),'mortality',newsid_synonyms_categories['upcategory'])
newsid_synonyms_categories['upcategory']=np.where(newsid_synonyms_categories['category'].isin(psychology),'psychology',newsid_synonyms_categories['upcategory'])
newsid_synonyms_categories['upcategory']=np.where(newsid_synonyms_categories['category'].isin(Air_pollution),'air_pollution',newsid_synonyms_categories['upcategory'])
### verification
temp_categories=newsid_synonyms_categories[['upcategory','category']].drop_duplicates().sort_values(by='upcategory')
##########
selected_news_with_categories=pd.merge(selected_news_final,newsid_synonyms_categories,how='left',on='doc_idx')
print(len(selected_news_with_categories))
#442701
temp_mesh=mesh_keywords['category'].unique()
print(len(temp_mesh))
#100
newsid_synonyms.columns
print(len(newsid_synonyms))
#514806
newsid_synonyms.drop_duplicates(inplace=True)
print(len(newsid_synonyms))
#514806
newsid_synonyms_categories.drop_duplicates(inplace=True)
print(len(newsid_synonyms_categories))
#516309
selected_news_with_categories2=selected_news_with_categories[[ 'doc_idx', 'text', 'year', 'synonyms','upcategory']]
selected_news_with_categories2.drop_duplicates(inplace=True)
print(len(selected_news_with_categories2))
# 442701 for category
# 442389 for upcategory
selected_news_with_categories2.to_excel("output/after_solr_all_selected_news_with_synonyms_and_upcategories.xlsx")
selected_news_with_categories_2=selected_news_with_categories[[ 'doc_idx', 'text', 'year', 'synonyms','category']]
selected_news_with_categories_2.drop_duplicates(inplace=True)
print(len(selected_news_with_categories_2))
# 442701 for category
selected_news_with_categories_2.to_excel("output/after_solr_all_selected_news_with_synonyms_and_categories.xlsx")
######################################################################
# add message header and trimmed_tag to news
print(len(selected_news_with_categories2))
#442701
selected_news_with_categories2.columns
news_with_tags= | pd.merge(selected_news_with_categories2,news[["message_header","trimmed_tag","doc_idx"]], how="left", on='doc_idx') | pandas.merge |
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def plot_feature_importance(X, Y):
clf = DecisionTreeClassifier()
clf.fit(X, Y)
features = X.columns.values
importances = clf.feature_importances_
df_plot = | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.colors as mcolors
import numpy as np
import folium
import difflib
import geopandas as gpd
import unicodedata
#function to remove accents from states and municipios
def remove_accents(input_str):
nfkd_form = unicodedata.normalize('NFKD', input_str)
return u"".join([c for c in nfkd_form if not unicodedata.combining(c)])
#PROCESS SUI INPUT DATA, SAVE AS CSVs
def load_sui_data():
folder = 'sui_database_in'
files = os.listdir(folder)
u_codes = pd.read_excel('Listado de Agentes.xls')
u_codes['Des Agente'] = [remove_accents(i) for i in list(u_codes['Des Agente'])]
u_codes = u_codes.loc[u_codes['Estado'] == 'OPERACION']
sorter = ['DISTRIBUCIÓN','COMERCIALIZACIÓN','GENERACIÓN','TRANSPORTE']
u_codes['Actividad'] = u_codes['Actividad'].astype('category')
u_codes['Actividad'].cat.set_categories(sorter, inplace=True)
u_codes = u_codes.sort_values(['Actividad'])
u_codes = u_codes.drop_duplicates(subset = ['Des Agente'], keep = 'first')
code_dict = pd.Series(u_codes['Código'].values, index=u_codes['Des Agente']).to_dict()
def match_empresa_to_code(row):
"""Accents, spaces, or other differences in the long Empresa names can throw off mapping.
This function finds the closest string (slow), and just replaces it with a 4 digit acronym used by XM
"""
empresa_str_in = row['Empresa']
if empresa_str_in in code_dict.keys():
return code_dict[empresa_str_in]
else:
empresa_str_matches = difflib.get_close_matches(empresa_str_in, list(code_dict.keys()), cutoff = 0.8, n = 1)
if len(empresa_str_matches) > 0:
return code_dict[empresa_str_matches[0]]
else:
print(empresa_str_in, 'no matches!')
return empresa_str_in
good_names = ['Departamento', 'Municipio', 'Empresa', 'Variable Calculada',
'Estrato 1', 'Estrato 2', 'Estrato 3', 'Estrato 4', 'Estrato 5',
'Estrato 6', 'Total Residencial', 'Industrial', 'Comercial', 'Oficial',
'Otros', 'Total No Residencial']
out_files = {}
for f in files:
print(f)
in_f = pd.read_csv(os.path.join(folder, f), encoding = 'latin',
names = good_names + ['bogus_1','bogus_2'], skiprows = 1)
in_f.fillna('missing', inplace = True)
#there are some bad rows in the csv for Bogota, where there is an uneven number of columns, this block deals with that
in_f_error = in_f[in_f['bogus_1'] != 'missing']
in_f_error = in_f_error[in_f_error.columns[2:]]
in_f_error.columns = good_names
in_f_good = in_f[in_f ['bogus_1'] == 'missing'].drop(['bogus_1','bogus_2'], axis = 'columns')
in_f = pd.concat([in_f_error, in_f_good], axis = 'rows')
in_f = in_f.replace('missing',None)
in_f = in_f.dropna(axis = 'index', how = 'any')
in_f['year'] = in_f.loc[in_f['Departamento'] == 'Año', 'Municipio'].item()
in_f['land_type'] = in_f.loc[in_f['Departamento'] == 'Ubicación', 'Municipio'].item() #keep year as a string so groupbys dont add it
data_type = in_f.loc[in_f['Departamento'] == 'Reporte a Consultar', 'Municipio'].item()
in_f['data_type'] = data_type
out_f = in_f.loc[~in_f['Departamento'].isin(['Año','Período','Ubicación','Reporte a Consultar'])]
out_f['Empresa'] = [remove_accents(i) for i in list(out_f['Empresa'])]
out_f['Departamento'] = [remove_accents(i) for i in list(out_f['Departamento'])]
out_f['Municipio'] = [remove_accents(i) for i in list(out_f['Municipio'])]
out_f['Empresa'] = out_f.apply(match_empresa_to_code, axis = 1)
#convert string columns to int if missing values (originally coded as 'ND') are present
for c in out_f.columns:
if 'ND' in list(in_f[c]):
out_f[c] = out_f[c].replace('ND', 0)
out_f[c] = pd.to_numeric(out_f[c], errors='ignore')
if data_type not in out_files:
out_files[data_type] = out_f
else:
out_files[data_type] = pd.concat([out_f, out_files[data_type]])
out_files['Consumo'].to_csv('sui_database_out/consumption_kwh.csv')
out_files['Factura Promedio'].to_csv('sui_database_out/average_bill.csv')
out_files['Consumo Promedio'].to_csv('sui_database_out/average_consumption.csv')
out_files['Tarifa Media'].to_csv('sui_database_out/average_rate.csv')
out_files['Total Facturado'].to_csv('sui_database_out/total_billed.csv')
out_files['Suscriptores'].to_csv('sui_database_out/subscribers.csv')
out_files['Valor Consumo'].to_csv('sui_database_out/consumption_cost.csv')
return out_files
def load_sui_data_from_csv():
out_files = {}
out_files['Consumo'] = pd.read_csv('sui_database_out/consumption_kwh.csv')
out_files['Factura Promedio'] = pd.read_csv('sui_database_out/average_bill.csv')
out_files['Consumo Promedio']= pd.read_csv('sui_database_out/average_consumption.csv')
out_files['Tarifa Media'] = pd.read_csv('sui_database_out/average_rate.csv')
out_files['Total Facturado'] = pd.read_csv('sui_database_out/total_billed.csv')
out_files['Suscriptores'] = pd.read_csv('sui_database_out/subscribers.csv')
out_files['Valor Consumo'] = pd.read_csv('sui_database_out/consumption_cost.csv')
return out_files
#%%
#function to convert a complete long-df (with different entries for state, municipality, rural/urban, company)
#into one row per municipality (and state in case different municipalities have the same name)
def data_grouper(df, level = 'Municipio', land_type = 'Total', func = 'sum'):
"""
    Function to group data by Departamento or Municipio and perform an aggregating function.
    To Do
    -----
    -If 'func' is 'mean', then mean should be weighted for each Municipio based on total consumption. (i.e. two utilities in one Municipio with very different characteristics and sizes)
"""
df = df.loc[df['land_type'] == land_type]
df = df.replace(0, np.nan)
if level == 'Municipio':
df_out = df.groupby(['Departamento','Municipio'], as_index = False).agg(func)
elif level == 'Departamento':
df_out = df.groupby('Departamento', as_index = False).agg(func)
try:
df_out = df_out.drop(['Unnamed: 0','year'], axis = 'columns')
except KeyError:
df_out = df_out
return df_out
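# Illustrative usage sketch (added; not part of the original script): collapse the municipal
# consumption table to one row per Departamento. `out_files` is assumed to be the dict returned
# by load_sui_data() or load_sui_data_from_csv() above.
def _demo_data_grouper(out_files):
    consumption = out_files['Consumo']
    by_dept = data_grouper(consumption, level='Departamento', land_type='Total', func='sum')
    return by_dept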
#%%
#READ IN GEOJSONS AND CLEAN UP
muni_shape_in = gpd.read_file('reference_data/colombia-municipios.geojson', encoding = 'latin')
muni_shape_in.rename({'NOMBRE_DPT':'Departamento','NOMBRE_MPI':'Municipio'}, axis = 'columns', inplace = True)
muni_shape_in['Departamento'] = [remove_accents(i) for i in list(muni_shape_in['Departamento'])]
muni_shape_in['Departamento'] = muni_shape_in['Departamento'].replace('SANTAFE DE BOGOTA D.C', 'BOGOTA')
muni_shape_in['Municipio'] = muni_shape_in['Municipio'].replace('SANTAFE DE BOGOTA D.C.', ' D.C.')
muni_shape_in['Municipio'] = [remove_accents(i) for i in list(muni_shape_in['Municipio'])]
muni_shape_in.crs = {'init':'epsg:4326'}
muni_vs_shape_in = gpd.read_file('reference_data/colombia-municipios-very-simplified.json') #very simple
muni_vs_shape_in.rename({'NOMBRE_DPT':'Departamento','NOMBRE_MPI':'Municipio'}, axis = 'columns', inplace = True)
muni_vs_shape_in['Departamento'] = [remove_accents(i) for i in list(muni_vs_shape_in['Departamento'])]
muni_vs_shape_in['Departamento'] = muni_vs_shape_in['Departamento'].replace('SANTAFE DE BOGOTA D.C', 'BOGOTA')
muni_vs_shape_in['Municipio'] = muni_vs_shape_in['Municipio'].replace('SANTAFE DE BOGOTA D.C.', ' D.C.')
muni_vs_shape_in['Municipio'] = [remove_accents(i) for i in list(muni_vs_shape_in['Municipio'])]
muni_vs_shape_in.crs = {'init':'epsg:4326'}
#merging
dept_shape_in = gpd.read_file('reference_data/colombia-departamento-simplified.json', encoding = 'latin')
dept_shape_in.rename({'NOMBRE_DPT':'Departamento','NOMBRE_MPI':'Municipio'}, axis = 'columns', inplace = True)
dept_shape_in['Departamento'] = dept_shape_in['Departamento'].replace('SANTAFE DE BOGOTA D.C', 'BOGOTA')
dept_shape_in.crs = {'init':'epsg:4326'}
#functions to merge data df with shape gdf
def muni_shape_merger(df):
out = muni_shape_in.merge(df, on = ['Departamento','Municipio'], how = 'inner')
out.fillna(0, inplace = True)
return out
def muni_vs_shape_merger(df):
out = muni_vs_shape_in[['Departamento','Municipio','geometry']].merge(df, on = ['Departamento','Municipio'], how = 'right')
return out
def dept_shape_merger(df):
out = dept_shape_in.merge(df, on = ['Departamento'], how = 'outer')
out.fillna(0, inplace = True)
return out
def hexcolormapper(df, column, colorscale):
bins = np.percentile(df[column], np.linspace(0,100,15))
binned = np.digitize(df[column], bins)
norm = matplotlib.colors.Normalize(vmin=min(binned), vmax=max(binned), clip=False)
mapper = plt.cm.ScalarMappable(norm=norm, cmap=colorscale)
df[f'{column}_color'] = np.vectorize(lambda x: mcolors.to_hex(mapper.to_rgba(x)))(binned)
return df
def mapper(data_df, columns, columns_cmap = None, level = 'Municipio'):
""" Function to create a folium map with given levels of input.
Parameters
----------
data_df : pandas.DataFrame
dataframe that has been grouped to display a metric (i.e. number of subscribers
or total consumption). Rows are based on a geographic jurisdiction (i.e. Departamento or Municipio)
which should be included in a column.
columns : list
list of columns representing rates (i.e. Estrato 1, Total Residencial, Comercial, etc.)
each column will be its own choropleth layer. Can also be a string for pregenerated columns.
    columns_cmap : list
        list of equal length to columns, with a color scheme to use for each column
    level : 'Departamento' or 'Municipio'
        The jurisdictional level to plot
"""
if columns == 'estrata_columns':
columns = ['Estrato 1', 'Estrato 2', 'Estrato 3', 'Estrato 4', 'Estrato 5', 'Estrato 6', 'Total Residencial']
columns_cmap = ['Greens'] * 7
elif columns == 'all_columns':
columns = ['Estrato 1', 'Estrato 2', 'Estrato 3', 'Estrato 4', 'Estrato 5', 'Estrato 6',
'Total Residencial', 'Industrial','Comercial','Oficial','Otros','Total No Residencial']
columns_cmap = ['Greens'] * 7 + ['Reds','Blues','Purples','Purples','Reds']
elif columns == 'sec_columns':
columns = ['Total Residencial','Industrial','Comercial']
columns_cmap = ['Greens', 'Reds', 'Blues']
elif columns == 'estrata_columns_lim':
columns = ['Estrato 1','Estrato 4','Estrato 6']
columns_cmap = ['Greens'] * 3
elif columns == 'total_residencial':
columns = ['Total Residencial']
columns_cmap = ['Greens']
if level == 'Municipio':
data_gdf = muni_shape_merger(data_df)
elif level == 'Departamento':
data_gdf = dept_shape_merger(data_df)
m = folium.Map(location=[4.57, -74.29], zoom_start=5, tiles = 'stamentoner', width = '60%',
min_lat = -9, max_lat = 18, min_lon = -84, max_lon = -59, min_zoom = 5, max_zoom = 9,
attr = 'Data Source: SUI')
count = 0
local_gdfs = []
for c in columns:
cmap = columns_cmap[count]
count += 1
data_gdf = hexcolormapper(data_gdf, c, cmap)
if level == 'Municipio':
local_gdf = data_gdf[[f'{c}',f'{c}_color','geometry','Departamento','Municipio']]
local_gdf.columns = ['data','color','geometry', 'Departamento','Municipio'] #folium doesn't like when dfs in a loop have different column names
if level == 'Departamento':
local_gdf = data_gdf[[f'{c}',f'{c}_color','geometry','Departamento']]
local_gdf.columns = ['data','color','geometry', 'Departamento']
local_gdfs.append(local_gdf)
fgs = []
count = 0
for c in columns:
local_gdf = local_gdfs[count]
if count == 0:
on_switch = True
else:
on_switch = False
count +=1
fg_ = folium.FeatureGroup(name=c, show = on_switch)
for index, row in local_gdf.iterrows():
geojson_ = folium.GeoJson(local_gdf.iloc[index:index+1],
style_function = lambda feature: {
'fillColor': feature['properties']['color'],
'fillOpacity' : 0.7,
'color': '#000000',
'weight':0.2
})
if level == 'Departamento':
popup_ = folium.Popup(
f"<b>Dep:</b> {row['Departamento']}<br>"
f"<b>Data:</b> {int(row['data'])}<br>"
)
# elif level == 'Municipio':
# popup_ = folium.Popup(
# f"<b>Dep:</b> {row['Departamento']}<br>"
# f"<b>Muni:</b> {row['Municipio']}<br>"
# f"<b>Data:</b> {int(row['data'])}<br>"
# )
popup_.add_to(geojson_)
geojson_.add_to(fg_)
fgs.append(fg_)
for fg in fgs:
m.add_child(fg)
# folium.Choropleth(
# geo_data=shape_df,
# name=c,
# data=data_df,
# columns=[key,c],
# key_on=f'feature.properties.{key}',
# fill_color=cmap,
# fill_opacity=0.5,
# line_opacity=0.5,
# legend_name=c,
# bins=9,
# legend = False
# ).add_to(m)
m.add_child(folium.map.LayerControl(collapsed = False, autoZIndex = True))
return m
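# Minimal sketch of how mapper() is meant to be called (not executed on import).
# The 'sui_data' dict is assumed to come from load_sui_data_from_csv() above;
# the output file name is a placeholder.
def example_consumption_map(sui_data, out_html='consumption_by_municipio.html'):
    consumo_muni = data_grouper(sui_data['Consumo'], level='Municipio', func='sum')
    m = mapper(consumo_muni, columns='estrata_columns', level='Municipio')
    m.save(out_html)
    return m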
#%%
def land_type_aggregator(df):
"""
Accepts an ungrouped df (i.e. sui_data['Consumo']) and returns a df with rows for Municipios
and columns for sum by land type (Centro, Urbano, Rural)
"""
groupeddf = df.groupby(['Departamento','Municipio','land_type'], as_index = False).sum()
municipio_lists = []
for d in set(groupeddf['Departamento']):
d_df = groupeddf.loc[groupeddf['Departamento'] == d]
for m in set(d_df['Municipio']):
m_df = d_df.loc[d_df['Municipio'] == m]
            # .sum() instead of .item() so municipios missing a land_type (empty selection) count as 0
            rural_df = m_df.loc[m_df['land_type'] == 'Rural']
            rural_val = int(rural_df['Total Residencial'].sum() + rural_df['Total No Residencial'].sum())
            centro_df = m_df.loc[m_df['land_type'] == 'Centro Poblado']
            centro_val = int(centro_df['Total Residencial'].sum() + centro_df['Total No Residencial'].sum())
            urban_df = m_df.loc[m_df['land_type'] == 'Urbano']
            urban_val = int(urban_df['Total Residencial'].sum() + urban_df['Total No Residencial'].sum())
municipio_lists.append([d, m, rural_val, centro_val, urban_val])
land_type_df = pd.DataFrame(municipio_lists, columns = ['Departamento','Municipio', 'Rural', 'Centro Poblado', 'Urbano'])
return land_type_df
#%%
def load_sui_tariff():
"""
Still unsure what exactly is in this database, but it seems related to tariff components.
Unused for now.
"""
folder = '/Users/skoebric/Dropbox/GitHub/colombia_eda/sui_tariffs_in'
files = os.listdir(folder)
#good_names = ['Departamento', 'Municipio', 'Empresa', 'Variable Calculada',
# 'Estrato 1', 'Estrato 2', 'Estrato 3', 'Estrato 4', 'Estrato 5',
# 'Estrato 6', 'Total Residencial', 'Industrial', 'Comercial', 'Oficial',
# 'Otros', 'Total No Residencial']
tariff_dfs = []
for f in files:
in_f = pd.read_csv(os.path.join(folder, f), encoding = 'latin')
tariff_dfs.append(in_f)
tariff_df = pd.concat(tariff_dfs)
tariff_df = tariff_df.dropna(thresh = 8)
tariff_df['Fecha'] = pd.to_datetime(tariff_df['Fecha'])
tariff_df['Empresa'] = [remove_accents(i) for i in list(tariff_df['Empresa'])]
tariff_df['Mercado'] = [remove_accents(i) for i in list(tariff_df['Mercado'])]
tariff_df = tariff_df.sort_values(by=['Empresa','Fecha','Mercado'], ascending = [True, False, True])
recent_rates = []
for e in set(tariff_df['Empresa']):
e_df = tariff_df.loc[tariff_df['Empresa'] == e]
for m in set(e_df['Mercado']):
m_df = e_df.loc[e_df['Mercado'] == m]
if len(m_df) > 0:
m_df_list = list(m_df.iloc[0])
recent_rates.append(m_df_list)
recent_rate_df = pd.DataFrame(recent_rates, columns = tariff_df.columns)
    recent_rate_df = recent_rate_df.rename({'Gm':'generation_charge',
                                            'i':'transmission_charge',
                                            'j':'comercialization?',
                                            }, axis='columns')
return recent_rate_df
#recent_rate_df.to_csv('/Users/skoebric/Dropbox/GitHub/colombia_eda/sui_tariffs_out/recent_tariff_structure.csv')
#%%
def demand_8760_getter(input_codes = None):
"""
Returns a long-df with an 8760 for each Empresa's 2018 Demand.
Empresa
"""
retail_df = pd.read_excel('reference_data/Demanda_por_OR_2018.xls')
nonreg_df1 = pd.read_excel('reference_data/Demanda_Comercial_Por_Comercializador_2018SEM1.xls')
nonreg_df2 = pd.read_excel('reference_data/Demanda_Comercial_Por_Comercializador_2018SEM2.xls')
demand_df = pd.concat([retail_df, nonreg_df1, nonreg_df2], axis = 'rows')
demand_df = demand_df.rename({'Codigo':'Empresa'}, axis = 'columns')
if input_codes is not None:
demand_df = demand_df.loc[demand_df['Empresa'].isin(input_codes)]
demand_df['Fecha'] = pd.to_datetime(demand_df['Fecha'])
demand_melt_df = pd.melt(demand_df, id_vars = ['Empresa','Fecha'], value_vars = [str(i) for i in range(0,24)],
var_name = 'hour', value_name = 'demand')
demand_melt_df['hour'] = | pd.to_numeric(demand_melt_df['hour']) | pandas.to_numeric |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from multiprocessing import cpu_count
import pandas as pd
from joblib import Parallel, delayed
from src.geo_mapping_helpers import map_district_to_community_area
def filter_single_year_range_departure_ca_format_datetimes(df):
"""
Replace single station name, remove non-city trips and change datetime
column format.
"""
df = df[~df["community"].isin(["OUTSIDE CITY"])].copy()
for c_new, c in zip(["startdatehour"], ["starttime"]):
df[c_new] = | pd.to_datetime(df[c]) | pandas.to_datetime |
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
needs_i8_conversion,
)
import pandas as pd
from pandas import NumericIndex
import pandas._testing as tm
from pandas.tests.base.common import allow_na_ops
def test_unique(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
result = obj.unique()
# dict.fromkeys preserves the order
unique_values = list(dict.fromkeys(obj.values))
if isinstance(obj, pd.MultiIndex):
expected = pd.MultiIndex.from_tuples(unique_values)
expected.names = obj.names
tm.assert_index_equal(result, expected, exact=True)
elif isinstance(obj, pd.Index) and obj._is_backward_compat_public_numeric_index:
expected = NumericIndex(unique_values, dtype=obj.dtype)
| tm.assert_index_equal(result, expected, exact=True) | pandas._testing.assert_index_equal |
from typing import Optional
import numpy as np
import pandas as pd
import pytest
from pandas import testing as pdt
from rle_array.autoconversion import auto_convert_to_rle, decompress
from rle_array.dtype import RLEDtype
pytestmark = pytest.mark.filterwarnings("ignore:performance")
@pytest.mark.parametrize(
"orig, threshold, expected",
[
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": pd.Series([1], dtype=np.int32),
"uint64": pd.Series([1], dtype=np.uint64),
"float64": pd.Series([1.2], dtype=np.float64),
"bool": pd.Series([True], dtype=np.bool_),
"object": pd.Series(["foo"], dtype=np.object_),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
# threshold
None,
# expected
pd.DataFrame(
{
"int64": pd.Series([1], dtype=RLEDtype(np.int64)),
"int32": pd.Series([1], dtype=RLEDtype(np.int32)),
"uint64": pd.Series([1], dtype=RLEDtype(np.uint64)),
"float64": pd.Series([1.2], dtype=RLEDtype(np.float64)),
"bool": pd.Series([True], dtype=RLEDtype(np.bool_)),
"object": pd.Series(["foo"]).astype(RLEDtype(np.object_)),
"datetime64": pd.Series(
[pd.Timestamp("2020-01-01")], dtype="datetime64[ns]"
),
}
),
),
(
# orig
pd.DataFrame(
{
"int64": pd.Series([1], dtype=np.int64),
"int32": | pd.Series([1], dtype=np.int32) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 00:13:06 2020
@author: sahand
"""
from rake_nltk import Rake
import pandas as pd
import re
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
st = set(stopwords.words('english'))
path = '/home/sahand/GoogleDrive/Data/'
data_address = path+"Corpus/AI 4k/copyr_deflem_stopword_removed_thesaurus May 28/1990-2019/1990-2019 abstract_title"
df1 = pd.read_csv(data_address,names=['abstract'])
labels = | pd.read_csv(path+'Corpus/AI 4k/embeddings/clustering/k10/Doc2Vec patent_wos_ai corpus DEC 200,500,10 k10 labels') | pandas.read_csv |
import requests
import pandas as pd
from datetime import datetime
def get_irradiance_next_hour():
    # API parameters
baseurl = 'http://dataservice.accuweather.com/forecasts/v1/hourly/1hour/'
location_key = '310683'
apikey = '<KEY>'
parameters = {
'apikey': apikey,
'details': 'true'
}
url = baseurl + location_key
response = requests.get(url, params=parameters)
    try:
        result = response.json()[0]['SolarIrradiance']['Value']
    except Exception:
        # fall back to 0 W/m2 if the response is missing or malformed
        result = 0
return result
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a (multivariate) time series as a supervised-learning table:
    n_in lagged columns as inputs and n_out forward columns as outputs."""
    n_vars = 1 if type(data) is list else data.shape[1]
dff = pd.DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(dff.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(dff.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
# put it all together
agg = pd.concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
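# Tiny self-check for series_to_supervised (illustrative values only).
def _demo_series_to_supervised():
    framed = series_to_supervised([[1], [2], [3], [4]], n_in=1, n_out=1)
    # columns: ['var1(t-1)', 'var1(t)']; rows after dropna: (1, 2), (2, 3), (3, 4)
    return framed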
# Write to csv
def write_to_csv_RT(filename, names, data):
print('writing onto csv file at: ', datetime.now())
filepath = "../Data/" + filename
df_to_csv = pd.DataFrame(columns=names)
ts = datetime.now()
new_row = pd.DataFrame([data], columns=names, index=[ts])
df_to_csv = pd.concat([df_to_csv, | pd.DataFrame(new_row) | pandas.DataFrame |
#using TA-Lib to create technical analysis / charts / patterns
#package imports
import pandas as pd
import numpy as np
from pandas_datareader import DataReader
import math
import os
import path
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import style
import time
import datetime
import requests
import io
import json
import talib as ta
from talib import MA_Type
style.use('ggplot')
#data import -- work on making this a function or at least try and condense it
df = | pd.read_csv('TWTR.csv', parse_dates=True, index_col=0) | pandas.read_csv |
"""Test ir_dist._util utility functions"""
from scirpy.ir_dist._util import (
DoubleLookupNeighborFinder,
reduce_and,
reduce_or,
merge_coo_matrices,
)
import pytest
import numpy as np
import scipy.sparse as sp
import pandas as pd
import numpy.testing as npt
@pytest.fixture
def dlnf_square():
clonotypes = | pd.DataFrame() | pandas.DataFrame |
from __future__ import division
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import locale
import os
import re
from shutil import rmtree
import string
import subprocess
import sys
import tempfile
import traceback
import warnings
import numpy as np
from numpy.random import rand, randn
from pandas._libs import testing as _testing
import pandas.compat as compat
from pandas.compat import (
PY2, PY3, Counter, StringIO, callable, filter, httplib, lmap, lrange, lzip,
map, raise_with_traceback, range, string_types, u, unichr, zip)
from pandas.core.dtypes.common import (
is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_datetimelike_v_numeric, is_datetimelike_v_object,
is_extension_array_dtype, is_interval_dtype, is_list_like, is_number,
is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index,
IntervalIndex, MultiIndex, Panel, PeriodIndex, RangeIndex, Series,
bdate_range)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArrayMixin as DatetimeArray, ExtensionArray, IntervalArray,
PeriodArray, TimedeltaArrayMixin as TimedeltaArray, period_array)
import pandas.core.common as com
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
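# Illustrative usage (not part of the upstream module):
#
# >>> df = pd.DataFrame({'a': [1, 2]})
# >>> assert_frame_equal(df, round_trip_pickle(df))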
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
"""
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.open(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError('ZIP file {} error. Only one file per ZIP.'
.format(path))
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def assert_almost_equal(left, right, check_dtype="equiv",
check_less_precise=False, **kwargs):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
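# Illustrative example of the precision semantics described above (not part of
# the upstream module): 1.0001 and 1.0002 compare equal at 3-digit precision,
# though they would not at the default 5 digits.
#
# >>> assert_almost_equal(1.0001, 1.0002, check_less_precise=True)
# >>> assert_almost_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0]))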
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".format(exception=e))
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.format(prefix=prefix))
found = pattern.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if com._all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError,
            locale.Error):  # horrible name for an Exception subclass
return False
else:
return True
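# Illustrative usage of the locale helpers above (not part of the upstream
# module); 'en_US.UTF-8' is just an example locale that may not exist locally.
#
# >>> if can_set_locale('en_US.UTF-8'):
# ...     with set_locale('en_US.UTF-8') as loc:
# ...         print(loc)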
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
@contextmanager
def set_defaultencoding(encoding):
"""
Set default encoding (as given by sys.getdefaultencoding()) to the given
encoding; restore on exit.
Parameters
----------
encoding : str
"""
if not PY2:
raise ValueError("set_defaultencoding context is only available "
"in Python 2.")
orig = sys.getdefaultencoding()
reload(sys) # noqa:F821
sys.setdefaultencoding(encoding)
try:
yield
finally:
sys.setdefaultencoding(orig)
def capture_stdout(f):
r"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
r"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except Exception:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception:
print("Couldn't close file descriptor: {fdesc} (file: {fname})"
.format(fdesc=fd, fname=filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: {error}".format(error=e))
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agrees to remove on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix='')
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except Exception:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.codes[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = '{obj} levels are different'.format(obj=obj)
msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)
msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = '{obj} length are different'.format(obj=obj)
msg2 = '{length}, {left}'.format(length=len(left), left=left)
msg3 = '{length}, {right}'.format(length=len(right), right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
    # MultiIndex special comparison for more helpful error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{level}]'.format(level=level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {'Int64Index', 'RangeIndex'}):
msg = '{obj} classes are not equivalent'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{obj} classes are different'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = 'Attribute "{attr}" are different'.format(attr=attr)
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ("one of 'objs' is not a matplotlib Axes instance, type "
"encountered {name!r}").format(name=el.__class__.__name__)
assert isinstance(el, (plt.Axes, dict)), msg
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), (
'objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {name!r}'
.format(name=objs.__class__.__name__))
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
check_category_order=True, obj='Categorical'):
"""Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{obj}.categories'.format(obj=obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{obj}.codes'.format(obj=obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{obj}.categories'.format(obj=obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('ordered', left, right, obj=obj)
def assert_interval_array_equal(left, right, exact='equiv',
obj='IntervalArray'):
"""Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact,
obj='{obj}.left'.format(obj=obj))
assert_index_equal(left.right, right.right, exact=exact,
obj='{obj}.left'.format(obj=obj))
assert_attr_equal('closed', left, right, obj=obj)
def assert_period_array_equal(left, right, obj='PeriodArray'):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj='DatetimeArray'):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
assert_attr_equal('tz', left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj='TimedeltaArray'):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if PY2 and isinstance(left, string_types):
# left needs to be printable in native text type in python2
left = left.encode('utf-8')
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
if PY2 and isinstance(right, string_types):
# right needs to be printable in native text type in python2
right = right.encode('utf-8')
msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(obj=obj, message=message, left=left, right=right)
if diff is not None:
msg += "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
check_same=None, obj='numpy array'):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype: bool, default True
check dtype if both a and b are np.ndarray
err_msg : str, default None
If provided, used as assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "{left!r} is not {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "{left!r} is {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{obj} shapes are different'
.format(obj=obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
def assert_extension_array_equal(left, right, check_dtype=True,
check_less_precise=False,
check_exact=False):
"""Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default False
Whether to compare number exactly.
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray), 'left is not an ExtensionArray'
assert isinstance(right, ExtensionArray), 'right is not an ExtensionArray'
if check_dtype:
assert_attr_equal('dtype', left, right, obj='ExtensionArray')
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(left_na, right_na, obj='ExtensionArray NA mask')
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(left_valid, right_valid, obj='ExtensionArray')
else:
_testing.assert_almost_equal(left_valid, right_valid,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj='ExtensionArray')
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = '{len}, {left}'.format(len=len(left), left=left.index)
msg2 = '{len}, {right}'.format(len=len(right), right=right.index)
raise_assert_detail(obj, 'Series length are different', msg1, msg2)
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (is_categorical_dtype(left) and is_categorical_dtype(right) and
not check_categorical):
pass
else:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj),)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if (is_datetimelike_v_numeric(left, right) or
is_datetimelike_v_object(left, right) or
needs_i8_conversion(left) or
needs_i8_conversion(right)):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = ('[datetimelike_compat=True] {left} is not equal to '
'{right}.').format(left=left.values, right=right.values)
raise AssertionError(msg)
else:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype)
elif is_interval_dtype(left) or is_interval_dtype(right):
assert_interval_array_equal(left.array, right.array)
elif (is_extension_array_dtype(left) and not is_categorical_dtype(left) and
is_extension_array_dtype(right) and not is_categorical_dtype(right)):
return assert_extension_array_equal(left.array, right.array)
else:
_testing.assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj))
# metadata comparison
if check_names:
assert_attr_equal('name', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
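# Illustrative example (not part of the upstream module), mirroring the style
# of the assert_frame_equal docstring below:
#
# >>> assert_series_equal(pd.Series([1, 2]), pd.Series([1, 2]))
# >>> assert_series_equal(pd.Series([1, 2]), pd.Series([1.0, 2.0]),
# ...                     check_dtype=False)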
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_column_type='equiv',
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj='DataFrame'):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool / string {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical, i.e.
* left.index.names == right.index.names
* left.columns.names == right.columns.names
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas.util.testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
AssertionError: Attributes are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
# ToDo: There are some tests using rhs is SparseDataFrame
# lhs is DataFrame. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(obj,
'DataFrame shape mismatch',
'{shape!r}'.format(shape=left.shape),
'{shape!r}'.format(shape=right.shape))
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
# column comparison
assert_index_equal(left.columns, right.columns, exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.columns'.format(obj=obj))
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(lblocks[dtype], rblocks[dtype],
check_dtype=check_dtype, obj='DataFrame.blocks')
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol, rcol, check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact, check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj='DataFrame.iloc[:, {idx}]'.format(idx=i))
def assert_panel_equal(left, right,
check_dtype=True,
check_panel_type=False,
check_less_precise=False,
check_names=False,
by_blocks=False,
obj='Panel'):
"""Check that left and right Panels are equal.
Parameters
----------
left : Panel (or nd)
right : Panel (or nd)
check_dtype : bool, default True
Whether to check the Panel dtype is identical.
check_panel_type : bool, default False
Whether to check the Panel class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_names : bool, default True
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
obj : str, default 'Panel'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
if check_panel_type:
assert_class_equal(left, right, obj=obj)
for axis in left._AXIS_ORDERS:
left_ind = getattr(left, axis)
right_ind = getattr(right, axis)
assert_index_equal(left_ind, right_ind, check_names=check_names)
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
array_equivalent(lblocks[dtype].values, rblocks[dtype].values)
else:
# can potentially be slow
for i, item in enumerate(left._get_axis(0)):
msg = "non-matching item (right) '{item}'".format(item=item)
assert item in right, msg
litem = left.iloc[i]
ritem = right.iloc[i]
assert_frame_equal(litem, ritem,
check_less_precise=check_less_precise,
check_names=check_names)
for i, item in enumerate(right._get_axis(0)):
msg = "non-matching item (left) '{item}'".format(item=item)
assert item in left, msg
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left : Index, Series, DataFrame, ExtensionArray, or np.ndarray
right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
**kwargs
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
else:
raise NotImplementedError(type(left))
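# Illustrative usage sketch (not part of the original module): ``assert_equal``
# dispatches on the type of ``left``, so the same call works across box types.
# A minimal example, assuming both sides compare equal:
#
#     assert_equal(pd.Index([1, 2]), pd.Index([1, 2]))
#     assert_equal(pd.Series([1.0, 2.0]), pd.Series([1.0, 2.0]))
#     assert_equal(pd.DataFrame({"a": [1, 2]}), pd.DataFrame({"a": [1, 2]}))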
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
def to_array(obj):
# temporary implementation until we get pd.array in place
if is_period_dtype(obj):
return period_array(obj)
elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(obj):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right, check_dtype=True, check_kind=True,
check_fill_value=True,
consolidate_block_indices=False):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
check_kind : bool, default True
        Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
"""
_check_isinstance(left, right, pd.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values,
check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not check_kind:
left_index = left.sp_index.to_block_index()
right_index = right.sp_index.to_block_index()
else:
left_index = left.sp_index
right_index = right.sp_index
if consolidate_block_indices and left.kind == 'block':
# we'll probably remove this hack...
left_index = left_index.to_int_index().to_block_index()
right_index = right_index.to_int_index().to_block_index()
if not left_index.equals(right_index):
raise_assert_detail('SparseArray.index', 'index are not equal',
left_index, right_index)
else:
        # indices are equal; nothing further to check
pass
if check_fill_value:
assert_attr_equal('fill_value', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values,
check_dtype=check_dtype)
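# Illustrative usage sketch (not part of the original module), assuming the
# SparseArray constructor accepts a dense sequence:
#
#     arr = pd.SparseArray([0, 0, 1, 2])
#     assert_sp_array_equal(arr, arr)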
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
check_series_type=True, check_names=True,
check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
obj='SparseSeries'):
"""Check that the left and right SparseSeries are equal.
Parameters
----------
left : SparseSeries
right : SparseSeries
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
check_series_type : bool, default True
Whether to check the SparseSeries class is identical.
check_names : bool, default True
Whether to check the SparseSeries name attribute.
check_kind : bool, default True
        Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
obj : str, default 'SparseSeries'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseSeries)
if check_series_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_sp_array_equal(left.values, right.values,
check_kind=check_kind,
check_fill_value=check_fill_value,
consolidate_block_indices=consolidate_block_indices)
if check_names:
assert_attr_equal('name', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(np.asarray(left.values),
np.asarray(right.values))
def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
check_frame_type=True, check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
obj='SparseDataFrame'):
"""Check that the left and right SparseDataFrame are equal.
Parameters
----------
left : SparseDataFrame
right : SparseDataFrame
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
SparseSeries SparseIndex objects must be exactly the same,
otherwise just compare dense representations.
check_frame_type : bool, default True
Whether to check the SparseDataFrame class is identical.
check_kind : bool, default True
        Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
obj : str, default 'SparseDataFrame'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseDataFrame)
if check_frame_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_index_equal(left.columns, right.columns,
obj='{obj}.columns'.format(obj=obj))
if check_fill_value:
assert_attr_equal('default_fill_value', left, right, obj=obj)
for col, series in compat.iteritems(left):
assert (col in right)
# trade-off?
if exact_indices:
assert_sp_series_equal(
series, right[col],
check_dtype=check_dtype,
check_kind=check_kind,
check_fill_value=check_fill_value,
consolidate_block_indices=consolidate_block_indices
)
else:
assert_series_equal(series.to_dense(), right[col].to_dense(),
check_dtype=check_dtype)
# do I care?
# assert(left.default_kind == right.default_kind)
for col in right:
assert (col in left)
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '{key!r}'".format(key=k)
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = ("Expected object {obj1!r} and object {obj2!r} to be "
"different objects, but they were the same object."
).format(obj1=type(elem1), obj2=type(elem2))
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
def getArangeMat():
return np.arange(N * K).reshape((N, K))
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return | Index([False, True] + [False] * (k - 2), name=name) | pandas.Index |
"""analysis.py: Collection of classes for performing analysis on Corpus"""
# <NAME> (<EMAIL>)
# DS 5001
# 6 May 2021
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.core.algorithms import mode
import plotly.express as px
import scipy.cluster.hierarchy as sch
from gensim.models import word2vec
from scipy.linalg import eigh
from scipy.sparse.construct import random
from scipy.spatial.distance import pdist
from sklearn.decomposition import LatentDirichletAllocation as LDA
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.manifold import TSNE
from eta_modules.preprocessing import Corpus
class HierarchicalClusterAnalysis:
def __init__(self, max_features=5000, tfidf_method='max', OHCO_level=['work_id', 'chapter_id']):
self.max_features = max_features
self.tfidf_method = tfidf_method
self.OHCO_level = OHCO_level
self.vocab = None
self.metrics = None
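    # Illustrative usage sketch (hypothetical corpus object, not in the original file):
    #
    #     hca = HierarchicalClusterAnalysis(max_features=2000)
    #     hca.fit(corpus, metrics=['cosine'])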
def fit(self, corpus: Corpus, metrics=['cosine']):
# Copy corpus over to prevent undesired modifications
self.corpus = corpus.copy()
self.metrics = metrics
self.bow = self.corpus.bow
self.vocab = self.corpus.vocab
# If original TFIDF bag or method doesn't match, recalculate
# Otherwise, already have good TFIDF values to use
if (corpus.tfidf_OHCO != self.OHCO_level) or (f"tfidf_{self.tfidf_method}_sum" not in self.vocab):
self.corpus.compute_tfidf(OHCO_level=self.OHCO_level, methods=[self.tfidf_method])
# Reassign objects just to be safe
self.bow = self.corpus.bow
self.vocab = self.corpus.vocab
self.vocab['dfidf'] = self.vocab['df'] * self.vocab['idf']
# Filter VOCAB to `max_features` words using DF-IDF; use that to filter BOW TFIDF values
self.vocab = self.vocab.sort_values('dfidf', ascending=False).head(self.max_features)
self.tfidf = self.bow[f"tfidf_{self.tfidf_method}"].unstack(fill_value=0)
self.tfidf = self.tfidf[self.vocab.index]
# Collapse tfidf to book level means
self.tfidf = self.tfidf.groupby(['work_id']).mean()
## Create DataFrame to hold pairwise distances
# Multindex -- combinations of indices; e.g., (0, 1), (0, 2), etc.
work_ids = self.corpus.lib.index.tolist()
self.pdists = pd.DataFrame(index= | pd.MultiIndex.from_product([work_ids, work_ids]) | pandas.MultiIndex.from_product |
import pandas as pd
import numpy as np
#import sys
#sys.path.append("F:\3RDSEM\DM\Assignment_1\DM-Project\Assignment-1\Code")
from Utility import getDataFrame
fileNames = ["./../DataFolder/CGMSeriesLunchPat1.csv", "./../DataFolder/CGMSeriesLunchPat2.csv",
"./../DataFolder/CGMSeriesLunchPat3.csv", "./../DataFolder/CGMSeriesLunchPat4.csv",
"./../DataFolder/CGMSeriesLunchPat5.csv"]
def Feature_Extraction(df):
feature_1_df = df.groupby(np.arange(len(df.columns))//6, axis=1).mean()
feature_1_df.columns=['mean_'+str(i+1) for i, column in enumerate(feature_1_df.columns)]
for i, columns in enumerate(feature_1_df.columns):
feature_1_df['shifted_mean' + str(i+1)] = feature_1_df['mean_'+str(i+1)].shift(1)
#==================================================================
local_maxima = []
for i in range(0,len(df.index)):
indices = []
for j in range(0, len(df.columns)-1):
if((df.iloc[i][df.columns[j]] >= df.iloc[i][df.columns[j-1]] and
df.iloc[i][df.columns[j]] > df.iloc[i][df.columns[j+1]]) or
(df.iloc[i][df.columns[j]] > df.iloc[i][df.columns[j-1]] and
df.iloc[i][df.columns[j]] >= df.iloc[i][df.columns[j+1]])
):
indices.append(j)
local_maxima.append(indices)
local_minima = []
for i in range(0,len(df.index)):
indices = []
for j in range(0, len(df.columns)-1):
if((df.iloc[i][df.columns[j]] <= df.iloc[i][df.columns[j-1]] and
df.iloc[i][df.columns[j]] < df.iloc[i][df.columns[j+1]]) or
(df.iloc[i][df.columns[j]] < df.iloc[i][df.columns[j-1]] and
df.iloc[i][df.columns[j]] <= df.iloc[i][df.columns[j+1]])):
indices.append(j)
local_minima.append(indices)
#==================================================================
feature_2 = []
for i,maxima in enumerate(local_maxima):
global_maxima = 0
temp_list = []
for val in maxima:
temp_list.extend(df.iloc[i][:].tolist())
global_maxima = max(df.iloc[i][val], global_maxima)
feature_2.append([global_maxima, (temp_list.index(global_maxima)) // 6 + 1 if temp_list != [] else -1])
feature_2_df = pd.DataFrame(feature_2)
feature_2_df.columns = ['Global_Maximum', 'Global_Maximum_Interval']
#==================================================================
segments = [(i) * 6 for i in range(len(df.columns)//6 + 1)]
feature_3 = []
for i, (maxima, minima) in enumerate(zip(local_maxima, local_minima)):
count_local_maxima_interval = [0] * (len(df.columns)//6)
count_local_minima_interval = [0] * (len(df.columns)//6)
for val in maxima:
for seg in range(1, len(segments)):
if(val > segments[seg-1] and val <= segments[seg]):
count_local_maxima_interval[seg-1] += 1
for val in minima:
for seg in range(1, len(segments)):
if(val > segments[seg-1] and val <= segments[seg]):
count_local_minima_interval[seg-1] += 1
feature_3.append(count_local_maxima_interval + count_local_minima_interval)
feature_3_df = pd.DataFrame(feature_3)
feature_3_df.columns = ["Count_Local_Max_" + str(i) for i in range(1, len(segments))] + \
["Count_Local_Min_" + str(i) for i in range(1, len(segments))]
#==================================================================
segments = [(i) * 6 for i in range(len(df.columns)//6 + 1)]
feature_4 = []
interval = -1
for row, (maxima) in enumerate(local_maxima):
diff_interval = [0] * (len(df.columns)//6)
for val in maxima:
for seg in range(1, len(segments)):
if(val > segments[seg-1] and val <= segments[seg]):
interval = seg-1
break
local_maxima_interval = df.iloc[row][val]
prev = val - 1
prev_local_minimum = 1000
while(prev > segments[interval]):
prev_local_minimum = min(df.iloc[row][prev], prev_local_minimum)
prev -= 1
prev_local_minimum = min(df.iloc[row][prev], prev_local_minimum)
prev_local_minimum %= 1000
diff = local_maxima_interval - prev_local_minimum
diff_interval[interval] = diff
feature_4.append(diff_interval)
feature_4_df = pd.DataFrame(feature_4)
feature_4_df.columns = ["Diff_Local_Max_Min_Interval_" + str(i) for i in range(1, len(segments))]
#==================================================================
segments = [(i) * 6 for i in range(len(df.columns) // 6 + 1)]
feature_5 = {}
for i in range(len(segments) - 1):
df1 = df.iloc[:, segments[i]:segments[i + 1]]
diff1 = df1[df1.columns[::-1]].diff(axis=1)
if 'cgmSeries_30' in diff1.columns:
diff1['cgmSeries_30'].fillna(0, inplace=True)
sum1 = diff1.sum(axis=1)
feature_5[i] = sum1
feature_5_df = pd.DataFrame.from_dict(feature_5)
feature_5_df.columns = ['CGM_Displacement_Interval_' + str(i) for i in range(1, len(segments))]
#==================================================================
segments = [(i) * 6 for i in range(len(df.columns) // 6 + 1)]
feature_6 = {}
for i in range(len(segments) - 1):
df1 = df.iloc[:, segments[i]:segments[i + 1]]
diff1 = df1[df1.columns[::-1]].diff(axis=1)
if 'cgmSeries_30' in diff1.columns:
diff1['cgmSeries_30'].fillna(0, inplace=True)
mean1 = diff1.mean(axis=1)
feature_6[i] = mean1
feature_6_df = pd.DataFrame.from_dict(feature_6)
feature_6_df.columns = ['CGM_Velocity_Interval_' + str(i) for i in range(1, len(segments))]
#==================================================================
final_df = | pd.concat([df, feature_1_df, feature_2_df, feature_3_df, feature_4_df, feature_5_df, feature_6_df], axis=1) | pandas.concat |
"""
Makes a figure providing an overview of our dataset with a focus on lineages
laid out as follows:
a - Patient metadata
b - Donut plot of our lineage distributions vs the world
c - Timeline of patient sampling vs lineages identified
d - Choropleth of lineages by region
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from typing import Dict
import logging
import matplotlib
from matplotlib.lines import Line2D
from mpl_toolkits.axes_grid1.inset_locator import (inset_axes, InsetPosition,
                                                    mark_inset)
from covid_bronx import lineage_colors_dict, lineage_colors_dict_rgb
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
savefile = "figures/figure1_v2"
months = {
1: 'Jan',
2: 'Feb',
3: 'Mar',
4: 'Apr',
5: 'May',
6: 'Jun',
7: 'Jul',
8: 'Aug',
9: 'Sep',
10: 'Oct',
11: 'Nov',
12: 'Dec',
}
from covid_bronx.metadata import preprocess_metadata
from matplotlib.colors import colorConverter
# a) Timeline of lineages
logger.info("Plotting 1a")
timeline = pd.read_csv("data/external/global_lineages.csv")
from covid_bronx.metadata import get_metadata
metadata = get_metadata()
index = pd.date_range(metadata['collection_date'].min(), metadata['collection_date'].max())
metadata.index = metadata['name']
df = | pd.read_csv("data/external/pangolin2.csv") | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pysam
import os
import pandas as pd
import numpy as np
import time
import argparse
import sys
from multiprocessing import Pool
# In[ ]:
# ##arguments for testing
# bam_file_path = '/fh/scratch/delete90/ha_g/realigned_bams/cfDNA_MBC_ULP_hg38/realign_bam_paired_snakemake-master/results/MBC_1041_1_ULP/MBC_1041_1_ULP_recalibrated.bam'
# bam_file_name = 'MBC_1041_1_ULP'
# mapable_path = '../../downloads/genome/repeat_masker.mapable.k50.Umap.hg38.bedGraph'
# ref_seq_path = '/fh/fast/ha_g/grp/reference/GRCh38/GRCh38.fa'
# chrom_sizes_path = '/fh/fast/ha_g/grp/reference/GRCh38/hg38.standard.chrom.sizes'
# out_dir = './tmp/'
# map_q = 20
# size_range = [15,500]
# CPU = 4
# In[ ]:
parser = argparse.ArgumentParser()
parser.add_argument('--bam_file', help='sample_bam_file', required=True)
parser.add_argument('--bam_file_name', help='sample name (does not need to match actual file name)', required=True)
parser.add_argument('--mapable_regions', help='highly mapable regions to be used in GC correction, bedGraph or bed format', required=True)
parser.add_argument('--ref_seq',help='reference sequence (fasta format)',required=True)
parser.add_argument('--chrom_sizes',help='path to chromosome sizes for the reference seq',required=True)
parser.add_argument('--out_dir',help='folder for GC bias results',required=True)
parser.add_argument('--map_q',help='minimum mapping quality for reads to be considered',type=int,required=True)
parser.add_argument('--size_range',help='range of read sizes to be included',nargs=2, type=int, required=True)
parser.add_argument('--CPU',help='number of CPU for parallelizing', type=int, required=True)
args = parser.parse_args()
bam_file_path = args.bam_file
bam_file_name = args.bam_file_name
mapable_path=args.mapable_regions
ref_seq_path = args.ref_seq
chrom_sizes_path = args.chrom_sizes
out_dir = args.out_dir
map_q = args.map_q
size_range = args.size_range
CPU = args.CPU
# In[ ]:
print('arguments provided:')
print('\tbam_file_path = "'+bam_file_path+'"')
print('\tbam_file_name = "'+bam_file_name+'"')
print('\tmapable_regions = "'+mapable_path+'"')
print('\tref_seq_path = "'+ref_seq_path+'"')
print('\tchrom_sizes_path = "'+chrom_sizes_path+'"')
print('\tout_dir = "'+out_dir+'"')
print('\tmap_q = '+str(map_q))
print('\tsize_range = '+str(size_range))
print('\tCPU = '+str(CPU))
# In[ ]:
mapable_name = mapable_path.rsplit('/',1)[1].rsplit('.',1)[0]
out_file = out_dir +'/'+mapable_name+'/GC_counts/'+ bam_file_name+'.GC_counts.txt'
print('out_file',out_file)
# In[ ]:
#create a directory for the GC data
if not os.path.exists(out_dir +'/'+mapable_name):
os.mkdir(out_dir +'/'+mapable_name)
if not os.path.exists(out_dir +'/'+mapable_name+'/GC_counts/'):
os.mkdir(out_dir +'/'+mapable_name+'/GC_counts/')
# In[ ]:
#import filter
mapable_intervals = pd.read_csv(mapable_path, sep='\t', header=None)
#remove non standard chromosomes and X and Y
chroms = ['chr'+str(m) for m in range(1,23)]
mapable_intervals = mapable_intervals[mapable_intervals[0].isin(chroms)]
print('chroms:', chroms)
print('number_of_intervals:',len(mapable_intervals))
sys.stdout.flush()
# In[ ]:
def collect_reads(sublist):
#create a dict for holding the frequency of each read length and GC content
GC_dict = {}
for length in range(size_range[0],size_range[1]+1):
GC_dict[length]={}
for num_GC in range(0,length+1):
GC_dict[length][num_GC]=0
#import the bam file
#this needs to be done within the loop otherwise it gives a truncated file warning
bam_file = pysam.AlignmentFile(bam_file_path, "rb")
print('sublist intervals:',len(sublist))
#this might also need to be in the loop
#import the ref_seq
ref_seq=pysam.FastaFile(ref_seq_path)
for i in range(len(sublist)):
chrom = sublist.iloc[i][0]
start = sublist.iloc[i][1]
end = sublist.iloc[i][2]
if i%5000==0:
print('interval',i,':',chrom,start,end,'seconds:',np.round(time.time()-start_time))
sys.stdout.flush()
        #fetch any read that overlaps the interval (don't need to extend the interval because the fetch function does this automatically)
fetched = bam_file.fetch(chrom,start,end)
for read in fetched:
#use both fw (positive template length) and rv (negative template length) reads
if (read.is_reverse==False and read.template_length>=size_range[0] and read.template_length<=size_range[1]) or (read.is_reverse==True and -read.template_length>=size_range[0] and -read.template_length<=size_range[1]):
#qc filters, some longer fragments are considered 'improper pairs' but I would like to keep these
if read.is_paired==True and read.mapping_quality>=map_q and read.is_duplicate==False and read.is_qcfail==False:
if read.is_reverse==False:
read_start = read.reference_start
read_end = read.reference_start+read.template_length
elif read.is_reverse==True:
read_end = read.reference_start + read.reference_length
read_start = read_end + read.template_length
fragment_seq = ref_seq.fetch(read.reference_name,read_start,read_end)
#tally up the GC content
fragment_seq=fragment_seq.replace('g','G').replace('c','C').replace('a','A').replace('t','T').replace('n','N')
# #################
# ##logic check####
# #################
# if read.is_reverse==False:
# if fragment_seq[0:read.reference_length]==read.query_sequence and len(fragment_seq)==read.template_length:
# print('fw match',read.reference_length)
# else:
# print(fragment_seq[0:read.reference_length],read.reference_length,'fw')
# print(read.query_sequence,len(read.query_sequence),'fw')
# print(len(fragment_seq),read.template_length)
# print('\n')
# elif read.is_reverse==True:
# if fragment_seq[-read.reference_length:]==read.query_sequence and len(fragment_seq)==-read.template_length:
# print('rv match',read.reference_length)
# else:
# print(fragment_seq[-read.reference_length:],read.reference_length,'rv')
# print(read.query_sequence,len(read.query_sequence),'rv')
# print(len(fragment_seq),read.template_length)
# print('\n')
# #################
#split and convert to numpy array
fragment_seq = np.array(list(fragment_seq))
#replace with values
fragment_seq[(fragment_seq=='G') | (fragment_seq=='C')]=1
fragment_seq[(fragment_seq=='A') | (fragment_seq=='T')]=0
fragment_seq[(fragment_seq=='N')]=np.random.randint(2) #choose a random 0 or 1 for N (so that you always get an integer) #should be very rare if the filter is done right
fragment_seq = fragment_seq.astype(int)
num_GC = int(fragment_seq.sum())
GC_dict[abs(read.template_length)][num_GC]+=1
print('done')
return(GC_dict)
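# Illustrative note (not in the original script): GC_dict is a nested dict of
# fragment_length -> {GC_count: frequency}. A hypothetical single update looks like:
#
#     GC_dict[167][80] += 1   # one 167 bp fragment containing 80 G/C bases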
# In[ ]:
start_time = time.time()
p = Pool(processes=CPU) #use the available CPU
sublists = np.array_split(mapable_intervals,CPU) #split the list into sublists, one per CPU
GC_dict_list = p.map(collect_reads, sublists, 1)
# In[ ]:
all_GC_df = | pd.DataFrame() | pandas.DataFrame |
import pytest
import numpy as np
import pandas as pd
import databricks.koalas as ks
from pandas.testing import assert_frame_equal
from gators.feature_generation.polynomial_features import PolynomialFeatures
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture
def data_inter():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=True, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2.],
[3., 4., 5., 12., 15., 20.],
[6., 7., 8., 42., 48., 56.]]),
columns=['A', 'B', 'C', 'A__x__B', 'A__x__C', 'B__x__C']
)
return obj, X, X_expected
@pytest.fixture
def data_int16_inter():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.int16)
obj = PolynomialFeatures(
interaction_only=True, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2.],
[3., 4., 5., 12., 15., 20.],
[6., 7., 8., 42., 48., 56.]]),
columns=['A', 'B', 'C', 'A__x__B', 'A__x__C', 'B__x__C']
).astype(np.int16)
return obj, X, X_expected
@ pytest.fixture
def data_all():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float32)
obj = PolynomialFeatures(
interaction_only=False, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 0., 1., 2., 4.],
[3., 4., 5., 9., 12., 15., 16., 20., 25.],
[6., 7., 8., 36., 42., 48., 49., 56., 64.]]),
columns=['A', 'B', 'C', 'A__x__A', 'A__x__B',
'A__x__C', 'B__x__B', 'B__x__C', 'C__x__C']
).astype(np.float32)
return obj, X, X_expected
@ pytest.fixture
def data_degree():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=False, degree=3, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 0., 1., 2., 4., 0., 0.,
0., 0., 0., 0., 1., 2., 4., 8.],
[3., 4., 5., 9., 12., 15., 16., 20., 25., 27., 36.,
45., 48., 60., 75., 64., 80., 100., 125.],
[6., 7., 8., 36., 42., 48., 49., 56., 64., 216., 252.,
288., 294., 336., 384., 343., 392., 448., 512.]]),
columns=['A', 'B', 'C', 'A__x__A', 'A__x__B', 'A__x__C', 'B__x__B', 'B__x__C', 'C__x__C',
'A__x__A__x__A', 'A__x__A__x__B', 'A__x__A__x__C', 'A__x__B__x__B', 'A__x__B__x__C',
'A__x__C__x__C', 'B__x__B__x__B', 'B__x__B__x__C', 'B__x__C__x__C', 'C__x__C__x__C']
)
return obj, X, X_expected
@ pytest.fixture
def data_inter_degree():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=True, degree=3, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2., 0.],
[3., 4., 5., 12., 15., 20., 60.],
[6., 7., 8., 42., 48., 56., 336.]]),
columns=['A', 'B', 'C', 'A__x__B',
'A__x__C', 'B__x__C', 'A__x__B__x__C']
)
return obj, X, X_expected
@ pytest.fixture
def data_subset():
X = pd.DataFrame(np.arange(12).reshape(
3, 4), columns=list('ABCD'), dtype=np.float64)
obj = PolynomialFeatures(
columns=['A', 'B', 'C'], interaction_only=True, degree=2).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 3., 0., 0., 2.],
[4., 5., 6., 7., 20., 24., 30.],
[8., 9., 10., 11., 72., 80., 90.]]),
columns=['A', 'B', 'C', 'D', 'A__x__B', 'A__x__C', 'B__x__C']
)
return obj, X, X_expected
@pytest.fixture
def data_inter_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=True, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2.],
[3., 4., 5., 12., 15., 20.],
[6., 7., 8., 42., 48., 56.]]),
columns=['A', 'B', 'C', 'A__x__B', 'A__x__C', 'B__x__C']
)
return obj, X, X_expected
@pytest.fixture
def data_int16_inter_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.int16)
obj = PolynomialFeatures(
interaction_only=True, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2.],
[3., 4., 5., 12., 15., 20.],
[6., 7., 8., 42., 48., 56.]]),
columns=['A', 'B', 'C', 'A__x__B', 'A__x__C', 'B__x__C']
).astype(np.int16)
return obj, X, X_expected
@ pytest.fixture
def data_all_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float32)
obj = PolynomialFeatures(
interaction_only=False, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 0., 1., 2., 4.],
[3., 4., 5., 9., 12., 15., 16., 20., 25.],
[6., 7., 8., 36., 42., 48., 49., 56., 64.]]),
columns=['A', 'B', 'C', 'A__x__A', 'A__x__B',
'A__x__C', 'B__x__B', 'B__x__C', 'C__x__C']
).astype(np.float32)
return obj, X, X_expected
@ pytest.fixture
def data_degree_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=False, degree=3, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 0., 1., 2., 4., 0., 0.,
0., 0., 0., 0., 1., 2., 4., 8.],
[3., 4., 5., 9., 12., 15., 16., 20., 25., 27., 36.,
45., 48., 60., 75., 64., 80., 100., 125.],
[6., 7., 8., 36., 42., 48., 49., 56., 64., 216., 252.,
288., 294., 336., 384., 343., 392., 448., 512.]]),
columns=['A', 'B', 'C', 'A__x__A', 'A__x__B', 'A__x__C', 'B__x__B', 'B__x__C', 'C__x__C',
'A__x__A__x__A', 'A__x__A__x__B', 'A__x__A__x__C', 'A__x__B__x__B', 'A__x__B__x__C',
'A__x__C__x__C', 'B__x__B__x__B', 'B__x__B__x__C', 'B__x__C__x__C', 'C__x__C__x__C']
)
return obj, X, X_expected
@ pytest.fixture
def data_inter_degree_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=True, degree=3, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2., 0.],
[3., 4., 5., 12., 15., 20., 60.],
[6., 7., 8., 42., 48., 56., 336.]]),
columns=['A', 'B', 'C', 'A__x__B',
'A__x__C', 'B__x__C', 'A__x__B__x__C']
)
return obj, X, X_expected
@ pytest.fixture
def data_subset_ks():
X = ks.DataFrame(np.arange(12).reshape(
3, 4), columns=list('ABCD'), dtype=np.float64)
obj = PolynomialFeatures(
columns=['A', 'B', 'C'], interaction_only=True, degree=2).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 3., 0., 0., 2.],
[4., 5., 6., 7., 20., 24., 30.],
[8., 9., 10., 11., 72., 80., 90.]]),
columns=['A', 'B', 'C', 'D', 'A__x__B', 'A__x__C', 'B__x__C']
)
return obj, X, X_expected
def test_inter_pd(data_inter):
obj, X, X_expected = data_inter
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_inter_ks(data_inter_ks):
obj, X, X_expected = data_inter_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_inter_pd_np(data_inter):
obj, X, X_expected = data_inter
X_new = obj.transform_numpy(X.to_numpy())
assert np.allclose(X_new, X_expected)
@pytest.mark.koalas
def test_inter_ks_np(data_inter_ks):
obj, X, X_expected = data_inter_ks
X_new = obj.transform_numpy(X.to_numpy())
assert np.allclose(X_new, X_expected)
def test_int16_inter_pd(data_int16_inter):
obj, X, X_expected = data_int16_inter
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_int16_inter_ks(data_int16_inter_ks):
obj, X, X_expected = data_int16_inter_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_int16_inter_pd_np(data_int16_inter):
obj, X, X_expected = data_int16_inter
X_new = obj.transform_numpy(X.to_numpy())
assert np.allclose(X_new, X_expected)
@pytest.mark.koalas
def test_int16_inter_ks_np(data_int16_inter_ks):
obj, X, X_expected = data_int16_inter_ks
X_new = obj.transform_numpy(X.to_numpy())
assert np.allclose(X_new, X_expected)
def test_all_pd(data_all):
obj, X, X_expected = data_all
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_all_ks(data_all_ks):
obj, X, X_expected = data_all_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_all_pd_np(data_all):
obj, X, X_expected = data_all
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_all_ks_np(data_all_ks):
obj, X, X_expected = data_all_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
def test_degree_pd(data_degree):
obj, X, X_expected = data_degree
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_degree_ks(data_degree_ks):
obj, X, X_expected = data_degree_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_degree_pd_np(data_degree):
obj, X, X_expected = data_degree
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
| assert_frame_equal(X_new, X_expected) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 14 10:52:33 2022
COVID-19 DEATHS IN US - COUNTY
Author: <NAME> (<EMAIL>)
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
muertes_us=pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv')
muertes_us=muertes_us.drop(labels=['UID','iso2','iso3','code3','FIPS','Admin2','Country_Region','Lat','Long_'],axis=1)
muertes_us=muertes_us.groupby(['Combined_Key']).sum()
poblacion=muertes_us['Population']
muertes_us=muertes_us.drop(labels=['Population'],axis=1)
muertes_us=muertes_us.diff(periods=1,axis=1)
muertes=muertes_us.sum(axis=1)
muertes=pd.concat([muertes,poblacion],axis=1)
#muertes=muertes.rename(columns={'0':'Muertes','Population':'Poblacion'})
muertes_us=muertes_us.transpose()
muertes_us.index= | pd.to_datetime(muertes_us.index,dayfirst=False,yearfirst=False) | pandas.to_datetime |
import time
import numpy as np
import pandas as pd
import logging
from scipy.sparse import issparse, csr_matrix
from scipy.stats import chi2
from sklearn.neighbors import NearestNeighbors
from anndata import AnnData
from joblib import effective_n_jobs
from typing import List, Tuple
from pegasus.tools import update_rep, X_from_rep, knn_is_cached
from .. import decorators as pg_deco
logger = logging.getLogger("pegasus")
def calculate_nearest_neighbors(
X: np.array,
K: int = 100,
n_jobs: int = -1,
method: str = "hnsw",
M: int = 20,
efC: int = 200,
efS: int = 200,
random_state: int = 0,
full_speed: int = False,
):
"""Calculate nearest neighbors
X is the sample by feature matrix
Return K -1 neighbors, the first one is the point itself and thus omitted.
TODO: Documentation
"""
nsample = X.shape[0]
if nsample <= 1000:
method = "sklearn"
if nsample < K:
logger.warning(
"Warning: in calculate_nearest_neighbors, number of samples = {} < K = {}!\n Set K to {}.".format(
nsample, K, nsample
)
)
K = nsample
n_jobs = effective_n_jobs(n_jobs)
if method == "hnsw":
import hnswlib
assert not issparse(X)
# Build hnsw index
knn_index = hnswlib.Index(space="l2", dim=X.shape[1])
knn_index.init_index(
max_elements=nsample, ef_construction=efC, M=M, random_seed=random_state
)
knn_index.set_num_threads(n_jobs if full_speed else 1)
knn_index.add_items(X)
# KNN query
knn_index.set_ef(efS)
knn_index.set_num_threads(n_jobs)
indices, distances = knn_index.knn_query(X, k=K)
# eliminate the first neighbor, which is the node itself
for i in range(nsample):
if indices[i, 0] != i:
indices[i, 1:] = indices[i, 0:-1]
distances[i, 1:] = distances[i, 0:-1]
indices = indices[:, 1:].astype(int)
distances = np.sqrt(distances[:, 1:])
else:
assert method == "sklearn"
knn = NearestNeighbors(
n_neighbors=K - 1, n_jobs=n_jobs
) # eliminate the first neighbor, which is the node itself
knn.fit(X)
distances, indices = knn.kneighbors()
return indices, distances
@pg_deco.TimeLogger()
def get_neighbors(
data: AnnData,
K: int = 100,
rep: str = "pca",
n_jobs: int = -1,
random_state: int = 0,
full_speed: bool = False,
) -> Tuple[List[int], List[float]]:
"""Find K nearest neighbors for each data point and return the indices and distances arrays.
Parameters
----------
data : `AnnData`
An AnnData object.
K : `int`, optional (default: 100)
Number of neighbors, including the data point itself.
rep : `str`, optional (default: 'pca')
Representation used to calculate kNN. If `None` use data.X
n_jobs : `int`, optional (default: -1)
Number of threads to use. -1 refers to all available threads
random_state: `int`, optional (default: 0)
Random seed for random number generator.
full_speed: `bool`, optional (default: False)
If full_speed, use multiple threads in constructing hnsw index. However, the kNN results are not reproducible. If not full_speed, use only one thread to make sure results are reproducible.
Returns
-------
kNN indices and distances arrays.
Examples
--------
>>> indices, distances = tools.get_neighbors(adata)
"""
rep = update_rep(rep)
indices_key = rep + "_knn_indices"
distances_key = rep + "_knn_distances"
if knn_is_cached(data, indices_key, distances_key, K):
indices = data.uns[indices_key]
distances = data.uns[distances_key]
logger.info("Found cached kNN results, no calculation is required.")
else:
indices, distances = calculate_nearest_neighbors(
X_from_rep(data, rep),
K=K,
n_jobs=effective_n_jobs(n_jobs),
random_state=random_state,
full_speed=full_speed,
)
data.uns[indices_key] = indices
data.uns[distances_key] = distances
return indices, distances
def get_symmetric_matrix(csr_mat: "csr_matrix") -> "csr_matrix":
tp_mat = csr_mat.transpose().tocsr()
sym_mat = csr_mat + tp_mat
sym_mat.sort_indices()
idx_mat = (csr_mat != 0).astype(int) + (tp_mat != 0).astype(int)
idx_mat.sort_indices()
# idx_mat = csr_mat.indices & tp_mat.indices
# idx_mat.sort()
idx = idx_mat.data == 2
sym_mat.data[idx] /= 2.0
return sym_mat
# We should not modify distances array!
@pg_deco.TimeLogger()
def calculate_affinity_matrix(
indices: List[int], distances: List[float]
) -> "csr_matrix":
nsample = indices.shape[0]
K = indices.shape[1]
# calculate sigma, important to use median here!
sigmas = np.median(distances, axis=1)
sigmas_sq = np.square(sigmas)
# calculate local-scaled kernel
normed_dist = np.zeros((nsample, K), dtype=float)
for i in range(nsample):
numers = 2.0 * sigmas[i] * sigmas[indices[i, :]]
denoms = sigmas_sq[i] + sigmas_sq[indices[i, :]]
normed_dist[i, :] = np.sqrt(numers / denoms) * np.exp(
-np.square(distances[i, :]) / denoms
)
W = csr_matrix(
(normed_dist.ravel(), (np.repeat(range(nsample), K), indices.ravel())),
shape=(nsample, nsample),
)
W = get_symmetric_matrix(W)
# density normalization
z = W.sum(axis=1).A1
W = W.tocoo()
W.data /= z[W.row]
W.data /= z[W.col]
W = W.tocsr()
W.eliminate_zeros()
return W
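# Illustrative sketch (not from the pegasus docs): the two helpers above are
# typically chained, where X is an (n_samples, n_features) embedding:
#
#     indices, distances = calculate_nearest_neighbors(X, K=100)
#     W = calculate_affinity_matrix(indices, distances)  # symmetric affinity csr_matrix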
def neighbors(
data: AnnData,
K: int = 100,
rep: "str" = "pca",
n_jobs: int = -1,
random_state: int = 0,
full_speed: bool = False,
) -> None:
"""Compute k nearest neighbors and affinity matrix, which will be used for diffmap and graph-based community detection algorithms.
The kNN calculation uses hnswlib introduced by [Malkov16]_.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix with rows for cells and columns for genes.
K: ``int``, optional, default: ``100``
Number of neighbors, including the data point itself.
rep: ``str``, optional, default: ``"pca"``
Embedding representation used to calculate kNN. If ``None``, use ``data.X``; otherwise, keyword ``'X_' + rep`` must exist in ``data.obsm``.
n_jobs: ``int``, optional, default: ``-1``
Number of threads to use. If ``-1``, use all available threads.
random_state: ``int``, optional, default: ``0``
Random seed set for reproducing results.
full_speed: ``bool``, optional, default: ``False``
* If ``True``, use multiple threads in constructing ``hnsw`` index. However, the kNN results are not reproducible.
* Otherwise, use only one thread to make sure results are reproducible.
Returns
-------
``None``
Update ``data.uns``:
* ``data.uns[rep + "_knn_indices"]``: kNN index matrix. Row i is the index list of kNN of cell i (excluding itself), sorted from nearest to farthest.
        * ``data.uns[rep + "_knn_distances"]``: kNN distance matrix. Row i is the distance list of kNN of cell i (excluding itself), sorted from smallest to largest.
* ``data.uns["W_" + rep]``: kNN graph of the data in terms of affinity matrix.
Examples
--------
>>> pg.neighbors(adata)
"""
# calculate kNN
rep = update_rep(rep)
indices, distances = get_neighbors(
data,
K=K,
rep=rep,
n_jobs=n_jobs,
random_state=random_state,
full_speed=full_speed,
)
# calculate affinity matrix
W = calculate_affinity_matrix(indices[:, 0 : K - 1], distances[:, 0 : K - 1])
data.uns["W_" + rep] = W
def calc_kBET_for_one_chunk(knn_indices, attr_values, ideal_dist, K):
dof = ideal_dist.size - 1
ns = knn_indices.shape[0]
results = np.zeros((ns, 2))
for i in range(ns):
observed_counts = (
| pd.Series(attr_values[knn_indices[i, :]]) | pandas.Series |
import os
import string
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from common.utils import DATA_DIR
pos_tags_features = [
'noun',
'verb',
'adjective',
'adverb'
]
cos_sim_features = [
'cos_sim'
]
sentiment_features = [
'positive_count',
'negative_count'
]
word_related_features = [
'unique_lemma',
'unique_tokens'
]
tf_idf_features = [
'tf_idf'
]
boosting_params = {
"n_estimators": [20, 50, 100, 200, 300, 400],
"max_depth": [1, 2, 3, 5, 8, 10],
"learning_rate": [0.01, 0.03, 0.05]
}
rf_params = {
"n_estimators": [20, 50, 100, 200, 300, 400],
"max_depth": [1, 2, 3, 5],
"max_features": [1, None, "sqrt"]
}
svm_params = {
"loss": ['hinge', 'squared_hinge'],
"C": [0.5, 1.0, 10],
"max_iter": [10000]
}
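# Illustrative sketch (not in the original module): these grids are intended for
# a hyper-parameter search, e.g. with a hypothetical estimator choice:
#
#     from sklearn.model_selection import GridSearchCV
#     from sklearn.ensemble import RandomForestClassifier
#     search = GridSearchCV(RandomForestClassifier(), rf_params, cv=5)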
# boosting_params = {
# "n_estimators": [20],
# "max_depth": [1],
# "learning_rate": [0.01]
# }
# rf_params = {
# "n_estimators": [20],
# "max_depth": [1],
# "max_features": [1]
# }
# svm_params = {
# "loss": ['hinge'],
# "C": [0.5],
# "max_iter": [10000]
# }
def get_answers(removed_ids):
answers = pd.read_csv(os.path.join(DATA_DIR, 'all_data.csv'))
answers = answers[~answers['id'].isin(removed_ids)]
answers = answers.iloc[:, 2:]
# remove punctuation
for answer in answers:
answers[answer] = answers[answer].str.replace('[{}]'.format(string.punctuation), '')
return answers
def add_tfidf_feature_to_data(orig_df, columns, v):
X_td = v.fit_transform(columns)
tf_idf = pd.DataFrame(X_td.toarray(), columns=v.get_feature_names())
tf_idf = tf_idf.add_prefix('tf_idf-')
tf_idf = orig_df.reset_index(drop=True).join(tf_idf.reset_index(drop=True), rsuffix='_r')
return tf_idf
def add_tfidf_features(data, removed_ids):
answers = get_answers(removed_ids)
all_answers = answers['question 1']
v = TfidfVectorizer(lowercase=False)
for answer in answers.iloc[:, 1:]:
all_answers = all_answers + " " + answers[answer].fillna('')
data = add_tfidf_feature_to_data(data, all_answers, v)
return data
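# Illustrative usage sketch (hypothetical ids, not in the original code): this
# appends 'tf_idf-<token>' columns built from the concatenated answers.
#
#     data = add_tfidf_features(data, removed_ids=[3, 17])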
def get_features(df_input, patterns, ans=None):
if not ans:
ans = '[0-9]+'
features = pd.DataFrame()
for pattern in patterns:
if pattern != 'tf_idf':
regex = 'q_{}_{}.*'.format(ans, pattern)
else:
regex = pattern
cols = df_input.filter(regex=regex, axis=1)
features = | pd.concat([features, cols], axis=1) | pandas.concat |
"""
:noindex:
preprocess.py
====================================
Script to convert provider datasets individual record dictionaries
Data Sources:
`https://www.acaps.org/covid-19-government-measures-dataset <https://www.acaps.org/covid-19-government-measures-dataset>`_
`https://www.cdc.gov/mmwr/preview/mmwrhtml/00001590.htm <https://www.cdc.gov/mmwr/preview/mmwrhtml/00001590.htm>`_
`https://github.com/HopkinsIDD/hit-covid <https://github.com/HopkinsIDD/hit-covid>`_
`https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest_withnotes.csv <https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest_withnotes.csv>`_
`https://who.maps.arcgis.com/apps/opsdashboard/index.html#/ead3c6475654481ca51c248d52ab9c61 <https://who.maps.arcgis.com/apps/opsdashboard/index.html#/ead3c6475654481ca51c248d52ab9c61>`_
"""
import pandas as pd
import logging
from datetime import datetime as dt
from utils import create_dir
from preprocess import utils, check
# Create tmp directory
create_dir('tmp')
# Create preprocess directory in tmp
create_dir('tmp/preprocess')
# Setup logging to log into the preprocess directory
logging.basicConfig(filename='tmp/preprocess/preprocess.log',
level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(message)s')
print("Preprocessing Data...")
logging.info("Preprocessing Data...")
# HOTFIX: limit the number of records that will be ingested from each dataset. Used for development.
# Should be None for production.
record_limit = None
# Allows ingestion hashes to be saved - should be True in production, not in development
save_ingestion_hashes = False
# Define dataset sources
jh = "https://raw.githubusercontent.com/HopkinsIDD/hit-covid/master/data/hit-covid-longdata.csv"
cdc = "data/raw/CDC_ITF_210421.csv"
acaps = "data/raw/ACAPS_latest.csv"
oxcgrt = "data/raw/OxCGRT_latest_withnotes.csv"
euro = "data/raw/WHOEURO_PHSMdatabase_External_27.04.2021.xlsx"
check_dir = 'config/input_check'
# Load accepted column reference
column_config = {'JH_HIT': pd.read_csv(check_dir + '/columns/JH_HIT.csv'),
'CDC_ITF': pd.read_csv(check_dir + '/columns/CDC_ITF.csv'),
'ACAPS': pd.read_csv(check_dir + '/columns/ACAPS.csv'),
'OXCGRT': pd.read_csv(check_dir + '/columns/OXCGRT.csv'),
'EURO': pd.read_csv(check_dir + '/columns/EURO.csv')}
# Load accepted date format reference
date_config = pd.read_csv(check_dir + '/date_format/date_format.csv')
ingestion_hashes = {'JH_HIT': 'config/ingestion_hashes/JH_HIT.csv',
'CDC_ITF': 'config/ingestion_hashes/CDC_ITF.csv',
'OXCGRT': 'config/ingestion_hashes/OXCGRT.csv',
'ACAPS': 'config/ingestion_hashes/ACAPS.csv',
'EURO': 'config/ingestion_hashes/EURO.csv'}
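# Each provider below follows the same sketch (comment only, no additional code):
# read the raw file -> utils.filter_new_hashes(...) -> check.check_input(...)
# -> utils.df_to_records(...), logging the record count at the end.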
# Read EURO Data
"""
euro = pd.read_csv(euro,
parse_dates=["Start of measure", "End of measure"],
low_memory=False,
dtype={'Category': str,
'Subcategory': str,
'Measure': str})
"""
euro = pd.read_excel(euro, engine='openpyxl',
dtype={'Category': str,
'Subcategory': str,
'Measure': str},
sheet_name=0)
print(euro.columns)
# Convert EURO columns to str
euro.columns = euro.columns.astype("str")
# Remove records that have already been processed
euro = utils.filter_new_hashes(euro, ingestion_hashes['EURO'],
save_ingestion_hashes=save_ingestion_hashes)
# Check EURO Data
check.check_input(records=euro,
column_config=column_config['EURO'],
date_config=date_config,
dataset='EURO')
euro = euro.fillna('')
# Convert EURO data to list of record dicts
euro = utils.df_to_records(euro, "EURO")
# Log the number of EUROrecords
logging.info("EURO_RECORDS=%d" % len(euro))
# Read JH_HIT Data
jh = pd.read_csv(jh)
# Remove records that have already been processed
jh = utils.filter_new_hashes(jh, ingestion_hashes['JH_HIT'],
save_ingestion_hashes=save_ingestion_hashes)
# Check JH_HIT Data
check.check_input(records=jh,
column_config=column_config['JH_HIT'],
date_config=date_config,
dataset='JH_HIT')
# Convert JH_HIT data to list of record dicts
jh = utils.df_to_records(jh, "JH_HIT")
# Log the number of JH_HIT records
logging.info("JH_HIT_RECORDS=%d" % len(jh))
# Read CDC_ITF data
cdc = pd.read_csv(cdc,
dtype={'Date implemented or lifted': str,
'Date Entered': str},
parse_dates=['Date implemented or lifted', 'Date Entered'],
encoding='latin1')
cdc = cdc.rename(columns={"Unique Identifier": "Unique Identifier"})
# Remove records that have already been processed
cdc = utils.filter_new_hashes(cdc, ingestion_hashes['CDC_ITF'], save_ingestion_hashes=save_ingestion_hashes)
# Parse CDC_ITF date format
cdc["Date implemented or lifted"] = pd.to_datetime(cdc["Date implemented or lifted"],
format='%d/%m/%Y')
cdc["Date Entered"] = | pd.to_datetime(cdc["Date Entered"], format='%d/%m/%Y') | pandas.to_datetime |
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_validate, train_test_split
from classification.utils import print_params
from sklearn import metrics
from config import CHANNEL_NAMES
from data.utils import prepare_dfs
# @print_params
def predict(lab, ba, cols, estimator, metapkl, gs=None, evaluate_on_all=False,
channels=CHANNEL_NAMES, selector=None,
print_incorrectly_predicted=False, show_selected=False, seed=213,
eval_cv=True):
df, df_bef, df_aft = prepare_dfs('all')
if cols is None:
df = df.loc[(slice(None), slice(ba)), channels]
else:
df = df.loc[(slice(None), slice(ba)), (channels, (cols))]
X = df.dropna()
X.columns = X.columns.droplevel(0)
y = X.join(metapkl)[lab]
X = X[y.isin([-1, 1])]
y = y[y.isin([-1, 1])]
if selector is not None:
X = selector.fit_transform(X, y)
if show_selected and hasattr(selector, 'get_support'):
# print('(\'' + '\', \''.join(np.unique(df.columns.values[selector.get_support()])) + '\')')
print(list(df.columns.values[selector.get_support()]))
if gs is not None:
gs = gs.fit(X, y)
estimator = gs.best_estimator_
if eval_cv:
unique, counts = np.unique(y, return_counts=True)
print('Class distribution: ', dict(zip(unique, counts)))
scoring = ['accuracy', 'precision_weighted', 'recall_weighted', 'f1_weighted']
scores = cross_validate(estimator, X, y, cv=5, scoring=scoring, return_train_score=False)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores['test_'+scoring[0]].mean(), scores['test_'+scoring[0]].std()))
print("Precision: %0.2f (+/- %0.2f)" % (scores['test_'+scoring[1]].mean(), scores['test_'+scoring[1]].std()))
print("Recall: %0.2f (+/- %0.2f)" % (scores['test_'+scoring[2]].mean(), scores['test_'+scoring[2]].std()))
print("F1: %0.2f (+/- %0.2f)" % (scores['test_'+scoring[3]].mean(), scores['test_'+scoring[3]].std()))
print(
'{: <3.2f} $\pm$ {:<3.2f} & {: <3.2f} $\pm$ {: <3.2f} & '.format(scores['test_'+scoring[0]].mean(), scores['test_'+scoring[0]].std(),
scores['test_'+scoring[1]].mean(), scores['test_'+scoring[1]].std()) +
'{: <3.2f} $\pm$ {:<3.2f} & {: <3.2f} $\pm$ {: <3.2f} & '.format(scores['test_'+scoring[2]].mean(), scores['test_'+scoring[2]].std(),
scores['test_'+scoring[3]].mean(), scores['test_'+scoring[3]].std()) +
' \\\\ \hline'
)
else:
X_train, X_test, y_train, y_test = \
train_test_split(
X, y, test_size=0.3, random_state=seed)
unique, counts = np.unique(y_train, return_counts=True)
print('Training distribution: ', dict(zip(unique, counts)))
unique, counts = np.unique(y_test, return_counts=True)
print('Testing distribution: ', dict(zip(unique, counts)))
y_train = y_train.astype('int')
estimator = estimator.fit(X_train, y_train)
y_pred = estimator.predict(X_test)
y_pred = y_pred.astype('int')
y_test = y_test.astype('int')
print("Accuracy score: %.2f" % metrics.accuracy_score(y_test, y_pred))
print('Confusion matrix:\n', metrics.confusion_matrix(y_test, y_pred))
print('Precision score: ', metrics.precision_score(y_test, y_pred, average='weighted'))
print('Recall score: ', metrics.recall_score(y_test, y_pred, average='weighted'))
        print('f1 score: ', metrics.f1_score(y_test, y_pred, average='weighted'))
print('ROC AUC score: ', metrics.roc_auc_score(y_test, y_pred, average='weighted'))
# print('Coefficients: \n', np.array(X.stack().columns)[estimator.support_])
# print(
# '{:.2f} & {:.2f} & {:.2f} & {} & {} \\\\ \hline'.format(
# metrics.accuracy_score(y_test, y_pred),
# metrics.f1_score(y_test, y_pred, average='weighted'),
# metrics.roc_auc_score(y_test, y_pred, average='weighted'),
# get_cm(metrics.confusion_matrix(y_test, y_pred)),
# ', '.join(channels)
# ))
if print_incorrectly_predicted:
print('Incorrectly predicted:')
print(pd.DataFrame(y_test[y_pred != y_test]).join(X))
if evaluate_on_all:
y_pred = estimator.predict(X)
y_pred = y_pred.astype('int')
print("Accuracy score: %.2f" % metrics.accuracy_score(y, y_pred))
print('Confusion matrix:\n', metrics.confusion_matrix(y, y_pred))
print('Precision score: ', metrics.precision_score(y, y_pred, average='weighted'))
print('Recall score: ', metrics.recall_score(y, y_pred, average='weighted'))
print('f1 score: ', metrics.f1_score(y, y_pred, average='weighted'))
print('ROC AUC score: ', metrics.roc_auc_score(y, y_pred, average='weighted'))
if print_incorrectly_predicted:
print('Incorrectly predicted:')
print( | pd.DataFrame(y[y_pred != y]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Functions to process raw respondent data from new survey
Convert that to habits
Find top n best matches from 'archetypes' of original survey of 10K people.
Generate habit lists to see overlap with best cluster, unique habits, etc.
Clean habit naming for consistency.
"""
import sys
sys.path.append("../CommonFunctions")
import pandas as pd
#import pathlib as pl #path library
import params as param
| pd.set_option('display.expand_frame_repr', False) | pandas.set_option |
"""
The :mod:`mlshell.model_selection.search` includes utilities to
optimize hyper-parameters.
:class:`mlshell.model_selection.Optimizer` provides a unified interface to an
arbitrary optimizer. It is intended to be used in :class:`mlshell.Workflow`. For
new optimizer formats there is no need to edit the `Workflow` class; just adapt
the optimizer to comply with this interface.
:class:`mlshell.model_selection.RandomizedSearchOptimizer` wraps the
:class:`sklearn.model_selection.RandomizedSearchCV` implementation.
:class:`mlshell.model_selection.MockOptimizer` subclass provides efficient
brute-force optimization of prediction-related parameters as a separate
optimization step. For example, probing the classification threshold or scorer
function kwargs does not require refitting the whole pipeline.
"""
import copy
import os
import platform
import time
import uuid
import jsbeautifier
import mlshell
import numpy as np
import pandas as pd
import sklearn
import tabulate
__all__ = ['Optimizer', 'RandomizedSearchOptimizer', 'MockOptimizer']
class Optimizer(object):
"""Unified optimizer interface.
Implements an interface to access an arbitrary optimizer.
Interface: dump_runs, update_best and all underlying optimizer methods.
Attributes
----------
optimizer : :class:`sklearn.model_selection.BaseSearchCV`
Underlying optimizer.
Notes
-----
Calls to unspecified methods are redirected to the underlying optimizer object.
"""
def __init__(self):
self.optimizer = None
def __hash__(self):
return hash(str(self.optimizer))
def __getattr__(self, name):
"""Redirect unknown methods to optimizer object."""
def wrapper(*args, **kwargs):
getattr(self.optimizer, name)(*args, **kwargs)
return wrapper
def __getstate__(self):
# Allow pickle.
return self.__dict__
def __setstate__(self, d):
# Allow unpickle.
self.__dict__ = d
def update_best(self, prev):
"""Combine results from multi-stage optimization.
The logic of choosing the best run is set here. Currently the best hp
combination and the corresponding estimator are taken from the last stage.
If any hp is brute-forced in more than one stage, a more complicated rule
is required to merge runs.
Parameters
----------
prev : dict
Previous stage ``update_best`` output for some pipeline-data pair.
Initially set to {}. See ``update_best`` output format.
Returns
-------
nxt : dict
Result of merging runs on all optimization stages for some
pipeline-data pair: {
'params': list of dict
List of ``cv_results_['params']`` for all runs in stages.
'best_params_' : dict
Best estimator tuned params from all optimization stages.
'best_estimator_' : :mod:`sklearn` estimator
Best estimator ``optimizer.best_estimator_`` if it exists, else
``optimizer.estimator.set_params(**best_params_)`` (when 'refit'
is not True).
'best_score_' : tuple
Best score ``('scorer_id', optimizer.best_score_)``, where
``scorer_id=str(optimizer.refit)``. If best_score_ is
absent, ``('', float('-inf'))`` is used.
}
Notes
-----
:class:`mlshell.Workflow` utilize:
* 'best_estimator_' key to update pipeline in ``objects``.
* 'params' in built-in plotter.
* 'best_score_' in dump/dump_pred for file names.
"""
curr = self.optimizer
best_index_ = getattr(curr, 'best_index_', None)
cv_results_ = getattr(curr, 'cv_results_', None)
if best_index_ is None or cv_results_ is None:
return prev
# Only modifiers.
params = cv_results_['params']
best_params_ = params[best_index_]
best_estimator_ = getattr(curr, 'best_estimator_', None)
if best_estimator_ is None:
# If not 'refit'.
best_estimator_ = curr.estimator.set_params(**best_params_)
params_init = best_estimator_.get_params()
params_full = [{**params_init, **p} for p in params]
best_score_ = getattr(curr, 'best_score_', float('-inf'))
if best_score_ == float('-inf'):
scorer_id = ''
else:
scorer_id = str(getattr(curr, 'refit', ''))
nxt = {
'best_estimator_': best_estimator_,
'best_params_': {**prev.get('best_params_', {}), **best_params_},
'params': [*prev.get('params', []), *params_full],
'best_score_': (scorer_id, best_score_),
}
return nxt
def dump_runs(self, logger, dirpath, pipeline, dataset, **kwargs):
"""Dump results.
Parameters
----------
logger : :class:`logging.Logger`
Logger.
dirpath : str
Absolute path to dump dir.
pipeline : :class:`mlshell.Pipeline`
Pipeline used for optimizer.fit.
dataset : :class:`mlshell.Dataset`
Dataset used for optimizer.fit.
**kwargs : dict
Additional kwargs to pass in low-level dump function.
Notes
-----
Resulting file name is ``<timestamp>_runs.csv``. Each row corresponds to a
run, with column names:
* 'id' random UUID for run (hp combination).
* All pipeline parameters.
* Grid search output ``runs`` keys.
* Pipeline info: 'pipeline__id', 'pipeline__hash', 'pipeline__type'.
* Dataset info: 'dataset__id', 'dataset__hash'.
The hash can change when the interpreter is restarted, because the address
of some underlying function changes.
"""
runs = copy.deepcopy(self.optimizer.cv_results_)
best_ind = self.optimizer.best_index_
self._pprint(logger, self.optimizer)
self._dump_runs(logger, dirpath, pipeline, dataset, runs, best_ind,
**kwargs)
return None
def _pprint(self, logger, optimizer):
"""Pretty print optimizer results.
Parameters
----------
logger : :class:`logging.Logger`
Logger.
optimizer : :class:`sklearn.model_selection.BaseSearchCV`
Underlying optimizer.
"""
jsb = jsbeautifier.beautify
modifiers = self._find_modifiers(optimizer.cv_results_)
param_modifiers = set(f'param_{i}' for i in modifiers)
best_modifiers = {key: optimizer.best_params_[key] for key in modifiers
if key in optimizer.best_params_}
runs_avg = {
'mean_fit_time': optimizer.cv_results_['mean_fit_time'].mean(),
'mean_score_time': optimizer.cv_results_['mean_score_time'].mean()
}
useful_keys = [key for key in optimizer.cv_results_
if key in param_modifiers
or 'mean_train' in key or 'mean_test' in key]
df = pd.DataFrame(optimizer.cv_results_)
import pandas as pd
import pytest
pd.set_option("display.max_rows", 500)
import pandas as pd
import numpy as np
from tqdm import tqdm
from lib import config
from lib.logger import Log
logger = Log()
# Historical data columns
_DATE_COLUMN = 'DATE'
_N1 = 'N1'
_N2 = 'N2'
_N3 = 'N3'
_N4 = 'N4'
_N5 = 'N5'
_N6 = 'N6'
# Result columns
_DRAW_COLUMN = 'draw'
_MAX_SUCCESS_COLUMN = 'max_success'
_COMP_COLUMN = 'comp'
_COMP_COLUMN_INDEX = 7
def draw_to_str(draw, sep='-'):
"""Readable representation of a draw."""
nums = np.asarray(draw).tolist()
return sep.join(map(str, nums))
def check_draw(df_historical, draw, sort=True):
"""Check a combination from historical draw."""
df = df_historical.copy()
for i, row in tqdm(enumerate(df.values), total=df.shape[0], desc='historical'):
s_row = set(row[1:7])
s_draw = set(draw)
success = len(s_draw.intersection(s_row))
comp_number = row[_COMP_COLUMN_INDEX]
df.at[i, _DRAW_COLUMN] = draw_to_str(draw)
df.at[i, _MAX_SUCCESS_COLUMN] = int(success)
df.at[i, _COMP_COLUMN] = int(comp_number in s_draw)
return df
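# Example sketch (assumes df_hist follows the column layout above: DATE,
# N1..N6 and the complementary number in column 7):
#   checked = check_draw(df_hist, [3, 11, 19, 27, 35, 43])
#   hits = checked[checked['max_success'] >= 4]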
def scrutiny(f_test, f_historical, f_out, fmt='csv', success_filter=3, order_date_only=False, verbose=True):
"""Test all numbers in ``f_test`` file with the historical draws and return the matches for each one.
:param f_test: file with the numbers to test, each row for each independent experiment
:type f_test: str
:param f_historical: file with historical draws
:type f_historical: str
:param f_out: file to save the output with results
:type f_out: str
:param fmt: output file format
:type fmt: str ('csv' for CSV format)
:param success_filter: number of matches from which the success is returned
:type success_filter: int
:param order_date_only: if True, the results are ordered by date only
:type order_date_only: bool
:return: data frame with all success
:rtype: pandas.DataFrame
"""
config.verbose(verbose)
# Data frame to store the results
df_total = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import os
import sys
import pandas as pd
import numpy as np
# In[3]:
# In[4]:
# Load the input file
#filepath = sys.argv[1]
#filename = sys.argv[2]
filepath = "/home/data/projects/rda/workspace/rda/files/"
filename = "input3.csv"
data = pd.read_csv(filepath + "/" + filename, encoding='UTF-8')
# In[ ]:
# User-specified parameters
#kmeans
'''
k_clusters = int(sys.argv[3])
k_iter = int(sys.argv[4])
#dbscan
eps = float(sys.argv[5])
min_samples = int(sys.argv[6])
#hierarchy
h_clusters = int(sys.argv[7])
'''
# In[ ]:
k_clusters = 5
k_iter = 300
#dbscan
eps = 0.5
min_samples =3
#hierarchy
h_clusters = 3
# Drop samples whose values are missing for all features
data_0 =data.dropna(axis=0,how='all')
print(data_0.shape)
# Drop samples whose label value is missing
data_l =data.loc[data["label"].notnull(), :]
print(data_l.shape)
# Drop features with 50% or more missing values
data_f =data_l.dropna(axis=1,thresh=data_l.shape[0]/2)
print(data_f.shape)
# Impute the remaining missing values with the column mean
data_na_remove = data_f.fillna(data_f.mean())
print(data_na_remove.shape)
data_na_remove
# In[17]:
print(data_na_remove.shape)
data = data_na_remove.iloc[:100,:5]
X = data_na_remove.iloc[:100,1:5]
Y = data_na_remove.iloc[:100,0]  # arbitrary label column
data_na_remove["label"].unique()
# In[ ]:
from sklearn.cluster import KMeans, DBSCAN ,AgglomerativeClustering
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics import silhouette_score
ari =[]
nmi =[]
silhouette =[]
#kmeans
kmeans = KMeans(n_clusters= k_clusters,max_iter=k_iter).fit(X)
predict_k = pd.DataFrame(kmeans.predict(X))
predict_k.columns=['predict_kmeans']
#concat
data_k = pd.concat([data,predict_k],axis=1)
#scores
ari.append(adjusted_rand_score(Y,kmeans.predict(X)))
nmi.append(normalized_mutual_info_score(Y,kmeans.predict(X)))
silhouette.append(silhouette_score(X,kmeans.predict(X)))
#dbscan
dbscan = DBSCAN(eps= eps,min_samples= min_samples)
predict_db = pd.DataFrame(dbscan.fit_predict(X))
predict_db.columns=['predict_dbscan']
# concat
data_d = pd.concat([data_k,predict_db],axis=1)
#scores
ari.append(adjusted_rand_score(Y,dbscan.fit_predict(X)))
nmi.append(normalized_mutual_info_score(Y,dbscan.fit_predict(X)))
silhouette.append(silhouette_score(X,dbscan.fit_predict(X)))
# hierarchy
hierarchy = AgglomerativeClustering(n_clusters= h_clusters)
predict_h = pd.DataFrame(hierarchy.fit_predict(X))
predict_h.columns=['predict_hierarchy']
#concat
data_h = pd.concat([data_d,predict_h],axis=1)
#scores
ari.append(adjusted_rand_score(Y,hierarchy.fit_predict(X)))
nmi.append(normalized_mutual_info_score(Y,hierarchy.fit_predict(X)))
silhouette.append(silhouette_score(X,hierarchy.fit_predict(X)))
#data save
#data_h.to_csv('./public/files/cluster_data2_' + filename + '_.csv')
#data_h.to_csv('./cluster_data2_' + filename + '_.csv', mode = "w",encoding='cp949')
#clustering score save
score = pd.concat([pd.Series(ari), pd.Series(nmi), pd.Series(silhouette)], axis=1)
'''Module to assemble custom tensorflow procedures.'''
from dataclasses import dataclass, field
import tensorflow_datasets as tfds
import pandas as pd
from pyspark.sql import functions as F
from pyspark.sql import (
SparkSession,
functions as F,
DataFrame
)
from pyspark.sql.types import *
@dataclass
class DataExtraction:
'''Class to extract data using tensorflow datasets package.
'''
dict_param : dict = field(default_factory=dict)
int_batch_size: int = None
def download_data_batch(self):
'''Download the data if not already downloaded and convert it to batches.
'''
# Download or just load the data
ds = tfds.load(**self.dict_param)
# Convert the dataset into iterable batches
ds_batch = ds.batch(self.int_batch_size)
# Return the iterable batches
return ds_batch
def generator_batch_to_pandas(self, ds_batch):
'''Convert the batch data into a pandas DataFrame.'''
# Iterate over each batch
for n, ex in enumerate(ds_batch):
# Converts the batch data to a pandas DataFrame
df = pd.DataFrame.from_dict(ex['data'])
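# Usage sketch (dataset name, split and batch size are assumptions for
# illustration; dict_param must name a tensorflow_datasets dataset whose
# examples expose the 'data' field used above, and generator_batch_to_pandas
# is assumed to yield each converted DataFrame):
#
#   extractor = DataExtraction(dict_param={'name': 'iris', 'split': 'train'},
#                              int_batch_size=32)
#   for df in extractor.generator_batch_to_pandas(extractor.download_data_batch()):
#       print(df.head())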
import os
import numpy as np
import pandas as pd
import z5py
from mobie import add_segmentation
from mobie.metadata.image_dict import load_image_dict
ROOT = '/g/kreshuk/pape/Work/data/mito_em/data'
RESOLUTION = [.03, .008, .008]
SCALE_FACTORS = [[1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2]]
def compute_object_scores(seg, gt):
from map3d.vol3d_util import seg_iou3d_sorted
ui, uc = np.unique(seg, return_counts=True)
uc = uc[ui > 0]
ui = ui[ui > 0]
pred_score = np.ones((len(ui), 2), int)
pred_score[:, 0] = ui
pred_score[:, 1] = uc
thres = [5e3, 1.5e4]
area_rng = np.zeros((len(thres) + 2, 2), int)
area_rng[0, 1] = 1e10
area_rng[-1, 1] = 1e10
area_rng[2:, 0] = thres
area_rng[1:-1, 1] = thres
result_p, result_fn, pred_score_sorted = seg_iou3d_sorted(seg, gt, pred_score, area_rng)
seg_ids = result_p[:, 0].astype('uint32')
ious = result_p[:, 4]
best_scores = []
worst_scores = []
unique_seg_ids = np.unique(seg_ids)
for seg_id in unique_seg_ids:
this_scores = ious[seg_ids == seg_id]
best_scores.append(np.max(this_scores))
worst_scores.append(np.min(this_scores))
return unique_seg_ids, np.array(best_scores), np.array(worst_scores)
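# Example sketch (seg and gt are integer-labelled volumes of identical shape):
#   seg_ids, best_iou, worst_iou = compute_object_scores(seg, gt)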
def make_score_table(checkpoint, sample, seg_name):
print("make score table ...")
key = 'setup0/timepoint0/s0'
checkpoint_name = os.path.split(checkpoint)[1]
mobie_name = f'{checkpoint_name}_{seg_name}'
path = os.path.join(ROOT, sample, 'images', 'local', f'{mobie_name}.n5')
print("loading segmentation")
with z5py.File(path, 'r') as f:
ds = f[key]
ds.n_threads = 8
seg = ds[:]
print("loading labels")
gt_path = os.path.join(ROOT, sample, 'images', 'local', 'em-mitos.n5')
with z5py.File(gt_path, 'r') as f:
ds = f[key]
ds.n_threads = 8
gt = ds[:]
unique_seg_ids, best_scores, worst_scores = compute_object_scores(seg, gt)
data = np.concatenate([
unique_seg_ids[:, None],
best_scores[:, None],
worst_scores[:, None]
], axis=1)
columns = ['label_id', 'best_score', 'worst_score']
tab = pd.DataFrame(data, columns=columns)
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pytest
from ..testing_utils import make_ecommerce_entityset
from featuretools import Timedelta
from featuretools.computational_backends import PandasBackend
from featuretools.primitives import (
Absolute,
Add,
Count,
CumCount,
CumMax,
CumMean,
CumMin,
CumSum,
Day,
Diff,
DirectFeature,
Divide,
Equals,
Feature,
GreaterThan,
GreaterThanEqualTo,
Haversine,
Hour,
IdentityFeature,
IsIn,
IsNull,
Latitude,
LessThan,
LessThanEqualTo,
Longitude,
Mod,
Mode,
Multiply,
Negate,
Not,
NotEquals,
NumCharacters,
NumWords,
Percentile,
Subtract,
Sum,
get_transform_primitives,
make_trans_primitive
)
from featuretools.synthesis.deep_feature_synthesis import match
from featuretools.variable_types import Boolean, Datetime, Numeric, Variable
# some tests change the entityset values, so we have to create it fresh
# for each test (rather than setting scope='module')
@pytest.fixture
def es():
return make_ecommerce_entityset()
@pytest.fixture(scope='module')
def int_es():
return make_ecommerce_entityset(with_integer_time_index=True)
def test_make_trans_feat(es):
f = Hour(es['log']['datetime'])
pandas_backend = PandasBackend(es, [f])
df = pandas_backend.calculate_all_features(instance_ids=[0],
time_last=None)
v = df[f.get_name()][0]
assert v == 10
def test_diff(es):
value = IdentityFeature(es['log']['value'])
customer_id_feat = \
DirectFeature(es['sessions']['customer_id'],
child_entity=es['log'])
diff1 = Diff(value, es['log']['session_id'])
diff2 = Diff(value, customer_id_feat)
pandas_backend = PandasBackend(es, [diff1, diff2])
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
val1 = df[diff1.get_name()].values.tolist()
val2 = df[diff2.get_name()].values.tolist()
correct_vals1 = [
np.nan, 5, 5, 5, 5, np.nan, 1, 1, 1, np.nan, np.nan, 5, np.nan, 7, 7
]
correct_vals2 = [np.nan, 5, 5, 5, 5, -20, 1, 1, 1, -3, np.nan, 5, -5, 7, 7]
for i, v in enumerate(val1):
v1 = val1[i]
if np.isnan(v1):
assert (np.isnan(correct_vals1[i]))
else:
assert v1 == correct_vals1[i]
v2 = val2[i]
if np.isnan(v2):
assert (np.isnan(correct_vals2[i]))
else:
assert v2 == correct_vals2[i]
def test_diff_single_value(es):
diff = Diff(es['stores']['num_square_feet'], es['stores'][u'région_id'])
pandas_backend = PandasBackend(es, [diff])
df = pandas_backend.calculate_all_features(instance_ids=[5],
time_last=None)
assert df.shape[0] == 1
assert df[diff.get_name()].dropna().shape[0] == 0
def test_compare_of_identity(es):
to_test = [(Equals, [False, False, True, False]),
(NotEquals, [True, True, False, True]),
(LessThan, [True, True, False, False]),
(LessThanEqualTo, [True, True, True, False]),
(GreaterThan, [False, False, False, True]),
(GreaterThanEqualTo, [False, False, True, True])]
features = []
for test in to_test:
features.append(test[0](es['log']['value'], 10))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
def test_compare_of_direct(es):
log_rating = DirectFeature(es['products']['rating'],
child_entity=es['log'])
to_test = [(Equals, [False, False, False, False]),
(NotEquals, [True, True, True, True]),
(LessThan, [False, False, False, True]),
(LessThanEqualTo, [False, False, False, True]),
(GreaterThan, [True, True, True, False]),
(GreaterThanEqualTo, [True, True, True, False])]
features = []
for test in to_test:
features.append(test[0](log_rating, 4.5))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
def test_compare_of_transform(es):
day = Day(es['log']['datetime'])
to_test = [(Equals, [False, True]),
(NotEquals, [True, False]),
(LessThan, [True, False]),
(LessThanEqualTo, [True, True]),
(GreaterThan, [False, False]),
(GreaterThanEqualTo, [False, True])]
features = []
for test in to_test:
features.append(test[0](day, 10))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 14],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
def test_compare_of_agg(es):
count_logs = Count(es['log']['id'],
parent_entity=es['sessions'])
to_test = [(Equals, [False, False, False, True]),
(NotEquals, [True, True, True, False]),
(LessThan, [False, False, True, False]),
(LessThanEqualTo, [False, False, True, True]),
(GreaterThan, [True, True, False, False]),
(GreaterThanEqualTo, [True, True, False, True])]
features = []
for test in to_test:
features.append(test[0](count_logs, 2))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
def test_compare_all_nans(es):
nan_feat = Mode(es['log']['product_id'], es['sessions'])
compare = nan_feat == 'brown bag'
# before all data
time_last = pd.Timestamp('1/1/1993')
pandas_backend = PandasBackend(es, [nan_feat, compare])
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2],
time_last=time_last)
assert df[nan_feat.get_name()].dropna().shape[0] == 0
assert not df[compare.get_name()].any()
def test_arithmetic_of_val(es):
to_test = [(Add, [2.0, 7.0, 12.0, 17.0], [2.0, 7.0, 12.0, 17.0]),
(Subtract, [-2.0, 3.0, 8.0, 13.0], [2.0, -3.0, -8.0, -13.0]),
(Multiply, [0, 10, 20, 30], [0, 10, 20, 30]),
(Divide, [0, 2.5, 5, 7.5], [np.inf, 0.4, 0.2, 2 / 15.0],
[np.nan, np.inf, np.inf, np.inf])]
features = []
logs = es['log']
for test in to_test:
features.append(test[0](logs['value'], 2))
features.append(test[0](2, logs['value']))
features.append(Divide(logs['value'], 0))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[2 * i].get_name()].values.tolist()
assert v == test[1]
v = df[features[2 * i + 1].get_name()].values.tolist()
assert v == test[2]
test = to_test[-1][-1]
v = df[features[-1].get_name()].values.tolist()
assert (np.isnan(v[0]))
assert v[1:] == test[1:]
def test_arithmetic_two_vals_fails(es):
with pytest.raises(ValueError):
Add(2, 2)
def test_arithmetic_of_identity(es):
logs = es['log']
to_test = [(Add, [0., 7., 14., 21.]),
(Subtract, [0, 3, 6, 9]),
(Multiply, [0, 10, 40, 90]),
(Divide, [np.nan, 2.5, 2.5, 2.5])]
features = []
for test in to_test:
features.append(test[0](logs['value'], logs['value_2']))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2, 3],
time_last=None)
for i, test in enumerate(to_test[:-1]):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
i, test = 3, to_test[-1]
v = df[features[i].get_name()].values.tolist()
assert (np.isnan(v[0]))
assert v[1:] == test[1][1:]
def test_arithmetic_of_direct(es):
rating = es['products']['rating']
log_rating = DirectFeature(rating,
child_entity=es['log'])
customer_age = es['customers']['age']
session_age = DirectFeature(customer_age,
child_entity=es['sessions'])
log_age = DirectFeature(session_age,
child_entity=es['log'])
to_test = [(Add, [38, 37, 37.5, 37.5]),
(Subtract, [28, 29, 28.5, 28.5]),
(Multiply, [165, 132, 148.5, 148.5]),
(Divide, [6.6, 8.25, 22. / 3, 22. / 3])]
features = []
for test in to_test:
features.append(test[0](log_age, log_rating))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 3, 5, 7],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
# P TODO: rewrite this test
def test_arithmetic_of_transform(es):
diff1 = Diff(IdentityFeature(es['log']['value']),
IdentityFeature(es['log']['product_id']))
diff2 = Diff(IdentityFeature(es['log']['value_2']),
IdentityFeature(es['log']['product_id']))
to_test = [(Add, [np.nan, 14., -7., 3.]),
(Subtract, [np.nan, 6., -3., 1.]),
(Multiply, [np.nan, 40., 10., 2.]),
(Divide, [np.nan, 2.5, 2.5, 2.])]
features = []
for test in to_test:
features.append(test[0](diff1, diff2))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 2, 11, 13],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert np.isnan(v.pop(0))
assert np.isnan(test[1].pop(0))
assert v == test[1]
def test_not_feature(es):
likes_ice_cream = es['customers']['loves_ice_cream']
not_feat = Not(likes_ice_cream)
features = [not_feat]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1],
time_last=None)
v = df[not_feat.get_name()].values
assert not v[0]
assert v[1]
def test_arithmetic_of_agg(es):
customer_id_feat = es['customers']['id']
store_id_feat = es['stores']['id']
count_customer = Count(customer_id_feat,
parent_entity=es[u'régions'])
count_stores = Count(store_id_feat,
parent_entity=es[u'régions'])
to_test = [(Add, [6, 2]),
(Subtract, [0, -2]),
(Multiply, [9, 0]),
(Divide, [1, 0])]
features = []
for test in to_test:
features.append(test[0](count_customer, count_stores))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(
instance_ids=['United States', 'Mexico'], time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test[1]
# TODO latlong is a string in entityset. Asserts in test_latlong fail
# def latlong_unstringify(latlong):
# lat = float(latlong.split(", ")[0].replace("(", ""))
# lon = float(latlong.split(", ")[1].replace(")", ""))
# return (lat, lon)
def test_latlong(es):
log_latlong_feat = es['log']['latlong']
latitude = Latitude(log_latlong_feat)
longitude = Longitude(log_latlong_feat)
features = [latitude, longitude]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
latvalues = df[latitude.get_name()].values
lonvalues = df[longitude.get_name()].values
assert len(latvalues) == 15
assert len(lonvalues) == 15
real_lats = [0, 5, 10, 15, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14]
real_lons = [0, 2, 4, 6, 8, 0, 1, 2, 3, 0, 0, 2, 0, 3, 6]
for i, v, in enumerate(real_lats):
assert v == latvalues[i]
for i, v, in enumerate(real_lons):
assert v == lonvalues[i]
def test_haversine(es):
log_latlong_feat = es['log']['latlong']
log_latlong_feat2 = es['log']['latlong2']
haversine = Haversine(log_latlong_feat, log_latlong_feat2)
features = [haversine]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
values = df[haversine.get_name()].values
real = [0., 524.15585776, 1043.00845747, 1551.12130243,
2042.79840241, 0., 137.86000883, 275.59396684,
413.07563177, 0., 0., 524.15585776,
0., 739.93819145, 1464.27975511]
assert len(values) == 15
for i, v in enumerate(real):
assert v - values[i] < .0001
def test_cum_sum(es):
log_value_feat = es['log']['value']
cum_sum = CumSum(log_value_feat, es['log']['session_id'])
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15, 30, 50, 0, 1, 3, 6, 0, 0, 5, 0, 7, 21]
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_min(es):
log_value_feat = es['log']['value']
cum_min = CumMin(log_value_feat, es['log']['session_id'])
features = [cum_min]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_min.get_name()].values
assert len(cvalues) == 15
cum_min_values = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for i, v in enumerate(cum_min_values):
assert v == cvalues[i]
def test_cum_max(es):
log_value_feat = es['log']['value']
cum_max = CumMax(log_value_feat, es['log']['session_id'])
features = [cum_max]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_max.get_name()].values
assert len(cvalues) == 15
cum_max_values = [0, 5, 10, 15, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14]
for i, v in enumerate(cum_max_values):
assert v == cvalues[i]
def test_cum_sum_use_previous(es):
log_value_feat = es['log']['value']
cum_sum = CumSum(log_value_feat, es['log']['session_id'],
use_previous=Timedelta(3, 'observations',
entity=es['log']))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15, 30, 45, 0, 1, 3, 6, 0, 0, 5, 0, 7, 21]
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_sum_use_previous_integer_time(int_es):
es = int_es
log_value_feat = es['log']['value']
with pytest.raises(AssertionError):
CumSum(log_value_feat, es['log']['session_id'],
use_previous=Timedelta(3, 'm'))
cum_sum = CumSum(log_value_feat, es['log']['session_id'],
use_previous=Timedelta(3, 'observations',
entity=es['log']))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15, 30, 45, 0, 1, 3, 6, 0, 0, 5, 0, 7, 21]
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_sum_where(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_sum = CumSum(log_value_feat, dfeat,
where=compare_feat)
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15, 30, 50, 50, 50, 50, 50, 50,
0, 5, 5, 12, 26]
for i, v in enumerate(cum_sum_values):
if not np.isnan(v):
assert v == cvalues[i]
else:
assert (np.isnan(cvalues[i]))
def test_cum_sum_use_previous_and_where(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
# todo should this be cummean?
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_sum = CumSum(log_value_feat, dfeat,
where=compare_feat,
use_previous=Timedelta(3, 'observations',
entity=es['log']))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cum_sum_values = [0, 5, 15, 30, 45, 45, 45, 45, 45, 45,
0, 5, 5, 12, 26]
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_sum_group_on_nan(es):
log_value_feat = es['log']['value']
es['log'].df['product_id'] = (['coke zero'] * 3 + ['car'] * 2 +
['toothpaste'] * 3 + ['brown bag'] * 2 +
['shoes'] +
[np.nan] * 4 +
['coke_zero'] * 2)
cum_sum = CumSum(log_value_feat, es['log']['product_id'])
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15,
15, 35,
0, 1, 3,
3, 3,
0,
np.nan, np.nan, np.nan, np.nan]
for i, v in enumerate(cum_sum_values):
if np.isnan(v):
assert (np.isnan(cvalues[i]))
else:
assert v == cvalues[i]
def test_cum_sum_use_previous_group_on_nan(es):
# TODO: Figure out how to test where `df`
# in pd_rolling get_function() has multiindex
log_value_feat = es['log']['value']
es['log'].df['product_id'] = (['coke zero'] * 3 + ['car'] * 2 +
['toothpaste'] * 3 + ['brown bag'] * 2 +
['shoes'] +
[np.nan] * 4 +
['coke_zero'] * 2)
cum_sum = CumSum(log_value_feat,
es['log']['product_id'],
es["log"]["datetime"],
use_previous=Timedelta(40, 'seconds'))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
cum_sum_values = [0, 5, 15,
15, 35,
0, 1, 3,
3, 0,
0,
np.nan, np.nan, np.nan, np.nan]
for i, v in enumerate(cum_sum_values):
if np.isnan(v):
assert (np.isnan(cvalues[i]))
else:
assert v == cvalues[i]
def test_cum_sum_use_previous_and_where_absolute(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_sum = CumSum(log_value_feat, dfeat, es["log"]["datetime"],
where=compare_feat,
use_previous=Timedelta(40, 'seconds'))
features = [cum_sum]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cum_sum_values = [0, 5, 15, 30, 50, 0, 0, 0, 0, 0,
0, 5, 0, 7, 21]
cvalues = df[cum_sum.get_name()].values
assert len(cvalues) == 15
for i, v in enumerate(cum_sum_values):
assert v == cvalues[i]
def test_cum_mean(es):
log_value_feat = es['log']['value']
cum_mean = CumMean(log_value_feat, es['log']['session_id'])
features = [cum_mean]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_mean.get_name()].values
assert len(cvalues) == 15
cum_mean_values = [0, 2.5, 5, 7.5, 10, 0, .5, 1, 1.5, 0, 0, 2.5, 0, 3.5, 7]
for i, v in enumerate(cum_mean_values):
assert v == cvalues[i]
def test_cum_mean_use_previous(es):
log_value_feat = es['log']['value']
cum_mean = CumMean(log_value_feat, es['log']['session_id'],
use_previous=Timedelta(3, 'observations',
entity=es['log']))
features = [cum_mean]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_mean.get_name()].values
assert len(cvalues) == 15
cum_mean_values = [0, 2.5, 5, 10, 15, 0, .5, 1, 2, 0, 0, 2.5, 0, 3.5, 7]
for i, v in enumerate(cum_mean_values):
assert v == cvalues[i]
def test_cum_mean_where(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_mean = CumMean(log_value_feat, dfeat,
where=compare_feat)
features = [cum_mean]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_mean.get_name()].values
assert len(cvalues) == 15
cum_mean_values = [0, 5, 7.5, 10, 12.5, 12.5, 12.5, 12.5, 12.5, 12.5,
0, 5, 5, 6, 26. / 3]
for i, v in enumerate(cum_mean_values):
if not np.isnan(v):
assert v == cvalues[i]
else:
assert (np.isnan(cvalues[i]))
def test_cum_mean_use_previous_and_where(es):
log_value_feat = es['log']['value']
compare_feat = GreaterThan(log_value_feat, 3)
# todo should this be cummean?
dfeat = Feature(es['sessions']['customer_id'], es['log'])
cum_mean = CumMean(log_value_feat, dfeat,
where=compare_feat,
use_previous=Timedelta(2, 'observations',
entity=es['log']))
features = [cum_mean]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cum_mean_values = [0, 5, 7.5, 12.5, 17.5, 17.5, 17.5, 17.5, 17.5, 17.5,
0, 5, 5, 6, 10.5]
cvalues = df[cum_mean.get_name()].values
assert len(cvalues) == 15
for i, v in enumerate(cum_mean_values):
assert v == cvalues[i]
def test_cum_count(es):
log_id_feat = es['log']['id']
cum_count = CumCount(log_id_feat, es['log']['session_id'])
features = [cum_count]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
cvalues = df[cum_count.get_name()].values
assert len(cvalues) == 15
cum_count_values = [1, 2, 3, 4, 5, 1, 2, 3, 4, 1, 1, 2, 1, 2, 3]
for i, v in enumerate(cum_count_values):
assert v == cvalues[i]
def test_text_primitives(es):
words = NumWords(es['log']['comments'])
chars = NumCharacters(es['log']['comments'])
features = [words, chars]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=range(15),
time_last=None)
word_counts = [514, 3, 3, 644, 1268, 1269, 177, 172, 79,
240, 1239, 3, 3, 3, 3]
char_counts = [3392, 10, 10, 4116, 7961, 7580, 992, 957,
437, 1325, 6322, 10, 10, 10, 10]
word_values = df[words.get_name()].values
char_values = df[chars.get_name()].values
assert len(word_values) == 15
for i, v in enumerate(word_values):
assert v == word_counts[i]
for i, v in enumerate(char_values):
assert v == char_counts[i]
def test_overrides(es):
value = Feature(es['log']['value'])
value2 = Feature(es['log']['value_2'])
feats = [Add, Subtract, Multiply, Divide]
compare_ops = [GreaterThan, LessThan, Equals, NotEquals,
GreaterThanEqualTo, LessThanEqualTo]
assert Negate(value).hash() == (-value).hash()
compares = [(value, value),
(value, value2),
(value2, 2)]
overrides = [
value + value,
value - value,
value * value,
value / value,
value > value,
value < value,
value == value,
value != value,
value >= value,
value <= value,
value + value2,
value - value2,
value * value2,
value / value2,
value > value2,
value < value2,
value == value2,
value != value2,
value >= value2,
value <= value2,
value2 + 2,
value2 - 2,
value2 * 2,
value2 / 2,
value2 > 2,
value2 < 2,
value2 == 2,
value2 != 2,
value2 >= 2,
value2 <= 2,
]
i = 0
for left, right in compares:
for feat in feats:
f = feat(left, right)
o = overrides[i]
assert o.hash() == f.hash()
i += 1
for compare_op in compare_ops:
f = compare_op(left, right)
o = overrides[i]
assert o.hash() == f.hash()
i += 1
our_reverse_overrides = [
2 + value2,
2 - value2,
2 * value2,
2 / value2]
i = 0
for feat in feats:
if feat != Mod:
f = feat(2, value2)
o = our_reverse_overrides[i]
assert o.hash() == f.hash()
i += 1
python_reverse_overrides = [
2 < value2,
2 > value2,
2 == value2,
2 != value2,
2 <= value2,
2 >= value2]
i = 0
for compare_op in compare_ops:
f = compare_op(value2, 2)
o = python_reverse_overrides[i]
assert o.hash() == f.hash()
i += 1
def test_override_boolean(es):
count = Count(es['log']['value'], es['sessions'])
count_lo = GreaterThan(count, 1)
count_hi = LessThan(count, 10)
to_test = [[True, True, True],
[True, True, False],
[False, False, True]]
features = []
features.append(count_lo.OR(count_hi))
features.append(count_lo.AND(count_hi))
features.append(~(count_lo.AND(count_hi)))
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test
def test_override_cmp_from_variable(es):
count_lo = IdentityFeature(es['log']['value']) > 1
to_test = [False, True, True]
features = [count_lo]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2],
time_last=None)
v = df[count_lo.get_name()].values.tolist()
for i, test in enumerate(to_test):
assert v[i] == test
def test_override_cmp(es):
count = Count(es['log']['value'], es['sessions'])
_sum = Sum(es['log']['value'], es['sessions'])
gt_lo = count > 1
gt_other = count > _sum
ge_lo = count >= 1
ge_other = count >= _sum
lt_hi = count < 10
lt_other = count < _sum
le_hi = count <= 10
le_other = count <= _sum
ne_lo = count != 1
ne_other = count != _sum
to_test = [[True, True, False],
[False, False, True],
[True, True, True],
[False, False, True],
[True, True, True],
[True, True, False],
[True, True, True],
[True, True, False]]
features = [gt_lo, gt_other, ge_lo, ge_other, lt_hi,
lt_other, le_hi, le_other, ne_lo, ne_other]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(instance_ids=[0, 1, 2],
time_last=None)
for i, test in enumerate(to_test):
v = df[features[i].get_name()].values.tolist()
assert v == test
def test_isin_feat(es):
isin = IsIn(es['log']['product_id'],
list_of_outputs=["toothpaste", "coke zero"])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].values.tolist()
assert true == v
def test_isin_feat_other_syntax(es):
isin = Feature(es['log']['product_id']).isin(["toothpaste", "coke zero"])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].values.tolist()
assert true == v
def test_isin_feat_other_syntax_int(es):
isin = Feature(es['log']['value']).isin([5, 10])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [False, True, True, False, False, False, False, False]
v = df[isin.get_name()].values.tolist()
assert true == v
def test_isin_feat_custom(es):
def pd_is_in(array, list_of_outputs=None):
if list_of_outputs is None:
list_of_outputs = []
return pd.Series(array).isin(list_of_outputs)
def isin_generate_name(self):
return u"%s.isin(%s)" % (self.base_features[0].get_name(),
str(self.kwargs['list_of_outputs']))
IsIn = make_trans_primitive(
pd_is_in,
[Variable],
Boolean,
name="is_in",
description="For each value of the base feature, checks whether it is "
"in a list that is provided.",
cls_attributes={"generate_name": isin_generate_name})
isin = IsIn(es['log']['product_id'],
list_of_outputs=["toothpaste", "coke zero"])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].values.tolist()
assert true == v
isin = Feature(es['log']['product_id']).isin(["toothpaste", "coke zero"])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [True, True, True, False, False, True, True, True]
v = df[isin.get_name()].values.tolist()
assert true == v
isin = Feature(es['log']['value']).isin([5, 10])
features = [isin]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(8), None)
true = [False, True, True, False, False, False, False, False]
v = df[isin.get_name()].values.tolist()
assert true == v
def test_isnull_feat(es):
value = IdentityFeature(es['log']['value'])
diff = Diff(value, es['log']['session_id'])
isnull = IsNull(diff)
features = [isnull]
pandas_backend = PandasBackend(es, features)
df = pandas_backend.calculate_all_features(range(15), None)
# correct_vals_diff = [
# np.nan, 5, 5, 5, 5, np.nan, 1, 1, 1, np.nan, np.nan, 5, np.nan, 7, 7]
correct_vals = [True, False, False, False, False, True, False, False,
False, True, True, False, True, False, False]
values = df[isnull.get_name()].values.tolist()
assert correct_vals == values
def test_init_and_name(es):
from featuretools import calculate_feature_matrix
log = es['log']
features = [Feature(v) for v in log.variables] +\
[GreaterThan(Feature(es["products"]["rating"], es["log"]), 2.5)]
# Add Timedelta feature
features.append(pd.Timestamp.now() - Feature(log['datetime']))
for transform_prim in get_transform_primitives().values():
# use the input_types matching function from DFS
input_types = transform_prim.input_types
if type(input_types[0]) == list:
matching_inputs = match(input_types[0], features)
else:
matching_inputs = match(input_types, features)
if len(matching_inputs) == 0:
raise Exception(
"Transform Primitive %s not tested" % transform_prim.name)
for s in matching_inputs:
instance = transform_prim(*s)
# try to get name and calculate
instance.get_name()
calculate_feature_matrix([instance], entityset=es).head(5)
def test_percentile(es):
v = Feature(es['log']['value'])
p = Percentile(v)
pandas_backend = PandasBackend(es, [p])
df = pandas_backend.calculate_all_features(range(10, 17), None)
true = es['log'].df[v.get_name()].rank(pct=True)
true = true.loc[range(10, 17)]
for t, a in zip(true.values, df[p.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_dependent_percentile(es):
v = Feature(es['log']['value'])
p = Percentile(v)
p2 = Percentile(p - 1)
pandas_backend = PandasBackend(es, [p, p2])
df = pandas_backend.calculate_all_features(range(10, 17), None)
true = es['log'].df[v.get_name()].rank(pct=True)
true = true.loc[range(10, 17)]
for t, a in zip(true.values, df[p.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_agg_percentile(es):
v = Feature(es['log']['value'])
p = Percentile(v)
agg = Sum(p, es['sessions'])
pandas_backend = PandasBackend(es, [agg])
df = pandas_backend.calculate_all_features([0, 1], None)
log_vals = es['log'].df[[v.get_name(), 'session_id']]
log_vals['percentile'] = log_vals[v.get_name()].rank(pct=True)
true_p = log_vals.groupby('session_id')['percentile'].sum()[[0, 1]]
for t, a in zip(true_p.values, df[agg.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_percentile_agg_percentile(es):
v = Feature(es['log']['value'])
p = Percentile(v)
agg = Sum(p, es['sessions'])
pagg = Percentile(agg)
pandas_backend = PandasBackend(es, [pagg])
df = pandas_backend.calculate_all_features([0, 1], None)
log_vals = es['log'].df[[v.get_name(), 'session_id']]
log_vals['percentile'] = log_vals[v.get_name()].rank(pct=True)
true_p = log_vals.groupby('session_id')['percentile'].sum().fillna(0)
true_p = true_p.rank(pct=True)[[0, 1]]
for t, a in zip(true_p.values, df[pagg.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_percentile_agg(es):
v = Feature(es['log']['value'])
agg = Sum(v, es['sessions'])
pagg = Percentile(agg)
pandas_backend = PandasBackend(es, [pagg])
df = pandas_backend.calculate_all_features([0, 1], None)
log_vals = es['log'].df[[v.get_name(), 'session_id']]
true_p = log_vals.groupby('session_id')[v.get_name()].sum().fillna(0)
true_p = true_p.rank(pct=True)[[0, 1]]
for t, a in zip(true_p.values, df[pagg.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_direct_percentile(es):
v = Feature(es['customers']['age'])
p = Percentile(v)
d = Feature(p, es['sessions'])
pandas_backend = PandasBackend(es, [d])
df = pandas_backend.calculate_all_features([0, 1], None)
cust_vals = es['customers'].df[[v.get_name()]]
cust_vals['percentile'] = cust_vals[v.get_name()].rank(pct=True)
true_p = cust_vals['percentile'].loc[[0, 0]]
for t, a in zip(true_p.values, df[d.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
# Copyright (c) 2016 <NAME> <<EMAIL>>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
""" Module for motif activity prediction """
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
warnings.filterwarnings("ignore", message="sklearn.externals.joblib is deprecated")
import os
import sys
try:
from itertools import izip
except ImportError:
izip = zip
import logging
import pandas as pd
import numpy as np
from scipy.stats import hypergeom, mannwhitneyu
from statsmodels.stats.multitest import multipletests
from tqdm.auto import tqdm
# scikit-learn
from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import MultiTaskLassoCV, BayesianRidge
from sklearn.multioutput import MultiOutputRegressor
from sklearn.preprocessing import scale, LabelEncoder
from sklearn.svm import LinearSVR
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
import xgboost
from gimmemotifs import __version__
from gimmemotifs.motif import read_motifs
from gimmemotifs.scanner import scan_regionfile_to_table
from gimmemotifs.config import MotifConfig
from gimmemotifs.utils import pfmfile_location
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
logger = logging.getLogger("gimme.maelstrom")
class Moap(object):
"""Moap base class.
Motif activity prediction.
"""
_predictors = {}
name = None
@classmethod
def create(cls, name, ncpus=None):
"""Create a Moap instance based on the predictor name.
Parameters
----------
name : str
Name of the predictor (eg. Xgboost, BayesianRidge, ...)
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Returns
-------
moap : Moap instance
moap instance.
"""
try:
return cls._predictors[name.lower()](ncpus=ncpus)
except KeyError:
raise Exception("Unknown class")
@classmethod
def register_predictor(cls, name):
"""Register method to keep list of predictors."""
def decorator(subclass):
"""Register as decorator function."""
cls._predictors[name.lower()] = subclass
subclass.name = name.lower()
return subclass
return decorator
@classmethod
def list_predictors(self):
"""List available predictors."""
return list(self._predictors.keys())
@classmethod
def list_classification_predictors(self):
"""List available classification predictors."""
preds = [self.create(x) for x in self._predictors.keys()]
return [x.name for x in preds if x.ptype == "classification"]
@classmethod
def list_regression_predictors(self):
"""List available regression predictors."""
preds = [self.create(x) for x in self._predictors.keys()]
return [x.name for x in preds if x.ptype == "regression"]
register_predictor = Moap.register_predictor
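# Example of the factory interface (predictors are registered below; df_X:
# regions x motif scores/counts, df_y: regions x labels or values, both
# assumed to be pandas DataFrames sharing the same region index):
#
#   model = Moap.create("bayesianridge", ncpus=4)
#   model.fit(df_X, df_y)
#   activities = model.act_   # motifs x clusters/samples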
@register_predictor("BayesianRidge")
class BayesianRidgeMoap(Moap):
def __init__(self, scale=True, ncpus=None):
"""Predict motif activities using Bayesian Ridge Regression.
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification.
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
Coefficients of the regression model.
"""
self.act_description = "activity values: coefficients of the" "regression model"
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.scale = scale
self.act_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y):
logger.info("Fitting BayesianRidge")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if self.scale:
logger.debug("Scaling motif scores")
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
# logger.debug("Scaling y")
# Normalize across samples and features
# y = df_y.apply(scale, 1).apply(scale, 0)
y = df_y
X = df_X.loc[y.index]
model = BayesianRidge()
logger.debug("Fitting model")
coefs = []
for col in tqdm(y.columns, total=len(y.columns)):
model.fit(X, y[col])
coefs.append(model.coef_)
logger.info("Done")
self.act_ = pd.DataFrame(coefs, columns=X.columns, index=y.columns).T
@register_predictor("Xgboost")
class XgboostRegressionMoap(Moap):
def __init__(self, scale=True, ncpus=None):
"""Predict motif activities using XGBoost.
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
Feature scores.
"""
self.act_description = "activity values: feature scores from" "fitted model"
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.scale = scale
self.act_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y):
logger.info("Fitting XGBoostRegression")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if self.scale:
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
# Normalize across samples and features
# y = df_y.apply(scale, 1).apply(scale, 0)
y = df_y
X = df_X.loc[y.index]
# Define model
xgb = xgboost.XGBRegressor(
n_estimators=500,
learning_rate=0.01,
nthread=self.ncpus,
min_child_weight=2,
max_depth=3,
subsample=0.8,
colsample_bytree=0.8,
objective="reg:squarederror",
)
logger.debug("xgb: 0%")
self.act_ = pd.DataFrame(index=X.columns)
# Fit model
for i, col in enumerate(tqdm(y.columns)):
xgb.fit(X, y[col].values)
d = xgb.get_booster().get_fscore()
self.act_[col] = [d.get(m, 0) for m in X.columns]
for motif in self.act_.index:
if self.act_.loc[motif, col] != 0:
high = df_y.loc[
df_X[motif] >= df_X[motif].quantile(0.75), col
].mean()
low = df_y.loc[
df_X[motif] <= df_X[motif].quantile(0.25), col
].mean()
if low > high:
self.act_.loc[motif, col] *= -1
logger.debug("..{}%".format(int(float(i + 1) / len(y.columns) * 100)))
logger.info("Done")
@register_predictor("MWU")
class MWUMoap(Moap):
def __init__(self, *args, **kwargs):
"""Predict motif activities using Mann-Whitney U p-value
This method compares the motif score distribution of each
cluster versus the motif score distribution of all other
clusters.
Parameters
----------
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
-log10 of the Mann-Whitney U p-value, corrected for multiple
testing using the Benjamini-Hochberg correction
"""
self.act_ = None
self.act_description = (
"activity values: BH-corrected " "-log10 Mann-Whitney U p-value"
)
self.pref_table = "score"
self.supported_tables = ["score"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting MWU")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
# calculate Mann-Whitney U p-values
pvals = []
clusters = df_y[df_y.columns[0]].unique()
for cluster in clusters:
pos = df_X[df_y.iloc[:, 0] == cluster]
neg = df_X[df_y.iloc[:, 0] != cluster]
p = []
for m in pos:
try:
p.append(mannwhitneyu(pos[m], neg[m], alternative="greater")[1])
except Exception as e:
sys.stderr.write(str(e) + "\n")
sys.stderr.write("motif {} failed, setting to p = 1\n".format(m))
p.append(1)
pvals.append(p)
# correct for multipe testing
pvals = np.array(pvals)
fpr = multipletests(pvals.flatten(), method="fdr_bh")[1].reshape(pvals.shape)
# create output DataFrame
self.act_ = pd.DataFrame(-np.log10(fpr.T), columns=clusters, index=df_X.columns)
logger.info("Done")
@register_predictor("Hypergeom")
class HypergeomMoap(Moap):
def __init__(self, *args, **kwargs):
"""Predict motif activities using hypergeometric p-value
Parameters
----------
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
-log10 of the hypergeometric p-value, corrected for multiple
testing using the Benjamini-Hochberg correction
"""
self.act_ = None
self.act_description = (
"activity values: -log10-transformed, BH-corrected "
"hypergeometric p-values"
)
self.pref_table = "count"
self.supported_tables = ["count"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting Hypergeom")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
if set(df_X.dtypes) != set([np.dtype(int)]):
raise ValueError("need motif counts, not scores")
# calculate hypergeometric p-values
pvals = []
clusters = df_y[df_y.columns[0]].unique()
M = df_X.shape[0]
for cluster in clusters:
pos = df_X[df_y.iloc[:, 0] == cluster]
neg = df_X[df_y.iloc[:, 0] != cluster]
pos_true = (pos > 0).sum(0)
pos_false = (pos == 0).sum(0)
neg_true = (neg > 0).sum(0)
p = []
for pt, pf, nt in zip(pos_true, pos_false, neg_true):
n = pt + nt
N = pt + pf
x = pt - 1
p.append(hypergeom.sf(x, M, n, N))
pvals.append(p)
# correct for multipe testing
pvals = np.array(pvals)
fpr = multipletests(pvals.flatten(), method="fdr_bh")[1].reshape(pvals.shape)
# create output DataFrame
self.act_ = pd.DataFrame(-np.log10(fpr.T), columns=clusters, index=df_X.columns)
logger.info("Done")
@register_predictor("RF")
class RFMoap(Moap):
def __init__(self, ncpus=None):
"""Predict motif activities using a random forest classifier
Parameters
----------
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
feature importances from the model
"""
self.act_ = None
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.act_description = (
"activity values: feature importances " "from fitted Random Forest model"
)
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting RF")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
le = LabelEncoder()
y = le.fit_transform(df_y.iloc[:, 0].values)
clf = RandomForestClassifier(n_estimators=100, n_jobs=self.ncpus)
# Multiclass
if len(le.classes_) > 2:
orc = OneVsRestClassifier(clf)
orc.fit(df_X.values, y)
importances = np.array([c.feature_importances_ for c in orc.estimators_]).T
else: # Only two classes
clf.fit(df_X.values, y)
importances = np.array(
[clf.feature_importances_, clf.feature_importances_]
).T
for i, _ in enumerate(le.classes_):
diff = df_X.loc[y == i].quantile(q=0.75) - df_X.loc[y != i].quantile(q=0.75)
sign = (diff >= 0) * 2 - 1
importances[:, i] *= sign
# create output DataFrame
self.act_ = pd.DataFrame(
importances,
columns=le.inverse_transform(range(len(le.classes_))),
index=df_X.columns,
)
logger.info("Done")
@register_predictor("MultiTaskLasso")
class MultiTaskLassoMoap(Moap):
def __init__(self, scale=True, ncpus=None):
"""Predict motif activities using MultiTaskLasso.
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification.
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
Coefficients of the regression model.
"""
self.act_description = "activity values: coefficients of the" "regression model"
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.scale = scale
self.act_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y):
logger.info("Fitting MultiTaskLasso")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if self.scale:
logger.debug("Scaling motif scores")
# Scale motif scores
df_X.loc[:, :] = scale(df_X, axis=0)
# logger.debug("Scaling y")
# Normalize across samples and features
# y = df_y.apply(scale, 1).apply(scale, 0)
y = df_y
X = df_X.loc[y.index]
model = Pipeline(
[
("scale", StandardScaler()),
(
"reg",
MultiTaskLassoCV(
fit_intercept=False, n_alphas=20, n_jobs=self.ncpus
),
),
]
)
logger.debug("Fitting model")
model.fit(df_X, df_y)
logger.info("Done")
self.act_ = pd.DataFrame(
model.steps[1][1].coef_, index=y.columns, columns=X.columns
).T
def predict(self, df_X):
return df_X.dot(self.act_.loc[df_X.columns])
@register_predictor("SVR")
class SVRMoap(Moap):
def __init__(self, scale=True, ncpus=None):
"""Predict motif activities using Support Vector Regression.
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification.
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
SVR weights.
"""
self.act_description = "activity values: SVR weights"
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.scale = scale
self.act_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y):
logger.info("Fitting SVR")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if self.scale:
logger.debug("Scaling motif scores")
# Scale motif scores
df_X.loc[:, :] = scale(df_X, axis=0)
# logger.debug("Scaling y")
# Normalize across samples and features
# y = df_y.apply(scale, 1).apply(scale, 0)
y = df_y
self.columns = df_y.columns
X = df_X.loc[y.index]
clf = LinearSVR()
self.model = MultiOutputRegressor(clf, n_jobs=1)
logger.debug("Fitting model")
self.model.fit(df_X, df_y)
logger.info("Done")
self.act_ = pd.DataFrame(
{c: e.coef_ for c, e in zip(df_y.columns, self.model.estimators_)},
index=X.columns,
)
def predict(self, df_X):
# print(self.model.predict(df_X) )
return pd.DataFrame(
self.model.predict(df_X), index=df_X.index, columns=self.columns
)
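# --- Hedged usage sketch (not part of the original module) ---
# The regression-style predictors share a fit/predict interface: `fit` takes a motif
# score table df_X (regions x motifs) and a target table df_y (regions x conditions)
# and fills `act_` (motifs x conditions); `predict` reconstructs region-level values
# from the motif scores. The data below is random and purely illustrative.
def _example_regression_moap_usage():  # hypothetical helper, safe to delete
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(0)
    df_X = pd.DataFrame(rng.rand(40, 5), columns=["motif%d" % i for i in range(5)])
    df_y = pd.DataFrame(rng.rand(40, 2), columns=["cond_a", "cond_b"])
    model = MultiTaskLassoMoap(scale=False, ncpus=1)
    model.fit(df_X, df_y)  # fills model.act_ (motifs x conditions)
    return model.predict(df_X)  # regions x conditions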
def moap(
inputfile,
method="hypergeom",
scoring=None,
outfile=None,
motiffile=None,
pfmfile=None,
genome=None,
fpr=0.01,
ncpus=None,
subsample=None,
zscore=True,
gc=True,
):
"""Run a single motif activity prediction algorithm.
Parameters
----------
inputfile : str
File with regions (chr:start-end) in first column and either cluster
name in second column or a table with values.
method : str, optional
Motif activity method to use. Any of 'hypergeom', 'lasso',
'bayesianridge',
'rf', 'xgboost'. Default is 'hypergeom'.
scoring: str, optional
Either 'score' or 'count'
outfile : str, optional
Name of outputfile to save the fitted activity values.
motiffile : str, optional
Table with motif scan results. First column should be exactly the same
regions as in the inputfile.
pfmfile : str, optional
File with motifs in pwm format. Required when motiffile is not
supplied.
genome : str, optional
Genome name, as indexed by gimme. Required when motiffile is not
supplied
fpr : float, optional
FPR for motif scanning
ncpus : int, optional
Number of threads to use. Default is the number specified in the config.
zscore : bool, optional
Use z-score normalized motif scores.
gc : bool, optional
Use GC% bins for z-score.
Returns
-------
pandas DataFrame with motif activity
"""
if scoring and scoring not in ["score", "count"]:
raise ValueError("valid values are 'score' and 'count'")
if inputfile.endswith("feather"):
df = pd.read_feather(inputfile)
import numpy as np
import pandas as pd
import pickle
from sklearn.feature_selection import SelectKBest,f_regression
from sklearn.ensemble import RandomForestRegressor
import matplotlib.pyplot as plt
from sklearn import metrics
###########################
# Folder Name Setting
###########################
folder = 'J:/DATAMINING/KAGGLE/MLSP_BirdClassification/'
essential_folder = folder+'essential_data/'
supplemental_folder = folder+'supplemental_data/'
dp_folder = folder+'DP/'
subm_folder = folder+ 'Submission/'
log_folder = folder+ 'log/'
###################################################
## Read the Essential Data
## labels, training-test split,file_names etc.
###################################################
# Each audio file has a unique recording identifier ("rec_id"), ranging from 0 to 644.
# The file rec_id2filename.txt indicates which wav file is associated with each rec_id.
rec2f = pd.read_csv(essential_folder + 'rec_id2filename.txt', sep = ',')
# There are 19 bird species in the dataset. species_list.txt gives each a number from 0 to 18.
species = pd.read_csv(essential_folder + 'species_list.txt', sep = ',')
num_species = 19
# The dataset is split into training and test sets.
# CVfolds_2.txt gives the fold for each rec_id. 0 is the training set, and 1 is the test set.
cv = pd.read_csv(essential_folder + 'CVfolds_2.txt', sep = ',')
# This is your main label training data. For each rec_id, a set of species is listed. The format is:
# rec_id,[labels]
raw = pd.read_csv(essential_folder + 'rec_labels_test_hidden.txt', sep = ';')
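# Build a (recordings x species) binary indicator matrix from the 'rec_id,[labels]'
# lines; hidden test labels are marked '?' in the raw file and are left as 0 here.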
label = np.zeros(len(raw)*num_species)
label = label.reshape([len(raw),num_species])
for i in range(len(raw)):
line = raw.iloc[i]
labels = line[0].split(',')
labels.pop(0) # rec_id == i
for c in labels:
if(c != '?'):
label[i, int(c)] = 1
label = pd.DataFrame(label)
label['rec_id'] = cv.rec_id
label['fold'] = cv.fold
label['filename'] = rec2f.filename
spec_avg = label[label.fold ==0][range(num_species)].mean()
plt.plot(spec_avg,'go')
plt.plot(-np.log(spec_avg),'bo')
spec_num_features = -np.log(spec_avg)
hos = pd.read_csv(supplemental_folder + 'histogram_of_segments.txt', sep = ',',skiprows=1,header=0)
hos_features = ['hos_'+str(x) for x in range(100) ]
hos.columns = ['rec_id'] +hos_features
data = pd.merge(left = label, right = hos ,how = 'left', left_on = 'rec_id', right_on = 'rec_id')
data = data.fillna(0)
##############################
## TRAIN ##
##############################
train = data[data.fold==0]
pc = []
for i in range(len(train)):
s = train.filename.iloc[i][:5]
pc.append(int(s[2:s.find('_')] )) # PC1 - PC18
train['pc'] = pc
pkl_file = open(dp_folder + 'TRAIN_SPEC_FEATURES_freq5.pkl', 'rb')
tr_spec = pickle.load(pkl_file)
pkl_file.close()
spec_names = ['tr_spec_'+str(x) for x in range(tr_spec.shape[1]) ]
Spec_Df = pd.DataFrame(tr_spec,columns = spec_names )
Spec_Df['rec_id']= train.index
train2 = pd.merge(left = train, right = Spec_Df , left_index = True, right_on = 'rec_id')
pkl_file = open(dp_folder + 'TRAIN_LOG_SPEC_FEATURES_freq5.pkl', 'rb')
tr_log_spec = pickle.load(pkl_file)
pkl_file.close()
log_spec_names = ['tr_log_spec_'+str(x) for x in range(tr_log_spec.shape[1]) ]
Spec_Log_Df = pd.DataFrame(tr_log_spec,columns = log_spec_names )
Spec_Log_Df['rec_id']= train.index
train3 = pd.merge(left = train2, right = Spec_Log_Df , left_on = 'rec_id', right_on = 'rec_id')
##############################
## TEST ##
##############################
test = data[data.fold==1]
pc = []
for i in range(len(test)):
s = test.filename.iloc[i][:5]
pc.append(int(s[2:s.find('_')] )) # PC1 - PC18
test['pc'] = pc
pkl_file = open(dp_folder + 'TEST_SPEC_FEATURES_freq5.pkl', 'rb')
test_spec = pickle.load(pkl_file)
pkl_file.close()
Test_Spec_Df = pd.DataFrame(test_spec,columns = spec_names )
Test_Spec_Df['rec_id']= test.index
test2 = pd.merge(left = test, right = Test_Spec_Df , left_index = True, right_on = 'rec_id')
pkl_file = open(dp_folder + 'TEST_LOG_SPEC_FEATURES_freq5.pkl', 'rb')
test_log_spec = pickle.load(pkl_file)
pkl_file.close()
Test_Spec_Log_Df = pd.DataFrame(test_log_spec,columns = log_spec_names )
Test_Spec_Log_Df['rec_id']= test.index
test3 = pd.merge(left = test2, right = Test_Spec_Log_Df , left_on = 'rec_id', right_on = 'rec_id')
#######################################################
## PARAMETER OPTIMIZATION & SUBMISSION CREATION ##
#######################################################
CV_FOLDS = 15
RESULT = []
rs = 0
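# Grid search over feature-selection size and random-forest hyper-parameters.
# For each setting: run CV_FOLDS-fold CV per species, pool the out-of-fold
# predictions to compute a single AUC, and average the test-set predictions
# across folds for the submission.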
for ID in range(1):
for NUM_FEATURES in range(40,50,10):
for N_ESTIMATORS in range(500,501,100):
for MAX_FEATURES in range(4,5):
for MIN_SAMPLES_SPLIT in range(2,3):
cv = np.random.randint(0,CV_FOLDS,len(train))
train3['cv'] = cv
labeled_vector = []
predicted_vector = []
predicted_test_vector = []
for bird in range(num_species):
predicted_test_vector.append(np.zeros(len(test3)))
for c in range(CV_FOLDS):
df_10 = train3[train3.cv == c]
df_90 = train3[train3.cv != c]
X_90 = df_90[spec_names+hos_features+['pc']+log_spec_names]
X_10 = df_10[spec_names+hos_features+['pc']+log_spec_names]
T = test3[spec_names+hos_features+['pc']+log_spec_names]
for bird in range(num_species):
rs = rs+1
y_90 = df_90[bird]
y_10 = df_10[bird]
selector = SelectKBest(f_regression,NUM_FEATURES + 50 -int(spec_num_features[bird]*10))
selector.fit(X_90, y_90)
df_90_features = selector.transform(X_90)
df_10_features = selector.transform(X_10)
T_features = selector.transform(T)
rfr = RandomForestRegressor(n_estimators = N_ESTIMATORS, max_features = MAX_FEATURES, min_samples_split = MIN_SAMPLES_SPLIT,random_state = rs*100, verbose = 0)
rfr.fit(df_90_features,y_90)
p_10 = rfr.predict(df_10_features)
T_pred = rfr.predict(T_features)
predicted_vector = predicted_vector + list(p_10)
labeled_vector = labeled_vector + list(y_10)
predicted_test_vector[bird] = predicted_test_vector[bird] + T_pred/CV_FOLDS
fpr, tpr, thresholds = metrics.roc_curve(labeled_vector, predicted_vector, pos_label=1)
auc = metrics.auc(fpr,tpr)
RESULT.append([ID,NUM_FEATURES,N_ESTIMATORS,MAX_FEATURES,MIN_SAMPLES_SPLIT,CV_FOLDS,auc])
ResultDf = pd.DataFrame(RESULT,columns=['ID','NUM_FEATURES','N_ESTIMATORS','MAX_FEATURES','MIN_SAMPLES_SPLIT','CV_FOLDS','AUC'])
"""
SparseArray data structure
"""
from __future__ import division
import numbers
import operator
import re
from typing import Any, Callable, Union
import warnings
import numpy as np
from pandas._libs import index as libindex, lib
import pandas._libs.sparse as splib
from pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex
from pandas._libs.tslibs import NaT
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.cast import (
astype_nansafe, construct_1d_arraylike_from_scalar, find_common_type,
infer_dtype_from_scalar, maybe_convert_platform)
from pandas.core.dtypes.common import (
is_array_like, is_bool_dtype, is_datetime64_any_dtype, is_dtype_equal,
is_integer, is_list_like, is_object_dtype, is_scalar, is_string_dtype,
pandas_dtype)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.dtypes.generic import (
ABCIndexClass, ABCSeries, ABCSparseSeries)
from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
from pandas.core.accessor import PandasDelegate, delegate_names
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
import pandas.io.formats.printing as printing
# ----------------------------------------------------------------------------
# Dtype
@register_extension_dtype
class SparseDtype(ExtensionDtype):
"""
Dtype for data stored in :class:`SparseArray`.
This dtype implements the pandas ExtensionDtype interface.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
The dtype of the underlying array storing the non-fill value values.
fill_value : scalar, optional
The scalar value not stored in the SparseArray. By default, this
depends on `dtype`.
=========== ==========
dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool ``False``
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
The default value may be overridden by specifying a `fill_value`.
"""
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
# between SparseDtype(float, 0.0) and SparseDtype(float, nan).
# Without is_na_fill_value in the comparison, those would be equal since
# hash(nan) is (sometimes?) 0.
_metadata = ('_dtype', '_fill_value', '_is_na_fill_value')
def __init__(self, dtype=np.float64, fill_value=None):
# type: (Union[str, np.dtype, 'ExtensionDtype', type], Any) -> None
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.dtypes.common import (
pandas_dtype, is_string_dtype, is_scalar
)
if isinstance(dtype, type(self)):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
dtype = pandas_dtype(dtype)
if is_string_dtype(dtype):
dtype = np.dtype('object')
if fill_value is None:
fill_value = na_value_for_dtype(dtype)
if not is_scalar(fill_value):
raise ValueError("fill_value must be a scalar. Got {} "
"instead".format(fill_value))
self._dtype = dtype
self._fill_value = fill_value
def __hash__(self):
# Python3 doesn't inherit __hash__ when a base class overrides
# __eq__, so we explicitly do it here.
return super(SparseDtype, self).__hash__()
def __eq__(self, other):
# We have to override __eq__ to handle NA values in _metadata.
# The base class does simple == checks, which fail for NA.
if isinstance(other, compat.string_types):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
subtype = self.subtype == other.subtype
if self._is_na_fill_value:
# this case is complicated by two things:
# SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
# SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
# i.e. we want to treat any floating-point NaN as equal, but
# not a floating-point NaN and a datetime NaT.
fill_value = (
other._is_na_fill_value and
isinstance(self.fill_value, type(other.fill_value)) or
isinstance(other.fill_value, type(self.fill_value))
)
else:
fill_value = self.fill_value == other.fill_value
return subtype and fill_value
return False
@property
def fill_value(self):
"""
The fill value of the array.
Converting the SparseArray to a dense ndarray will fill the
array with this value.
.. warning::
It's possible to end up with a SparseArray that has ``fill_value``
values in ``sp_values``. This can occur, for example, when setting
``SparseArray.fill_value`` directly.
"""
return self._fill_value
@property
def _is_na_fill_value(self):
from pandas.core.dtypes.missing import isna
return isna(self.fill_value)
@property
def _is_numeric(self):
from pandas.core.dtypes.common import is_object_dtype
return not is_object_dtype(self.subtype)
@property
def _is_boolean(self):
from pandas.core.dtypes.common import is_bool_dtype
return is_bool_dtype(self.subtype)
@property
def kind(self):
"""
The sparse kind. Either 'integer', or 'block'.
"""
return self.subtype.kind
@property
def type(self):
return self.subtype.type
@property
def subtype(self):
return self._dtype
@property
def name(self):
return 'Sparse[{}, {}]'.format(self.subtype.name, self.fill_value)
def __repr__(self):
return self.name
@classmethod
def construct_array_type(cls):
return SparseArray
@classmethod
def construct_from_string(cls, string):
"""
Construct a SparseDtype from a string form.
Parameters
----------
string : str
Can take the following forms.
string dtype
================ ============================
'int' SparseDtype[np.int64, 0]
'Sparse' SparseDtype[np.float64, nan]
'Sparse[int]' SparseDtype[np.int64, 0]
'Sparse[int, 0]' SparseDtype[np.int64, 0]
================ ============================
It is not possible to specify non-default fill values
with a string. An argument like ``'Sparse[int, 1]'``
will raise a ``TypeError`` because the default fill value
for integers is 0.
Returns
-------
SparseDtype
"""
msg = "Could not construct SparseDtype from '{}'".format(string)
if string.startswith("Sparse"):
try:
sub_type, has_fill_value = cls._parse_subtype(string)
result = SparseDtype(sub_type)
except Exception:
raise TypeError(msg)
else:
msg = ("Could not construct SparseDtype from '{}'.\n\nIt "
"looks like the fill_value in the string is not "
"the default for the dtype. Non-default fill_values "
"are not supported. Use the 'SparseDtype()' "
"constructor instead.")
if has_fill_value and str(result) != string:
raise TypeError(msg.format(string))
return result
else:
raise TypeError(msg)
@staticmethod
def _parse_subtype(dtype):
"""
Parse a string to get the subtype
Parameters
----------
dtype : str
A string like
* Sparse[subtype]
* Sparse[subtype, fill_value]
Returns
-------
subtype : str
Raises
------
ValueError
When the subtype cannot be extracted.
"""
xpr = re.compile(
r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$"
)
m = xpr.match(dtype)
has_fill_value = False
if m:
subtype = m.groupdict()['subtype']
has_fill_value = m.groupdict()['fill_value'] or has_fill_value
elif dtype == "Sparse":
subtype = 'float64'
else:
raise ValueError("Cannot parse {}".format(dtype))
return subtype, has_fill_value
@classmethod
def is_dtype(cls, dtype):
dtype = getattr(dtype, 'dtype', dtype)
if (isinstance(dtype, compat.string_types) and
dtype.startswith("Sparse")):
sub_type, _ = cls._parse_subtype(dtype)
dtype = np.dtype(sub_type)
elif isinstance(dtype, cls):
return True
return isinstance(dtype, np.dtype) or dtype == 'Sparse'
def update_dtype(self, dtype):
"""
Convert the SparseDtype to a new dtype.
This takes care of converting the ``fill_value``.
Parameters
----------
dtype : Union[str, numpy.dtype, SparseDtype]
The new dtype to use.
* For a SparseDtype, it is simply returned
* For a NumPy dtype (or str), the current fill value
is converted to the new dtype, and a SparseDtype
with `dtype` and the new fill value is returned.
Returns
-------
SparseDtype
A new SparseDtype with the correct `dtype` and fill value
for that `dtype`.
Raises
------
ValueError
When the current fill value cannot be converted to the
new `dtype` (e.g. trying to convert ``np.nan`` to an
integer dtype).
Examples
--------
>>> SparseDtype(int, 0).update_dtype(float)
Sparse[float64, 0.0]
>>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
Sparse[float64, nan]
"""
cls = type(self)
dtype = pandas_dtype(dtype)
if not isinstance(dtype, cls):
fill_value = astype_nansafe(np.array(self.fill_value),
dtype).item()
dtype = cls(dtype, fill_value=fill_value)
return dtype
@property
def _subtype_with_str(self):
"""
Whether the SparseDtype's subtype should be considered ``str``.
Typically, pandas will store string data in an object-dtype array.
When converting values to a dtype, e.g. in ``.astype``, we need to
be more specific, we need the actual underlying type.
Returns
-------
>>> SparseDtype(int, 1)._subtype_with_str
dtype('int64')
>>> SparseDtype(object, 1)._subtype_with_str
dtype('O')
>>> dtype = SparseDtype(str, '')
>>> dtype.subtype
dtype('O')
>>> dtype._subtype_with_str
str
"""
if isinstance(self.fill_value, compat.string_types):
return type(self.fill_value)
return self.subtype
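# A short illustration (not part of the original file) of how the string forms and
# default fill values described above behave; outputs mirror the docstring tables:
#
#   >>> SparseDtype.construct_from_string("Sparse[int]")
#   Sparse[int64, 0]
#   >>> SparseDtype.construct_from_string("Sparse")
#   Sparse[float64, nan]
#   >>> SparseDtype(np.dtype("datetime64[ns]")).fill_value
#   NaT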
# ----------------------------------------------------------------------------
# Array
_sparray_doc_kwargs = dict(klass='SparseArray')
def _get_fill(arr):
# type: (SparseArray) -> np.ndarray
"""
Create a 0-dim ndarray containing the fill value
Parameters
----------
arr : SparseArray
Returns
-------
fill_value : ndarray
0-dim ndarray with just the fill value.
Notes
-----
coerce fill_value to arr dtype if possible
int64 SparseArray can have NaN as fill_value if there is no missing
"""
try:
return np.asarray(arr.fill_value, dtype=arr.dtype.subtype)
except ValueError:
return np.asarray(arr.fill_value)
def _sparse_array_op(left, right, op, name):
# type: (SparseArray, SparseArray, Callable, str) -> Any
"""
Perform a binary operation between two arrays.
Parameters
----------
left : Union[SparseArray, ndarray]
right : Union[SparseArray, ndarray]
op : Callable
The binary operation to perform
name : str
Name of the callable.
Returns
-------
SparseArray
"""
if name.startswith('__'):
# For lookups in _libs.sparse we need non-dunder op name
name = name[2:-2]
# dtype used to find corresponding sparse method
ltype = left.dtype.subtype
rtype = right.dtype.subtype
if not is_dtype_equal(ltype, rtype):
#+ Common data-science utilities
import matplotlib as mpl
import pandas as pd
import numpy as np
from numpy.linalg import pinv
import matplotlib.pyplot as plt
import matplotlib.style as style
import seaborn as sns
from sklearn.preprocessing import PowerTransformer
import category_encoders as ce
from sklearn.model_selection import StratifiedKFold, KFold
from joblib import Parallel, delayed
import multiprocessing
from scipy import stats
from pandas.api.types import is_datetime64_any_dtype as is_datetime
from pandas.api.types import is_categorical_dtype
from sklearn.model_selection import KFold
from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples
from sklearn.utils.validation import _deprecate_positional_args
from openpyxl import load_workbook, Workbook
import xlrd
# ---------------------------------
# Visualization helpers
# ---------------------------------
def set_format():
pd.set_eng_float_format(accuracy=2, use_eng_prefix=False)
def set_matplotlib():
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']
mpl.rcParams['figure.dpi'] = 100
# style.available
# style.use('ggplot')
def whiteboard(row=1, col=1, dpi=100):
fig, ax = plt.subplots(row, col, figsize=(6.4, 4.8), dpi=dpi)
return (fig, ax)
def count_plot(df, var, type='bar'):
fig, ax = whiteboard()
counts = df[var].value_counts()
if type == 'bar':
counts.head(15).sort_values().plot.barh(ax=ax)
else:
counts.sort_index().plot.line(ax=ax)
ax.set_xlabel('count')
ax.set_ylabel('value')
ax.set_title(var)
def kde_by_target(df_raw, var, target, cut=0.99):
if cut is not None:
upper = df_raw[var].quantile(cut)
df = df_raw[df_raw[var] <= upper]
else:
df = df_raw
# Most variables have long tails, so truncate them to keep the plot readable
fig, ax = whiteboard()
for y in df[target].unique():
sub = df[df[target] == y]
sns.distplot(sub[var], hist=False, ax=ax, label=str(y),
kde_kws={"lw": 0.7})
ax.legend()
def series_plot(series, xdate=True, xlabel='date', ylabel='', title=''):
fig, ax = whiteboard()
series.plot(ax=ax, linewidth=1.0)
if xdate:
fig.autofmt_xdate()
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
ax.set_title(title)
def sns_sparse_xticks(plot_, freq):
for ind, label in enumerate(plot_.get_xticklabels()):
if ind % freq == 0: # every 10th label is kept
label.set_visible(True)
else:
label.set_visible(False)
# ---------------------------------
# Descriptive statistics
# ---------------------------------
def count_na(df):
missing_dict = df.isna().sum()
return missing_dict[missing_dict > 0]
def groupby_info(df, col, target):
return df.groupby(target)[col].agg(mean=np.mean, median=np.median, mode=lambda x: stats.mode(x)[0][0], max=np.max,
min=np.min, std=np.std)
# ---------------------------------
# Feature cleaning
# ---------------------------------
# Missing values
def replace(x, from_, to_):
tmp = x.copy()
tmp[tmp == from_] = to_
return tmp
def group_fillna(df, col, target, method='mean'):
if method == 'mean':
result = df.groupby([target], sort=False)[col].apply(lambda x: x.fillna(x.mean()))
elif method == 'median':
result = df.groupby([target], sort=False)[col].apply(lambda x: x.fillna(x.median()))
return result
# Outliers
def windsorize(series, upper, lower):
return series.clip(lower=lower, upper=upper)
def cap(x, extreme=5):
# Clipping width based on quantiles (half the interquartile range)
width = (x.quantile(0.75) - x.quantile(0.25)) / 2
median = x.median()
return x.clip(median - extreme * width, median + extreme * width)
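# Hedged usage sketch (variable names are made up): winsorize a heavy-tailed series
# either at fixed quantiles or around the median with `cap`.
#   s_capped = cap(s, extreme=5)  # clip at median +/- 5 * (IQR / 2)
#   s_winsor = windsorize(s, upper=s.quantile(0.99), lower=s.quantile(0.01))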
# ---------------------------------
# Single-feature transformations
# ---------------------------------
def box_cox(x_train, x_test=None):
bc = PowerTransformer(method='box-cox')
bc = bc.fit(x_train)
x_train_bc = bc.transform(x_train)
if x_test is not None:
x_test_bc = bc.transform(x_test)
else:
x_test_bc = None
return (x_train_bc, x_test_bc)
def standardize(x_train, x_test=None, cut=None):
"""
cut: clip values that lie beyond cut standard deviations
"""
avg, var = x_train.mean(), x_train.std()
x_train_s = (x_train - avg) / var
if cut is not None:
x_train_s = windsorize(x_train_s, cut, -cut)
if x_test is not None:
x_test_s = (x_test - avg) / var
if cut is not None:
x_test_s = windsorize(x_test_s, cut, -cut)
else:
x_test_s = None
return (x_train_s, x_test_s)
def bin(x, n_scatter=10):
"""
Quantile-bin a continuous variable
"""
result = pd.qcut(x, n_scatter)
return result
# ---------------------------------
# Feature encoding
# ---------------------------------
# One-hot encoding for categorical columns with get_dummies
def one_hot_encoder(df, categorical_columns=None, nan_as_category=True, min_count=100, inplace=True):
original_columns = list(df.columns)
if categorical_columns is None: # automatically detect categorical columns
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
result = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
new_columns = [c for c in result.columns if c not in original_columns]
cat_columns = [c for c in original_columns if c not in result.columns]
if not inplace: # keep the original categorical columns in the returned frame
for c in cat_columns:
result[c] = df[c]
i = 0
for c in list(new_columns): # merge rare categories: drop dummies whose positive or negative count is below min_count
if (result[c].sum() < min_count) or ((result.shape[0] - result[c].sum()) < min_count):
i += 1
del result[c]
new_columns.remove(c)
if i == 0:
del result[c] # drop one dummy column as the reference level
return result, new_columns
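# Hedged usage sketch (df is assumed to hold object-dtype categorical columns):
# one-hot encode them, drop dummies whose positive or negative count is below
# min_count, and get back the list of dummy columns that were kept.
#   encoded, new_cols = one_hot_encoder(df, categorical_columns=None,
#                                       nan_as_category=True, min_count=100)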
# Discretize a continuous feature, then one-hot encode the bins
def one_hot_encoder_continus(df, col, n_scatter=10, nan_as_category=True, min_count=100):
df[col + '_scatter'] = pd.qcut(df[col], n_scatter)
result, new_cols = one_hot_encoder(df, [col + '_scatter'], nan_as_category=nan_as_category, min_count=min_count,
inplace=True)
return result, new_cols
# count encoding
def count_encoding(li):
temp = pd.Series(li)
result = temp.map(temp.value_counts())
return result
# cv method for the following encoders
def cv_encoding(encoding_func, X, y, cols, target_type=None, n_splits=10, **kwargs):
if target_type is None:
if y.dtype == int or y.dtype == object:
target_type = 'cat'
else:
target_type = 'con'
if target_type == 'cat':
kf = StratifiedKFold(n_splits=n_splits)
split = kf.split(X, y)
else:
kf = KFold(n_splits=n_splits)
split = kf.split(X)
collect = []
for train_index, test_index in split:
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
collect.append(encoding_func(X_train, y_train, cols, X_test, **kwargs))
return pd.concat(collect)
# target encoding
def target_encoding(X_fit, y_fit, cols, X_test=None, smoothing=0):
"""
Mean (target) encoding for continuous and binomial targets.
X_fit: DataFrame used to fit the encoding; must contain cols
y_fit: target used for the encoding
X_test: data to transform
cols: columns to encode
smoothing: weight given to the prior
"""
if X_test is None:
X_test = X_fit
encoder = ce.TargetEncoder(cols=cols, smoothing=smoothing)
encoder.fit(X_fit, y_fit)
result = encoder.transform(X_test)
return result
# WOE encoding
def woe_encoding(X_fit, y_fit, cols, X_test=None, sigma=0):
"""
Weight-of-evidence encoding; only for binomial targets.
X_fit: DataFrame used to fit the encoding; must contain cols
y_fit: target used for the encoding
X_test: data to transform
cols: columns to encode
sigma: std of the Gaussian noise added to reduce overfitting
"""
if X_test is None:
X_test = X_fit
encoder = ce.WOEEncoder(cols=cols, sigma=sigma)
encoder.fit(X_fit, y_fit)
result = encoder.transform(X_test)
return result
# James-Stein encoding
def js_encoding(X_fit, y_fit, cols, X_test=None, model='independent'):
"""
James-Stein encoding; only for continuous targets.
X_fit: DataFrame used to fit the encoding; must contain cols
y_fit: target used for the encoding
X_test: data to transform
cols: columns to encode
model: 'pooled' or 'independent'; 'pooled' assumes every group shares the same variance, matching the definition in the CASI book
"""
if X_test is None:
X_test = X_fit
encoder = ce.JamesSteinEncoder(cols=cols, model=model)
encoder.fit(X_fit, y_fit)
result = encoder.transform(X_test)
return result
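# Hedged usage sketch (column names are made up): out-of-fold target encoding via
# cv_encoding, so each row's encoding never uses that row's own target value.
#   encoded_cols = cv_encoding(target_encoding, X, y, cols=["city", "channel"],
#                              n_splits=10, smoothing=0.3)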
# ---------------------------------
# Time Series Analysis
# ---------------------------------
# kalman filter rolling least square
def RLS(x, y, beta_init=None, R_init=None, delta=0.02, Ve=0.001, intercept=True):
n, p = x.shape
if intercept:
intercept_ = np.ones([n, 1])
x = np.hstack([intercept_, x])
p += 1
yhat = np.zeros(n)
e = np.zeros(n)
Q = np.zeros(n)
if R_init is None:
R = np.zeros([p, p])
else:
R = R_init
beta = np.zeros([p, n])
Vw = delta / (1 - delta) * np.eye(p)
# Ve =
# initialize
if beta_init is not None:
beta[:, 0] = beta_init
# kalman loop
for t in range(n):
if t > 0:
beta[:, t] = beta[:, t-1] # state prediction
R = P + Vw # state covariance prediction
xt = x[t, :]
yhat[t] = xt.dot(beta[:, t]) # measurement prediction
Q[t] = xt.dot(R).dot(xt.T) + Ve # measurement variance
e[t] = y[t] - yhat[t] # measurement residual
K = R.dot(xt) / Q[t] # kalman gain
beta[:, t] = beta[:, t] + K * e[t] # state update
P = (1 - K.dot(xt)) * R
return beta
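# --- Hedged usage sketch (not part of the original module) ---
# Recover a slowly drifting regression coefficient with the Kalman-filter RLS above.
# The data is synthetic and the parameter values are the function defaults.
def _example_rls_usage():  # hypothetical helper, safe to delete
    n = 500
    x = np.cumsum(np.random.randn(n)).reshape(-1, 1)  # single random-walk regressor
    true_beta = np.linspace(0.5, 1.5, n)              # coefficient drifts over time
    y = true_beta * x[:, 0] + 0.1 * np.random.randn(n)
    beta_path = RLS(x, y, delta=0.02, Ve=0.001, intercept=True)
    return beta_path  # shape (2, n): row 0 = intercept path, row 1 = slope path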
# kalman filter rls with ols start
def ols_start_rls(x, y, start=100, delta=0.1, Ve=0.002, intercept=True):
x_start = x[:start, :]
y_start = y[:start]
if intercept:
intercept_ = np.ones([start, 1])
x_start = np.hstack([intercept_, x_start])
_, p = x_start.shape
beta_init = pinv(x_start.T.dot(x_start)).dot(x_start.T.dot(y_start))
e = y_start - x_start.dot(beta_init)
sig_hat_square = e.dot(e) / (start - p - 1)
R_init = sig_hat_square * pinv(x_start.T.dot(x_start))
x_tail = x[start:, :]
y_tail = y[start:]
beta_tail = RLS(x_tail, y_tail, beta_init=beta_init, R_init=R_init, delta=delta, Ve=Ve, intercept=intercept)
return (beta_init, beta_tail)
# ---------------------------------
# Miscellaneous
# ---------------------------------
# List the sheet names of an Excel workbook
def get_sheetname(fp):
xls = xlrd.open_workbook(fp, on_demand=True)
return xls.sheet_names()
def _mdf_func(func, name, group):
return func(group), name
# Parallel version of df.groupby(col).apply
def applyParallel(dfGrouped, func):
retLst, top_index = zip(*Parallel(n_jobs=multiprocessing.cpu_count())(
delayed(_mdf_func)(func, name, group) for name, group in dfGrouped))
res = pd.concat(retLst, axis=1)
import warnings
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Index, MultiIndex
import pandas._testing as tm
def test_drop(idx):
dropped = idx.drop([("foo", "two"), ("qux", "one")])
index = MultiIndex.from_tuples([("foo", "two"), ("qux", "one")])
dropped2 = idx.drop(index)
expected = idx[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = idx.drop(["bar"])
expected = idx[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop("foo")
expected = idx[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop([("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop(index)
with pytest.raises(KeyError, match=r"^'two'$"):
idx.drop(["foo", "two"])
# partially correct argument
mixed_index = MultiIndex.from_tuples([("qux", "one"), ("bar", "two")])
with pytest.raises(KeyError, match=r"^10$"):
idx.drop(mixed_index)
# error='ignore'
dropped = idx.drop(index, errors="ignore")
expected = idx[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop(mixed_index, errors="ignore")
expected = idx[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = idx.drop(["foo", "two"], errors="ignore")
expected = idx[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = idx.drop(["foo", ("qux", "one")])
expected = idx[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ["foo", ("qux", "one"), "two"]
with pytest.raises(KeyError, match=r"^'two'$"):
idx.drop(mixed_index)
dropped = idx.drop(mixed_index, errors="ignore")
expected = idx[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(idx):
index = idx[idx.get_loc("foo")]
dropped = index.droplevel(0)
assert dropped.name == "second"
index = MultiIndex(
levels=[Index(range(4)), Index(range(4)), Index(range(4))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
names=["one", "two", "three"],
)
dropped = index.droplevel(0)
assert dropped.names == ("two", "three")
dropped = index.droplevel("two")
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list():
index = MultiIndex(
levels=[Index(range(4)), Index(range(4)), Index(range(4))],
codes=[
np.array([0, 0, 1, 2, 2, 2, 3, 3]),
np.array([0, 1, 0, 0, 0, 1, 0, 1]),
np.array([1, 0, 1, 1, 0, 0, 1, 0]),
],
names=["one", "two", "three"],
)
dropped = index[:2].droplevel(["three", "one"])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
msg = (
"Cannot remove 3 levels from an index with 3 levels: "
"at least one level must be left"
)
with pytest.raises(ValueError, match=msg):
index[:2].droplevel(["one", "two", "three"])
with pytest.raises(KeyError, match="'Level four not found'"):
index[:2].droplevel(["one", "four"])
def test_drop_not_lexsorted():
# GH 12078
# define the lexsorted version of the multi-index
tuples = [("a", ""), ("b1", "c1"), ("b2", "c2")]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=["b", "c"])
assert lexsorted_mi._is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(
columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]]
)
df = df.pivot_table(index="a", columns=["b", "c"], values="d")
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi._is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
import xml.etree.ElementTree as ET
import openpyxl
from openpyxl import Workbook, load_workbook
import pandas as pd
from pandas import ExcelWriter
import csv
import numpy as np
def get_armor(xml_file):
# CREATES TREE AND ROOT
tree = ET.parse(xml_file)
root = tree.getroot()
# head_armor = 'Head Armor'
# body_armor = 'Body Armor'
# arm_armor = 'Arm Armor'
# leg_armor = 'Leg Armor'
ar_2D_list = []
for ar in root.findall('Item'):
id_ar = ar.get('id')
name = ar.get('name').partition("}")[2]
culture = ar.get('culture').partition(".")[2]
if culture == "neutral_culture":
culture = "neutral"
weight = ar.get('weight')
# if num_of_ratings == 3:
# ar_rating_1 = ar.find('ItemComponent').find('Armor').get(armor_type_1)
# ar_rating_2 = ar.find('ItemComponent').find('Armor').get(armor_type_2)
# ar_rating_3 = ar.find('ItemComponent').find('Armor').get(armor_type_3)
# entry = [id_ar, name, culture, ar_rating_1, ar_rating_2, ar_rating_3, weight]
# elif num_of_ratings == 2:
# ar_rating_1 = ar.find('ItemComponent').find('Armor').get(armor_type_1)
# ar_rating_2 = ar.find('ItemComponent').find('Armor').get(armor_type_2)
# entry = [id_ar, name, culture, ar_rating_1, ar_rating_2, weight]
# elif num_of_ratings == 1:
# ar_rating_1 = ar.find('ItemComponent').find('Armor').get(armor_type_1)
# entry = [id_ar, name, culture, ar_rating_1, weight]
ar_rating_head = ar.find('ItemComponent').find('Armor').get('head_armor')
ar_rating_body = ar.find('ItemComponent').find('Armor').get('body_armor')
ar_rating_arm = ar.find('ItemComponent').find('Armor').get('arm_armor')
ar_rating_leg = ar.find('ItemComponent').find('Armor').get('leg_armor')
entry = [id_ar, name, culture, ar_rating_head, ar_rating_body, ar_rating_arm, ar_rating_leg, weight]
entry = ["0" if i == None else i for i in entry]
print(entry)
ar_2D_list.append(entry)
file = open('mb2.csv', 'w', newline='')
with file:
write = csv.writer(file)
write.writerows(ar_2D_list)
df = pd.read_csv("mb2.csv")
ws_name = xml_file.partition('_')[0]
with ExcelWriter('MB2.xlsx', mode='a') as writer:
# at1 = armor_type_1.partition("_")
# at1 = at1[0].capitalize() + " " + at1[2].capitalize()
# if num_of_ratings >= 2:
# at2 = armor_type_2.partition("_")
# at2 = at2[0].capitalize() + " " + at2[2].capitalize()
# if num_of_ratings == 3:
# at3 = armor_type_3.partition("_")
# at3 = at3[0].capitalize() + " " + at3[2].capitalize()
# df.to_excel(writer, sheet_name=ws_name, index=False, header=["ID", "Name", "Culture", at1, at2, at3, "Weight"])
# elif num_of_ratings == 2:
# df.to_excel(writer, sheet_name=ws_name, index=False, header=["ID", "Name", "Culture", at1, at2, "Weight"])
# elif num_of_ratings == 1:
# df.to_excel(writer, sheet_name=ws_name, index=False, header=["ID", "Name", "Culture", at1, "Weight"])
df.to_excel(writer, sheet_name=ws_name, index=False, header=["ID", "Name", "Culture", "Head Armor", "Body Armor", "Arm Armor", "Leg Armor", "Weight"])
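# Hedged usage note: the sheet name is taken from the part of the XML file name
# before the first underscore, so a call is assumed to look like
#   get_armor('head_armors.xml')   # appends a 'head' sheet to MB2.xlsx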
def get_npc(xml_file):
# CREATES TREE AND ROOT
tree = ET.parse(xml_file)
root = tree.getroot()
# INITIALIZES A 2D LIST TO STORE NPC'S AND THEIR STATS/EQUIPMENT; WILL BE USED TO PUT INTO A PANDAS DATAFRAME
npc_2D_list = []
for npc in root.findall('NPCCharacter'):
id_npc = npc.get('id')
culture = npc.get('culture').partition(".")[2]
name = npc.get('name').partition("}")[2]
troop_type = npc.get('default_group')
occupation = npc.get('occupation')
# CREATES THE AN ENTRY LIST; EACH ENTRY LIST REPRESENTS AN NPC
entry = [id_npc, culture, name, troop_type, occupation]
skills = npc.find('skills')
skill_dict = {}
# print(name)
for sk in skills.findall('skill'):
sk_id = sk.get('id')
print(sk_id)
val = sk.get('value')
print(val)
skill_dict[sk_id] = val
entry.append(skill_dict['OneHanded'])
entry.append(skill_dict['TwoHanded'])
entry.append(skill_dict['Polearm'])
entry.append(skill_dict['Bow'])
entry.append(skill_dict['Crossbow'])
entry.append(skill_dict['Throwing'])
entry.append(skill_dict['Riding'])
entry.append(skill_dict['Athletics'])
equipments = npc.find('Equipments')
for eq_roster in equipments.findall('EquipmentRoster'):
for eq in eq_roster:
slot = eq.get('slot')
id_eq = eq.get('id').partition(".")[2]
entry.append(slot + "_" + id_eq)
npc_2D_list.append(entry)
df = pd.DataFrame(npc_2D_list)
ws_name = xml_file.partition('.')[0]
print(ws_name)
with ExcelWriter('MB2.xlsx', mode='a') as writer:
df.to_excel(writer, sheet_name=ws_name, index=False)
def convert_id_to_name(xml_file):
# CREATES TREE AND ROOT
tree = ET.parse(xml_file)
root = tree.getroot()
eqpmnt_dict = {}
for child in root:
eqpmnt_id = child.get('id')
eqpmnt_name = child.get('name').partition('}')[2]
eqpmnt_dict[eqpmnt_id] = eqpmnt_name
print(eqpmnt_dict)
df = pd.read_excel('MB2.xlsx', sheet_name='npcs')
for rowIndex, row in df.iterrows():
for columnIndex, value in row.items():
if type(value) == str:
value_tag = value.partition('_')[0]
value_id = value.partition('_')[2]
if value_tag in ['Item0', 'Item1', 'Item2', 'Item3', 'Head', 'Cape', 'Body','Gloves', 'Leg']:
df.at[rowIndex, columnIndex] = eqpmnt_dict.get(value_id)
with ExcelWriter('MB2.xlsx', mode='a') as writer:
df.to_excel(writer, sheet_name='npcRevised', index=False)
def get_npc_armor_avg():
# ACCESS EVERY SHEET IN THE XLSX FILE
df_npc = pd.read_excel('MB2.xlsx', sheet_name='npcs')
df_head = pd.read_excel('MB2.xlsx', sheet_name='head')
df_shld = pd.read_excel('MB2.xlsx', sheet_name='shoulder')
"""
Functions having to do with loading data from output of
files downloaded in scripts/download_data_glue.py
"""
import codecs
import csv
import json
import numpy as np
import pandas as pd
from allennlp.data import vocabulary
from jiant.utils.tokenizers import get_tokenizer
from jiant.utils.retokenize import realign_spans
def load_span_data(tokenizer_name, file_name, label_fn=None, has_labels=True):
"""
Load a span-related task file in .jsonl format, does re-alignment of spans, and tokenizes
the text.
Re-alignment of spans involves transforming the spans so that it matches the text after
tokenization.
For example, given the original text: [Mr., Porter, is, nice] and bert-base-cased
tokenization, we get [Mr, ., Por, ter, is, nice ]. If the original span indices was [0,2],
under the new tokenization, it becomes [0, 3].
The task file should of be of the following form:
text: str,
label: bool
target: dict that contains the spans
Args:
tokenizer_name: str,
file_name: str,
label_fn: function that expects a row and outputs a transformed row with labels
transformed.
Returns:
List of dictionaries of the aligned spans and tokenized text.
"""
rows = pd.read_json(file_name, lines=True)