prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90)
---|---|---
"""Functions for plotting sipper data."""
from collections import defaultdict
import datetime
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
from sipper import SipperError
#---dates and shading
def convert_dt64_to_dt(dt64):
"""Converts numpy datetime to standard datetime (needed for shade_darkness
function in most cases)."""
new_date = ((dt64 - np.datetime64('1970-01-01T00:00:00'))/
np.timedelta64(1, 's'))
new_date = datetime.datetime.utcfromtimestamp(new_date)
return new_date
def hours_between(start, end, convert=True):
"""
Create a range of hours between two dates.
Parameters
----------
start, end : datetime-like object
When to begin and end the data range
convert : bool, optional
Whether to convert the start/end arguments from numpy datetime to
standard datetime. The default is True.
Returns
-------
pandas DateTimeIndex
Index array of all hours between start and end.
"""
if convert:
start = convert_dt64_to_dt(start)
end = convert_dt64_to_dt(end)
rounded_start = datetime.datetime(year=start.year,
month=start.month,
day=start.day,
hour=start.hour)
rounded_end = datetime.datetime(year=end.year,
month=end.month,
day=end.day,
hour=end.hour)
return pd.date_range(rounded_start,rounded_end,freq='1H')
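# Hedged usage sketch (dates are illustrative, not taken from the original code);
# with convert=False the function accepts plain datetime objects directly:
#   >>> hrs = hours_between(datetime.datetime(2021, 1, 1, 8, 30),
#   ...                     datetime.datetime(2021, 1, 1, 11, 10), convert=False)
#   >>> len(hrs)
#   4
# i.e. the hourly index 08:00, 09:00, 10:00 and 11:00 on 2021-01-01.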
def is_day_or_night(time, period, lights_on=7, lights_off=19):
"""
    Check whether a datetime occurred during the day or at night
Parameters
----------
time : datetime or pandas.Timestamp
time to check
period : str
'day' or 'night', which period to check if the date is part of,
based on the lights_on and lights_off arguments
lights_on : int, optional
Hour of the day (0-23) when lights turn on. The default is 7.
lights_off : int, optional
Hour of the day (0-23) when lights turn off. The default is 19.
Returns
-------
Bool
"""
lights_on = datetime.time(hour=lights_on)
lights_off = datetime.time(hour=lights_off)
val = False
#defaults to checking if at night
if lights_off > lights_on:
val = time.time() >= lights_off or time.time() < lights_on
elif lights_off < lights_on:
val = time.time() >= lights_off and time.time() < lights_on
#reverses if period='day'
return val if period=='night' else not val
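# Hedged usage sketch (times are illustrative; assumes the default 7:00-19:00 cycle):
#   >>> is_day_or_night(datetime.datetime(2021, 1, 1, 12, 0), 'day')
#   True
#   >>> is_day_or_night(datetime.datetime(2021, 1, 1, 23, 0), 'night')
#   True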
def get_daynight_count(start_time, end_time, lights_on=7, lights_off=19):
"""
Compute the (fractional) number of completed light and dark periods
    between two dates. Used for normalizing values grouped by day & nighttime.
Parameters
----------
start_time : datetime
starting time
end_time : datetime
ending time
lights_on : int, optional
Hour of the day (0-23) when lights turn on. The default is 7.
lights_off : int, optional
Hour of the day (0-23) when lights turn off. The default is 19.
Returns
-------
dict
dictionary with keys "day" and "night", values are the
number of completed periods for each key.
"""
cuts = []
cuts.append(start_time)
loop_time = start_time.replace(minute=0,second=0)
while loop_time < end_time:
loop_time += pd.Timedelta(hours=1)
if loop_time.hour == lights_on:
cuts.append(loop_time)
elif loop_time.hour == lights_off:
cuts.append(loop_time)
cuts.append(end_time)
days = []
nights = []
if lights_off > lights_on:
day_hours = lights_off - lights_on
night_hours = 24 - day_hours
else:
night_hours = lights_on - lights_off
day_hours = 24 - night_hours
day_hours = pd.Timedelta(hours = day_hours)
night_hours = pd.Timedelta(hours = night_hours)
for i, t in enumerate(cuts[:-1]):
if is_day_or_night(t, 'day', lights_on, lights_off):
days.append((cuts[i+1] - t)/day_hours)
else:
nights.append((cuts[i+1] - t)/night_hours)
return {'day':sum(days),'night':sum(nights)}
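# Hedged usage sketch (one full 12h/12h cycle; timestamps are illustrative):
#   >>> get_daynight_count(pd.Timestamp('2021-01-01 07:00'),
#   ...                    pd.Timestamp('2021-01-02 07:00'),
#   ...                    lights_on=7, lights_off=19)
#   {'day': 1.0, 'night': 1.0}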
def night_intervals(array, lights_on, lights_off, instead_days=False):
"""
Find intervals of a date-array corresponding to night time.
Parameters
----------
array : array-like
Array of datetimes (e.g. generated by hours_between).
lights_on : int
Integer between 0 and 23 representing when the light cycle begins.
lights_off : int
Integer between 0 and 23 representing when the light cycle ends.
instead_days : bool, optional
Return intervals during daytime instead of nighttime.
The default is False.
Returns
-------
night_intervals : list
List of tuples with structure (start of nighttime, end of nighttime).
"""
night_intervals = []
on_time = datetime.time(hour=lights_on)
off_time = datetime.time(hour=lights_off)
if on_time == off_time:
return night_intervals
else:
at_night = [is_day_or_night(i, 'night',
lights_on=lights_on,
lights_off=lights_off) for i in array]
if instead_days:
at_night = [not i for i in at_night]
if len(at_night) == 0:
return night_intervals
night_starts = []
night_ends = []
if at_night[0] == True:
night_starts.append(array[0])
for i, _ in enumerate(at_night[1:],start=1):
if at_night[i] == True and at_night[i-1] == False:
night_starts.append(array[i])
elif at_night[i] == False and at_night[i-1] == True:
night_ends.append(array[i])
if at_night[-1] == True:
night_ends.append(array[-1])
night_intervals = list(zip(night_starts, night_ends))
return night_intervals
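# Hedged usage sketch (a single day of hourly timestamps; values illustrative):
#   >>> hrs = hours_between(datetime.datetime(2021, 1, 1, 0, 0),
#   ...                     datetime.datetime(2021, 1, 2, 0, 0), convert=False)
#   >>> night_intervals(hrs, lights_on=7, lights_off=19)
# returns two Timestamp pairs: (Jan 1 00:00, Jan 1 07:00) and (Jan 1 19:00, Jan 2 00:00).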
def shade_darkness(ax, min_date, max_date, lights_on, lights_off,
convert=True):
"""
Shade the night periods of a matplotlib Axes with a datetime x-axis.
Parameters
----------
ax : matplotlib.axes.Axes
Plot Axes.
min_date : datetime
Earliest date to shade.
max_date : datetime
Latest date to shade.
lights_on : int
Integer between 0 and 23 representing when the light cycle begins.
lights_off : int
Integer between 0 and 23 representing when the light cycle ends.
convert : bool, optional
Whether to convert the start/end arguments from numpy datetime to
standard datetime. The default is True.
Returns
-------
None.
"""
hours_list = hours_between(min_date, max_date,convert=convert)
nights = night_intervals(hours_list, lights_on=lights_on,
lights_off=lights_off)
if nights:
for i, interval in enumerate(nights):
start = interval[0]
end = interval[1]
if start != end:
ax.axvspan(start,
end,
color='gray',
alpha=.2,
label='_'*i + 'lights off',
zorder=0)
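# Hedged usage sketch (the axes and DataFrame names are illustrative, not from
# the original code; convert=False because pandas Timestamps are passed directly):
#   fig, ax = plt.subplots()
#   ax.plot(df.index, df['LeftCount'])
#   shade_darkness(ax, df.index.min(), df.index.max(),
#                  lights_on=7, lights_off=19, convert=False)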
def date_format_x(ax, start, end):
"""
Format the x-ticks of datetime plots created by FED3 Viz. Handles various
incoming dates by lowering the (time) frequency of ticks with longer
date ranges.
Parameters
----------
ax : matplotlib.axes.Axes
Graph Axes
start : datetime
Earliest x-position of the graph
end : datetime
Latest x-position of the graph
Returns
-------
None.
"""
quarter_hours = mdates.MinuteLocator(byminute=[0,15,30,45])
all_hours = mdates.HourLocator()
quarter_days = mdates.HourLocator(byhour=[0,6,12,18])
days = mdates.DayLocator()
two_days = mdates.DayLocator(interval=2)
three_days = mdates.DayLocator(interval=3)
months = mdates.MonthLocator()
d8_span = end - start
if d8_span < datetime.timedelta(hours=12):
xfmt = mdates.DateFormatter('%H:%M')
major = all_hours
minor = quarter_hours
elif ((d8_span >= datetime.timedelta(hours=12))
and (d8_span < datetime.timedelta(hours=24))):
xfmt = mdates.DateFormatter('%b %d %H:%M')
major = quarter_days
minor = all_hours
elif ((d8_span >= datetime.timedelta(hours=24))
and (d8_span < datetime.timedelta(days=3))):
xfmt = mdates.DateFormatter('%b %d %H:%M')
major = days
minor = quarter_days
elif (d8_span >= datetime.timedelta(days=3)
and (d8_span < datetime.timedelta(days=6))):
xfmt = mdates.DateFormatter('%b %d %H:%M')
major = two_days
minor = days
elif ((d8_span >= datetime.timedelta(days=6))
and (d8_span < datetime.timedelta(days=20))):
xfmt = mdates.DateFormatter('%b %d')
major = three_days
minor = days
elif d8_span >= datetime.timedelta(days=20):
xfmt = mdates.DateFormatter("%b '%y")
major = months
minor = three_days
plt.setp(ax.xaxis.get_majorticklabels(), rotation=45, ha='right')
ax.xaxis.set_major_locator(major)
ax.xaxis.set_major_formatter(xfmt)
ax.xaxis.set_minor_locator(minor)
#---interdrink interval helpers
def get_any_idi(sipper):
"""
Returns the interdrink intervals for a Sipper,
disregarding side or bottle contents
Parameters
----------
sipper : Sipper
sipper data loaded into the Sipper class
Returns
-------
idi_minutes : pandas.Series
array of the interdrink intervals in minutes
"""
data = sipper.data
combined = data['LeftCount'].diff() + data['RightCount'].diff()
combined.dropna(inplace=True)
combined = combined[combined > 0]
idi_delta = combined.index.to_series().diff().dropna()
idi_minutes = idi_delta.dt.total_seconds()/60
return idi_minutes
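# Hedged illustration of the interval math above on a toy count series
# (not real Sipper data; timestamps are invented):
#   >>> counts = pd.Series([0, 1, 1, 3],
#   ...                    index=pd.to_datetime(['2021-01-01 00:00',
#   ...                                          '2021-01-01 00:05',
#   ...                                          '2021-01-01 00:20',
#   ...                                          '2021-01-01 00:30']))
#   >>> diff = counts.diff().dropna()
#   >>> diff = diff[diff > 0]                      # drinks at 00:05 and 00:30
#   >>> diff.index.to_series().diff().dropna().dt.total_seconds() / 60
# yields a single interdrink interval of 25.0 minutes.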
def get_side_idi(sipper, side):
"""
Returns the interdrink intervals for the left or right bottle of a Sipper
Parameters
----------
sipper : Sipper
sipper data loaded into the Sipper class
side : str ('left' or 'right')
side to return the interdrink intervals for
Returns
-------
idi_minutes : pandas.Series
array of the interdrink intervals in minutes
"""
data = sipper.data
col = 'LeftCount' if side.lower() == 'left' else 'RightCount'
diff = data[col].diff().dropna()
diff = diff[diff > 0]
idi_delta = diff.index.to_series().diff().dropna()
idi_minutes = idi_delta.dt.total_seconds()/60
return idi_minutes
def get_content_idi(sipper, content, df=pd.DataFrame()):
"""
Returns the interdrink intervals for specific bottle contents of a Sipper
Parameters
----------
sipper : Sipper
sipper data loaded into the Sipper class
content : str
str name of content to get values for
df : pandas.DataFrame, optional
a DataFrame of sipper data to get the values for, can be passed
when you want values for a modified version of data (e.g.
after doing a global date filter)
Returns
-------
idi_minutes : pandas.Series
array of the interdrink intervals in minutes
"""
vals = sipper.get_content_values(content, out='Count', df=df)
if vals.empty:
return vals
diff = vals.diff().dropna()
diff = diff[diff > 0]
idi_delta = diff.index.to_series().diff().dropna()
idi_minutes = idi_delta.dt.total_seconds()/60
return idi_minutes
def setup_idi_axes(ax, logx):
"""
Helper to prepare plots for interdrink interval histograms
Parameters
----------
ax : matplotlib.axes.Axes
plot axes
logx : bool
whether or not to create a logarithmic x-axis
Returns
-------
None.
"""
ax.set_xlabel('Minutes Between Drinks')
ax.set_title('Interdrink Interval Plot')
if logx:
lowest = -2
highest = 5
ax.set_xticks(range(lowest,highest))
ax.set_xticklabels([10**num for num in range(-2,5)], rotation=45)
ax.set_xlim(-2.5, 5.1)
else:
ax.set_xticks([0,300,600,900])
ax.set_xlim(-100,1000)
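# Hedged usage sketch (assumes the histogram itself is built on log10 of the
# intervals when logx=True; idi_minutes is a hypothetical Series, e.g. from
# get_any_idi):
#   fig, ax = plt.subplots()
#   setup_idi_axes(ax, logx=True)
#   ax.hist(np.log10(idi_minutes), bins=50)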
#---circadian helpers
def get_chronogram_vals(series, lights_on, lights_off):
"""
    Convert a time series to chronogram values (i.e. averaged
by hour for the light cycle)
Parameters
----------
series : pandas.Series
time series data
lights_on : int
Integer from 0-23 denoting start of light cycle
lights_off : int
Integer from 0-23 denoting end of light cycle
Returns
-------
reindexed : pandas.Series
Series of chronogram values, with 0 being start of the light cycle
"""
byhour = series.groupby([series.index.hour]).sum()
byhourday = series.groupby([series.index.hour, series.index.date])
num_days_by_hour = byhourday.sum().index.get_level_values(0).value_counts()
byhour = byhour.divide(num_days_by_hour, axis=0)
new_index = list(range(lights_on, 24)) + list(range(0,lights_on))
reindexed = byhour.reindex(new_index)
reindexed.index.name = 'hour'
reindexed = reindexed.fillna(0)
return reindexed
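# Hedged usage sketch (toy hourly series with a constant value of 1.0):
#   >>> idx = pd.date_range('2021-01-01', periods=48, freq='H')
#   >>> vals = get_chronogram_vals(pd.Series(1.0, index=idx),
#   ...                            lights_on=7, lights_off=19)
# the result is indexed 7, 8, ..., 23, 0, ..., 6 (starting at lights on) and,
# because every hour occurs on exactly two days here, each hourly average is 1.0.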
#---averaging helpers
def preproc_averaging(data, averaging='datetime', avg_bins='1H',
agg='sum'):
"""
Average data for SipperViz
Parameters
----------
data : collection
collection of pandas.Series to average
averaging : str, optional
Style of averaging. The default is 'datetime'.
        - 'datetime' = average in absolute time (no alignment; fails for
          time series that do not overlap in time)
- 'time' = align by time of day and then average
- 'elapsed' = align by start of recording and then average
avg_bins : str, optional
Bin size to use for downsampling. The default is '1H'.
agg : str, optional
Function to aggregate data after downsampling; this is a
string name of a function used by pandas for resampling.
The default is 'sum'.
Raises
------
SipperError
When "averaging" parameter is not recognized
Returns
-------
output : dict
Dictionary of results, with keys:
        - 'x' : x positions of data
- 'ys' : averaged data
"""
if averaging not in ['datetime','time','elapsed']:
raise SipperError('averaging must be "datetime", "time", or "elapsed"')
output = {}
output['ys'] = []
if averaging == 'datetime':
earliest_end = pd.Timestamp(2200,1,1,0,0,0)
latest_start = pd.Timestamp(1970,1,1,0,0,0)
for d in data:
if min(d.index) > latest_start:
latest_start = min(d.index)
if max(d.index) < earliest_end:
earliest_end = max(d.index)
for d in data:
if latest_start not in d.index:
d.loc[latest_start] = np.nan
r = d.resample(avg_bins).apply(agg)
r = r[(r.index >= latest_start) &
(r.index <= earliest_end)].copy()
output['ys'].append(r)
output['x'] = r.index
elif averaging == 'time':
earliest_start = pd.Timestamp(2200,1,1,0,0,0)
latest_end = pd.Timestamp(1970,1,1,0,0,0)
shifted = []
for d in data:
r = d.resample(avg_bins).apply(agg)
first = r.index[0]
aligned = pd.Timestamp(year=1970, month=1, day=1, hour=first.hour)
shift = first - aligned
r.index = [i-shift for i in r.index]
if r.index.min() < earliest_start:
earliest_start = r.index.min()
if r.index.max() > latest_end:
latest_end = r.index.max()
shifted.append(r)
full_dr = pd.date_range(earliest_start, latest_end, freq=avg_bins)
output['x'] = full_dr
for s in shifted:
reindexed = s.reindex(full_dr)
output['ys'].append(reindexed)
elif averaging == 'elapsed':
maxx = pd.Timedelta(0)
elapsed_data = []
for d in data:
origin = d.index[0]
elapsed = [i - origin for i in d.index]
d.index = elapsed
r = d.resample(avg_bins).apply(agg)
            if r.index.max() > maxx:
                maxx = r.index.max()
                longest_index = r.index
elapsed_data.append(r)
output['x'] = longest_index.total_seconds()/3600
for s in elapsed_data:
reindexed = s.reindex(longest_index)
reindexed.index = reindexed.index.total_seconds()/3600
output['ys'].append(reindexed)
return output
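# Hedged usage sketch (series_a, series_b and ax are illustrative names for
# existing pandas Series and a matplotlib Axes; bin size is arbitrary):
#   avg = preproc_averaging([series_a, series_b], averaging='elapsed',
#                           avg_bins='1H', agg='sum')
#   mean_y = pd.concat(avg['ys'], axis=1).mean(axis=1)
#   ax.plot(avg['x'], mean_y)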
def format_averaging_axes(ax, averaging, xdata, shade_dark=True,
lights_on=7, lights_off=19):
"""
Helper function to setup axes for average plots in SipperViz
Parameters
----------
ax : matplotlib.axes.Axes
plot axes
averaging : str, optional
Style of averaging. The default is 'datetime', other options
are 'time' and 'elapsed'
xdata : array
x-positions of plotted data. If multiple lines were plotted,
this array should encompass all of them
shade_dark : bool, optional
Whether or not to shade nighttime periods of the ax.
Has no effect when averaging is 'elapsed'. The default is True.
lights_on : int
Integer from 0-23 denoting start of light cycle. The default is 7.
lights_off : int
Integer from 0-23 denoting end of light cycle. The default is 19.
Returns
-------
None.
"""
if averaging == 'datetime':
mindate = pd.Timestamp(2200,1,1,0,0,0)
maxdate = pd.Timestamp(1970,1,1,0,0,0)
for x in xdata:
if x.min() < mindate:
mindate = x.min()
if x.max() > maxdate:
maxdate = x.max()
ax.set_xlabel('Date')
date_format_x(ax, mindate, maxdate)
if shade_dark:
shade_darkness(ax, mindate, maxdate, lights_on, lights_off)
elif averaging == 'time':
mindate = | pd.Timestamp(2200,1,1,0,0,0) | pandas.Timestamp |
#----------------- Libraries -------------------#
import os
import sys
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from Preprocessing import Preprocessing
def kfold_decompose(data, kfold_n):
"""
This function uses kfold to split the data.
Args:
data (list): The data to split
        kfold_n (int): number of folds to split the data into
Returns:
list[dict]: a list of the split datasets
"""
X = np.array(data)
kf = KFold(n_splits=kfold_n, random_state=2, shuffle=True)
data_output = []
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
data_output.append({'train': X_train, 'test': X_test})
return data_output
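# Hedged usage sketch (toy data; the fold count is illustrative):
#   >>> folds = kfold_decompose(list(range(10)), kfold_n=5)
#   >>> len(folds), sorted(folds[0].keys())
#   (5, ['test', 'train'])
#   >>> folds[0]['test'].shape
#   (2,)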
def create_folders(data_label_true, data_label_fake, kfold_n, data_path):
"""
    This function fragments the data and creates the directories to store it.
Args:
data_label_true (list[dict]): true data text
data_label_fake (list[dict]): fake data text
kfold_n (int): number of data splits with kfold
"""
# cwd = os.path.dirname(os.path.abspath(__file__))
# data_path = os.path.join(cwd, os.pardir, 'data')
training = os.path.join(data_path, 'training')
os.mkdir(training)
prp = Preprocessing()
for i in tqdm(range(kfold_n)):
dataset_path = os.path.join(training, 'dataset_' + str(i+1))
os.mkdir(dataset_path)
os.mkdir(os.path.join(dataset_path, 'test'))
os.mkdir(os.path.join(dataset_path, 'train'))
os.mkdir(os.path.join(dataset_path, 'train', 'vc'))
| pd.DataFrame(data_label_true[i]['test']) | pandas.DataFrame |
import pandas as pd
from fuzzywuzzy import fuzz
import csv
import argparse
from timeit import default_timer as timer
def get_arguments():
parser = argparse.ArgumentParser(description='csv file identifying duplicates between new and old comments')
parser.add_argument('--new_comments_csv', '-i1', type=str, dest='new_comments_csv', action='store',
#default='../../Sample_Resources/Sample_Comments_CSVs/new_comments_preprocessed.csv',
default=r'/Users/vkolhatk/Data/GnM_CSVs/intermediate_csvs/new_comments_preprocessed.csv',
help="the input new comments csv file")
parser.add_argument('--output_csv', '-o', type=str, dest='output_csv', action='store',
#default='../../Sample_Resources/Sample_Comments_CSVs/merged_duplicates.csv',
default=r'/Users/vkolhatk/Data/GnM_CSVs/intermediate_csvs/merged_old_new_duplicates.csv',
help="the output csv file containing duplicate pairs")
parser.add_argument('--old_comments_csv', '-i2', type=str, dest='old_comments_csv', action='store',
#default='../../Sample_Resources/Sample_Comments_CSVs/old_comments.csv',
default=r'/Users/vkolhatk/Data/GnM_CSVs/intermediate_csvs/old_comments_preprocessed.csv',
help="the input old comments csv file")
parser.add_argument('--threshold', '-t', type=int, dest='threshold', action='store',
#default='../../Sample_Resources/Sample_Comments_CSVs/old_comments.csv',
default=85,
help="Threshold for fuzzy matching scores ")
args = parser.parse_args()
return args
def identify(groupby_articles, args):
'''
    :param groupby_articles: list of per-article DataFrames (grouped by article_id)
    :param args: argparse namespace object
    Given the grouped comment data, this function identifies duplicates and writes
    the duplicate pairs to the output csv given in args.output_csv.
'''
csv_header_list = ['article_id','author','comment_counter1','comment1','comment_counter2','comment2','weighted_score','token_sort_score']
with open(args.output_csv,'w',newline='',encoding='utf-8') as duplicatecsv:
writer = csv.writer(duplicatecsv)
writer.writerow(csv_header_list)
for arts in range(0,len(groupby_articles)):
for i in groupby_articles[arts].itertuples():
for m in groupby_articles[arts].itertuples():
if i.author!=m.author:
continue
if m.Index <= i.Index:
continue
try:
if len(i.text.decode('utf-8'))<=len(m.text.decode('utf-8'))/2 or len(m.text.decode('utf-8'))<=len(i.text.decode('utf-8'))/2:
continue
if any(match == "< this comment did not meet civility standards >" or match == "This comment has been deleted" for match in [i.text,m.text]):
continue
score = fuzz.UWRatio(i.text.decode('utf-8'),m.text.decode('utf-8'))
tsort_score = fuzz.token_sort_ratio(i.text.decode('utf-8'),m.text.decode('utf-8'),force_ascii=False)
if score>=args.threshold and tsort_score>=args.threshold:
writer.writerow([i.article_id,i.author,i.comment_counter,i.text,m.comment_counter,m.text,score,tsort_score])
except:
if len(str(i.text))<=len(str(m.text))/2 or len(str(m.text))<=len(str(i.text))/2:
continue
if any(match == "< this comment did not meet civility standards >" or match == "This comment has been deleted" for match in [i.text,m.text]):
continue
score = fuzz.UWRatio(str(i.text),str(m.text))
tsort_score = fuzz.token_sort_ratio(str(i.text),str(m.text),force_ascii=False)
if score>=args.threshold and tsort_score>=args.threshold:
writer.writerow([i.article_id,i.author,i.comment_counter,i.text,m.comment_counter,m.text,score,tsort_score])
#print(arts)
print('Output file written: ', args.output_csv)
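# Hedged usage sketch (assumes the comments CSV has the columns the loop above
# relies on: article_id, author, comment_counter and text):
#   args = get_arguments()
#   new = pd.read_csv(args.new_comments_csv)
#   groups = [group for _, group in new.groupby('article_id')]
#   identify(groups, args)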
if __name__=="__main__":
args = get_arguments()
start = timer()
print('Start time: ', start)
old = pd.read_csv(args.old_comments_csv)
new = | pd.read_csv(args.new_comments_csv) | pandas.read_csv |
import os
import glob
import pathlib
import re
import base64
import pandas as pd
from datetime import datetime, timedelta
# https://www.pythonanywhere.com/forums/topic/29390/ for measuring the RAM usage on pythonanywhere
class defichainAnalyticsModelClass:
def __init__(self):
workDir = os.path.abspath(os.getcwd())
self.dataPath = workDir[:-9] + '/data/'
# data for controller/views
self.dailyData = pd.DataFrame()
self.hourlyData = pd.DataFrame()
self.minutelyData = pd.DataFrame()
self.lastRichlist = None
self.snapshotData = None
self.changelogData = None
# last update of csv-files
self.updated_nodehubIO = None
self.updated_allnodes = None
self.updated_extractedRichlist = None
self.updated_tradingData = None
self.updated_blocktime = None
self.updated_dexHourly = None
self.update_dexMinutely = None
self.updated_daa = None
self.updated_LastRichlist = None
self.updated_dexVolume = None
self.updated_tokenCryptos = None
self.updated_twitterData = None
self.updated_twitterFollower = None
self.update_snapshotData = None
self.update_changelogData = None
self.update_incomeVisits = None
self.update_portfolioDownloads = None
self.update_promoDatabase = None
self.update_analyticsVisits = None
self.updated_hourlyDEXTrades = None
self.update_MNmonitor = None
self.updated_dfx = None
self.update_DFIsignal = None
# background image for figures
with open(workDir + "/assets/analyticsLandscapeGrey2.png", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read()).decode()
self.figBackgroundImage = "data:image/png;base64," + encoded_string # Add the prefix that plotly will want when using the string as source
#### DAILY DATA #####
def loadDailyData(self):
self.loadHourlyDEXdata()
self.loadDEXVolume()
self.loadDailyTradingData()
self.loadExtractedRichlistData()
self.calcOverallTVLdata()
self.loadDailyBlocktimeData()
self.loadDAAData()
self.loadTwitterData()
self.loadTwitterFollowerData()
self.loadIncomeVisitsData()
self.loadPortfolioDownloads()
self.loadPromoDatabase()
self.loadMNMonitorDatabase()
self.loadAnalyticsVisitsData()
self.loadDFIsignalDatabase()
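    # Hedged usage sketch (method names as defined in this class):
    #   model = defichainAnalyticsModelClass()
    #   model.loadDailyData()      # also refreshes the hourly DEX data it depends on
    #   model.dailyData.tail()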
def loadMNnodehub(self):
print('>>>> Start update nodehub.IO data ... <<<<')
filePath = self.dataPath + 'mnNodehub.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_nodehubIO:
nodehubData = pd.read_csv(filePath, index_col=0)
nodehubData.rename(columns={"amount": "nbMNNodehub"}, inplace=True)
ind2Delete = self.dailyData.columns.intersection(nodehubData.columns)
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(nodehubData['nbMNNodehub'], how='outer', left_index=True, right_index=True)
self.updated_nodehubIO = fileInfo.stat()
print('>>>> nodehub data loaded from csv-file <<<<')
def loadMNAllnodes(self):
print('>>>> Start update allnodes data ... <<<<')
filePath = self.dataPath + 'mnAllnodes.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_allnodes:
allnodesData = pd.read_csv(filePath, index_col=0)
allnodesData.set_index('date', inplace=True)
ind2Delete = self.dailyData.columns.intersection(allnodesData.columns)
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(allnodesData['nbMNAllnode'], how='outer', left_index=True, right_index=True)
self.updated_allnodes = fileInfo.stat()
print('>>>> allnodes data loaded from csv-file <<<<')
def loadExtractedRichlistData(self):
self.loadMNnodehub() # number masternode hosted by nodehub must be load here to ensure correct values for other and relative representation
self.loadMNAllnodes() # number masternode hosted by Allnodes must be load here to ensure correct values for other and relative representation
print('>>>> Start update extracted richlist data ... <<<<')
filePath = self.dataPath + 'extractedDFIdata.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_extractedRichlist:
extractedRichlist = pd.read_csv(filePath, index_col=0)
ind2Delete = self.dailyData.columns.intersection(extractedRichlist.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(extractedRichlist, how='outer', left_index=True, right_index=True) # add new columns to daily table
self.dailyData['nbMNOther'] = self.dailyData['nbMnId']-self.dailyData['nbMnCakeId']-self.dailyData['nbMydefichainId']-self.dailyData['nbMNNodehub'].fillna(0)-self.dailyData['nbMNAllnode'].fillna(0)
self.dailyData['nbMNnonCake'] = self.dailyData['nbMnId']-self.dailyData['nbMnCakeId']
self.dailyData['nbMnCakeIdRelative'] = self.dailyData['nbMnCakeId']/self.dailyData['nbMnId']*100
self.dailyData['nbMNOtherRelative'] = self.dailyData['nbMNOther'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMydefichainRelative'] = self.dailyData['nbMydefichainId'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNNodehubRelative'] = self.dailyData['nbMNNodehub'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNAllnodeRelative'] = self.dailyData['nbMNAllnode'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNlocked10Relative'] = self.dailyData['nbMNlocked10'] / self.dailyData['nbMnId'] * 100
self.dailyData['nbMNlocked5Relative'] = self.dailyData['nbMNlocked5'] / self.dailyData['nbMnId'] * 100
# extracting DFI in Liquidity-Mining
lmCoins = pd.DataFrame(index=self.dailyData.index)
lmCoins['BTC_pool'] = self.hourlyData.groupby('Date')['BTC-DFI_reserveB'].first()
lmCoins['ETH_pool'] = self.hourlyData.groupby('Date')['ETH-DFI_reserveB'].first()
lmCoins['USDT_pool'] = self.hourlyData.groupby('Date')['USDT-DFI_reserveB'].first()
lmCoins['DOGE_pool'] = self.hourlyData.groupby('Date')['DOGE-DFI_reserveB'].first()
lmCoins['LTC_pool'] = self.hourlyData.groupby('Date')['LTC-DFI_reserveB'].first()
lmCoins['USDC_pool'] = self.hourlyData.groupby('Date')['USDC-DFI_reserveB'].first()
lmCoins['overall'] = lmCoins['BTC_pool'] + lmCoins['ETH_pool'] + lmCoins['USDT_pool'] + lmCoins['DOGE_pool'].fillna(0) + lmCoins['LTC_pool'].fillna(0) + lmCoins['USDC_pool'] .fillna(0)
self.dailyData['lmDFI'] = lmCoins['overall']
# sum of addresses and DFI
self.dailyData['nbOverall'] = self.dailyData['nbMnId'] + self.dailyData['nbOtherId']
self.dailyData['circDFI'] = self.dailyData['mnDFI'] + self.dailyData['otherDFI'] \
+ self.dailyData['tokenDFI'].fillna(0) + self.dailyData['lmDFI'].fillna(0) + self.dailyData['erc20DFI'].fillna(0) \
- (self.dailyData['nbMNlocked10']+self.dailyData['nbMNlocked5']).fillna(0)*20000
self.dailyData['totalDFI'] = self.dailyData['circDFI'] + self.dailyData['fundDFI'] + self.dailyData['foundationDFI'].fillna(0) \
+ self.dailyData['burnedDFI'].fillna(method="ffill") + (self.dailyData['nbMNlocked10']+self.dailyData['nbMNlocked5']).fillna(0)*20000
# calc market cap data in USD and BTC
print('>>>>>>>> Update market cap in loadExtractedRichlistData... <<<<<<<<')
self.dailyData['marketCapUSD'] = self.dailyData['circDFI']*self.dailyData['DFIPriceUSD']
self.dailyData['marketCapBTC'] = self.dailyData['marketCapUSD'] / self.dailyData['BTCPriceUSD']
# calculate daily change in addresses and DFI amount
self.dailyData['diffDate'] = pd.to_datetime(self.dailyData.index).to_series().diff().values
self.dailyData['diffDate'] = self.dailyData['diffDate'].fillna(pd.Timedelta(seconds=0)) # set nan-entry to timedelta 0
self.dailyData['diffDate'] = self.dailyData['diffDate'].apply(lambda x: float(x.days))
self.dailyData['diffNbOther'] = self.dailyData['nbOtherId'].diff() / self.dailyData['diffDate']
self.dailyData['diffNbMN'] = self.dailyData['nbMnId'].diff() / self.dailyData['diffDate']
self.dailyData['diffNbNone'] = None
self.dailyData['diffotherDFI'] = self.dailyData['otherDFI'].diff() / self.dailyData['diffDate']
self.dailyData['diffmnDFI'] = self.dailyData['mnDFI'].diff() / self.dailyData['diffDate']
self.dailyData['difffundDFI'] = self.dailyData['fundDFI'].diff() / self.dailyData['diffDate']
self.dailyData['difffoundationDFI'] = self.dailyData['foundationDFI'].diff() / self.dailyData['diffDate']
self.dailyData['diffLMDFI'] = self.dailyData['lmDFI'].diff() / self.dailyData['diffDate']
self.updated_extractedRichlist = fileInfo.stat()
print('>>>> Richlist data loaded from csv-file <<<<')
def calcOverallTVLdata(self):
self.dailyData['tvlMNDFI'] = self.dailyData['nbMnId'] * ((pd.to_datetime(self.dailyData.index)<pd.Timestamp('2021-03-02')) * 1 * 1000000 + \
(pd.to_datetime(self.dailyData.index)>=pd.Timestamp('2021-03-02')) * 1 * 20000)
dexLockedDFI = (self.hourlyData['BTC-DFI_lockedDFI']+self.hourlyData['ETH-DFI_lockedDFI']+self.hourlyData['USDT-DFI_lockedDFI'] +
self.hourlyData['DOGE-DFI_lockedDFI'].fillna(0)+self.hourlyData['LTC-DFI_lockedDFI'].fillna(0) +
self.hourlyData['BCH-DFI_lockedDFI'].fillna(0) + self.hourlyData['USDC-DFI_lockedDFI'].fillna(0))
dexLockedDFI.index = dexLockedDFI.index.floor('D').astype(str) # remove time information, only date is needed
self.dailyData['tvlDEXDFI'] = dexLockedDFI.groupby(level=0).first()
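    # Hedged reading of the TVL computation above (thresholds are taken from the
    # code, the interpretation is mine): masternode TVL is valued at 1,000,000 DFI
    # per node before 2021-03-02 and 20,000 DFI per node from that date on, while
    # DEX TVL uses the first hourly snapshot of locked DFI on each day.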
def loadDailyTradingData(self):
print('>>>> Start update trading data ... <<<<')
filePath = self.dataPath + 'dailyTradingResultsDEX.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_tradingData:
dailyTradingResults = pd.read_csv(self.dataPath+'dailyTradingResultsDEX.csv',index_col=0)
ind2Delete = self.dailyData.columns.intersection(dailyTradingResults.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dailyTradingResults, how='outer', left_index=True, right_index=True) # add new columns to daily table
# calc market cap data in USD and BTC (same as in loadExtractedRichlistData to get updated price information
if 'circDFI' in self.dailyData.columns:
print('>>>>>>>> Update market cap in loadDailyTradingData... <<<<<<<<')
self.dailyData['marketCapUSD'] = self.dailyData['circDFI']*self.dailyData['DFIPriceUSD']
self.dailyData['marketCapBTC'] = self.dailyData['marketCapUSD'] / self.dailyData['BTCPriceUSD']
self.updated_tradingData = fileInfo.stat()
print('>>>> Trading data loaded from csv-file <<<<')
def loadDailyBlocktimeData(self):
print('>>>> Start update blocktime data ... <<<<')
filePath = self.dataPath + 'BlockListStatistics.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_blocktime:
dailyBlocktimeData = pd.read_csv(filePath, index_col=0)
dailyBlocktimeData['tps'] = dailyBlocktimeData['txCount'] / (24 * 60 * 60)
ind2Delete = self.dailyData.columns.intersection(dailyBlocktimeData.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dailyBlocktimeData, how='outer', left_index=True,right_index=True) # add new columns to daily table
self.updated_blocktime = fileInfo.stat()
print('>>>> Blocktime data loaded from csv-file <<<<')
def loadDAAData(self):
print('>>>> Start update DAA data ... <<<<')
filePath = self.dataPath + 'analyzedDataDAA.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_daa:
dailyDAAData = pd.read_csv(filePath, index_col=0)
ind2Delete = self.dailyData.columns.intersection(dailyDAAData.columns) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dailyDAAData, how='outer', left_index=True, right_on='Date') # add new columns to daily table
self.dailyData.set_index('Date', inplace=True)
self.dailyData.sort_index(inplace=True)
self.updated_daa = fileInfo.stat()
print('>>>> DAA data loaded from csv-file <<<<')
def loadTwitterData(self):
print('>>>> Start update twitter data ... <<<<')
filePath = self.dataPath + 'analyzedTwitterData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_twitterData:
twitterData = pd.read_csv(filePath, index_col=0)
columns2update = ['overall_Activity', 'defichain_Activity', 'dfi_Activity', 'overall_Likes', 'overall_UniqueUserOverall', 'overall_UniqueUserTweet', 'overall_UniqueUserReply', 'overall_UniqueUserRetweet']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(twitterData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.updated_twitterData = fileInfo.stat()
print('>>>> Twitter data loaded from csv-file <<<<')
def loadTwitterFollowerData(self):
print('>>>> Start update twitter follower data ... <<<<')
filePath = self.dataPath + 'TwitterData_follower.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_twitterFollower:
twitterFollowData = pd.read_csv(filePath, index_col=0)
twitterFollowData.set_index('Date',inplace=True)
columns2update = ['Follower', 'followedToday', 'unfollowedToday']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(twitterFollowData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.updated_twitterFollower = fileInfo.stat()
print('>>>> Twitter data loaded from csv-file <<<<')
def loadIncomeVisitsData(self):
print('>>>> Start update income visits data ... <<<<')
filePath = self.dataPath + 'dataVisitsIncome.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_incomeVisits:
incomeVisitsData = pd.read_csv(filePath, index_col=0)
incomeVisitsData.rename(columns={'0': 'incomeVisits'}, inplace=True)
incomeVisitsData.set_index(incomeVisitsData.index.str[:10], inplace=True) # just use date information without hh:mm
columns2update = ['incomeVisits']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(incomeVisitsData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_incomeVisits = fileInfo.stat()
print('>>>> Income visits data loaded from csv-file <<<<')
def loadPortfolioDownloads(self):
print('>>>> Start update portfolio downloads data ... <<<<')
filePath = self.dataPath + 'dataPortfolioDownloads.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_portfolioDownloads:
portfolioRawData = pd.read_csv(filePath)
columns2update = ['PortfolioWindows', 'PortfolioMac', 'PortfolioLinux']
dfPortfolioData = pd.DataFrame(index=portfolioRawData['DateCaptured'].unique(), columns=columns2update)
dfPortfolioData['PortfolioWindows'] = portfolioRawData.groupby(portfolioRawData.DateCaptured).Windows.sum()
dfPortfolioData['PortfolioMac'] = portfolioRawData.groupby(portfolioRawData.DateCaptured).Mac.sum()
dfPortfolioData['PortfolioLinux'] = portfolioRawData.groupby(portfolioRawData.DateCaptured).Linux.sum()
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dfPortfolioData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_portfolioDownloads = fileInfo.stat()
print('>>>> Portfolio downloads data loaded from csv-file <<<<')
def loadPromoDatabase(self):
print('>>>> Start update DefiChain promo database ... <<<<')
filePath = self.dataPath + 'defichainPromoData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_promoDatabase:
promoRawData = pd.read_csv(filePath, index_col=0)
columns2update = ['postActive', 'mediaActive', 'incentivePointsToday', 'incentiveUsers']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(promoRawData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_promoDatabase = fileInfo.stat()
print('>>>> DefiChain promo database loaded from csv-file <<<<')
def loadMNMonitorDatabase(self):
print('>>>> Start update masternode monitor database ... <<<<')
filePath = self.dataPath + 'masternodeMonitorData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_MNmonitor:
monitorRawData = pd.read_csv(filePath, index_col=0)
columns2update = ['nbMasternodes', 'nbAccounts']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(monitorRawData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_MNmonitor = fileInfo.stat()
print('>>>> MN Monitor database loaded from csv-file <<<<')
def loadAnalyticsVisitsData(self):
print('>>>> Start update raw data analytics visits ... <<<<')
filePath = self.dataPath + 'rawDataUserVisit.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_analyticsVisits:
analyticsRawVisitsData = pd.read_csv(filePath, index_col=0)
analyticsRawVisitsData['visitDate'] = pd.to_datetime(analyticsRawVisitsData.visitTimestamp).dt.date
analyticsVisitData = analyticsRawVisitsData.groupby('visitDate').count()
analyticsVisitData.rename(columns={'visitTimestamp': 'analyticsVisits'}, inplace=True)
columns2update = ['analyticsVisits']
analyticsVisitData.index = analyticsVisitData.index.map(str) # change index from dt to str format
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(analyticsVisitData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_analyticsVisits = fileInfo.stat()
print('>>>> Analytics visits data loaded from csv-file <<<<')
def loadDFIsignalDatabase(self):
print('>>>> Start update DFI-signal database ... <<<<')
filePath = self.dataPath + 'dfiSignalData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_DFIsignal:
dfiSignalRawData = pd.read_csv(filePath, index_col=0)
columns2update = ['user_count','masternode_count','messages_sent','commands_received','minted_blocks']
# delete existing information and add new one
ind2Delete = self.dailyData.columns.intersection(columns2update) # check if columns exist
self.dailyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.dailyData = self.dailyData.merge(dfiSignalRawData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.update_DFIsignal = fileInfo.stat()
print('>>>> DFI-Signal database loaded from csv-file <<<<')
#### HOURLY DATA ####
def loadHourlyData(self):
self.loadHourlyDEXdata()
self.loadDEXVolume()
self.loadTokenCrypto()
self.loadHourlyDEXTrades()
self.loadDFXdata()
def loadHourlyDEXdata(self):
print('>>>> Start update hourly DEX data ... <<<<')
filePath = self.dataPath + 'LMPoolData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_dexHourly:
hourlyDEXData = pd.read_csv(filePath, index_col=0)
hourlyDEXData['timeRounded'] = pd.to_datetime(hourlyDEXData.Time).dt.floor('H')
hourlyDEXData.set_index(['timeRounded'], inplace=True)
hourlyDEXData['reserveA_DFI'] = hourlyDEXData['reserveA'] / hourlyDEXData['DFIPrices']
for poolSymbol in hourlyDEXData.symbol.dropna().unique():
df2Add = hourlyDEXData[hourlyDEXData.symbol == poolSymbol]
df2Add = df2Add.drop(columns=['Time', 'symbol'])
# calculate locked DFI and corresponding values
df2Add = df2Add.assign(lockedDFI=df2Add['reserveB'] + df2Add['reserveA_DFI'])
df2Add = df2Add.assign(lockedUSD=df2Add['lockedDFI']*hourlyDEXData[hourlyDEXData.symbol == 'USDT-DFI'].DFIPrices)
df2Add = df2Add.assign(lockedBTC=df2Add['lockedDFI'] * hourlyDEXData[hourlyDEXData.symbol == 'BTC-DFI'].DFIPrices)
# calculate relative price deviations
df2Add = df2Add.assign(relPriceDevCoingecko=((df2Add['DFIPrices'] - df2Add['reserveA/reserveB'])/df2Add['DFIPrices']))
df2Add = df2Add.assign(relPriceDevBittrex=((df2Add['DFIPricesBittrex'] - df2Add['reserveA/reserveB']) / df2Add['DFIPricesBittrex']))
# add prefix to column names for pool identification
colNamesOrig = df2Add.columns.astype(str)
colNamesNew = poolSymbol+'_' + colNamesOrig
df2Add = df2Add.rename(columns=dict(zip(colNamesOrig, colNamesNew)))
# delete existing information and add new one
ind2Delete = self.hourlyData.columns.intersection(colNamesNew) # check if columns exist
self.hourlyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.hourlyData = self.hourlyData.merge(df2Add, how='outer', left_index=True, right_index=True) # add new columns to daily table
self.hourlyData['Date'] = pd.to_datetime(self.hourlyData.index).strftime('%Y-%m-%d')
self.updated_dexHourly = fileInfo.stat()
print('>>>> Hourly DEX data loaded from csv-file <<<<')
def loadDEXVolume(self):
print('>>>> Start update DEX volume data ... <<<<')
filePath = self.dataPath + 'DEXVolumeData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_dexVolume:
volumeData = pd.read_csv(filePath, index_col=0)
volumeData['timeRounded'] = pd.to_datetime(volumeData.Time).dt.floor('H')
volumeData.set_index(['timeRounded'], inplace=True)
for poolSymbol in volumeData['base_name'].unique():
df2Add = volumeData[volumeData['base_name']==poolSymbol][['base_volume', 'quote_volume']]
df2Add['VolTotal'] = df2Add[['base_volume', 'quote_volume']].sum(axis=1)
# add prefix to column names for pool identification
colNamesOrig = df2Add.columns.astype(str)
colNamesNew = poolSymbol + '_' + colNamesOrig
df2Add = df2Add.rename(columns=dict(zip(colNamesOrig, colNamesNew)))
# delete existing information and add new one
ind2Delete = self.hourlyData.columns.intersection(colNamesNew) # check if columns exist
self.hourlyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.hourlyData = self.hourlyData.merge(df2Add, how='outer', left_index=True, right_index=True) # add new columns to daily table
# calculate total volume after merge of data
            self.hourlyData['VolTotal'] = self.hourlyData['BTC_VolTotal']*0 # only use rows with data; BTC was the first pool and has the most data (beside ETH, USDT)
for poolSymbol in volumeData['base_name'].unique():
self.hourlyData['VolTotal'] = self.hourlyData['VolTotal'] + self.hourlyData[poolSymbol+'_'+'VolTotal'].fillna(0)
self.hourlyData['VolTotalCoingecko'] = volumeData[volumeData['base_name']=='BTC']['coingeckoVolume']
self.updated_dexVolume = fileInfo.stat()
print('>>>> DEX volume data loaded from csv-file <<<<')
def loadHourlyDEXTrades(self):
print('>>>> Start update hourly DEX trade data ... <<<<')
filePath = self.dataPath + 'hourlyDEXTrades.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_hourlyDEXTrades:
hourlyTrades = pd.read_csv(filePath, index_col=0)
hourlyTrades.fillna(0, inplace=True)
hourlyTrades.index = pd.to_datetime(hourlyTrades.index).tz_localize(None)
columns2update = []
currName = ['BTC', 'ETH', 'USDT', 'DOGE', 'LTC', 'BCH', 'USDC', 'DFI']
for ind in range(7):
hourlyTrades['volume'+currName[ind]+'buyDFI'] = hourlyTrades[currName[ind]+'pool_base'+currName[ind]] * hourlyTrades[currName[ind]+'-USD']
hourlyTrades['volume'+currName[ind]+'sellDFI'] = hourlyTrades[currName[ind]+'pool_quote'+currName[ind]] * hourlyTrades[currName[ind]+'-USD']
columns2update.extend(['volume'+currName[ind]+'buyDFI', 'volume'+currName[ind]+'sellDFI'])
hourlyTrades['volumeOverallbuyDFI'] = hourlyTrades['volumeBTCbuyDFI']+hourlyTrades['volumeETHbuyDFI']+hourlyTrades['volumeUSDTbuyDFI'] + \
hourlyTrades['volumeDOGEbuyDFI']+hourlyTrades['volumeLTCbuyDFI']+hourlyTrades['volumeBCHbuyDFI'] + \
hourlyTrades['volumeUSDCbuyDFI']
hourlyTrades['volumeOverallsellDFI'] = hourlyTrades['volumeBTCsellDFI']+hourlyTrades['volumeETHsellDFI']+hourlyTrades['volumeUSDTsellDFI'] + \
hourlyTrades['volumeDOGEsellDFI']+hourlyTrades['volumeLTCsellDFI']+hourlyTrades['volumeBCHsellDFI'] + \
hourlyTrades['volumeUSDCsellDFI']
columns2update.extend(['volumeOverallbuyDFI', 'volumeOverallsellDFI'])
ind2Delete = self.hourlyData.columns.intersection(columns2update) # check if columns exist
self.hourlyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.hourlyData = self.hourlyData.merge(hourlyTrades[columns2update], how='outer', left_index=True, right_index=True) # delete existing columns to add new ones
self.updated_hourlyDEXTrades = fileInfo.stat()
print('>>>> DEX volume data loaded from csv-file <<<<')
def loadTokenCrypto(self):
print('>>>> Start update token data ... <<<<')
filePath = self.dataPath + 'TokenData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_tokenCryptos:
tokenData = pd.read_csv(filePath, index_col=0)
tokenData['timeRounded'] = pd.to_datetime(tokenData.Time).dt.floor('H')
tokenData.set_index(['timeRounded'], inplace=True)
for coinSymbol in tokenData['symbol'].unique():
df2Add = tokenData[tokenData['symbol']==coinSymbol][['Burned', 'minted', 'Collateral']]
df2Add['tokenDefiChain'] = df2Add['minted'] - df2Add['Burned'].fillna(0)
df2Add['diffToken'] = df2Add['Collateral']-df2Add['minted']+df2Add['Burned'].fillna(0)
# add prefix to column names for pool identification
colNamesOrig = df2Add.columns.astype(str)
colNamesNew = coinSymbol + '_' + colNamesOrig
df2Add = df2Add.rename(columns=dict(zip(colNamesOrig, colNamesNew)))
# delete existing information and add new one
ind2Delete = self.hourlyData.columns.intersection(colNamesNew) # check if columns exist
self.hourlyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.hourlyData = self.hourlyData.merge(df2Add, how='outer', left_index=True, right_index=True) # add new columns to daily table
self.updated_tokenCryptos = fileInfo.stat()
print('>>>> DAT Cryptos data loaded from csv-file <<<<')
def loadDFXdata(self):
print('>>>> Start update DFX data ... <<<<')
filePath = self.dataPath + 'dfxData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.updated_dfx:
dfxData = pd.read_csv(filePath, index_col=0)
dfxData['timeRounded'] = pd.to_datetime(dfxData.index).floor('H')
dfxData.set_index(['timeRounded'], inplace=True)
columns2update = ['dfxBuyRoutes', 'dfxSellRoutes', 'dfxBuyVolume', 'dfxSellVolume', 'dfxBuyVolumeCHF', 'dfxSellVolumeCHF']
# delete existing information and add new one
ind2Delete = self.hourlyData.columns.intersection(columns2update) # check if columns exist
self.hourlyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.hourlyData = self.hourlyData.merge(dfxData[columns2update], how='outer', left_index=True, right_index=True) # add new columns to daily table
self.updated_dfx = fileInfo.stat()
print('>>>> DFX data loaded from csv-file <<<<')
#### MINUTELY DATA ####
def loadMinutelyData(self):
self.loadMinutelyDEXdata()
def loadMinutelyDEXdata(self):
filePath = self.dataPath + 'LMPoolData_ShortTerm.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_dexMinutely:
minutelyDEXData = pd.read_csv(filePath, index_col=0)
minutelyDEXData['timeRounded'] = pd.to_datetime(minutelyDEXData.Time).dt.floor('min') #.dt.strftime('%Y-%m-%d %H:%M')
minutelyDEXData.set_index(['timeRounded'], inplace=True)
for poolSymbol in minutelyDEXData.symbol.unique():
df2Add = minutelyDEXData[minutelyDEXData.symbol == poolSymbol]
df2Add = df2Add.drop(columns=['Time', 'symbol'])
# calculate relative price deviations
df2Add = df2Add.assign(relPriceDevCoingecko=((df2Add['DFIPrices'] - df2Add['reserveA/reserveB'])/df2Add['DFIPrices']))
df2Add = df2Add.assign(relPriceDevBittrex=((df2Add['DFIPricesBittrex'] - df2Add['reserveA/reserveB']) / df2Add['DFIPricesBittrex']))
# add prefix to column names for pool identification
colNamesOrig = df2Add.columns.astype(str)
colNamesNew = poolSymbol+'_' + colNamesOrig
df2Add = df2Add.rename(columns=dict(zip(colNamesOrig, colNamesNew)))
# delete existing information and add new one
ind2Delete = self.minutelyData.columns.intersection(colNamesNew) # check if columns exist
self.minutelyData.drop(columns=ind2Delete, inplace=True) # delete existing columns to add new ones
self.minutelyData = self.minutelyData.merge(df2Add, how='outer', left_index=True, right_index=True) # add new columns to daily table
self.minutelyData.dropna(axis=0, how='all',inplace=True)
self.update_dexMinutely = fileInfo.stat()
print('>>>> Minutely DEX data loaded from csv-file <<<<')
#### NO TIMESERIES ####
def loadNoTimeseriesData(self):
self.loadLastRichlist()
self.loadSnapshotData()
self.loadChangelogData()
def loadLastRichlist(self):
filePath = self.dataPath + 'Richlist/'
listCSVFiles = glob.glob(filePath + "*_01-*.csv") # get all csv-files generated at night
newestDate = self.dailyData['nbMnId'].dropna().index.max() # find newest date in extracted Data
file2Load = [x for x in listCSVFiles if re.search(newestDate, x)] # find corresponding csv-file of richlist
fname = pathlib.Path(file2Load[0])
if fname.stat() != self.updated_LastRichlist:
self.lastRichlist = pd.read_csv(file2Load[0]) # load richlist
# date for information/explanation
self.lastRichlist['date'] = pd.to_datetime(newestDate)
self.updated_LastRichlist = fname.stat()
print('>>>>>>>>>>>>> Richlist ', file2Load[0], ' loaded <<<<<<<<<<<<<')
def loadSnapshotData(self):
filePath = self.dataPath + 'snapshotData.csv'
fileInfo = pathlib.Path(filePath)
if fileInfo.stat() != self.update_snapshotData:
self.snapshotData = | pd.read_csv(filePath, index_col=0) | pandas.read_csv |
import pandas as pd
import os
import requests as req
import sys
import re
import dask.dataframe as dd
from lxml import etree
import io
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s|%(name)s|%(levelname)s|%(message)s',
datefmt='%m-%d %H:%M',
# filename='/temp/myapp.log',
filemode='w')
# console = logging.StreamHandler()
logger = logging.getLogger('EdgarAnalyzer')
logger.setLevel('INFO')
# logger.addHandler(console)
dir_curr = os.path.abspath(os.path.dirname(__file__))
def ticker2cik(symbol):
symbol = str(symbol).upper()
    cik = ticker2cik_file(symbol)
    if cik is None:
        cik = ticker2cik_sec(symbol)
return cik
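# Hedged usage sketch (the ticker is illustrative; the SEC lookup needs network
# access and the CSV fallback needs config/cik_mapping.csv):
#   >>> cik = ticker2cik('AAPL')
#   >>> cik is None or (isinstance(cik, str) and len(cik) == 10)
#   True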
def conv_list(i):
list_res = None
if isinstance(i, str) | isinstance(i, int):
list_res = [i]
elif isinstance(i, list):
list_res = i
return list_res
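# Hedged illustration (values are illustrative):
#   >>> conv_list('10-K'), conv_list(['10-K', '10-Q']), conv_list(3.5)
#   (['10-K'], ['10-K', '10-Q'], None)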
def ticker2cik_file(symbol):
symbol = str(symbol).upper()
path_cik_mapping = os.path.join(dir_curr, 'config', 'cik_mapping.csv')
df_mapping = pd.read_csv(path_cik_mapping).set_index('Ticker')['CIK']
if symbol in df_mapping.index:
if df_mapping[[symbol]].shape[0] == 1:
cik = str(df_mapping[symbol]).zfill(10)
else:
logger.warning('non-unique CIK for Symbol={s} in cik_mapping.csv'.format(s=symbol))
cik = ticker2cik_sec(symbol)
else:
logger.warning('Symbol not found in cik_mapping.csv.')
cik = None
return cik
def ticker2cik_sec(symbol, update_mapping=True):
symbol = str(symbol).upper()
try:
uri = "http://www.sec.gov/cgi-bin/browse-edgar"
resp = req.get(uri, {'CIK': symbol, 'action': 'getcompany'})
results = re.compile(r'.*CIK=(\d{10}).*').findall(str(resp.content))
cik = str(results[0])
except Exception as ex:
logger.error(ex)
logger.error('Symbol not found in SEC')
cik = None
if update_mapping and (cik is not None):
update_cik(symbol, cik)
return cik
def update_cik(symbol, cik):
logger.warning('update cik_mapping symbol={s}, cik={c}'.format(s=symbol, c=cik))
symbol = str(symbol).upper()
path_cik_mapping = os.path.join(dir_curr, 'config', 'cik_mapping.csv')
df_mapping = pd.read_csv(path_cik_mapping).set_index('Ticker')
if symbol in df_mapping.index:
df_mapping = df_mapping.drop(symbol)
df_mapping.loc[symbol] = int(cik)
df_mapping.to_csv(path_cik_mapping)
def download_list(list_path, dir_report, uri='https://www.sec.gov/Archives/', force_download=False, threads_number=8):
from multiprocessing.pool import ThreadPool
list_file = [os.path.join(dir_report, p) for p in list_path]
if not force_download:
list_path = [p for p in list_path if not os.path.exists(os.path.join(dir_report, p))]
# list_url = [uri+p for p in list_path]
# list_file = [os.path.join(dir, p) for p in list_path]
def download_url(p):
r = req.get(uri + p, stream=True)
path_save = os.path.join(dir_report, p)
logger.info('downloading {f}'.format(f=path_save))
if r.status_code == 200:
dir_name = os.path.dirname(path_save)
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
with open(path_save, 'w') as f:
f.write(r.content.decode('latin-1'))
# with open(path_save, 'wb') as f:
# for chunk in r:
# f.write(chunk)
else:
logger.error('error downloading {f}'.format(f=uri + p))
return path_save
with ThreadPool(threads_number) as th:
results = list(th.imap_unordered(download_url, list_path))
#res = [p for p in list_path if os.path.exists(os.path.join(dir_report, p))]
list_res = []
for f in list_file:
if os.path.exists(f):
list_res.append(f)
else:
logger.error('cannot find filing file: '+f)
#results = list(ThreadPool(threads_number).imap_unordered(download_url, list_path))
# for l in results:
# logger.info('downloaded '+l)
return list_res
def re_string(keyword, data):
s = re.search(r'<{kw}>([\s\S]*?)\n'.format(kw=keyword), data)
res = s.group(1) if s else None
return res
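# Hedged illustration on a toy SGML-style snippet (not a real filing):
#   >>> re_string('TYPE', '<TYPE>10-K\n<SEQUENCE>1\n')
#   '10-K'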
def re_tag(tag, data, find_all=False):
s = re.search(r'<{tag}>\n([\s\S]*?)\n<\/{tag}>'.format(tag=tag), data)
res = s.group(1) if s else None
return res
def node2dict(node):
d = {}
for c in node.iterchildren():
key = c.tag.split('}')[1] if '}' in c.tag else c.tag
value = c.text if c.text else node2dict(c)
d[key] = value
return d
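# Hedged illustration on a toy XML fragment (not an actual XBRL instance):
#   >>> node = etree.fromstring('<period><startDate>2020-01-01</startDate>'
#   ...                         '<endDate>2020-12-31</endDate></period>')
#   >>> node2dict(node)
#   {'startDate': '2020-01-01', 'endDate': '2020-12-31'}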
def parse_ins(txt_ins, has_dimension=False):
xbrl_ins = re_tag('XBRL', txt_ins)
if xbrl_ins is None:
xbrl_ins = re_tag('XML', txt_ins)
xbrl_ins = xbrl_ins.replace('>\n', '>')
r_ins = etree.fromstring(xbrl_ins.encode('utf-8'))
ns_ins = {k:v for k,v in r_ins.nsmap.items() if k is not None}
if 'xbrli' not in ns_ins.keys():
logger.info('fix missing namespace xbrli. {s}'.format(s=ns_ins))
ns_ins['xbrli'] = "http://www.xbrl.org/2003/instance"
list_context = r_ins.findall(r'xbrli:context', namespaces=ns_ins)
list_period = [dict(i.attrib, **node2dict(i.find('xbrli:period', namespaces=ns_ins))) for i in list_context]
df_period = pd.DataFrame(list_period)
# if 'id' not in df_period.columns:
# print(r_ins[:10])
# print(r_ins.findall('context')[:10])
# print(len(list_context))
# print(len(list_period))
# print(df_period.head())
df_period = df_period.set_index('id')
# df_period.head()
list_unit = r_ins.findall('xbrli:unit', namespaces=ns_ins)
df_unit = pd.DataFrame([dict(i.attrib, **{'unit': i[0].text.split(':')[-1]})
for i in list_unit]).set_index('id')
# df_unit
list_dim = r_ins.xpath('.//*[@dimension]')
df_dim = pd.DataFrame([dict(d.attrib, **{'member': d.text,
'id': d.getparent().getparent().getparent().attrib['id']})
for d in list_dim]).set_index('id')
# df_dim.head()
list_measure = r_ins.xpath('.//*[@contextRef]')
df_measure = pd.DataFrame([dict(i.attrib, **{'measure': i.tag, 'value': i.text}) for i in list_measure])
# df_measure.head()
df_merge = df_measure.join(df_period, on='contextRef').join(df_unit, on='unitRef').join(df_dim, on='contextRef')
ns_reverse = {v: k for k, v in ns_ins.items()}
df_merge['ns'] = df_merge.measure.apply(lambda ns: ns_reverse[re.search('{(.*)}', ns).group(1)])
df_merge['item'] = df_merge['ns'] +":" +df_merge.measure.apply(lambda x: x.split('}')[-1])
# df_merge['endDate'] = df_merge.endDate
df_merge.endDate.update(df_merge.instant)
df_merge.startDate.update(df_merge.instant)
#parse dtype
df_merge.endDate = pd.to_datetime(df_merge.endDate, infer_datetime_format=True)
df_merge.startDate = pd.to_datetime(df_merge.startDate, infer_datetime_format=True)
df_merge.value = pd.to_numeric(df_merge.value, errors='ignore', downcast='integer')
df_merge.decimals = pd.to_numeric(df_merge.decimals, errors='ignore', downcast='integer')
# re.search('{(.*)}', ns).group(1)
# df_merge.head()
df_ins = df_merge[['item', 'startDate', 'endDate', 'value', 'decimals', 'unit', 'ns',
'dimension', 'member']].drop_duplicates()
if not has_dimension:
df_ins = df_ins[df_ins.dimension.isna()]
return df_ins
def parse_header(header, include_filer=False):
dict_replace = {'\t': "", ":": "|", '<': "", '>': '|'}
for k, v in dict_replace.items():
header = header.replace(k, v)
srs_header = pd.read_csv(io.StringIO(header), sep='|', header=None).set_index(0)[1]
if not include_filer:
srs_header = srs_header['ACCEPTANCE-DATETIME':'FILER'].dropna()
for i in srs_header.index:
if ('DATETIME' in i):
srs_header[i] = pd.to_datetime(srs_header[i])
elif ('DATE' in i) or ('PERIOD' in i):
srs_header[i] = | pd.to_datetime(srs_header[i]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 11:41:44 2018
@author: MichaelEK
"""
import types
import pandas as pd
import numpy as np
import json
from pdsf import sflake as sf
from utils import split_months
def process_allo(param, permit_use):
"""
Function to process the consented allocation from the input tables from Accela and others.
More descriptions in the code below.
Parameters
----------
param : dict
Input parameters
permit_use : DataFrame
DataFrame from the output of the process_use_types function
Returns
-------
DataFrame
"""
run_time_start = pd.Timestamp.today().strftime('%Y-%m-%d %H:%M:%S')
print(run_time_start)
#######################################
### Read in source data and update accela tables in ConsentsReporting db
print('--Reading in source data...')
## Make object to contain the source data
db = types.SimpleNamespace()
for t in param['misc']['AllocationProcessing']['tables']:
p = param['source data'][t]
print(p['table'])
if p['schema'] != 'public':
stmt = 'select {cols} from "{schema}"."{table}"'.format(schema=p['schema'], table=p['table'], cols=json.dumps(p['col_names'])[1:-1])
else:
stmt = 'select {cols} from "{table}"'.format(table=p['table'], cols=json.dumps(p['col_names'])[1:-1])
setattr(db, t, sf.read_table(p['username'], p['password'], p['account'], p['database'], p['schema'], stmt))
##################################################
### Sites
print('--Process Waps')
## takes
wap_allo1 = db.wap_allo.copy()
wap1 = wap_allo1['Wap'].unique()
waps = wap1[~pd.isnull(wap1)].copy()
## Check that all Waps exist in the USM sites table
usm_waps1 = db.waps[db.waps.isin(waps)].copy()
# usm_waps1[['NzTmX', 'NzTmY']] = usm_waps1[['NzTmX', 'NzTmY']].astype(int)
if len(wap1) != len(usm_waps1):
miss_waps = set(wap1).difference(set(usm_waps1.Wap))
print('Missing {} Waps in USM'.format(len(miss_waps)))
wap_allo1 = wap_allo1[~wap_allo1.Wap.isin(miss_waps)].copy()
##################################################
### Permit table
print('--Process Permits')
'''
WILCO:
    Selecting FromDate and ToDate was a bit of a pain in the ass for the Rakaia as well, I remember. I don't think there is any filtering done here below yet, but maybe it is
    good to consider that:
    1) Some consents may never have been active between the FromDate and ToDate. The 'Given Effect To' field can help with that. If the Given Effect To is later than the
    ToDate, then that consent was never exercised and (at least for modelling purposes) should be dropped from the list of consents.
    2) If the Given Effect To date is later than the FromDate, then set FromDate equal to Given Effect To.
    3) For parent and child consents (originals and renewals) it is good to check the FromDate and ToDate. In the Ecan database the FromDate of the renewal is most of the time
    equal to the ToDate of the parent (original record), which would lead to double accounting for that day. For the Rakaia I fixed this by making sure that
    the ToDate is always 1 day before the FromDate of the child consent.
    Below I have inserted some (commented) code that I used in my Rakaia work, so I am not sure whether you want to use this, yes/no.
'''
# #-Select consents that were active between sdate and edate
# print 'Filter consents that were active between %s and %s...' %(sdate.strftime('%d-%m-%Y'), edate.strftime('%d-%m-%Y'))
# df1 = df.loc[(df['toDate']>pd.Timestamp(sdate)) & (df['fmDate']<=pd.Timestamp(edate))]
# #-If 'Given Effect To' date is later than 'toDate', then consent was never active in between the fmDate-toDate period, and is therefore removed from the dataframe
# df1.loc[(df1['Given Effect To'] > df1['toDate']),:]=np.nan
# df2 = df1.dropna(how='all')
# #-If 'Given Effect To' date is later than 'fmDate', then the 'fmDate' field is set to 'Given Effect To'
# df2.loc[(df2['fmDate'] < df2['Given Effect To']),['fmDate']]= df2['Given Effect To']
#
# #-Unique consent numbers of 'OriginalRecord'
# ori_records = pd.unique(df2['OriginalRecord'])
# df2_columns = list(df2.columns)
# fmDate_index = df2_columns.index('fmDate')
# toDate_index = df2_columns.index('toDate')
# #-Make sure toDate is always 1 day before the fmDate of the child consent. Required to make sure that a consent isn't active twice on one day
# for c in ori_records:
# #-select the consents that belong to the same group (have same parent so to speak)
# df_short = df2.loc[df2['OriginalRecord']==c]
# for i in range(0,len(df_short)-1):
# toDate = df_short.iloc[i,toDate_index] #-toDate of current record
# fmDate = df_short.iloc[i+1,fmDate_index] #-fromDate of child record
# if toDate == fmDate: #-cannot be equal. If so, then decrease the todate of the current record with one day
# df_short.iloc[i, toDate_index] = toDate - dt.timedelta(days=1)
# df2.loc[df2['OriginalRecord']==c] = df_short
# #-get rid of old dataframes
# df = df2.copy()
# df1 = None; df2 = None; del df1, df2
#
# #-For consents that are active for one day, the toDate may now (because of extracting one day from toDate) be smaller than fmDate. Those records are removed
# df = df.loc[df['toDate']>=df['fmDate']]
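    # A minimal sketch of rules (1) and (2) above using the column names in this script.
    # It assumes a 'Given Effect To' datetime column is available on the permit table
    # (that column is not read in by this function), so it is left commented out:
    # p = db.permit.copy()
    # p = p[~(p['Given Effect To'] > p['ToDate'])]                  # (1) drop never-exercised consents
    # late = p['FromDate'] < p['Given Effect To']                   # (2) push FromDate forward
    # p.loc[late, 'FromDate'] = p.loc[late, 'Given Effect To']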
## Clean data
permits2 = db.permit.copy()
permits2['FromDate'] = pd.to_datetime(permits2['FromDate'], infer_datetime_format=True, errors='coerce')
permits2['ToDate'] = pd.to_datetime(permits2['ToDate'], infer_datetime_format=True, errors='coerce')
## Filter data
permits2 = permits2[permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull()].copy()
# permits2 = permits2[(permits2['FromDate'] > '1950-01-01') & (permits2['ToDate'] > '1950-01-01') & (permits2['ToDate'] > permits2['FromDate']) & permits2.NzTmX.notnull() & permits2.NzTmY.notnull() & permits2.ConsentStatus.notnull() & permits2.RecordNumber.notnull() & permits2['EcanID'].notnull()].copy()
## Convert datetimes to date
permits2.loc[permits2['FromDate'].isnull(), 'FromDate'] = pd.Timestamp('1900-01-01')
permits2.loc[permits2['ToDate'].isnull(), 'ToDate'] = pd.Timestamp('1900-01-01')
##################################################
### Parent-Child
print('--Process Parent-child table')
## Clean data
pc1 = db.parent_child.copy()
## Filter data
pc1 = pc1.drop_duplicates()
pc1 = pc1[pc1['ParentRecordNumber'].notnull() & pc1['ChildRecordNumber'].notnull()]
## Check foreign keys --> what are foreign keys?
crc1 = permits2.RecordNumber.unique()
pc0 = pc1[pc1.ParentRecordNumber.isin(crc1) & pc1.ChildRecordNumber.isin(crc1)].copy()
#################################################
### AllocatedRatesVolumes
print('--Process Allocation data')
## Rates
# Clean data
wa1 = wap_allo1.copy()
# Check foreign keys
wa4 = wa1[wa1.RecordNumber.isin(crc1)].copy()
# Find the missing Waps per consent
crc_wap_mis1 = wa4.loc[wa4.Wap.isnull(), 'RecordNumber'].unique()
crc_wap4 = wa4[['RecordNumber', 'Wap']].drop_duplicates()
for i in crc_wap_mis1:
crc2 = pc0[np.in1d(pc0.ParentRecordNumber, i)].ChildRecordNumber.values
wap1 = []
while (len(crc2) > 0) & (len(wap1) == 0):
wap1 = crc_wap4.loc[np.in1d(crc_wap4.RecordNumber, crc2), 'Wap'].values
crc2 = pc0[np.in1d(pc0.ParentRecordNumber, crc2)].ChildRecordNumber.values
if len(wap1) > 0:
wa4.loc[wa4.RecordNumber == i, 'Wap'] = wap1[0]
wa4 = wa4[wa4.Wap.notnull()].copy()
## Distribute the months
    # Since the tables in Accela have no explicit primary/composite keys, it is possible that the eventual
    # composite key 'RecordNumber', 'TakeType', 'SwAllocationBlock', 'Wap' does not fully capture the Accela
    # data set: the rates can also change by month. This occurs in fewer than 100 consents ever, so the
    # simplification seems justified. The code below splits each consent out by every month in which it is
    # allowed to be active, with the appropriate rates and volumes listed in the Accela table, then takes the
    # mean over all months so that there is only one value per 'RecordNumber', 'TakeType', 'SwAllocationBlock', 'Wap'.
cols1 = wa4.columns.tolist()
from_mon_pos = cols1.index('FromMonth')
to_mon_pos = cols1.index('ToMonth')
allo_rates_list = []
for val in wa4.itertuples(False, None):
from_month = int(val[from_mon_pos])
to_month = int(val[to_mon_pos])
if from_month > to_month:
mons = list(range(1, to_month + 1))
else:
mons = range(from_month, to_month + 1)
d1 = [val + (i,) for i in mons]
allo_rates_list.extend(d1)
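    # e.g. a row with FromMonth=6 and ToMonth=8 is expanded into three rows with Month = 6, 7, 8;
    # note that when FromMonth > ToMonth the rule above keeps only the months 1..ToMonth.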
col_names1 = wa4.columns.tolist()
col_names1.extend(['Month'])
wa5 = pd.DataFrame(allo_rates_list, columns=col_names1).drop(['FromMonth', 'ToMonth'], axis=1)
# Mean of all months
grp1 = wa5.groupby(['RecordNumber', 'TakeType', 'SwAllocationBlock', 'Wap'])
mean1 = grp1[['WapRate', 'AllocatedRate', 'VolumeDaily', 'VolumeWeekly', 'Volume150Day']].mean().round(2)
include1 = grp1['IncludeInSwAllocation'].first()
mon_min = grp1['Month'].min()
mon_min.name = 'FromMonth'
mon_max = grp1['Month'].max()
mon_max.name = 'ToMonth'
wa6 = pd.concat([mean1, mon_min, mon_max, include1], axis=1).reset_index()
# wa6['HydroGroup'] = 'Surface Water'
## Rename allocation blocks !!!!!! Need to be changed later!!!!
# av1.rename(columns={'GwAllocationBlock': 'AllocationBlock'}, inplace=True)
# wa6.rename(columns={'SwAllocationBlock': 'AllocationBlock'}, inplace=True)
# wa6.replace({'SwAllocationBlock': {'In Waitaki': 'A'}}, inplace=True)
## Combine volumes with rates !!! Needs to be changed later!!!
# wa7 = pd.merge(av1, wa6, on=['RecordNumber', 'TakeType'])
## Add in stream depletion
waps = db.waps.copy()
wa7 = | pd.merge(wa6, waps, on='Wap') | pandas.merge |
import random
import spotipy
import requests
import pandas as pd
from sklearn import metrics
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
class Recommend:
'''
Arguments -
client_id - unique client ID
client_secret - unique secret key
username - unique Spotify username
'''
def __init__(self, client_id = None, client_secret = None, username = None):
self.client_id = client_id
self.client_secret = client_secret
self.username = username
self.url = 'https://api.spotify.com/v1/recommendations?'
self.market = 'US'
self.sp = spotipy.Spotify(auth = self.generate_token())
def generate_token(self):
post_response = requests.post('https://accounts.spotify.com/api/token', {
'grant_type': 'client_credentials',
'client_id': self.client_id,
'client_secret': self.client_secret,
})
post_respose_json = post_response.json()
token = post_respose_json['access_token']
return token
def print_response(self, query):
token = self.generate_token()
response = requests.get(query, headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"})
json_response = response.json()
try:
print('Recommendations:\n')
for i, j in enumerate(json_response['tracks']):
print(f"{i+1}) \"{j['name']}\" : {j['artists'][0]['name']}")
print()
except:
print(json_response)
def byArtistSpotify(self, artist = None, number = None):
if artist is None:
print('Enter the artist as a string argument\n')
if number is None:
number = 10
artist_result = self.sp.search(artist)
try:
seed_artists = artist_result['tracks']['items'][0]['artists'][0]['uri'][15:]
seed_genres = []
seed_genres_entire = self.sp.artist(seed_artists)
if len(seed_genres_entire) < 3:
seed_genres = seed_genres_entire
else:
for i in seed_genres_entire['genres'][:3]:
seed_genres.append(i)
query = f'{self.url}limit={number}&market={self.market}&seed_genres={seed_genres}&seed_artists={seed_artists}'
print(self.print_response(query))
except:
print('Seed artists for given artist could not be generated\n')
def byTrackSpotify(self, track_URI = None, number = None):
if track_URI is None:
print('Enter the track_URI as a string argument\n')
if number is None:
number = 10
track_ID = track_URI.split(':')[2]
try:
meta = self.sp.track(track_ID)
artist = meta['album']['artists'][0]['name']
artist_result = self.sp.search(artist)
try:
seed_artists = artist_result['tracks']['items'][0]['artists'][0]['uri'][15:]
seed_genres = []
seed_genres_entire = self.sp.artist(seed_artists)
if len(seed_genres_entire) < 3:
seed_genres = seed_genres_entire
else:
for i in seed_genres_entire['genres'][:3]:
seed_genres.append(i)
query = f'{self.url}limit={number}&market={self.market}&seed_genres={seed_genres}&seed_artists={seed_artists}&seed_tracks={track_ID}'
print(self.print_response(query))
except:
print('Seed artist for given track could not be generated\n')
except:
print('Recheck track_URI argument\n')
def byPlaylistSpotify(self, playlist_URL = None, number = None):
if number is None:
number = 10
if playlist_URL is None:
print('Recheck playlist_URL argument\n')
playlist_id = playlist_URL[34:]
df = pd.DataFrame(columns = ['Name', 'Album', 'Artist', 'Year', 'Duration', 'Danceability', 'Energy'])
track_ids = []
for i in self.sp.playlist(playlist_id)['tracks']['items']:
track_ids.append(i['track']['id'])
for i in track_ids:
meta = self.sp.track(i)
features = self.sp.audio_features(i)
track_dict = {
'Name' : meta['name'],
'Album' : meta['album']['name'],
'Artist' : meta['album']['artists'][0]['name'],
'Year' : meta['album']['release_date'][0:4],
'Duration' : meta['duration_ms'] * 0.001,
'Danceability' : features[0]['danceability'],
'Energy' : features[0]['energy']
}
df = df.append(track_dict, ignore_index = True, sort = False)
common_artist = self.sp.search(df['Artist'].value_counts().head(1))
seed_artists = common_artist['tracks']['items'][0]['artists'][0]['uri'][15:]
seed_genres = []
seed_genres_entire = self.sp.artist(seed_artists)
if len(seed_genres_entire) < 3:
seed_genres = seed_genres_entire
else:
for i in seed_genres_entire['genres'][:3]:
seed_genres.append(i)
seed_tracks = random.choice(track_ids)
target_danceability = round(df['Danceability'].mean(), 1)
target_energy = round(df['Energy'].mean(), 1)
try:
query = f'{self.url}limit={number}&market={self.market}&seed_genres={seed_genres}'
query += f'&target_danceability={target_danceability}'
query += f'&target_energy={target_energy}'
query += f'&seed_artists={seed_artists}&seed_tracks={seed_tracks}'
print(self.print_response(query))
except:
print('Query could not be executed\n')
def byAudioFeaturesSpotify(self, target_acousticness = None, target_danceability = None, target_duration_ms = None, target_energy = None, target_instrumentalness = None, target_key = None, target_liveness = None, target_loudness = None, target_mode = None, target_popularity = None, target_speechiness = None, target_tempo = None, target_time_signature = None, target_valence = None, artist = None):
if artist is None:
print('Enter the artist as a string argument\n')
artist_result = self.sp.search(artist)
try:
seed_artists = artist_result['tracks']['items'][0]['artists'][0]['uri'][15:]
seed_genres = []
seed_genres_entire = self.sp.artist(seed_artists)
if len(seed_genres_entire) < 3:
seed_genres = seed_genres_entire
else:
for i in seed_genres_entire['genres'][:3]:
seed_genres.append(i)
query = f'{self.url}limit={10}&market={self.market}&seed_genres={seed_genres}'
if target_acousticness is not None:
query += f'&target_acousticness={target_acousticness}'
if target_danceability is not None:
query += f'&target_danceability={target_danceability}'
            if target_duration_ms is not None:
                query += f'&target_duration_ms={target_duration_ms}'
            if target_energy is not None:
                query += f'&target_energy={target_energy}'
            if target_instrumentalness is not None:
                query += f'&target_instrumentalness={target_instrumentalness}'
            if target_key is not None:
                query += f'&target_key={target_key}'
            if target_liveness is not None:
                query += f'&target_liveness={target_liveness}'
            if target_loudness is not None:
                query += f'&target_loudness={target_loudness}'
            if target_mode is not None:
                query += f'&target_mode={target_mode}'
            if target_popularity is not None:
                query += f'&target_popularity={target_popularity}'
            if target_speechiness is not None:
                query += f'&target_speechiness={target_speechiness}'
            if target_tempo is not None:
                query += f'&target_tempo={target_tempo}'
            if target_time_signature is not None:
                query += f'&target_time_signature={target_time_signature}'
            if target_valence is not None:
                query += f'&target_valence={target_valence}'
query += f'&seed_artists={seed_artists}'
print(self.print_response(query))
except:
print('Seed artists for given artist could not be generated\n')
def byTrack(self, track_URL = None, number = None, query = None, cluster = None):
if track_URL is None:
print('Recheck track_URL argument\n')
track_ID = track_URL[31:].split('?')[0]
if number is None:
number = 10
if query is None and cluster is None:
print('Specify method of recommendation as boolean argument\n')
if query is True and cluster is True:
print('Specify single method of recommendation as boolean argument\n')
if query == True:
meta = self.sp.track(track_ID)
features = self.sp.audio_features(track_ID)
target_year = meta['album']['release_date'][0:4]
target_popularity = meta['popularity']
target_danceability = features[0]['danceability']
target_energy = features[0]['energy']
tracks_df = pd.read_csv('tracks.csv')
try:
results = pd.DataFrame()
results = tracks_df.loc[(tracks_df['popularity'] >= max(0, target_popularity - 2))
& (tracks_df['popularity'] < target_popularity + 1)
& (tracks_df['energy'] >= max(0, target_energy - 0.1))
& (tracks_df['energy'] < target_energy + 0.1)
& (tracks_df['danceability'] >= max(0, target_danceability - 0.1))
& (tracks_df['danceability'] < target_danceability + 0.1)
& (tracks_df['release_date'].str.startswith(str(target_year))),['name', 'popularity', 'artists', 'release_date']
]
count = 1
results = results.sort_values(by = ['popularity'], ascending = False).head(number)
print('Recommendations :')
for result in range(len(results)):
print(count, ') ', results.iloc[result, 0], ' : ', results.iloc[result, 2], sep = '')
count += 1
except:
print('Recommendations could not be generated\n')
if cluster == True:
clusters_df = pd.read_csv('Clusters/Clusters.csv')
cluster = int(clusters_df.loc[clusters_df['id'] == track_ID].iloc[0, 5])
target_popularity = int(clusters_df.loc[clusters_df['id'] == track_ID].iloc[0, 4])
results = | pd.DataFrame() | pandas.DataFrame |
from numpy import nan
from pandas import DataFrame, Timestamp
from pandas.testing import assert_frame_equal
from shapely.geometry import Point
from pymove import MoveDataFrame, conversions
from pymove.utils.constants import (
DATETIME,
DIST_TO_PREV,
GEOMETRY,
LATITUDE,
LONGITUDE,
SPEED_TO_PREV,
TIME_TO_PREV,
TRAJ_ID,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 1],
]
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def test_lat_meters():
expected = 98224.0229295811
lat_in_meters = conversions.lat_meters(39.984094)
assert(lat_in_meters == expected)
def test_list_to_str():
expected = 'banana,maca,laranja'
joined_list = conversions.list_to_str(['banana', 'maca', 'laranja'])
assert(joined_list == expected)
def test_list_to_csv_str():
expected = 'banana 1:maca 2:laranja'
joined_list = conversions.list_to_svm_line(['banana', 'maca', 'laranja'])
assert(joined_list == expected)
def test_lon_to_x_spherical():
expected = -4285978.172767829
assert(conversions.lon_to_x_spherical(-38.501597) == expected)
def test_lat_to_y_spherical():
expected = -423086.2213610324
assert(conversions.lat_to_y_spherical(-3.797864) == expected)
def test_x_to_lon_spherical():
expected = -38.50159697513617
assert(conversions.x_to_lon_spherical(-4285978.17) == expected)
def test_y_to_lat_spherical():
expected = -35.89350841198311
assert(conversions.y_to_lat_spherical(-4285978.17) == expected)
def test_geometry_points_to_lat_and_lon():
move_df = DataFrame(
data=[['1', Point(116.36184, 39.77529)],
['2', Point(116.36298, 39.77564)],
['3', Point(116.33767, 39.83148)]],
columns=[TRAJ_ID, GEOMETRY],
)
expected_geometry_drop = DataFrame(
data=[['1', 116.36184, 39.77529],
['2', 116.36298, 39.77564],
['3', 116.33767, 39.83148]],
columns=[TRAJ_ID, LONGITUDE, LATITUDE]
)
expected_with_geometry = DataFrame(
data=[['1', Point(116.36184, 39.77529), 116.36184, 39.77529],
['2', Point(116.36298, 39.77564), 116.36298, 39.77564],
['3', Point(116.33767, 39.83148), 116.33767, 39.83148]],
columns=[TRAJ_ID, GEOMETRY, LONGITUDE, LATITUDE]
)
new_move_df = conversions.geometry_points_to_lat_and_lon(
move_df, inplace=False, drop_geometry=True
)
assert_frame_equal(new_move_df, expected_geometry_drop)
new_move_df2 = conversions.geometry_points_to_lat_and_lon(
move_df, inplace=False, drop_geometry=False
)
assert_frame_equal(new_move_df2, expected_with_geometry)
def test_lat_and_lon_decimal_degrees_to_decimal():
move_df = DataFrame(
data=[['0', '28.0N', '94.8W'],
['1', '41.3N', '50.4W'],
['1', '40.8N', '47.5W']],
columns=['id', 'lat', 'lon']
)
expected = DataFrame(
data=[['0', 28.0, -94.8],
['1', 41.3, -50.4],
['1', 40.8, -47.5]],
columns=['id', 'lat', 'lon'],
)
new_move_df = conversions.lat_and_lon_decimal_degrees_to_decimal(move_df)
assert_frame_equal(new_move_df, expected)
def test_ms_to_kmh():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153,
1.0,
49.284551
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403788,
5.0,
5.330727
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.000000,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.ms_to_kmh(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.ms_to_kmh(move_df, new_label='converted_speed', inplace=True)
expected.rename(columns={SPEED_TO_PREV: 'converted_speed'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_kmh_to_ms():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153,
1.0,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403788,
5.0,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.000000,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.kmh_to_ms(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.kmh_to_ms(move_df, new_label='converted_speed', inplace=True)
expected.rename(columns={SPEED_TO_PREV: 'converted_speed'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_meters_to_kilometers():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
0.013690153134343689,
1.0,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.007403787866531697,
5.0,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.meters_to_kilometers(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.meters_to_kilometers(
move_df, new_label='converted_distance', inplace=True
)
expected.rename(columns={DIST_TO_PREV: 'converted_distance'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_kilometers_to_meters():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
1.0,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403787866531697,
5.0,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.kilometers_to_meters(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.kilometers_to_meters(
move_df, new_label='converted_distance', inplace=True
)
expected.rename(columns={DIST_TO_PREV: 'converted_distance'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_seconds_to_minutes():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
0.016666666666666666,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403787866531697,
0.08333333333333333,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.seconds_to_minutes(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.seconds_to_minutes(move_df, new_label='converted_time', inplace=True)
expected.rename(columns={TIME_TO_PREV: 'converted_time'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_minute_to_seconds():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
1.0,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403787866531697,
5.0,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.minute_to_seconds(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.minute_to_seconds(move_df, new_label='converted_time', inplace=True)
expected.rename(columns={TIME_TO_PREV: 'converted_time'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_minute_to_hours():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
0.0002777777777777778,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403787866531697,
0.0013888888888888887,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.minute_to_hours(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.minute_to_hours(move_df, new_label='converted_time', inplace=True)
expected.rename(columns={TIME_TO_PREV: 'converted_time'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_hours_to_minute():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
0.016666666666666666,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403787866531697,
0.08333333333333334,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.hours_to_minute(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.hours_to_minute(move_df, new_label='converted_time', inplace=True)
expected.rename(columns={TIME_TO_PREV: 'converted_time'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_seconds_to_hours():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
0.0002777777777777778,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403787866531697,
0.001388888888888889,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.seconds_to_hours(move_df, inplace=False)
assert_frame_equal(new_move_df, expected)
conversions.seconds_to_hours(move_df, new_label='converted_time', inplace=True)
expected.rename(columns={TIME_TO_PREV: 'converted_time'}, inplace=True)
assert_frame_equal(move_df, expected)
def test_hours_to_seconds():
move_df = _default_move_df()
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
1.0,
13.690153
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
7.403787866531697,
5.0,
1.480758
],
[
1,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan],
],
columns=[TRAJ_ID,
LATITUDE,
LONGITUDE,
DATETIME,
DIST_TO_PREV,
TIME_TO_PREV,
SPEED_TO_PREV],
index=[0, 1, 2, 3],
)
new_move_df = conversions.hours_to_seconds(move_df, inplace=False)
| assert_frame_equal(new_move_df, expected) | pandas.testing.assert_frame_equal |
""" Fred View """
__docformat__ = "numpy"
import argparse
from typing import List
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import matplotlib.pyplot as plt
from fredapi import Fred
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
valid_date,
plot_autoscale,
)
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal.config_terminal import API_FRED_KEY
from gamestonk_terminal import feature_flags as gtff
register_matplotlib_converters()
api_map = {
"gdp": "GDP",
"t10": "DGS10",
"t1": "DGS1",
"t5": "DGS5",
"t30": "DGS30",
"mort30": "MORTGAGE30US",
"fedrate": "FEDFUNDS",
"moodAAA": "AAA",
"usdcad": "DEXCAUS",
"unemp": "UNRATE",
}
title_map = {
"gdp": "Gross Domestic Product",
"t10": "10-Year Treasury Constant Maturity Rate",
"t1": "1-Year Treasury Constant Maturity Rate",
"t5": "5-Year Treasury Constant Maturity Rate",
"t30": "30-Year Treasury Constant Maturity Rate",
"mort30": "30-Year Mortgage Rate",
"fedrate": "Effective Federal Funds Rate",
"moodAAA": "Moody's Seasoned AAA Corporate Bond Yield",
"usdcad": "Canada / U.S. Foreign Exchange Rate",
"unemp": "Unemployment Rate",
}
def get_fred_data(other_args: List[str], choice: str):
"""Displace Fred data and graph for a selected chosen data series
Parameters
----------
other_args : List[str]
argparse other args
choice : str
Fred data series: "gdp","unemp", "t1", "t5", "t10", "t30", "mort30",
"fedrate", "moodAAA", "usdcad",
"""
fred = Fred(api_key=API_FRED_KEY)
parser = argparse.ArgumentParser(
add_help=False,
prog="Custom",
description="""
Custom Data
""",
)
parser.add_argument(
"-s",
dest="start_date",
type=valid_date,
default="2019-01-01",
required=False,
help="Starting date (YYYY-MM-DD) of data",
)
parser.add_argument(
"--noplot",
action="store_false",
default=True,
dest="noplot",
help="Suppress output plot",
)
parser.add_argument(
"--hidedata",
action="store_false",
default=True,
dest="hidedata",
help="Suppress data display plot",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
string_to_get = api_map[choice]
title = title_map[choice]
data = fred.get_series(string_to_get, ns_parser.start_date)
data = pd.DataFrame(data, columns=[f"{string_to_get}"])
data.index.name = "Date"
if ns_parser.hidedata:
print(data)
print("")
if ns_parser.noplot:
plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
plt.plot(data.index, data.iloc[:, 0], "-ok")
plt.xlabel("Time")
plt.xlim(data.index[0], data.index[-1])
plt.ylabel(f"{string_to_get}")
plt.grid(b=True, which="major", color="#666666", linestyle="-")
plt.minorticks_on()
plt.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
plt.title(title)
plt.show()
print("")
if gtff.USE_ION:
plt.ion()
except SystemExit:
print("")
except Exception as e:
print(e)
print("")
return
def custom_data(other_args: List[str]):
"""Displace Fred data for a custom series request
Parameters
----------
other_args : List[str]
argparse other args
"""
fred = Fred(api_key=API_FRED_KEY)
parser = argparse.ArgumentParser(
add_help=False,
prog="Custom",
description="""
Custom Data
""",
)
parser.add_argument(
"-i", "--id", dest="series_id", required=True, type=str, help="FRED Series ID"
)
parser.add_argument(
"-s",
dest="start_date",
type=valid_date,
default="2019-01-01",
required=False,
help="Starting date (YYYY-MM-DD) of data",
)
parser.add_argument(
"--noplot",
action="store_false",
default=True,
dest="noplot",
help="Suppress output plot",
)
parser.add_argument(
"--hidedata",
action="store_false",
default=True,
dest="hidedata",
help="Suppress data display plot",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
data = fred.get_series(ns_parser.series_id, ns_parser.start_date)
data = | pd.DataFrame(data, columns=[f"{ns_parser.series_id}"]) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module contains classes that model the behavior of equities (stocks)
and stock market indices. Many methods that give useful insights about the behavior of
stocks and indices are implemented, ranging from fundamental and technical analysis
to time series analysis. The analysis is conducted on day-to-day historical data
updated manually by the user, using the classes in the update_data.py module.
Financial statements (income statement and balance sheet) are also provided for the stocks.
The stock objects can be used to construct portfolios (see the portfolio.py module) and as the
underlying asset for derivatives such as options and futures contracts (see the derivatives.py module).
"""
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from scipy import stats
from bs4 import BeautifulSoup
import urllib3
#from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import style
from time_series_models import LeastSquares
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
style.use('ggplot')
register_matplotlib_converters()
class Index:
"""
"""
def __init__(self, quote):
"""
This is the constructor of the Index base class
"""
#self.__http = urllib3.PoolManager()
urllib3.disable_warnings()
self.quote = quote
def get_current_price(self):
"""
This method returns the current price of an asset based on the price
indicated by Yahoo Finance. It makes an http request.
"""
http = urllib3.PoolManager()
urllib3.disable_warnings()
S = http.request('GET', 'https://finance.yahoo.com/quote/' + self.quote + '?p=^' + self.quote)
soup = BeautifulSoup(S.data, 'lxml')
J = soup.find('span', class_ = 'Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(ib)')
return float(J.text.replace(',', ''))
def get_prices(self, return_dates = False):
"""
This method opens the historical price data file of the asset
        and returns a time series of the adjusted closing prices.
Parameters:
----------
return_dates: bool, defaults to False.
Returns:
-------
close_prices: ndarray, a numpy array containing the closing prices.
dates: ndarray, a numpy array containing the dates.
"""
#df = pd.read_csv('data/historical_data/' + self.quote + '.dat')
df = pd.read_csv('data/historical_data/{}.dat'.format(self.quote))
close_prices = np.array(df['Adj Close'])
dates = np.array(df['Date'])
if return_dates:
return close_prices, dates
else:
return close_prices
def get_volume(self):
"""
This method returns a time series of the daily volume.
Returns:
-------
volume: ndarray, a numpy array containing the daily volume.
"""
volume = np.array(pd.read_csv('data/historical_data/{}.dat'.format(self.quote))['Volume'])
return volume
#return np.array(pd.read_csv('data/historical_data/' + self.quote + '.dat')['Volume'])
def calc_log_returns(self):
"""
This method calculates the log returns of the asset based on daily historical
closing prices. Many other methods and calculations are based on this time series.
Returns:
-------
log_returns: ndarray, a numpy array containing the log returns.
"""
closeDF = pd.read_csv('data/historical_data/' + self.quote + '.dat')['Adj Close']
log_returns = np.array(np.log(closeDF / closeDF.shift(1)).dropna())
return log_returns
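    # The series above is r_t = ln(P_t / P_{t-1}); a quick numerical check with hypothetical prices:
    #   prices = pd.Series([100.0, 101.0, 99.0])
    #   np.log(prices / prices.shift(1)).dropna()   # -> approximately [0.00995, -0.02000]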
def calc_exp_return(self, annualized = True):
"""
This method calculates the daily expected return of the asset based
on the historical returns. The annualized return is also possible to calculate.
        Parameters:
----------
annualized: bool, if set to True, the method returns the annualized return.
It defaults to True.
Returns:
-------
exp_return: float, the expected return (daily or annualized).
"""
log_returns = self.calc_log_returns()
if annualized:
exp_return = log_returns.mean() * 252
else:
exp_return = log_returns.mean()
return exp_return
def calc_std(self, annualized = True):
"""
This method calculates the daily standard deviation of an asset based on the
historical returns. The annualized return is also possible to calculate.
        Parameters:
----------
annualized: bool, if set to True, the method returns the annualized standard deviation.
It defaults to True.
Returns:
-------
standard_dev: float, the standard deviation (daily or annualized).
"""
log_returns = self.calc_log_returns()
if annualized:
standard_dev = log_returns.std() * np.sqrt(252)
else:
standard_dev = log_returns.std()
return standard_dev
def calc_skewness(self):
"""
This method calculates the skewness of the asset based on the historical returns.
Returns:
-------
skewness: float, the skewness.
"""
skewness = stats.skew(self.calc_log_returns())
return skewness
def calc_kurtosis(self):
"""
This method calculates the kurtosis of the asset based on the historical returns.
Returns:
-------
kurtosis: float, the kurtosis.
"""
kurtosis = stats.kurtosis(self.calc_log_returns())
return kurtosis
def calc_corr_coef(self, asset):
"""
This method calculates the correlation coefficient between two assets.
        Both assets must have a calc_log_returns method, thus the asset object
        passed as a parameter may be either a Stock object or an Index object.
Parameters:
----------
asset: Stock or Index object, the asset of which the log returns are used to calculate
the correlation coefficient between the two assets.
Returns:
-------
corr_coef: float, the correlation coefficient between the two assets.
"""
corr_coef = np.corrcoef(self.calc_log_returns(), asset.calc_log_returns())[0][1]
return corr_coef
def calc_ACF(self, lags):
"""
        This method calculates the autocorrelation of the asset at each of the given lags.
        Parameters:
        ----------
        lags: iterable of int, the lags at which the autocorrelations are calculated.
Returns:
-------
acf: ndarray, a numpy array of the autocorrelations.
"""
log_returns = self.calc_log_returns()
acf = np.array([np.corrcoef(log_returns[lag:], log_returns[:-lag])[0][1] for lag in lags])
return acf
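    # Each element above is the sample autocorrelation at lag k, i.e. the Pearson correlation
    # between r_t and r_{t-k} computed on the overlapping part of the return series.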
def calc_PACF(self, lags):
"""
        This method calculates the partial autocorrelation of the asset at each of the given lags.
        Parameters:
        ----------
        lags: iterable of int, the lags at which the partial autocorrelations are calculated.
Returns:
-------
pacf: ndarray, a numpy array of the partial autocorrelations.
"""
log_returns = self.calc_log_returns()
regressor = LeastSquares()
pacf = []
for lag in lags:
X = np.array([log_returns[i:-(lag - i)] for i in range(lag)]).T
y = log_returns[lag:]
regressor.fit(X, y)
pacf.append(regressor.coefs[1])
pacf = np.array(pacf)
return pacf
def test_normality(self):
"""
This method returns the t-statistic and the p-value of the normality test of the
asset's returns.
Returns:
-------
results: ndarray, a numpy array of the normality test results.
"""
results = stats.normaltest(self.calc_log_returns())
return results
def test_autocorrelation(self, lags):
"""
"""
ACF = self.calc_ACF(lags)
n = len(self.calc_log_returns())
Q = []
p_values = []
for lag in lags:
autocorrs = ACF[:lag]
k = np.arange(1, len(lags[:lag]) + 1)
q = n * (n + 2) * ((autocorrs ** 2) / (n - k)).sum()
p = 1 - stats.chi2.cdf(q, lag)
Q.append(q)
p_values.append(p)
return (np.array(Q), np.array(p_values))
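    # The statistic above is the Ljung-Box Q (for lags enumerating 1..h):
    #   Q(h) = n * (n + 2) * sum_{k=1..h} acf_k^2 / (n - k),
    # compared with a chi-squared distribution with h degrees of freedom; a small p-value
    # rejects the null hypothesis of no autocorrelation up to lag h.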
def test_partial_autocorrelation(self, lags):
PACF = self.calc_PACF(lags)
n = len(self.calc_log_returns())
Q = []
p_values = []
for lag in lags:
partial_autocorrs = PACF[:lag]
k = np.arange(1, len(lags[:lag]) + 1)
q = n * (n + 2) * ((partial_autocorrs ** 2) / (n - k)).sum()
p = 1 - stats.chi2.cdf(q, lag)
Q.append(q)
p_values.append(p)
return (np.array(Q), np.array(p_values))
def test_stationarity(self, number_of_subsamples):
log_returns = self.calc_log_returns()
n = len(log_returns)
A = np.arange(0, n, n / number_of_subsamples)
A = np.array([int(i) for i in A])
subsamples = [log_returns[A[i]:A[i + 1]] for i in range(len(A) - 1)]
subsamples.append(log_returns[A[-1]:])
results = [{'mean': round(subsample.mean(), 5), 'std': round(subsample.std(), 5)} for subsample in subsamples]
for i in results:
print(i)
def calc_sharpe_ratio(self, rf):
return (self.calc_exp_return() - rf) / self.calc_std()
def descriptive_stats(self):
closeDF = pd.read_csv('data/historical_data/' + self.quote + '.dat')['Adj Close']
log_returns = np.log(closeDF / closeDF.shift(1)).dropna()
desc = log_returns.describe()
skewness = self.calc_skewness()
kurtosis = self.calc_kurtosis()
print('-----Descriptive Statistics for ' + self.quote + '-----')
print('count\t', desc['count'])
print('mean\t', round(desc['mean'], 6))
print('std\t', round(desc['std'], 6))
print('skew\t', round(skewness, 6))
print('kurt\t', round(kurtosis, 6))
print('min\t', round(desc['min'], 6))
print('max\t', round(desc['max'], 6))
print('25%\t', round(desc['25%'], 6))
print('50%\t', round(desc['50%'], 6))
print('75%\t', round(desc['75%'], 6))
def plot_price(self):
closeDF, dates = self.get_prices(return_dates = True)
rolling_mean = pd.DataFrame(closeDF).rolling(window = 60, min_periods = 0).mean()
dates = | pd.to_datetime(dates) | pandas.to_datetime |
# coding: utf8
import torch
import pandas as pd
import numpy as np
from os import path
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import abc
from clinicadl.tools.inputs.filename_types import FILENAME_TYPE
import os
import nibabel as nib
import torch.nn.functional as F
from scipy import ndimage
import socket
from utils import get_dynamic_image
from .batchgenerators.transforms.color_transforms import ContrastAugmentationTransform, BrightnessTransform, \
GammaTransform, BrightnessGradientAdditiveTransform, LocalSmoothingTransform
from .batchgenerators.transforms.crop_and_pad_transforms import CenterCropTransform, RandomCropTransform, \
RandomShiftTransform
from .batchgenerators.transforms.noise_transforms import RicianNoiseTransform, GaussianNoiseTransform, \
GaussianBlurTransform
from .batchgenerators.transforms.spatial_transforms import Rot90Transform, MirrorTransform, SpatialTransform
from .batchgenerators.transforms.abstract_transforms import Compose
from .batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter
from .data_tool import hilbert_2dto3d_cut, hilbert_3dto2d_cut, hilbert_2dto3d, hilbert_3dto2d, linear_2dto3d_cut, \
linear_3dto2d_cut, linear_2dto3d, linear_3dto2d
#################################
# Datasets loaders
#################################
class MRIDataset(Dataset):
"""Abstract class for all derived MRIDatasets."""
def __init__(self, caps_directory, data_file,
preprocessing, transformations=None):
self.caps_directory = caps_directory
self.transformations = transformations
self.diagnosis_code = {
'CN': 0,
'AD': 1,
'sMCI': 0,
'pMCI': 1,
'MCI': 2,
'unlabeled': -1}
self.preprocessing = preprocessing
self.num_fake_mri = 0
if not hasattr(self, 'elem_index'):
raise ValueError(
"Child class of MRIDataset must set elem_index attribute.")
if not hasattr(self, 'mode'):
raise ValueError(
"Child class of MRIDataset must set mode attribute.")
# Check the format of the tsv file here
if isinstance(data_file, str):
self.df = | pd.read_csv(data_file, sep='\t') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 17:24:49 2020
@author: larabreitkreutz
"""
import pathlib
import os
import pandas as pd
import inspect
src_file_path = inspect.getfile(lambda: None)
#run add_datetime.py and load data here
# Creates an empty list
filelist = []
# Iterates over the files in each folder and appends the file's name to "filelist"
for path in pathlib.Path(os.path.dirname(os.path.abspath(__file__))).iterdir():
print(path)
file_str = str(path)
if 'datetime.csv' in file_str:
folder = os.path.basename(file_str)
filelist.append(folder)
# Creates a main empty dataframe
main_df = pd.DataFrame(columns=['Long', 'Lat', 'Time', 'ITP'])
# Creates an empty list and reads each file as a CSV, appending it to the main dataframe
info = []
for filename in filelist:
info = | pd.read_csv(filename) | pandas.read_csv |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_stats_utils.ipynb (unless otherwise specified).
__all__ = ['cPreProcessing', 'cStationary', 'cErrorMetrics']
# Cell
import numpy as np
import pandas as pd
from scipy.stats import boxcox, pearsonr
from scipy.special import inv_boxcox
from pandas.tseries.frequencies import to_offset
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tools.eval_measures import aic, bic
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
# Cell
class cPreProcessing():
"""
Parent class.
Methods for dealing with irregularly spaced or missing data.
"""
def __init__(self):
pass
def fget_regular_times(self, df, timestep):
"""
Generate dataframe of regularly spaced times (to impute to)
(From fbprophet/forecaster/make_future_dataframe)
Parameters
----------
df = [pandas dataframe]
timestep = [datetime timedelta object]
Returns
-------
regtimes = [pandas DatetimeIndex] of datetimes regularly spaced at timestep
"""
# normalize start date to midnight
start_date = df.ds.min().normalize()
# round up end date by one extra timestep
end_date = (df.ds.max() + timestep).normalize()
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.date_range.html
regtimes = pd.date_range(start=start_date, end=end_date, freq=to_offset(timestep))
return regtimes
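    # Usage sketch (hypothetical values; assumes a 'ds' datetime column as elsewhere in this class):
    #   df = pd.DataFrame({'ds': pd.to_datetime(['2020-01-01 22:30', '2020-01-01 23:40'])})
    #   cPreProcessing().fget_regular_times(df, pd.Timedelta(hours=1))
    #   -> hourly DatetimeIndex from 2020-01-01 00:00 up to and including 2020-01-02 00:00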
def finterleaf(self, df, impute_times):
"""
        Interleave the dataframe with the new prediction times.
        Values at the added timestamps are set to NaN so that the imputer can fill them.
Parameters
----------
df = [pandas dataframe]
impute_times = [pandas DatetimeIndex] (format of regtimes)
Returns
-------
dft = pandas dataframe (format for use in fimpute)
"""
# if impute_times taken from fmake_regular_times()
if type(impute_times) == pd.core.indexes.datetimes.DatetimeIndex:
impute_times = pd.DataFrame(impute_times)
impute_times.columns = ["ds"]
# set date index
df.set_index('ds', inplace=True)
impute_times.set_index('ds', inplace=True)
# combine (interleaf)
dft = pd.concat([df, impute_times], sort=True)
dft.sort_values(by=["ds"], inplace=True)
# remove duplicate entries
dft = dft[dft.index.duplicated() == False]
return dft
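    # e.g. (hypothetical) original timestamps [00:07, 02:45] interleaved with impute_times
    # [00:00, 01:00, 02:00, 03:00] give a frame indexed by
    # [00:00, 00:07, 01:00, 02:00, 02:45, 03:00], with NaN values at the newly added stamps.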
def fimpute(self, df, method="time"):
"""
Imputation of data to new timestamps with NaN value.
Parameters
----------
df = dataframe containing original data and NaNs at timestamps for imputation
timestamps are the df index
Returns
-------
dfi = imputed dataframe
"""
        # interpolates halfway, doesn't account for weighting towards closer time
if method == "interp":
dfi = df.interpolate()
# missing values given mean value over whole time series
if method == "mean":
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp.fit(df)
dfi = imp.transform(df)
# linear interpolation weighted by timestamp proximity
if method == "time":
dfi = df.interpolate(method='time')
# smoothing
if method == "akima":
dfi = df.interpolate(method='akima')
return dfi
def fmake_regular_freq(self, df, timestep, method="time"):
"""
Interpolate data so regular update frequency throughout dataset.
(Deal with missing values)
Some python functions (e.g. seasonal_decompose, AutoArima) require a data "freq" argument
to determine seasonality. (Can be inferred from df.index.freq, df.index.inferred_freq)
Such functions require a constant data frequency.
Parameters
----------
df = irregularly space dataframe (with datestamp name "ds")
timestep = desired update frequency of data (timedelta object)
method = imputation method
Returns
-------
dff = imputed regularly spaced [pandas dataframe]
"""
        # 0. preprocess: if dataframe already has time as index: reset and add as column
if df.index.name == "ds":
# df_lim["ds"] = df_lim.index
df.reset_index(level=0, inplace=True)
# 1. include in dataset times where you want to impute (and set to NaN values)
impute_times = self.fget_regular_times(df, timestep)
dft = self.finterleaf(df, impute_times)
# 2. impute with chosen method
dfi = self.fimpute(dft, method=method)
# 3. remove original data not at correct timestep
dff = dfi[dfi.index.isin(impute_times) == True]
if dff.index.freq == None:
dff.index.freq = | to_offset(timestep) | pandas.tseries.frequencies.to_offset |
import pandas as pd
import numpy as np
# Analytics
# import timeit
# Load data locally
df_orig = pd.read_excel(r'result_data_x.xlsx', names=['index', 'type', 'date', 'code', \
'filter_one', 'filter_two', 'filter_three', 'filter_four', 'recommendation', 'easiness', 'overall', 'question_one', \
'rec_sc', 'eas_sc', 'sentiment', 'lang', 'question_one_filtered_lemmas'])
# Load data on server
# df_orig = pd.read_excel(r'bokeh-dash-x/result_data_x.xlsx', names=['index', 'type', 'date', 'code', \
# 'filter_one', 'filter_two', 'filter_three', 'filter_four', 'recommendation', 'easiness', 'overall', 'question_one', \
# 'rec_sc', 'eas_sc', 'sentiment', 'lang', 'question_one_filtered_lemmas'])
# Transform filtered lemmas string into list of strings
df_orig['question_one_filtered_lemmas'] = df_orig.loc[~df_orig['question_one_filtered_lemmas'].isna()]['question_one_filtered_lemmas'].apply(lambda x: x[2:-2].split("', '"))
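# e.g. "['good', 'service']"[2:-2].split("', '") -> ['good', 'service'] (illustrative value only)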
# Create dictionary of all plots, filter lock, filters
general_dict = {}
# Set initial filters to all
general_dict['full_name_filter_list'] = sorted(df_orig.filter_one.unique().tolist())
general_dict['full_service_filter_list'] = sorted(df_orig.filter_two.unique().tolist())
general_dict['full_factory_filter_list'] = sorted(df_orig.filter_three.unique().tolist())
general_dict['full_segment_filter_list'] = sorted(df_orig.filter_four.unique().tolist())
general_dict['type_list'] = sorted(df_orig.type.unique())
general_dict['individual_filters'] = {}
general_dict['individual_df'] = {}
general_dict['individual_filtered_df'] = {}
general_dict['individual_filtered_df_bary'] = {}
general_dict['bary_points'] = {}
general_dict['bary_p'] = {}
general_dict['individual_filtered_df_bary_p'] = {}
general_dict['individual_service_filter_list'] = {}
general_dict['typed_name_filter_list'] = {}
general_dict['typed_service_filter_list'] = {}
general_dict['typed_factory_filter_list'] = {}
general_dict['typed_segment_filter_list'] = {}
for type_filter in general_dict['type_list']:
# general_dict['individual_filters'][type_filter] = np.concatenate((df_orig.filter_one.unique(),df_orig.filter_two.unique(),df_orig.filter_three.unique(),df_orig.filter_four.unique())).tolist() + [type_filter]
general_dict['individual_filters'][type_filter] = np.concatenate((df_orig.filter_one.unique(),df_orig.filter_two.unique(),df_orig.filter_three.unique(),df_orig.filter_four.unique())).tolist() + [type_filter]
general_dict['individual_df'][type_filter] = df_orig.loc[(df_orig['type'].isin([type_filter]))]
general_dict['individual_filtered_df'][type_filter] = df_orig.loc[(df_orig['type'].isin([type_filter]))]
general_dict['individual_service_filter_list'][type_filter] = sorted(general_dict['individual_df'][type_filter].filter_two.unique().tolist())
general_dict['typed_name_filter_list'][type_filter] = general_dict['full_name_filter_list']
general_dict['typed_service_filter_list'][type_filter] = general_dict['full_service_filter_list']
general_dict['typed_factory_filter_list'][type_filter] = general_dict['full_factory_filter_list']
general_dict['typed_segment_filter_list'][type_filter] = general_dict['full_segment_filter_list']
general_dict['freq_df'] = {}
general_dict['freq_words_slice'] = {}
general_dict['freq_source'] = {}
general_dict['d_pv'], general_dict['d_uv'], general_dict['d_nv'] = (dict(),dict(),dict())
general_dict['wordcloud'], general_dict['words_plot'] = (dict(),dict())
general_dict['dict_freq_pv'], general_dict['d_freq_pv'] = (dict(),dict())
###################################################################################
################################## Visual Imports #################################
from bokeh.models import ColumnDataSource, Callback, Toggle, BoxAnnotation, LabelSet, Label, HoverTool, DataTable, TableColumn, Image, TapTool, Tap, HBar, Plot, Div, CDSView, GroupFilter, MultiChoice, MultiSelect, CheckboxButtonGroup, BooleanFilter, IndexFilter, RadioButtonGroup, Button, CustomJS
from bokeh.plotting import figure, curdoc
from bokeh.layouts import column, row, Spacer, gridplot
###################################################################################
###################################################################################
###################################################################################
################################## Common Methods #################################
def update_filters():
# common_filters = [general_dict['filter_3'].labels[i] for i in general_dict['filter_3'].active] + general_dict['filter_1'].value + general_dict['filter_4'].value
common_filters = general_dict['filter_4'].value
for type_filter in general_dict['type_list']:
# uncommon_filters = general_dict['individual_filters_vis'][type_filter].value
# general_dict['individual_filters'][type_filter] = common_filters + uncommon_filters# + [type_filter]
general_dict['individual_filters'][type_filter] = common_filters
# def update_filter(type_filter):
# common_filters = [general_dict['filter_3'].labels[i] for i in general_dict['filter_3'].active] + general_dict['filter_1'].value + general_dict['filter_4'].value
# uncommon_filters = general_dict['individual_filters_vis'][type_filter].value
# general_dict['individual_filters'][type_filter] = common_filters + uncommon_filters# + [type_filter]
def filter_df(type_filter):
filter_list = general_dict['individual_filters'][type_filter]
# general_dict['individual_filtered_df'][type_filter] = general_dict['individual_df'][type_filter].loc[(general_dict['individual_df'][type_filter]['filter_one'].isin(filter_list) & general_dict['individual_df'][type_filter]['filter_two'].isin(filter_list) & general_dict['individual_df'][type_filter]['filter_three'].isin(filter_list) & general_dict['individual_df'][type_filter]['filter_four'].isin(filter_list))].copy()
general_dict['individual_filtered_df'][type_filter] = general_dict['individual_df'][type_filter].loc[(general_dict['individual_df'][type_filter]['filter_four'].isin(filter_list))].copy()
def filter_dfs():
for type_filter in general_dict['type_list']:
filter_df(type_filter)
###################################################################################
############################## Visual 1 - Points Plot #############################
#---------------------------------------------------------------------------------#
#------------------------------- Static Background -------------------------------#
def create_points_plot_layout(points_plot_name):
# Create points plot figure
general_dict[points_plot_name] = figure(x_range=(0, 10), y_range=(0, 10), plot_width=300, plot_height=300, sizing_mode='scale_width', match_aspect=True, tools=['tap'], output_backend="webgl", title=points_plot_name.split('_')[0], title_location="above")#, lod_factor=1)
general_dict[points_plot_name].title.align = "center"
general_dict[points_plot_name].title.text_font_size = "16px"
general_dict[points_plot_name].title.text_font_style='bold'
# Hide real grid
general_dict[points_plot_name].xgrid.grid_line_color = None
general_dict[points_plot_name].ygrid.grid_line_color = None
# Define grid lines
general_dict[points_plot_name].xaxis.ticker = list(range(11))
general_dict[points_plot_name].yaxis.ticker = list(range(11))
# Create color zones
ba5 = BoxAnnotation(bottom=9, top=10, left=0, right=10, fill_alpha=0.3, fill_color='#538d22', level='underlay', line_color=None) # green
ba4 = BoxAnnotation(bottom=0, top=9, left=9, right=10, fill_alpha=0.3, fill_color='#538d22', level='underlay', line_color=None) # green
ba3 = BoxAnnotation(bottom=7, top=9, left=0, right=9, fill_alpha=1, fill_color='#fbe5d6', level='underlay', line_color=None) # orange
ba2 = BoxAnnotation(bottom=0, top=7, left=7, right=9, fill_alpha=1, fill_color='#fbe5d6', level='underlay', line_color=None) # orange
ba1 = BoxAnnotation(bottom=0, top=7, left=0, right=7, fill_alpha=0.3, fill_color='#bf0603', level='underlay',line_color=None) # red
general_dict[points_plot_name].add_layout(ba1)
general_dict[points_plot_name].add_layout(ba2)
general_dict[points_plot_name].add_layout(ba3)
general_dict[points_plot_name].add_layout(ba4)
general_dict[points_plot_name].add_layout(ba5)
#----------------------------- ^ Static Background ^ -----------------------------#
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
#-------------------------------- Utility Methods --------------------------------#
def calculate_points(type_filter):
df_tempy = general_dict['individual_filtered_df'][type_filter].copy()
if len(df_tempy) == 0:
barycenter = np.array([-10.0, -10.0])
bary_data = pd.DataFrame(columns=['recommendation', 'easiness'])
general_dict['individual_filtered_df_bary'][type_filter] = bary_data
general_dict['individual_filtered_df_bary_p'][type_filter] = barycenter
general_dict['individual_filtered_df'][type_filter] = df_tempy
return pd.DataFrame(columns=[])
arr_slice = df_tempy[['recommendation', 'easiness']].values
lidx = np.ravel_multi_index(arr_slice.T,arr_slice.max(0)+1)
unq,unqtags,counts = np.unique(lidx,return_inverse=True,return_counts=True)
df_tempy["visual_sum"] = counts[unqtags]
# Create visual barycenter with edges
barycenter = df_tempy[['recommendation', 'easiness']].astype({'recommendation':'float32', 'easiness':'float32'}).mean().to_numpy()
# Create barycenter dataframe
bary_numpy = df_tempy[['recommendation', 'easiness']].astype({'recommendation':'float32', 'easiness':'float32'}).to_numpy()
row_bary = [barycenter[0], barycenter[1]]
row_empty = np.empty((1,bary_numpy.shape[1]))
row_empty.fill(np.nan)
bary_numpy = np.insert(bary_numpy, range(1, len(bary_numpy)+1, 1), row_bary, axis=0)
bary_numpy = np.insert(bary_numpy, range(2, len(bary_numpy), 2), row_empty, axis=0)
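    # Worked sketch of the two inserts above (values are made up): for points
    # [[2, 3], [8, 9]] with barycenter [5, 6], the first insert interleaves the
    # barycenter after every point ([[2, 3], [5, 6], [8, 9], [5, 6]]) and the
    # second inserts NaN rows between the pairs ([[2, 3], [5, 6], [nan, nan],
    # [8, 9], [5, 6]]), so Bokeh renders a separate point-to-barycenter segment
    # for each observation.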
bary_data = pd.DataFrame(bary_numpy, columns=['recommendation', 'easiness'])
general_dict['individual_filtered_df'][type_filter] = df_tempy
general_dict['individual_filtered_df_bary'][type_filter] = bary_data
general_dict['individual_filtered_df_bary_p'][type_filter] = barycenter
#------------------------------ ^ Utility Methods ^ ------------------------------#
#---------------------------------------------------------------------------------#
# Create data table structure
data_columns = [
# TableColumn(field="filter_one", title="Name"),
# TableColumn(field="filter_two", title="Service"),
# TableColumn(field="filter_three", title="Factory"),
TableColumn(field="filter_four", title="Segment"),
TableColumn(field="recommendation", title="Recommendation"),
TableColumn(field="easiness", title="Easiness"),
]
# data_source = ColumnDataSource(pd.DataFrame(columns=['filter_one', 'filter_two', 'filter_three', 'filter_four', 'recommendation', 'easiness']))
data_source = ColumnDataSource(pd.DataFrame(columns=['filter_four', 'recommendation', 'easiness']))
import pandas as pd
import numpy as np
from ..auth.auth import read_credential_file, load_db_info
import os
import json
"""
auth = {}
auth_dict = {}
env_dict = {}
if os.path.exists(os.path.expanduser('~/.fastteradata')):
auth = json.load(open(os.path.expanduser('~/.fastteradata')))
auth_dict = auth["auth_dict"]
env_dict = auth["env_dict"]
"""
auth, auth_dict, env_dict = read_credential_file()
def combine_files_base(combine_type=None):
import os
concat_str = ""
file_delim = ""
remove_cmd = ""
    #Making special exceptions for Windows computers
if os.name == "nt":
file_delim = "\\"
remove_cmd = "del "
else:
file_delim = "/"
remove_cmd = "rm "
if combine_type == "vertical":
if os.name == "nt":
concat_str += "type "
else:
concat_str += "cat "
elif combine_type == "horizontal":
        #because Windows does not have a nice way to do this, we are going to read in and combine in Python instead
"""
if os.name == "nt":
concat_str += " "
else:
concat_str += "paste -d '|' "
"""
concat_str = False
else:
raise Exception("Internal Bug: Invalid combine_type")
return(concat_str, file_delim, remove_cmd)
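# Example (a sketch; the platform is an assumption): on a Linux host,
# combine_files_base("vertical") returns ("cat ", "/", "rm "), while on Windows
# it returns ("type ", "\\", "del "). For "horizontal", concat_str is False and
# the caller is expected to combine the files in Python instead.
#
#     concat_str, file_delim, remove_cmd = combine_files_base(combine_type="vertical")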
def combine_partitioned_file(script_files, combine_type=""):
concat_str, file_delim, remove_cmd = combine_files_base(combine_type=combine_type)
#First we need to add data into the file path to locate our correct files
data_files = []
for file in script_files:
l = file.split("/")
l.insert(-1,"data")
l[-1] = l[-1][7:]
data_files.append(file_delim.join(l))
#Now Build up concat string
#Remove the partition value from the filepath
if concat_str:
for f in data_files:
concat_str += f"{f} "
concat_str += "> "
form = data_files[0].split("/")
last_form = form[-1].split("_")
del last_form[-2]
fixed = "_".join(last_form)
form[-1] = fixed
#join and execute command
if concat_str:
concat_str += file_delim.join(form)
c = concat_str.split(" ")
#print("concat stringg.....")
concat_str = concat_str.replace("\\\\","\\")
concat_str = concat_str.replace("//","/")
#print(concat_str)
#print(data_files)
#clean data_files
data_files = [x.replace("\\\\","\\") for x in data_files]
data_files = [x.replace("//","/") for x in data_files]
return(concat_str, data_files, remove_cmd)
def concat_files(concat_str):
from subprocess import call
call(concat_str, shell=True)
return
def concat_files_horizontal(data_file, data_files, col_list, primary_keys, dtype_dict):
_df = pd.DataFrame()
#Combine data files in memory
print("Concatenating Horizontally")
for clist, d_file in zip(col_list,data_files):
#print(d_file)
#print(clist)
#print(primary_keys)
df = pd.DataFrame()
try:
            df = pd.read_csv(d_file, names=clist, sep="|", dtype=dtype_dict, na_values=["?","","~","!","null"])
# fix_processing_gaps.py
# short script to fix missing tiles for each variable
# to be run post completion of processing with process_tiles.py
# <NAME> <EMAIL> 11 May 2021
# NB: uses scandir for speed, on OPALS Shell an install might be required with
# python -m pip install scandir --user
# Dependencies
import os
import glob
import pandas
import re
import scandir
import copy
import sys
import subprocess
import shutil
from dklidar import settings
## 1) Determine output folder structure
# Status
print('#' * 80 + 'Check EcoDes-DK processing outputs for completeness' + '\n\n')
print('Preparing environment...'),
# Initiate list
folders = []
# Check for subfolders present (max depth = 1)
for folder in scandir.scandir(settings.output_folder):
if folder.is_dir():
sub_folders = [sub_folder.path for sub_folder in scandir.scandir(folder.path) if sub_folder.is_dir()]
if len(sub_folders) > 0:
for sub_folder in sub_folders:
folders.append(sub_folder)
else:
folders.append(folder.path)
# Remove tile_footprints folder if present
folders = [folder for folder in folders if not bool(re.match('.*tile_footprints.*', folder))]
folders = [folder for folder in folders if not bool(re.match('.*point_source_proportion.*', folder))]
folders = [folder for folder in folders if not bool(re.match('.*point_source_counts.*', folder))]
## Get reference set of tiles based on dtm_10m
dtm_10m = [folder for folder in folders if bool(re.match('.*dtm_10m.*', folder))][0]
dtm_10m_tiles = [re.sub('.*_(\d*_\d*).tif', '\g<1>', file_name) for file_name in glob.glob(dtm_10m + '/*.tif')]
dtm_10m_tiles = set(dtm_10m_tiles)
print(' done.')
## 2) Check completeness of tiles for all variables
# Status
print('Scanning tiles for...')
# Initiate empty dictionary
missing_tiles = {}
# Scan folders for missing tiles
for folder in folders:
variable_name = re.sub('.*[\\\\\/]', '', folder)
print('\t' + variable_name)
tiles = [re.sub('.*_(\d*_\d*).tif', '\g<1>', file_name) for file_name in glob.glob(folder + '/*.tif')]
tiles = set(tiles)
tiles_missing = dtm_10m_tiles - tiles
missing_tiles.update({variable_name: tiles_missing})
# Status
print('Scan complete.\n')
print('Exporting missing tile_ids to csv...'),
# Save missing tiles for each variable to csv
missing_tiles_df_list = []
for variable in missing_tiles.keys():
    missing_tiles_df_local = pandas.DataFrame(missing_tiles[variable], columns = ['tile_id'])
    missing_tiles_df_local['variable'] = variable
    missing_tiles_df_list.append(missing_tiles_df_local)
# Concatenate list of dfs into one df and export to csv
missing_tiles_df = pandas.concat(missing_tiles_df_list)
import numpy as np
import pandas as pd
import multiprocessing
import time
from sklearn.metrics import pairwise_distances
import scanpy as sc
from sklearn.metrics.pairwise import pairwise_kernels
import json
from random import sample
import random
from . import iONMF
import sys
import re
import umap
from datetime import datetime
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential, load_model
from keras.utils import np_utils
import numba
from sklearn.utils import resample
from scipy.sparse import csr_matrix
from .utils import *
import os
import pkg_resources
def gluer(ref_obj,
query_obj,
joint_rank=20,
joint_max_iter=200,
joint_random_seed=21,
mnn_ref=30,
mnn_query=30,
filter_n1=50,
filter_n2=50,
N=3,
n_jobs=1,
n_features=15000,
is_impute=True,
filter_n_features=[15000, 15000],
pairs=None,
deep_random_seed=44,
deepmodel_epoch=500,
batch_categories=['1', '2'],
model=None,
validation_split=.1,
verbose=0):
"""Short summary.
Parameters
----------
ref_obj : h5ad file
The AnnData data object of the reference data.
query_obj : type
Description of parameter `query_obj`.
joint_rank : type
Description of parameter `joint_rank`.
joint_max_iter : type
Description of parameter `joint_max_iter`.
joint_random_seed : type
Description of parameter `joint_random_seed`.
mnn_ref : type
Description of parameter `mnn_ref`.
mnn_query : type
Description of parameter `mnn_query`.
filter_n1 : type
Description of parameter `filter_n1`.
filter_n2 : type
Description of parameter `filter_n2`.
N : type
Description of parameter `N`.
n_jobs : type
Description of parameter `n_jobs`.
n_features : type
Description of parameter `n_features`.
is_impute : type
Description of parameter `is_impute`.
filter_n_features : type
Description of parameter `filter_n_features`.
pairs : type
Description of parameter `pairs`.
deep_random_seed : type
Description of parameter `deep_random_seed`.
deepmodel_epoch : type
Description of parameter `deepmodel_epoch`.
batch_categories : type
Description of parameter `batch_categories`.
model : type
Description of parameter `model`.
validation_split : type
Description of parameter `validation_split`.
verbose : type
Description of parameter `verbose`.
Returns
-------
type
Description of returned object.
"""
start_time_all = time.time()
sys.stdout.write("=========================================== Gluer =================================================\n" +
"Four steps are as follows:\n" +
"Step 1: Jointly dimension reduction model\n" +
"Step 2: Search the cell pairs between the reference and the query\n" +
"Step 3: Run the deep learning model\n" +
"Step 4: Summarize the output\n" +
"===================================================================================================\n")
sys.stdout.flush()
common_feature = np.intersect1d(ref_obj.var.sort_values(by=['vst_variance_standardized'],
ascending=False).index.values[:n_features],
query_obj.var.sort_values(by=['vst_variance_standardized'],
ascending=False).index.values[:n_features])
common_feature_selected = np.intersect1d(ref_obj.var.sort_values(by=['vst_variance_standardized'],
ascending=False).index.values[:filter_n_features[0]],
query_obj.var.sort_values(by=['vst_variance_standardized'],
ascending=False).index.values[:filter_n_features[1]])
data_ref_raw = getDF(ref_obj)
data_query_raw = getDF(query_obj)
# prepare the reference data and query data for the integration
data_ref = data_ref_raw.loc[:, common_feature].to_numpy()
data_query = [data_query_raw.loc[:, common_feature].to_numpy()]
data_ref_selected = data_ref_raw.loc[:, common_feature_selected].to_numpy()
data_query_selected = data_query_raw.loc[:, common_feature_selected].to_numpy()
if is_impute:
weights = getWeight(ref_obj.obsm['umap_cell_embeddings'])
data_ref = np.dot(data_ref.T, weights).T
    # prepare the dataset for the joint dimension reduction
sys.stdout.write(datetime.now().strftime('%Y-%m-%d %H:%M:%S') +
" >> Step 1: Jointly dimension reduction model ... ")
start_time = time.time()
dataset = {'data' + str(i + 1): data.T for i, data in enumerate(data_query)}
dataset['ref'] = data_ref.T
# setup the jointly dimension reduction models
model_joint = iONMF.iONMF(rank=joint_rank,
max_iter=joint_max_iter,
alpha=1,
random_seed=21)
model_joint.fit(dataset)
msg = "Done %s mins \n" % round((time.time() - start_time) / 60, 2)
sys.stdout.write(msg)
sys.stdout.flush()
N_ref_obj = data_ref.shape[0]
# define the list to store the intermediate results
data_ref_name = "ref"
data_ref = dataset[data_ref_name].T
pair_ref_query_list = list()
model_deepLearning_list = list()
y_pred_ref_list = list()
y_pred_ref_list.append(model_joint.basis_[data_ref_name].T)
for j in range(1, len(dataset)):
sys.stdout.write(datetime.now().strftime('%Y-%m-%d %H:%M:%S') +
" >> Step 2-" + str(j) +
": Search the cell pairs ... ")
data_query_name = "data" + str(j)
data_query = dataset[data_query_name].T
if pairs is None:
# calculate the similarity between reference data and query data
similarity_ref_query = pd.DataFrame(
pairwise_kernels(
model_joint.basis_[data_ref_name].T,
model_joint.basis_[data_query_name].T,
metric='cosine')
)
# raw similarity
similarity_selected = pd.DataFrame(
pairwise_kernels(data_ref_selected,
data_query_selected,
metric='cosine')
)
# find out the cell pairs between reference data and query data
ref_pair, query_pair = find_mutual_nn(similarity_ref_query,
N1=mnn_ref,
N2=mnn_query,
n_jobs=n_jobs)
            pair_ref_query = pd.DataFrame([ref_pair, query_pair])
"""Helper methods."""
import copy
import glob
import errno
import os.path
import time
import calendar
import numpy
import pandas
import matplotlib.colors
from matplotlib import pyplot
import keras
import tensorflow.keras as tf_keras
import tensorflow.keras.layers as layers
import tensorflow.python.keras.backend as K
from scipy.interpolate import interp1d
from scipy.ndimage.filters import gaussian_filter
from scipy.spatial.distance import cdist
from scipy.cluster.hierarchy import linkage, dendrogram
from sklearn.metrics import roc_auc_score
from sklearn.metrics import auc as sklearn_auc
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet, \
SGDClassifier
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.cluster import KMeans, AgglomerativeClustering
from ai2es_xai_course.plotting import evaluation_plotting
from ai2es_xai_course.utils import keras_metrics as custom_metrics
# TODO(thunderhoser): Split this into different modules.
# Variable names.
METADATA_COLUMNS_ORIG = [
'Step_ID', 'Track_ID', 'Ensemble_Name', 'Ensemble_Member', 'Run_Date',
'Valid_Date', 'Forecast_Hour', 'Valid_Hour_UTC'
]
EXTRANEOUS_COLUMNS_ORIG = [
'Duration', 'Centroid_Lon', 'Centroid_Lat', 'Centroid_X', 'Centroid_Y',
'Storm_Motion_U', 'Storm_Motion_V', 'Matched', 'Max_Hail_Size',
'Num_Matches', 'Shape', 'Location', 'Scale'
]
TARGET_NAME_ORIG = 'RVORT1_MAX-future_max'
TARGET_NAME = 'max_future_vorticity_s01'
BINARIZED_TARGET_NAME = 'strong_future_rotation_flag'
AREA_NAME = 'area_km2'
MAJOR_AXIS_NAME = 'major_axis_km'
MINOR_AXIS_NAME = 'minor_axis_km'
ORIENTATION_NAME = 'orientation_deg'
METADATA_COLUMNS_ORIG_TO_NEW = {
'Step_ID': 'storm_object_name',
'Track_ID': 'storm_cell_name',
'Ensemble_Name': 'ensemble_name',
'Ensemble_Member': 'ensemble_member_name',
'Run_Date': 'init_time_string',
'Valid_Date': 'valid_time_string',
'Forecast_Hour': 'lead_time_hours',
'Valid_Hour_UTC': 'valid_hour'
}
TARGET_COLUMNS_ORIG_TO_NEW = {
TARGET_NAME_ORIG: TARGET_NAME
}
PREDICTOR_COLUMNS_ORIG_TO_NEW = {
'REFL_COM_mean': 'composite_refl_mean_dbz',
'REFL_COM_max': 'composite_refl_max_dbz',
'REFL_COM_min': 'composite_refl_min_dbz',
'REFL_COM_std': 'composite_refl_stdev_dbz',
'REFL_COM_percentile_10': 'composite_refl_prctile10_dbz',
'REFL_COM_percentile_25': 'composite_refl_prctile25_dbz',
'REFL_COM_percentile_50': 'composite_refl_median_dbz',
'REFL_COM_percentile_75': 'composite_refl_prctile75_dbz',
'REFL_COM_percentile_90': 'composite_refl_prctile90_dbz',
'U10_mean': 'u_wind_10metres_mean_m_s01',
'U10_max': 'u_wind_10metres_max_m_s01',
'U10_min': 'u_wind_10metres_min_m_s01',
'U10_std': 'u_wind_10metres_stdev_m_s01',
'U10_percentile_10': 'u_wind_10metres_prctile10_m_s01',
'U10_percentile_25': 'u_wind_10metres_prctile25_m_s01',
'U10_percentile_50': 'u_wind_10metres_median_m_s01',
'U10_percentile_75': 'u_wind_10metres_prctile75_m_s01',
'U10_percentile_90': 'u_wind_10metres_prctile90_m_s01',
'V10_mean': 'v_wind_10metres_mean_m_s01',
'V10_max': 'v_wind_10metres_max_m_s01',
'V10_min': 'v_wind_10metres_min_m_s01',
'V10_std': 'v_wind_10metres_stdev_m_s01',
'V10_percentile_10': 'v_wind_10metres_prctile10_m_s01',
'V10_percentile_25': 'v_wind_10metres_prctile25_m_s01',
'V10_percentile_50': 'v_wind_10metres_median_m_s01',
'V10_percentile_75': 'v_wind_10metres_prctile75_m_s01',
'V10_percentile_90': 'v_wind_10metres_prctile90_m_s01',
'T2_mean': 'temperature_2metres_mean_kelvins',
'T2_max': 'temperature_2metres_max_kelvins',
'T2_min': 'temperature_2metres_min_kelvins',
'T2_std': 'temperature_2metres_stdev_kelvins',
'T2_percentile_10': 'temperature_2metres_prctile10_kelvins',
'T2_percentile_25': 'temperature_2metres_prctile25_kelvins',
'T2_percentile_50': 'temperature_2metres_median_kelvins',
'T2_percentile_75': 'temperature_2metres_prctile75_kelvins',
'T2_percentile_90': 'temperature_2metres_prctile90_kelvins',
'area': AREA_NAME,
'eccentricity': 'eccentricity',
'major_axis_length': MAJOR_AXIS_NAME,
'minor_axis_length': MINOR_AXIS_NAME,
'orientation': ORIENTATION_NAME
}
MAE_KEY = 'mean_absolute_error'
RMSE_KEY = 'root_mean_squared_error'
MEAN_BIAS_KEY = 'mean_bias'
MAE_SKILL_SCORE_KEY = 'mae_skill_score'
MSE_SKILL_SCORE_KEY = 'mse_skill_score'
MAX_PEIRCE_SCORE_KEY = 'max_peirce_score'
AUC_KEY = 'area_under_roc_curve'
MAX_CSI_KEY = 'max_csi'
BRIER_SCORE_KEY = 'brier_score'
BRIER_SKILL_SCORE_KEY = 'brier_skill_score'
PREDICTORS_KEY = 'predictor_matrix'
PERMUTED_FLAGS_KEY = 'permuted_flags'
PERMUTED_INDICES_KEY = 'permuted_predictor_indices'
PERMUTED_COSTS_KEY = 'permuted_cost_matrix'
DEPERMUTED_INDICES_KEY = 'depermuted_predictor_indices'
DEPERMUTED_COSTS_KEY = 'depermuted_cost_matrix'
HIT_INDICES_KEY = 'hit_indices'
MISS_INDICES_KEY = 'miss_indices'
FALSE_ALARM_INDICES_KEY = 'false_alarm_indices'
CORRECT_NULL_INDICES_KEY = 'correct_null_indices'
# Plotting constants.
FIGURE_WIDTH_INCHES = 10
FIGURE_HEIGHT_INCHES = 10
LARGE_FIGURE_WIDTH_INCHES = 15
LARGE_FIGURE_HEIGHT_INCHES = 15
DEFAULT_GRAPH_LINE_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
DEFAULT_GRAPH_LINE_WIDTH = 2
BAR_GRAPH_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
BAR_GRAPH_EDGE_WIDTH = 2
BAR_GRAPH_FONT_SIZE = 14
BAR_GRAPH_FONT_COLOUR = numpy.array([217, 95, 2], dtype=float) / 255
GREEN_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
ORANGE_COLOUR = numpy.array([217, 95, 2], dtype=float) / 255
PURPLE_COLOUR = numpy.array([117, 112, 179], dtype=float) / 255
GREY_COLOUR = numpy.full(3, 152. / 255)
HISTOGRAM_EDGE_WIDTH = 1.5
HISTOGRAM_FACE_COLOUR = numpy.full(3, 152. / 255)
HISTOGRAM_FACE_COLOUR = matplotlib.colors.to_rgba(HISTOGRAM_FACE_COLOUR, 0.5)
HISTOGRAM_EDGE_COLOUR = numpy.full(3, 152. / 255)
FONT_SIZE = 20
pyplot.rc('font', size=FONT_SIZE)
pyplot.rc('axes', titlesize=FONT_SIZE)
pyplot.rc('axes', labelsize=FONT_SIZE)
pyplot.rc('xtick', labelsize=FONT_SIZE)
pyplot.rc('ytick', labelsize=FONT_SIZE)
pyplot.rc('legend', fontsize=FONT_SIZE)
pyplot.rc('figure', titlesize=FONT_SIZE)
# Misc constants.
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
MINOR_SEPARATOR_STRING = '\n\n' + '-' * 50 + '\n\n'
DATE_FORMAT = '%Y%m%d'
DATE_FORMAT_REGEX = '[0-9][0-9][0-9][0-9][0-1][0-9][0-3][0-9]'
GRID_SPACING_KM = 3.
RADIANS_TO_DEGREES = 180. / numpy.pi
RANDOM_SEED = 6695
LAMBDA_TOLERANCE = 1e-10
ELU_FUNCTION_NAME = 'elu'
RELU_FUNCTION_NAME = 'relu'
SELU_FUNCTION_NAME = 'selu'
TANH_FUNCTION_NAME = 'tanh'
SIGMOID_FUNCTION_NAME = 'sigmoid'
ACTIVATION_FUNCTION_NAMES = [
ELU_FUNCTION_NAME, RELU_FUNCTION_NAME, SELU_FUNCTION_NAME,
TANH_FUNCTION_NAME, SIGMOID_FUNCTION_NAME
]
KERNEL_INITIALIZER_NAME = 'glorot_uniform'
BIAS_INITIALIZER_NAME = 'zeros'
METRIC_FUNCTION_LIST = [
custom_metrics.accuracy, custom_metrics.binary_accuracy,
custom_metrics.binary_csi, custom_metrics.binary_frequency_bias,
custom_metrics.binary_pod, custom_metrics.binary_pofd,
custom_metrics.binary_peirce_score, custom_metrics.binary_success_ratio,
custom_metrics.binary_focn
]
# TODO(thunderhoser): Remove word "binary" from these scores.
METRIC_FUNCTION_DICT = {
'accuracy': custom_metrics.accuracy,
'binary_accuracy': custom_metrics.binary_accuracy,
'binary_csi': custom_metrics.binary_csi,
'binary_frequency_bias': custom_metrics.binary_frequency_bias,
'binary_pod': custom_metrics.binary_pod,
'binary_pofd': custom_metrics.binary_pofd,
'binary_peirce_score': custom_metrics.binary_peirce_score,
'binary_success_ratio': custom_metrics.binary_success_ratio,
'binary_focn': custom_metrics.binary_focn
}
DEFAULT_NEURON_COUNTS = numpy.array([1000, 178, 32, 6, 1], dtype=int)
DEFAULT_DROPOUT_RATES = numpy.array([0.5, 0.5, 0.5, 0.5, 0])
DEFAULT_INNER_ACTIV_FUNCTION_NAME = copy.deepcopy(RELU_FUNCTION_NAME)
DEFAULT_INNER_ACTIV_FUNCTION_ALPHA = 0.2
DEFAULT_OUTPUT_ACTIV_FUNCTION_NAME = copy.deepcopy(SIGMOID_FUNCTION_NAME)
DEFAULT_OUTPUT_ACTIV_FUNCTION_ALPHA = 0.
DEFAULT_L1_WEIGHT = 0.
DEFAULT_L2_WEIGHT = 0.001
PLATEAU_PATIENCE_EPOCHS = 5
PLATEAU_LEARNING_RATE_MULTIPLIER = 0.6
PLATEAU_COOLDOWN_EPOCHS = 0
EARLY_STOPPING_PATIENCE_EPOCHS = 10
LOSS_PATIENCE = 0.
DEFAULT_NUM_BOOTSTRAP_REPS = 1000
ORIGINAL_COST_KEY = 'orig_cost_estimates'
BEST_PREDICTORS_KEY = 'best_predictor_names'
BEST_COSTS_KEY = 'best_cost_matrix'
STEP1_PREDICTORS_KEY = 'step1_predictor_names'
STEP1_COSTS_KEY = 'step1_cost_matrix'
BACKWARDS_FLAG_KEY = 'is_backwards_test'
def _tabular_file_name_to_date(csv_file_name):
"""Parses date from name of tabular file.
:param csv_file_name: Path to input file.
:return: date_string: Date (format "yyyymmdd").
"""
pathless_file_name = os.path.split(csv_file_name)[-1]
date_string = pathless_file_name.replace(
'track_step_NCARSTORM_d01_', ''
).replace('-0000.csv', '')
# Verify.
time_string_to_unix(time_string=date_string, time_format=DATE_FORMAT)
return date_string
def _remove_future_data(predictor_table):
"""Removes future data from predictors.
:param predictor_table: pandas DataFrame with predictor values. Each row is
one storm object.
:return: predictor_table: Same but with fewer columns.
"""
predictor_names = list(predictor_table)
columns_to_remove = [p for p in predictor_names if 'future' in p]
return predictor_table.drop(columns_to_remove, axis=1, inplace=False)
def _lambdas_to_sklearn_inputs(lambda1, lambda2):
"""Converts lambdas to input arguments for scikit-learn.
:param lambda1: L1-regularization weight.
:param lambda2: L2-regularization weight.
:return: alpha: Input arg for scikit-learn model.
:return: l1_ratio: Input arg for scikit-learn model.
"""
return lambda1 + lambda2, lambda1 / (lambda1 + lambda2)
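# Worked example: lambda1=0.3 and lambda2=0.7 give alpha = 0.3 + 0.7 = 1.0 and
# l1_ratio = 0.3 / 1.0 = 0.3. A sketch of how the outputs could be passed to
# scikit-learn (the model choice is illustrative; ElasticNet is imported above):
#
#     alpha, l1_ratio = _lambdas_to_sklearn_inputs(lambda1=0.3, lambda2=0.7)
#     model_object = ElasticNet(alpha=alpha, l1_ratio=l1_ratio)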
def _get_reliability_curve(actual_values, predicted_values, num_bins,
max_bin_edge, invert=False):
"""Computes reliability curve for one target variable.
E = number of examples
B = number of bins
:param actual_values: length-E numpy array of actual values.
:param predicted_values: length-E numpy array of predicted values.
:param num_bins: Number of bins (points in curve).
:param max_bin_edge: Value at upper edge of last bin.
:param invert: Boolean flag. If True, will return inverted reliability
curve, which bins by target value and relates target value to
conditional mean prediction. If False, will return normal reliability
curve, which bins by predicted value and relates predicted value to
conditional mean observation (target).
:return: mean_predictions: length-B numpy array of x-coordinates.
:return: mean_observations: length-B numpy array of y-coordinates.
:return: example_counts: length-B numpy array with num examples in each bin.
"""
max_bin_edge = max([max_bin_edge, numpy.finfo(float).eps])
bin_cutoffs = numpy.linspace(0., max_bin_edge, num=num_bins + 1)
bin_index_by_example = numpy.digitize(
actual_values if invert else predicted_values, bin_cutoffs, right=False
) - 1
bin_index_by_example[bin_index_by_example < 0] = 0
bin_index_by_example[bin_index_by_example > num_bins - 1] = num_bins - 1
mean_predictions = numpy.full(num_bins, numpy.nan)
mean_observations = numpy.full(num_bins, numpy.nan)
example_counts = numpy.full(num_bins, -1, dtype=int)
for i in range(num_bins):
these_example_indices = numpy.where(bin_index_by_example == i)[0]
example_counts[i] = len(these_example_indices)
mean_predictions[i] = numpy.mean(
predicted_values[these_example_indices]
)
mean_observations[i] = numpy.mean(actual_values[these_example_indices])
return mean_predictions, mean_observations, example_counts
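# Illustrative usage (a sketch; the arrays shown are made up, not real data):
#
#     mean_predictions, mean_observations, example_counts = (
#         _get_reliability_curve(
#             actual_values=numpy.array([0.1, 0.4, 0.9]),
#             predicted_values=numpy.array([0.2, 0.5, 0.7]),
#             num_bins=10, max_bin_edge=1., invert=False
#         )
#     )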
def _add_colour_bar(
axes_object, colour_map_object, values_to_colour, min_colour_value,
max_colour_value, colour_norm_object=None,
orientation_string='vertical', extend_min=True, extend_max=True):
"""Adds colour bar to existing axes.
:param axes_object: Existing axes (instance of
`matplotlib.axes._subplots.AxesSubplot`).
:param colour_map_object: Colour scheme (instance of
`matplotlib.pyplot.cm`).
:param values_to_colour: numpy array of values to colour.
:param min_colour_value: Minimum value in colour scheme.
:param max_colour_value: Max value in colour scheme.
:param colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`,
defining the scale of the colour map. If `colour_norm_object is None`,
will assume that scale is linear.
:param orientation_string: Orientation of colour bar ("vertical" or
"horizontal").
:param extend_min: Boolean flag. If True, the bottom of the colour bar will
have an arrow. If False, it will be a flat line, suggesting that lower
values are not possible.
:param extend_max: Same but for top of colour bar.
:return: colour_bar_object: Colour bar (instance of
`matplotlib.pyplot.colorbar`) created by this method.
"""
if colour_norm_object is None:
colour_norm_object = matplotlib.colors.Normalize(
vmin=min_colour_value, vmax=max_colour_value, clip=False
)
scalar_mappable_object = pyplot.cm.ScalarMappable(
cmap=colour_map_object, norm=colour_norm_object
)
scalar_mappable_object.set_array(values_to_colour)
if extend_min and extend_max:
extend_string = 'both'
elif extend_min:
extend_string = 'min'
elif extend_max:
extend_string = 'max'
else:
extend_string = 'neither'
if orientation_string == 'horizontal':
padding = 0.075
else:
padding = 0.05
colour_bar_object = pyplot.colorbar(
ax=axes_object, mappable=scalar_mappable_object,
orientation=orientation_string, pad=padding, extend=extend_string,
shrink=0.8
)
colour_bar_object.ax.tick_params(labelsize=FONT_SIZE)
return colour_bar_object
def _get_points_in_roc_curve(observed_labels, forecast_probabilities):
"""Creates points for ROC curve.
E = number of examples
T = number of binarization thresholds
:param observed_labels: length-E numpy array of class labels (integers in
0...1).
:param forecast_probabilities: length-E numpy array with forecast
probabilities of label = 1.
:return: pofd_by_threshold: length-T numpy array of POFD (probability of
false detection) values.
:return: pod_by_threshold: length-T numpy array of POD (probability of
detection) values.
"""
assert numpy.all(numpy.logical_or(
observed_labels == 0, observed_labels == 1
))
assert numpy.all(numpy.logical_and(
forecast_probabilities >= 0, forecast_probabilities <= 1
))
observed_labels = observed_labels.astype(int)
binarization_thresholds = numpy.linspace(0, 1, num=1001, dtype=float)
num_thresholds = len(binarization_thresholds)
pofd_by_threshold = numpy.full(num_thresholds, numpy.nan)
pod_by_threshold = numpy.full(num_thresholds, numpy.nan)
for k in range(num_thresholds):
these_forecast_labels = (
forecast_probabilities >= binarization_thresholds[k]
).astype(int)
this_num_hits = numpy.sum(numpy.logical_and(
these_forecast_labels == 1, observed_labels == 1
))
this_num_false_alarms = numpy.sum(numpy.logical_and(
these_forecast_labels == 1, observed_labels == 0
))
this_num_misses = numpy.sum(numpy.logical_and(
these_forecast_labels == 0, observed_labels == 1
))
this_num_correct_nulls = numpy.sum(numpy.logical_and(
these_forecast_labels == 0, observed_labels == 0
))
try:
pofd_by_threshold[k] = (
float(this_num_false_alarms) /
(this_num_false_alarms + this_num_correct_nulls)
)
except ZeroDivisionError:
pass
try:
pod_by_threshold[k] = (
float(this_num_hits) / (this_num_hits + this_num_misses)
)
except ZeroDivisionError:
pass
pod_by_threshold = numpy.array([1.] + pod_by_threshold.tolist() + [0.])
pofd_by_threshold = numpy.array([1.] + pofd_by_threshold.tolist() + [0.])
return pofd_by_threshold, pod_by_threshold
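# Illustrative usage (a sketch; `observed_labels` and `forecast_probabilities`
# are assumed to be 1-D numpy arrays as described in the docstring). The points
# can be integrated with sklearn_auc, which is imported above:
#
#     pofd_by_threshold, pod_by_threshold = _get_points_in_roc_curve(
#         observed_labels, forecast_probabilities
#     )
#     sort_indices = numpy.argsort(pofd_by_threshold)
#     area_under_curve = sklearn_auc(
#         pofd_by_threshold[sort_indices], pod_by_threshold[sort_indices]
#     )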
def _get_points_in_perf_diagram(observed_labels, forecast_probabilities):
"""Creates points for performance diagram.
E = number of examples
T = number of binarization thresholds
:param observed_labels: length-E numpy array of class labels (integers in
0...1).
:param forecast_probabilities: length-E numpy array with forecast
probabilities of label = 1.
:return: pod_by_threshold: length-T numpy array of POD (probability of
detection) values.
:return: success_ratio_by_threshold: length-T numpy array of success ratios.
"""
assert numpy.all(numpy.logical_or(
observed_labels == 0, observed_labels == 1
))
assert numpy.all(numpy.logical_and(
forecast_probabilities >= 0, forecast_probabilities <= 1
))
observed_labels = observed_labels.astype(int)
binarization_thresholds = numpy.linspace(0, 1, num=1001, dtype=float)
num_thresholds = len(binarization_thresholds)
pod_by_threshold = numpy.full(num_thresholds, numpy.nan)
success_ratio_by_threshold = numpy.full(num_thresholds, numpy.nan)
for k in range(num_thresholds):
these_forecast_labels = (
forecast_probabilities >= binarization_thresholds[k]
).astype(int)
this_num_hits = numpy.sum(numpy.logical_and(
these_forecast_labels == 1, observed_labels == 1
))
this_num_false_alarms = numpy.sum(numpy.logical_and(
these_forecast_labels == 1, observed_labels == 0
))
this_num_misses = numpy.sum(numpy.logical_and(
these_forecast_labels == 0, observed_labels == 1
))
try:
pod_by_threshold[k] = (
float(this_num_hits) / (this_num_hits + this_num_misses)
)
except ZeroDivisionError:
pass
try:
success_ratio_by_threshold[k] = (
float(this_num_hits) / (this_num_hits + this_num_false_alarms)
)
except ZeroDivisionError:
pass
pod_by_threshold = numpy.array([1.] + pod_by_threshold.tolist() + [0.])
success_ratio_by_threshold = numpy.array(
[0.] + success_ratio_by_threshold.tolist() + [1.]
)
return pod_by_threshold, success_ratio_by_threshold
def _do_activation(input_values, function_name, slope_param=0.2):
"""Runs input array through activation function.
:param input_values: numpy array (any shape).
:param function_name: Name of activation function.
:param slope_param: Slope parameter (alpha) for activation function. Used
only for eLU and ReLU.
:return: output_values: Same as `input_values` but post-activation.
"""
assert function_name in ACTIVATION_FUNCTION_NAMES
input_object = K.placeholder()
if function_name == ELU_FUNCTION_NAME:
function_object = K.function(
[input_object],
[layers.ELU(alpha=slope_param)(input_object)]
)
elif function_name == RELU_FUNCTION_NAME:
function_object = K.function(
[input_object],
[layers.LeakyReLU(alpha=slope_param)(input_object)]
)
else:
function_object = K.function(
[input_object],
[layers.Activation(function_name)(input_object)]
)
return function_object([input_values])[0]
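# Illustrative usage (a sketch): leaky ReLU with the default slope of 0.2
# multiplies negative inputs by 0.2 and passes positive inputs through, so
#
#     _do_activation(numpy.array([-2., -1., 0., 1., 2.]), RELU_FUNCTION_NAME)
#
# returns approximately [-0.4, -0.2, 0., 1., 2.].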
def _get_weight_regularizer(l1_weight, l2_weight):
"""Creates regularizer for neural-net weights.
:param l1_weight: L1 regularization weight. This "weight" is not to be
confused with those being regularized (weights learned by the net).
:param l2_weight: L2 regularization weight.
:return: regularizer_object: Instance of `keras.regularizers.l1_l2`.
"""
l1_weight = numpy.nanmax(numpy.array([l1_weight, 0.]))
l2_weight = numpy.nanmax(numpy.array([l2_weight, 0.]))
return keras.regularizers.l1_l2(l1=l1_weight, l2=l2_weight)
def _get_dense_layer(num_output_units, weight_regularizer=None):
"""Creates dense (fully connected) layer.
:param num_output_units: Number of output units (or "features" or
"neurons").
:param weight_regularizer: Will be used to regularize weights in the new
layer. This may be instance of `keras.regularizers` or None (if you
want no regularization).
:return: layer_object: Instance of `keras.layers.Dense`.
"""
return keras.layers.Dense(
num_output_units, activation=None, use_bias=True,
kernel_initializer=KERNEL_INITIALIZER_NAME,
bias_initializer=BIAS_INITIALIZER_NAME,
kernel_regularizer=weight_regularizer,
bias_regularizer=weight_regularizer
)
def _get_activation_layer(function_name, slope_param=0.2):
"""Creates activation layer.
:param function_name: Name of activation function.
:param slope_param: Slope parameter (alpha) for activation function. Used
only for eLU and ReLU.
:return: layer_object: Instance of `keras.layers.Activation`,
`keras.layers.ELU`, or `keras.layers.LeakyReLU`.
"""
assert function_name in ACTIVATION_FUNCTION_NAMES
if function_name == ELU_FUNCTION_NAME:
return keras.layers.ELU(alpha=slope_param)
if function_name == RELU_FUNCTION_NAME:
if slope_param <= 0:
return keras.layers.ReLU()
return keras.layers.LeakyReLU(alpha=slope_param)
return keras.layers.Activation(function_name)
def _get_dropout_layer(dropout_fraction):
"""Creates dropout layer.
:param dropout_fraction: Fraction of weights to drop.
:return: layer_object: Instance of `keras.layers.Dropout`.
"""
assert dropout_fraction > 0.
assert dropout_fraction < 1.
return keras.layers.Dropout(rate=dropout_fraction)
def _get_batch_norm_layer():
"""Creates batch-normalization layer.
:return: Instance of `keras.layers.BatchNormalization`.
"""
return keras.layers.BatchNormalization(
axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True
)
def _mkdir_recursive_if_necessary(directory_name=None, file_name=None):
"""Creates directory if necessary (i.e., doesn't already exist).
This method checks for the argument `directory_name` first. If
`directory_name` is None, this method checks for `file_name` and extracts
the directory.
:param directory_name: Path to local directory.
:param file_name: Path to local file.
"""
if directory_name is None:
directory_name = os.path.dirname(file_name)
if directory_name == '':
return
try:
os.makedirs(directory_name)
except OSError as this_error:
if this_error.errno == errno.EEXIST and os.path.isdir(directory_name):
pass
else:
raise
def apply_gaussian_filter(input_matrix, e_folding_radius_grid_cells):
"""Applies Gaussian filter to any-dimensional grid.
:param input_matrix: numpy array with any dimensions.
:param e_folding_radius_grid_cells: e-folding radius (num grid cells).
:return: output_matrix: numpy array after smoothing (same dimensions as
input).
"""
assert e_folding_radius_grid_cells >= 0.
return gaussian_filter(
input_matrix, sigma=e_folding_radius_grid_cells, order=0, mode='nearest'
)
def create_paneled_figure(
num_rows, num_columns, figure_width_inches=FIGURE_WIDTH_INCHES,
figure_height_inches=FIGURE_HEIGHT_INCHES,
horizontal_spacing=0.075, vertical_spacing=0., shared_x_axis=False,
shared_y_axis=False, keep_aspect_ratio=True):
"""Creates paneled figure.
This method only initializes the panels. It does not plot anything.
J = number of panel rows
K = number of panel columns
:param num_rows: J in the above discussion.
:param num_columns: K in the above discussion.
:param figure_width_inches: Width of the entire figure (including all
panels).
:param figure_height_inches: Height of the entire figure (including all
panels).
:param horizontal_spacing: Spacing (in figure-relative coordinates, from
0...1) between adjacent panel columns.
:param vertical_spacing: Spacing (in figure-relative coordinates, from
0...1) between adjacent panel rows.
:param shared_x_axis: Boolean flag. If True, all panels will share the same
x-axis.
:param shared_y_axis: Boolean flag. If True, all panels will share the same
y-axis.
:param keep_aspect_ratio: Boolean flag. If True, the aspect ratio of each
panel will be preserved (reflect the aspect ratio of the data plotted
therein).
:return: figure_object: Figure handle (instance of
`matplotlib.figure.Figure`).
:return: axes_object_matrix: J-by-K numpy array of axes handles (instances
of `matplotlib.axes._subplots.AxesSubplot`).
"""
figure_object, axes_object_matrix = pyplot.subplots(
num_rows, num_columns, sharex=shared_x_axis, sharey=shared_y_axis,
figsize=(figure_width_inches, figure_height_inches)
)
if num_rows == num_columns == 1:
axes_object_matrix = numpy.full(
(1, 1), axes_object_matrix, dtype=object
)
if num_rows == 1 or num_columns == 1:
axes_object_matrix = numpy.reshape(
axes_object_matrix, (num_rows, num_columns)
)
pyplot.subplots_adjust(
left=0.02, bottom=0.02, right=0.98, top=0.95,
        hspace=vertical_spacing, wspace=horizontal_spacing
)
if not keep_aspect_ratio:
return figure_object, axes_object_matrix
for i in range(num_rows):
for j in range(num_columns):
axes_object_matrix[i][j].set(aspect='equal')
return figure_object, axes_object_matrix
def time_string_to_unix(time_string, time_format):
"""Converts time from string to Unix format.
Unix format = seconds since 0000 UTC 1 Jan 1970.
:param time_string: Time string.
:param time_format: Format of time string (example: "%Y%m%d" or
"%Y-%m-%d-%H%M%S").
:return: unix_time_sec: Time in Unix format.
"""
return calendar.timegm(time.strptime(time_string, time_format))
def time_unix_to_string(unix_time_sec, time_format):
"""Converts time from Unix format to string.
Unix format = seconds since 0000 UTC 1 Jan 1970.
:param unix_time_sec: Time in Unix format.
:param time_format: Desired format of time string (example: "%Y%m%d" or
"%Y-%m-%d-%H%M%S").
:return: time_string: Time string.
"""
return time.strftime(time_format, time.gmtime(unix_time_sec))
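# Worked example: the two conversions are inverses of each other. With
# DATE_FORMAT = '%Y%m%d' (defined above),
#
#     time_string_to_unix('20210501', DATE_FORMAT)   # -> 1619827200
#     time_unix_to_string(1619827200, DATE_FORMAT)   # -> '20210501'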
def find_tabular_files(directory_name, first_date_string, last_date_string):
"""Finds CSV files with tabular data.
:param directory_name: Name of directory with tabular files.
:param first_date_string: First date ("yyyymmdd") in range.
:param last_date_string: Last date ("yyyymmdd") in range.
:return: csv_file_names: 1-D list of paths to tabular files.
"""
first_time_unix_sec = time_string_to_unix(
time_string=first_date_string, time_format=DATE_FORMAT
)
last_time_unix_sec = time_string_to_unix(
time_string=last_date_string, time_format=DATE_FORMAT
)
csv_file_pattern = '{0:s}/track_step_NCARSTORM_d01_{1:s}-0000.csv'.format(
directory_name, DATE_FORMAT_REGEX
)
csv_file_names = glob.glob(csv_file_pattern)
csv_file_names.sort()
file_date_strings = [_tabular_file_name_to_date(f) for f in csv_file_names]
file_times_unix_sec = numpy.array([
time_string_to_unix(time_string=d, time_format=DATE_FORMAT)
for d in file_date_strings
], dtype=int)
good_indices = numpy.where(numpy.logical_and(
file_times_unix_sec >= first_time_unix_sec,
file_times_unix_sec <= last_time_unix_sec
))[0]
return [csv_file_names[k] for k in good_indices]
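# Illustrative usage (a sketch; the directory name and date range are
# assumptions):
#
#     csv_file_names = find_tabular_files(
#         directory_name='ncar_storm_csvs',
#         first_date_string='20110101', last_date_string='20111231'
#     )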
def read_tabular_file(csv_file_name):
"""Reads tabular data from CSV file.
:param csv_file_name: Path to input file.
:return: metadata_table: pandas DataFrame with metadata. Each row is one
storm object.
:return: predictor_table: pandas DataFrame with predictor values. Each row
is one storm object.
:return: target_table: pandas DataFrame with target values. Each row is one
storm object.
"""
predictor_table = pandas.read_csv(csv_file_name, header=0, sep=',')
predictor_table.drop(EXTRANEOUS_COLUMNS_ORIG, axis=1, inplace=True)
metadata_table = predictor_table[METADATA_COLUMNS_ORIG]
predictor_table.drop(METADATA_COLUMNS_ORIG, axis=1, inplace=True)
target_table = predictor_table[[TARGET_NAME_ORIG]]
predictor_table.drop([TARGET_NAME_ORIG], axis=1, inplace=True)
predictor_table = _remove_future_data(predictor_table)
metadata_table.rename(columns=METADATA_COLUMNS_ORIG_TO_NEW, inplace=True)
predictor_table.rename(columns=PREDICTOR_COLUMNS_ORIG_TO_NEW, inplace=True)
target_table.rename(columns=TARGET_COLUMNS_ORIG_TO_NEW, inplace=True)
predictor_table[AREA_NAME] *= GRID_SPACING_KM ** 2
predictor_table[MAJOR_AXIS_NAME] *= GRID_SPACING_KM
predictor_table[MINOR_AXIS_NAME] *= GRID_SPACING_KM
predictor_table[ORIENTATION_NAME] *= RADIANS_TO_DEGREES
return metadata_table, predictor_table, target_table
def read_many_tabular_files(csv_file_names):
"""Reads tabular data from many CSV files.
:param csv_file_names: 1-D list of paths to input files.
:return: metadata_table: See doc for `read_tabular_file`.
:return: predictor_table: Same.
:return: target_table: Same.
"""
num_files = len(csv_file_names)
list_of_metadata_tables = [pandas.DataFrame()] * num_files
list_of_predictor_tables = [pandas.DataFrame()] * num_files
    list_of_target_tables = [pandas.DataFrame()] * num_files
import os
from matplotlib import pyplot as plt
from pandas import DataFrame
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import OneHotEncoder
import category_encoders as ce
import numpy as np
from app import db
from app.base.db_models.ModelEncodedColumns import ModelEncodedColumns
from app.base.db_models.ModelFeatures import ModelFeatures
from mylib.db_helper.AttributesHelper import add_encoded_column_values
from mylib.utiles.CVSReader import get_only_file_name, get_file_path
class AdjustDataFrame:
def __init__(self, name):
self.name = name
def encode_data_frame1(data: DataFrame):
columns_name = data.columns
encoded_data = data
data_types = data.dtypes
for i in range(len(data_types)):
if data_types[i] != np.int64:
col_name = columns_name[i]
oe_style = OneHotEncoder()
oe_results = oe_style.fit_transform(data[[col_name]])
pd.DataFrame(oe_results.toarray(), columns=oe_style.categories_).head()
# encoded_data = encoded_data.join(pd.DataFrame(oe_results.toarray(), columns=oe_style.categories_))
encoded_data = encoded_data.merge(pd.DataFrame(oe_results.toarray()), how='left', left_index=True,
right_index=True)
return encoded_data
def encode_data_frame(model_id, data: DataFrame, column_type):
try:
if column_type != 'F':
return encode_labels_data_frame(model_id, data)
else:
return encode_features_data_frame(model_id, data)
except Exception as e:
print('Ohh -encode_data_frame...Something went wrong.')
print(e)
return 0
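# Illustrative usage (a sketch; the model id and dataframes are assumptions):
# features are encoded with column_type='F' and labels with column_type='L',
# which routes to encode_features_data_frame / encode_labels_data_frame below.
#
#     encoded_features = encode_data_frame(model_id=1, data=features_df, column_type='F')
#     encoded_labels = encode_data_frame(model_id=1, data=labels_df, column_type='L')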
def encode_features_data_frame(model_id, data: DataFrame, column_type='F'):
columns_name = data.columns
encoded_data = []
data_types = data.dtypes
for i in range(len(data_types)):
        if data_types[i] != np.int64 and data_types[i] != np.float64:
col_name = columns_name[i]
dummies = pd.get_dummies(data[[col_name]])
dummies_columns = dummies.columns
# encoded_data = encoded_data.append(dummies)
data = data.drop([col_name], axis=1)
data = pd.concat([data, dummies], axis=1)
endoced_column = get_encoded_columns(data.columns, col_name)
# encoder = ce.OneHotEncoder(cols=col_name, use_cat_names=True)
# data = encoder.fit_transform(data)
addencodedcolumnvalues = add_encoded_column_values(model_id, col_name, dummies, column_type)
# encoded_data = encoder.inverse_transform(encoded_data)
else:
# encoded_data = encoded_data.append(data[columns_name[i]])
column_data = data[columns_name[i]]
data = data.drop(columns_name[i], axis=1)
data = pd.concat([data, column_data], axis=1)
model_encoded_column = {'model_id': model_id, 'column_name': columns_name[i],
'column_type': column_type}
model_encoded = ModelEncodedColumns(**model_encoded_column)
db.session.add(model_encoded)
db.session.commit()
db.session.close()
return data
def encode_labels_data_frame(model_id, data: DataFrame, column_type='L'):
try:
columns_name = data.columns
encoded_data = []
data_types = data.dtypes
for i in range(len(data_types)):
if data_types[i] != np.int64:
col_name = columns_name[i]
                dummies = pd.get_dummies(data[[col_name]] if data_types[i] != np.float64 else round(data[[col_name]], 0))
dummies_columns = dummies.columns
# encoded_data = encoded_data.append(dummies)
data = data.drop([col_name], axis=1)
data = pd.concat([data, dummies], axis=1)
endoced_column = get_encoded_columns(data.columns, col_name)
# encoder = ce.OneHotEncoder(cols=col_name, use_cat_names=True)
# data = encoder.fit_transform(data)
addencodedcolumnvalues = add_encoded_column_values(model_id, col_name, dummies, column_type)
# encoded_data = encoder.inverse_transform(encoded_data)
else:
# encoded_data = encoded_data.append(data[columns_name[i]])
column_data = data[columns_name[i]]
data = data.drop(columns_name[i], axis=1)
data = pd.concat([data, column_data], axis=1)
model_encoded_column = {'model_id': model_id, 'column_name': columns_name[i],
'column_type': column_type}
model_encoded = ModelEncodedColumns(**model_encoded_column)
db.session.add(model_encoded)
db.session.commit()
db.session.close()
return data
except Exception as e:
print('Ohh -encode_data_frame...Something went wrong.')
print(e)
return 0
def encode_data_array(columns_list, data_array):
data_frame = pd.DataFrame(data_array)
# Create the mapper
data_frame_columns = data_frame.columns
zip_iterator = zip(data_frame_columns, columns_list)
a_dictionary = dict(zip_iterator)
data_frame = data_frame.rename(a_dictionary, axis=1)
data_types = data_frame.dtypes
columns_name = data_frame.columns
encoded_data_frame = data_frame
for i in range(len(data_types)):
if data_types[i] != np.int64:
col_name = columns_name[i]
encoder = ce.OneHotEncoder(cols=col_name, use_cat_names=True)
encoded_data_frame = encoder.fit_transform(encoded_data_frame)
print(encoded_data_frame)
return encoded_data_frame
"""
Function: encode_prediction_data_frame(data: DataFrame)
Use this function to encode the sent values for prediction from the user.
The function uses the same encoder that have been used to encode the training and testing data
"""
# def encode_prediction_data_frame(data: DataFrame):
def encode_prediction_data_frame(features_values, column_type):
# 1- Get the all columns after encoded
encoded_dataframe_columns = np.asarray(
ModelEncodedColumns.query.with_entities(ModelEncodedColumns.column_name).filter_by(
column_type=column_type).all()).flatten()
model_features = np.array(ModelFeatures.query.with_entities(ModelFeatures.feature_name).all()).flatten()
# p_value = ['<NAME>', '8', '0', '1', '0', '0', '0.5']
p_value = features_values
# 2- Match the predicted columns with the encoded columns
out_put = []
enc_label = []
for i in range(len(model_features)):
get_indexes = lambda encoded_dataframe_columns, xs: [i for (y, i) in zip(xs, range(len(xs))) if
encoded_dataframe_columns in y]
occurrences_indexes = get_indexes(model_features[i], encoded_dataframe_columns)
number_of_occurrences = len(occurrences_indexes)
# print('model_features[i] = ' + str(model_features[i]))
# print('number_of_occurrences = ' + str(number_of_occurrences))
label_len = len(model_features[i])
if number_of_occurrences == 1:
out_put.append(p_value[i])
elif number_of_occurrences > 1:
predicted_value = p_value[i]
for j in range(len(occurrences_indexes)):
# print("occurances_indexes[j]=" + str(occurrences_indexes[j]))
# print("p_value[occurances_indexes[j]]=" + str(p_value[occurrences_indexes[i]]))
# print("encoded_dataframe_columns[occurrences_indexes[j]]= " + str( encoded_dataframe_columns[occurrences_indexes[j]]))
if predicted_value in str(encoded_dataframe_columns[occurrences_indexes[j]]):
print("the predicted_value= " + predicted_value)
print("the encoded_dataframe_column= " + str(encoded_dataframe_columns[occurrences_indexes[j]]))
print("j= " + str(j))
out_put.append(1)
else:
out_put.append(0)
else:
print('0')
# 3-
return out_put
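# Worked sketch (feature names and values are made up): with
# model_features = ['age', 'city'] and
# encoded_dataframe_columns = ['age', 'city_paris', 'city_rome'], calling
# encode_prediction_data_frame(['42', 'rome'], 'F') copies '42' straight
# through (single column match) and expands 'rome' to 0/1 flags across the two
# city_* columns, giving ['42', 0, 1].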
def remove_null_values(data: DataFrame):
# data = data.apply(pd.to_numeric, errors='coerce')
# data = data.fillna(0) --Replace null with 0
df1 = data.dropna() # Remove columns that have any null values
# vectorizer = DictVectorizer()
# vector_data = vectorizer.fit_transform(data)
return df1
def create_figure(csv_file_location, x_axis, y_axis):
fig, ax = plt.subplots(figsize=(6, 4))
fig.patch.set_facecolor('#E8E5DA')
x = x_axis
y = y_axis
ax.bar(x, y, color="#304C89")
plt.xticks(rotation=30, size=5)
plt.ylabel("Expected Clean Sheets", size=5)
# save the figure
image_location = get_file_path(csv_file_location) + get_only_file_name(csv_file_location) + '_plot.png'
plt.savefig(image_location, dpi=300,
bbox_inches='tight')
return image_location
def get_encoded_columns(df_head, column_name):
encoded_columns = []
for i in range(len(df_head)):
if df_head[i].find(column_name) >= 0:
encoded_columns.append(df_head[i])
print(encoded_columns)
return encoded_columns
def decode_predicted_values(model_id, p_value, labels, enc_labels):
try:
# p_value = [8, 0, 1, 0, 0, 0.5, 1, 0]
# lables = ['A', 'BB', 'C', 'D'] # DBBBB
# enc_labels = ['A', 'BB0.5', 'BB12', 'BB115', 'BB0', 'C', 'D12', 'D1150'] # DB
out_put = []
# out_put[8, 12, 0.5, 12]
for i in range(len(labels)):
get_indexes = lambda enc_labels, xs: [i for (y, i) in zip(xs, range(len(xs))) if enc_labels in y]
occurances_indexes = get_indexes(labels[i], enc_labels)
number_of_occurances = len(occurances_indexes)
print(labels[i] + "= " + str(number_of_occurances))
label_len = len(labels[i])
if number_of_occurances == 1:
out_put.append(str(p_value[occurances_indexes[0]]))
elif number_of_occurances > 1:
predicted_value = p_value[occurances_indexes]
if 1 in predicted_value: # Check if there is return value in the encoded values, if no return 0
for j in range(len(occurances_indexes)):
predicted_value = p_value[occurances_indexes[j]]
if predicted_value == 1:
real_value = enc_labels[occurances_indexes[j]][label_len:]
out_put.append(real_value)
else:
out_put.append('Can not be predicted')
else:
print('Nothing')
print(out_put)
return out_put
except Exception as e:
print('Ohh -decode_predicted_values...Something went wrong.')
print(e)
return 0
def encode_and_bind(original_dataframe, feature_to_encode):
    dummies = pd.get_dummies(original_dataframe[[feature_to_encode]])
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"])
def test_compare_axis(align_axis):
# GH#30429
s1 = pd.Series(["a", "b", "c"])
s2 = pd.Series(["x", "b", "z"])
result = s1.compare(s2, align_axis=align_axis)
if align_axis in (1, "columns"):
indices = pd.Index([0, 2])
columns = pd.Index(["self", "other"])
expected = pd.DataFrame(
[["a", "x"], ["c", "z"]], index=indices, columns=columns
)
tm.assert_frame_equal(result, expected)
else:
indices = pd.MultiIndex.from_product([[0, 2], ["self", "other"]])
expected = pd.Series(["a", "x", "c", "z"], index=indices)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"keep_shape, keep_equal",
[
(True, False),
(False, True),
(True, True),
# False, False case is already covered in test_compare_axis
],
)
def test_compare_various_formats(keep_shape, keep_equal):
s1 = pd.Series(["a", "b", "c"])
s2 = pd.Series(["x", "b", "z"])
result = s1.compare(s2, keep_shape=keep_shape, keep_equal=keep_equal)
if keep_shape:
indices = pd.Index([0, 1, 2])
columns = pd.Index(["self", "other"])
if keep_equal:
expected = pd.DataFrame(
[["a", "x"], ["b", "b"], ["c", "z"]], index=indices, columns=columns
)
else:
expected = pd.DataFrame(
[["a", "x"], [np.nan, np.nan], ["c", "z"]],
index=indices,
columns=columns,
)
else:
indices = pd.Index([0, 2])
columns = pd.Index(["self", "other"])
expected = pd.DataFrame(
[["a", "x"], ["c", "z"]], index=indices, columns=columns
)
tm.assert_frame_equal(result, expected)
def test_compare_with_equal_nulls():
# We want to make sure two NaNs are considered the same
# and dropped where applicable
s1 = pd.Series(["a", "b", np.nan])
s2 = pd.Series(["x", "b", np.nan])
result = s1.compare(s2)
expected = pd.DataFrame([["a", "x"]], columns=["self", "other"])
    tm.assert_frame_equal(result, expected)
"""
This script contains code used to clean the raw data and is used in '1. descriptive.ipynb'
"""
#Import libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
def ordered_dict_values(dictionary):
"""
A function to obtain unique values from a dictionary
Parameters
----------
dictionary: dict
Dictionary to obtain unique values from
Returns
-------
lst: list
A list of unique values in the dictionary by order of appearance
"""
lst = []
for v in dictionary.values():
if v not in lst:
lst.append(v)
return lst
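# Worked example: because Python dicts preserve insertion order,
# ordered_dict_values({'a': 1, 'b': 2, 'c': 1}) returns [1, 2] -- duplicates
# are dropped and the first-seen order is kept.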
def get_dtype(df):
"""
A function to get the datatype for each column in a dataframe
Parameters
----------
df: pandas.DataFrame
A dataframe to extract datatypes from
Returns
-------
    dtype: pandas.DataFrame
        A dataframe with two columns: 'column' (column name) and 'dtype' (datatype)
"""
dtype = {'column':[],'dtype':[]}
for column in df.columns:
dtype['column'].append(column)
dtype['dtype'].append(df[column].dtype)
dtype = pd.DataFrame(dtype)
return dtype
def clean_df(df,schema,debug=False):
"""
A function to do some basic data cleaning using a provided schema.
The following steps are performed in order:
    - variable names are converted to lowercase
- spacing replaced with '_'
    - miscellaneous replacements of categorical variable names/missing values that don't work with the schema
- loop through each variable name:
a. exclude variables which are specified in the schema doc
b. expand any nested lists
c. replace missing values with specified values
d. if value is less than minimum value, set to missing
e. if value is greater than maximum value, set to missing
f. enforce variable type for string/categorical variables
g. enforce datetime variable type
Parameters
----------
df: pandas.DataFrame
The dataframe to be cleaned
schema: pandas.DataFrame
A dataframe containing schema information including: variable name, supposed dtype, missing value indicator, max and min ranges
debug: bool
A flag used for debugging
Returns
-------
df: pandas.DataFrame
The cleaned dataframe
"""
#Clean the names
df.columns = [name.lower() for name in df.columns]
df.columns = [name.replace(' ','_') for name in df.columns]
df = df[[name for name in df.columns if 'unnamed' not in name]]
df['tropi'] = df['tropi'].replace({999.:np.nan})
df['stenttype'] = df['stenttype'].replace({999.0:'4.0'})
for var in schema.varname:
if var in df.columns:
index = schema['varname']==var
series = df[var]
if schema['type'][index].values[0] == 'exclude':
df = df.drop(var,axis=1)
elif schema['dtype'][index].values[0] == 'nested':
expanded = series.str.split(',',expand=True)
expanded.columns = [var+str(col) for col in expanded.columns]
df = df.drop(var,axis=1).join(expanded)
else:
series = series.replace({schema['missing_code'][index].values[0]:np.nan,999:np.nan})
series = series.fillna(schema['impute_value'][index].values[0])
if schema['dtype'][index].values[0] in ['float64','numeric','timeto','category']:
series = pd.to_numeric(series,errors='coerce')
if schema['min'][index].values[0] == schema['min'][index].values[0]:
series[series < schema['min'][index].values[0]] = np.nan
if schema['max'][index].values[0] == schema['max'][index].values[0]:
series[series > schema['max'][index].values[0]] = np.nan
if schema['dtype'][index].values[0] in ['category','object','str','freetext']:
series = series.apply(lambda row: str(row) if row==row else np.nan)
elif schema['dtype'][index].values[0] == 'datetime':
series = pd.to_datetime(series,errors='coerce')
df[var] = series
return df
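# Illustrative note (hypothetical values; column names taken from the code above):
# a schema row such as varname='age', type='include', dtype='numeric',
# missing_code=-99, impute_value=np.nan, min=18, max=110 would coerce 'age' to
# numeric, map -99/999 to NaN and null out values outside 18-110. The
# `x == x` comparisons above are the standard NaN check: they are only False
# when the schema cell itself is NaN, i.e. when no bound is specified.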
def tidy(df,var_dict):
"""
Subfunction to extract variable names, categorical variables and get order of display of categorical levels for Table 1
Parameters
----------
df: pandas.DataFrame
The dataset to be tidied
    var_dict: dict
A nested dictionary containing original variable names as keys and a dictionary of display name and dictionary to replace categorical values
"""
var_list = ['lvtstatus','lvtrecurrence','dateofdeath','repeat_scan_date','finalscandate']
cat_features = []
cat_order = {}
for varname in var_dict:
display_name = var_dict[varname].get('display')
replace_dict = var_dict[varname].get('replace',None)
if replace_dict is not None:
try:
df[varname] = df[varname].apply(str)
df[varname] = df[varname].replace(replace_dict)
df[varname] = df[varname].replace({'nan':np.nan})
except:
print(varname)
raise
cat_features.append(display_name)
cat_order[display_name] = ordered_dict_values(replace_dict)
df = df.rename({varname:display_name},axis=1)
var_list.append(display_name)
df = df[var_list]
return df,var_list,cat_features, cat_order
def get_data():
"""
    A wrapper function to read csv datasets, perform cleaning and replace categorical levels using the var_dict
Variables which are commented out are removed intentionally from the analysis
Parameters
----------
None
Returns
-------
combined: pandas.DataFrame
A dataframe containing the dataset
var_list: list
A list of strings containing variable names
cat_features_list: list
A list of strings containing categorical variables
    cat_order: dict
A dictionary of lists indicating the order of appearance for each variable - used for Table 1
"""
schema = pd.read_csv('raw_data/schema.csv')
nstemi = | pd.read_csv('raw_data/nstemi.csv') | pandas.read_csv |
import sys
import numpy as np
import pandas as pd
from pvlib import modelchain, pvsystem
from pvlib.modelchain import ModelChain
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.location import Location
from pvlib._deprecation import pvlibDeprecationWarning
from pandas.util.testing import assert_series_equal
import pytest
from test_pvsystem import sam_data, pvsyst_module_params
from conftest import fail_on_pvlib_version, requires_scipy, requires_tables
@pytest.fixture
def system(sam_data):
modules = sam_data['sandiamod']
module = 'Canadian_Solar_CS5P_220M___2009_'
module_parameters = modules[module].copy()
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_snl_ac_system(sam_data):
modules = sam_data['cecmod']
module = 'Canadian_Solar_CS5P_220M'
module_parameters = modules[module].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_native_snl_ac_system(sam_data):
module = 'Canadian_Solar_CS5P_220M'
module_parameters = sam_data['cecmod'][module].copy()
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvsyst_dc_snl_ac_system(sam_data, pvsyst_module_params):
module = 'PVsyst test module'
module_parameters = pvsyst_module_params
module_parameters['b'] = 0.05
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_adr_ac_system(sam_data):
modules = sam_data['cecmod']
module = 'Canadian_Solar_CS5P_220M'
module_parameters = modules[module].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['adrinverter']
inverter = inverters['Zigor__Sunzet_3_TL_US_240V__CEC_2011_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module=module,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_snl_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_pvwatts_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverter_parameters = {'eta_inv_nom': 0.95}
system = PVSystem(surface_tilt=32.2, surface_azimuth=180,
module_parameters=module_parameters,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture
def location():
return Location(32.2, -111, altitude=700)
@pytest.fixture
def weather():
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
weather = pd.DataFrame({'ghi': [500, 0], 'dni': [800, 0], 'dhi': [100, 0]},
index=times)
return weather
def test_ModelChain_creation(system, location):
mc = ModelChain(system, location)
@pytest.mark.parametrize('strategy, expected', [
(None, (32.2, 180)), ('None', (32.2, 180)), ('flat', (0, 180)),
('south_at_latitude_tilt', (32.2, 180))
])
def test_orientation_strategy(strategy, expected, system, location):
mc = ModelChain(system, location, orientation_strategy=strategy)
# the || accounts for the coercion of 'None' to None
assert (mc.orientation_strategy == strategy or
mc.orientation_strategy is None)
assert system.surface_tilt == expected[0]
assert system.surface_azimuth == expected[1]
@requires_scipy
def test_run_model(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
with pytest.warns(pvlibDeprecationWarning):
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 183.522449305, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=1)
def test_run_model_with_irradiance(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 1.90054749e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_perez(system, location):
mc = ModelChain(system, location, transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni': 900, 'ghi': 600, 'dhi': 150},
index=times)
ac = mc.run_model(times, weather=irradiance).ac
expected = pd.Series(np.array([ 190.194545796, -2.00000000e-02]),
index=times)
| assert_series_equal(ac, expected) | pandas.util.testing.assert_series_equal |
# summarize class balance from the har dataset
from numpy import vstack
from pandas import read_csv
from pandas import DataFrame
# load a single file as a numpy array
def load_file(filepath):
dataframe = read_csv(filepath, header=None, delim_whitespace=True)
return dataframe.values
# summarize the balance of classes in an output variable column
def class_breakdown(data):
# convert the numpy array into a dataframe
df = | DataFrame(data) | pandas.DataFrame |
import argparse
from ast import parse
from os import P_ALL, error, path
import sys
import math
from numpy.core.fromnumeric import repeat
from numpy.core.numeric import full
import pandas as pd
from pandas import plotting as pdplot
import numpy as np
from pandas.core.frame import DataFrame
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.statespace import mlemodel
from statsmodels.iolib import summary
from statsmodels.tsa.stattools import adfuller, kpss
from statsmodels.regression import linear_model
from sklearn.metrics import mean_squared_error
from math import sqrt
from copy import deepcopy
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from pmdarima.arima import auto_arima
from pmdarima import arima as pmd
from pmdarima import model_selection
from pmdarima.arima.utils import ndiffs
from matplotlib import pyplot as plt
from streamlit.state.session_state import Value
from src.utils import *
from src.data import TimeSeriesData
from src.import_data import ExogeneousDataImporter, EndogeneousDataImporter, EndogeneousDataFormats, ExogeneousDataFormats
import enum
# display options
| pd.set_option("display.max_columns", 999) | pandas.set_option |
from scipy import sparse
import pandas as pd
import joblib
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
def load_features(filepath):
matrix = sparse.load_npz(filepath)
return matrix
def load_train_label(filepath):
label = pd.read_csv(filepath)
return label
def load_data(filepath):
df = pd.read_csv(filepath)
return df
def save_model(model, model_filepath):
joblib.dump(model, open(model_filepath, 'wb'))
def load_model(model_filepath):
model = joblib.load(model_filepath)
return model
def evaluate_model(train_features, train_y, class_names, model):
model_report = classification_report(train_y, model.predict(train_features))
matrix = confusion_matrix(train_y, model.predict(train_features), labels = class_names)
    model_acc = matrix.diagonal()/matrix.sum(axis=1)
model_df = | pd.DataFrame({'accuracy': model_acc}, index=class_names) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import sqlalchemy as sql
import math
from base_classes import Data, Portfolio, Strategy, Backtest
###############################################################################
class Data_Selected(Data):
# Get Data Set with closing prices for bitcoin, bitcoin-cash, ethereum, litecoin
# ... ripple
def __init__(self,start,end,frequency,tickers):
self.start = start
self.end = end
self.frequency = frequency
self.tickers = tickers
def load_data(self,table):
tickers = ['Date'] + self.tickers
ticker_str = ', '.join("`{}`".format(ticker) for ticker in tickers)
engine = sql.create_engine('mysql+pymysql://protos-github:protos-github@google-sheet-data.cfyqhzfdz93r.eu-west-1.rds.amazonaws.com:3306/protos')
prices = pd.read_sql("Select " + str(ticker_str) + " From " + str(table), con=engine)
return prices
def load_OHLC(self):
tickers = ['Date'] + self.tickers
ticker_str = ', '.join("`{}`".format(ticker) for ticker in tickers)
engine = sql.create_engine('mysql+pymysql://protos-github:protos-github@google-sheet-data.cfyqhzfdz93r.eu-west-1.rds.amazonaws.com:3306/protos')
opening = pd.read_sql("Select " + str(ticker_str) + " From open", con=engine)
high = pd.read_sql("Select " + str(ticker_str) + " From high", con=engine)
low = pd.read_sql("Select " + str(ticker_str) + " From low", con=engine)
closing = pd.read_sql("Select " + str(ticker_str) + " From close", con=engine)
return [opening, high, low, closing]
def load_Volume(self):
tickers = ['Date'] + self.tickers
ticker_str = ', '.join("`{}`".format(ticker) for ticker in tickers)
engine = sql.create_engine('mysql+pymysql://protos-github:<EMAIL>:3306/protos')
volume = pd.read_sql("Select " + str(ticker_str) + " From volume", con=engine)
return volume
def clean_data(self, data):
data.set_index('Date', inplace=True)
data.index = pd.to_datetime(data.index)
date_filter = (data.index >= self.start) & (data.index <= self.end)
data = data[date_filter]
# frequency_filter = data['Date'] == ...
# price = price[frequency_filter]
        data = data.fillna('NaN')  # assign the result; fillna is not in-place
data = data.apply(lambda x: x.str.replace(',',''))
data = data.apply(pd.to_numeric, errors='coerce')
return data
class Trend_Following(Strategy):
def __init__(self, max_lookback, weights,
normalize_vol, long_only, short_only):
self.max_lookback = max_lookback
self.weights = weights
self.normalize_vol = normalize_vol
self.long_only = long_only
self.short_only = short_only
def generate_signals(self, prices):
last_row = prices.shape[0]-1
lb1 = int(self.max_lookback/3)
lb2 = int(2*self.max_lookback/3)
        # As soon as signals are fully calculated
if(last_row >= self.max_lookback):
l_mask_1 = prices.iloc[last_row,:]>=prices.iloc[last_row-lb1,:]
l_mask_1 = l_mask_1*self.weights[0]
l_mask_1.mask(l_mask_1==0,other=(-self.weights[0]), inplace=True)
l_mask_2 = prices.iloc[last_row,:]>=prices.iloc[last_row-lb2,:]
l_mask_2 = l_mask_2*self.weights[1]
l_mask_2.mask(l_mask_2==0,other=(-self.weights[1]), inplace=True)
l_mask_3 = prices.iloc[last_row,:]>=prices.iloc[last_row-self.max_lookback,:]
l_mask_3 = l_mask_3*self.weights[2]
l_mask_3.mask(l_mask_3==False,other=(-self.weights[2]), inplace=True)
#### Short Masks
s_mask_1 = prices.iloc[last_row,:]<prices.iloc[last_row-lb1,:]
s_mask_1 = s_mask_1*(-self.weights[0])
s_mask_1.mask(s_mask_1==0,other=(self.weights[0]), inplace=True)
s_mask_2 = prices.iloc[last_row,:]<prices.iloc[last_row-lb2,:]
s_mask_2 = s_mask_2*(-self.weights[1])
s_mask_2.mask(s_mask_2==0,other=(self.weights[1]), inplace=True)
s_mask_3 = prices.iloc[last_row,:]<prices.iloc[last_row-self.max_lookback,:]
s_mask_3 = s_mask_3*(-self.weights[2])
s_mask_3.mask(s_mask_3==0,other=(self.weights[2]), inplace=True)
for index, i in enumerate(prices.iloc[last_row-self.max_lookback,:]):
if(math.isnan(i)):
l_mask_1[index] = np.NAN
l_mask_2[index] = np.NAN
l_mask_3[index] = np.NAN
s_mask_1[index] = np.NAN
s_mask_2[index] = np.NAN
s_mask_3[index] = np.NAN
# Long-Only or Long-Short
if(self.long_only):
mask = l_mask_1 + l_mask_2 + l_mask_3
mask.mask(mask < 0, other=0, inplace=True)
elif(self.short_only):
mask = s_mask_1 +s_mask_2 + s_mask_3
mask.mask(mask > 0, other=0, inplace=True)
else:
mask = l_mask_1 + l_mask_2 + l_mask_3
else:
mask = prices.iloc[last_row,:]
mask = (mask*0).fillna(0)
ewma_ann = [0,0,0,0,0]
# Normalize for Volatility as well:
vol_lb = 90
if(last_row+1 >= vol_lb):
if(self.normalize_vol):
returns = prices.pct_change().replace(np.inf, np.nan)
ewma0 = returns.iloc[:vol_lb,:].std(axis=0)**2
if(last_row>0):
for i in range(vol_lb,last_row+1):#returns.shape[0]-vol_lb .... vol_lb+i
ewma0 = 0.94*ewma0.squeeze() + 0.06*((returns.iloc[i,:].rename())**2).T.squeeze()
ewma_ann = np.sqrt(ewma0)*np.sqrt(365)
ewma = ewma_ann.sum()/ewma_ann
ewma_norm = ewma/ewma.sum()
mask = mask*ewma_norm
# Normalize the mask - max single position risk = 1/(nr of tickers active)
if(self.normalize_vol): mask_norm = mask
else: mask_norm = mask/mask.count()
#Replace NaN with 0
mask_norm = mask_norm.fillna(0)
return mask_norm
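# Worked illustration (assuming weights=[1, 1, 1], not stated in the original):
# a ticker trading above all three lookback reference prices scores +3, below
# all three scores -3, and mixed signals partially cancel; long_only/short_only
# then clip the unwanted sign to zero before the 1/N (or volatility-normalized)
# position sizing.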
class Daily_Portfolio(Portfolio):
def __init__(self, init_balance):
self.positions = []
self.init_balance = init_balance
self.balance = []
self.trading = []
class Daily_Backtest(Backtest):
def __init__(self, rebalance_period, spread, fees):
self.rebalance_period = rebalance_period
self.spread = spread
self.fees = fees
def run_backtest(self, data, portfolio,strategy):
balance = portfolio.init_balance
for i in range(1,data.shape[0]):
### What happened to our portfolio during the timestep?
# Add returns to balance, if we had a non-empty portfolio allocation
if(i > 1):
# update current balance only when portfolio has allocations
# for the first days, there are no trend-signals == no allocation
if(abs(portfolio.positions[len(portfolio.positions)-1]).sum() != 0):
# add returns of each ticker for this timestep
# quantity of ticker * price_delta (quantity is neg for short pos)
balance += (portfolio.positions[len(portfolio.positions)-1]*(data.iloc[i-1,:]-data.iloc[i-2,:])).sum()
### How should we react to new prices?
# get new weights
allocation = strategy.generate_signals(data.iloc[0:i,:])
# calculate target allocation
t_alloc = allocation*balance
#tweights.append(allocation)
# calculate target quantity allocation
q_alloc = (t_alloc/data.iloc[i-1,:]).fillna(0)
# change division by zero (no prices available, etc.) to 0
q_alloc = q_alloc.replace(np.inf, 0)
# change quantity allocation of our portfolio
            # i % rebalance_period == 0 triggers a rebalance (e.g. every 7 days); on all other days the portfolio allocation stays unchanged
if(i == 1): portfolio.positions.append(q_alloc)
if((i%self.rebalance_period == 0) & (i != 1)):
# Append new allocation to portfolio every x=rebalancing_period days
portfolio.positions.append(q_alloc)
# Subtract transaction fees and market spread
trades = portfolio.positions[len(portfolio.positions)-1]-portfolio.positions[len(portfolio.positions)-2]
portfolio.trading.append(trades)
balance -= (abs(portfolio.positions[len(portfolio.positions)-1])*data.iloc[i-1,:]*self.spread).sum()
#balance -= fees*trading.count()
# add current days new balance (calculated above) as soon as signals start to come in (i > lookback-period for trend signals)
if(i >= strategy.max_lookback):
portfolio.balance.append(balance)
return portfolio.balance
def collect_statistics(self, portfolio_balance):
portfolio_balance = | pd.DataFrame(portfolio_balance) | pandas.DataFrame |
"""
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import annotations
import operator
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Literal,
Union,
cast,
final,
)
from warnings import warn
import numpy as np
from pandas._libs import (
algos,
hashtable as htable,
iNaT,
lib,
)
from pandas._typing import (
AnyArrayLike,
ArrayLike,
DtypeObj,
Scalar,
TakeIndexer,
npt,
)
from pandas.util._decorators import doc
from pandas.core.dtypes.cast import (
construct_1d_object_array_from_listlike,
infer_dtype_from_array,
sanitize_to_nanoseconds,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_object,
ensure_platform_int,
is_array_like,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import PandasDtype
from pandas.core.dtypes.generic import (
ABCDatetimeArray,
ABCExtensionArray,
ABCIndex,
ABCMultiIndex,
ABCRangeIndex,
ABCSeries,
ABCTimedeltaArray,
)
from pandas.core.dtypes.missing import (
isna,
na_value_for_dtype,
)
from pandas.core.array_algos.take import take_nd
from pandas.core.construction import (
array as pd_array,
ensure_wrapped_if_datetimelike,
extract_array,
)
from pandas.core.indexers import validate_indices
if TYPE_CHECKING:
from pandas._typing import (
NumpySorter,
NumpyValueArrayLike,
)
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
TimedeltaArray,
)
_shared_docs: dict[str, str] = {}
# --------------- #
# dtype access #
# --------------- #
def _ensure_data(values: ArrayLike) -> np.ndarray:
"""
routine to ensure that our data is of the correct
input dtype for lower-level routines
This will coerce:
- ints -> int64
- uint -> uint64
- bool -> uint64 (TODO this should be uint8)
- datetimelike -> i8
- datetime64tz -> i8 (in local tz)
- categorical -> codes
Parameters
----------
values : np.ndarray or ExtensionArray
Returns
-------
np.ndarray
"""
if not isinstance(values, ABCMultiIndex):
# extract_array would raise
values = extract_array(values, extract_numpy=True)
# we check some simple dtypes first
if is_object_dtype(values.dtype):
return ensure_object(np.asarray(values))
elif is_bool_dtype(values.dtype):
if isinstance(values, np.ndarray):
# i.e. actually dtype == np.dtype("bool")
return np.asarray(values).view("uint8")
else:
# i.e. all-bool Categorical, BooleanArray
try:
return np.asarray(values).astype("uint8", copy=False)
except TypeError:
# GH#42107 we have pd.NAs present
return np.asarray(values)
elif is_integer_dtype(values.dtype):
return np.asarray(values)
elif is_float_dtype(values.dtype):
# Note: checking `values.dtype == "float128"` raises on Windows and 32bit
# error: Item "ExtensionDtype" of "Union[Any, ExtensionDtype, dtype[Any]]"
# has no attribute "itemsize"
if values.dtype.itemsize in [2, 12, 16]: # type: ignore[union-attr]
# we dont (yet) have float128 hashtable support
return ensure_float64(values)
return np.asarray(values)
elif is_complex_dtype(values.dtype):
# Incompatible return value type (got "Tuple[Union[Any, ExtensionArray,
# ndarray[Any, Any]], Union[Any, ExtensionDtype]]", expected
# "Tuple[ndarray[Any, Any], Union[dtype[Any], ExtensionDtype]]")
return values # type: ignore[return-value]
# datetimelike
elif needs_i8_conversion(values.dtype):
if isinstance(values, np.ndarray):
values = sanitize_to_nanoseconds(values)
npvalues = values.view("i8")
npvalues = cast(np.ndarray, npvalues)
return npvalues
elif is_categorical_dtype(values.dtype):
values = cast("Categorical", values)
values = values.codes
return values
# we have failed, return object
values = np.asarray(values, dtype=object)
return ensure_object(values)
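# Illustrative coercions (examples added for clarity, not part of pandas):
# _ensure_data(np.array([True, False])) -> array([1, 0], dtype=uint8)
# _ensure_data(np.array(["a", 1], dtype=object)) -> returned as an object ndarray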
def _reconstruct_data(
values: ArrayLike, dtype: DtypeObj, original: AnyArrayLike
) -> ArrayLike:
"""
reverse of _ensure_data
Parameters
----------
values : np.ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
original : AnyArrayLike
Returns
-------
ExtensionArray or np.ndarray
"""
if isinstance(values, ABCExtensionArray) and values.dtype == dtype:
# Catch DatetimeArray/TimedeltaArray
return values
if not isinstance(dtype, np.dtype):
# i.e. ExtensionDtype
cls = dtype.construct_array_type()
if isinstance(values, cls) and values.dtype == dtype:
return values
values = cls._from_sequence(values)
elif is_bool_dtype(dtype):
values = values.astype(dtype, copy=False)
# we only support object dtypes bool Index
if isinstance(original, ABCIndex):
values = values.astype(object, copy=False)
elif dtype is not None:
if is_datetime64_dtype(dtype):
dtype = np.dtype("datetime64[ns]")
elif is_timedelta64_dtype(dtype):
dtype = np.dtype("timedelta64[ns]")
values = values.astype(dtype, copy=False)
return values
def _ensure_arraylike(values) -> ArrayLike:
"""
ensure that we are arraylike if not already
"""
if not is_array_like(values):
inferred = lib.infer_dtype(values, skipna=False)
if inferred in ["mixed", "string", "mixed-integer"]:
# "mixed-integer" to ensure we do not cast ["ss", 42] to str GH#22160
if isinstance(values, tuple):
values = list(values)
values = construct_1d_object_array_from_listlike(values)
else:
values = np.asarray(values)
return values
_hashtables = {
"complex128": htable.Complex128HashTable,
"complex64": htable.Complex64HashTable,
"float64": htable.Float64HashTable,
"float32": htable.Float32HashTable,
"uint64": htable.UInt64HashTable,
"uint32": htable.UInt32HashTable,
"uint16": htable.UInt16HashTable,
"uint8": htable.UInt8HashTable,
"int64": htable.Int64HashTable,
"int32": htable.Int32HashTable,
"int16": htable.Int16HashTable,
"int8": htable.Int8HashTable,
"string": htable.StringHashTable,
"object": htable.PyObjectHashTable,
}
def _get_hashtable_algo(values: np.ndarray):
"""
Parameters
----------
values : np.ndarray
Returns
-------
htable : HashTable subclass
values : ndarray
"""
values = _ensure_data(values)
ndtype = _check_object_for_strings(values)
htable = _hashtables[ndtype]
return htable, values
def _get_values_for_rank(values: ArrayLike) -> np.ndarray:
if is_categorical_dtype(values):
values = cast("Categorical", values)._values_for_rank()
values = _ensure_data(values)
if values.dtype.kind in ["i", "u", "f"]:
# rank_t includes only object, int64, uint64, float64
dtype = values.dtype.kind + "8"
values = values.astype(dtype, copy=False)
return values
def get_data_algo(values: ArrayLike):
values = _get_values_for_rank(values)
ndtype = _check_object_for_strings(values)
htable = _hashtables.get(ndtype, _hashtables["object"])
return htable, values
def _check_object_for_strings(values: np.ndarray) -> str:
"""
Check if we can use string hashtable instead of object hashtable.
Parameters
----------
values : ndarray
Returns
-------
str
"""
ndtype = values.dtype.name
if ndtype == "object":
# it's cheaper to use a String Hash Table than Object; we infer
# including nulls because that is the only difference between
# StringHashTable and ObjectHashtable
if lib.infer_dtype(values, skipna=False) in ["string"]:
ndtype = "string"
return ndtype
# --------------- #
# top-level algos #
# --------------- #
def unique(values):
"""
Hash table-based unique. Uniques are returned in order
of appearance. This does NOT sort.
Significantly faster than numpy.unique for long enough sequences.
Includes NA values.
Parameters
----------
values : 1d array-like
Returns
-------
numpy.ndarray or ExtensionArray
The return can be:
* Index : when the input is an Index
* Categorical : when the input is a Categorical dtype
* ndarray : when the input is a Series/ndarray
Return numpy.ndarray or ExtensionArray.
See Also
--------
Index.unique : Return unique values from an Index.
Series.unique : Return unique values of Series object.
Examples
--------
>>> pd.unique(pd.Series([2, 1, 3, 3]))
array([2, 1, 3])
>>> pd.unique(pd.Series([2] + [1] * 5))
array([2, 1])
>>> pd.unique(pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")]))
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.unique(
... pd.Series(
... [
... pd.Timestamp("20160101", tz="US/Eastern"),
... pd.Timestamp("20160101", tz="US/Eastern"),
... ]
... )
... )
<DatetimeArray>
['2016-01-01 00:00:00-05:00']
Length: 1, dtype: datetime64[ns, US/Eastern]
>>> pd.unique(
... pd.Index(
... [
... pd.Timestamp("20160101", tz="US/Eastern"),
... pd.Timestamp("20160101", tz="US/Eastern"),
... ]
... )
... )
DatetimeIndex(['2016-01-01 00:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
>>> pd.unique(list("baabc"))
array(['b', 'a', 'c'], dtype=object)
An unordered Categorical will return categories in the
order of appearance.
>>> pd.unique(pd.Series(pd.Categorical(list("baabc"))))
['b', 'a', 'c']
Categories (3, object): ['a', 'b', 'c']
>>> pd.unique(pd.Series(pd.Categorical(list("baabc"), categories=list("abc"))))
['b', 'a', 'c']
Categories (3, object): ['a', 'b', 'c']
An ordered Categorical preserves the category ordering.
>>> pd.unique(
... pd.Series(
... pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
... )
... )
['b', 'a', 'c']
Categories (3, object): ['a' < 'b' < 'c']
An array of tuples
>>> pd.unique([("a", "b"), ("b", "a"), ("a", "c"), ("b", "a")])
array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
"""
values = _ensure_arraylike(values)
if is_extension_array_dtype(values.dtype):
# Dispatch to extension dtype's unique.
return values.unique()
original = values
htable, values = _get_hashtable_algo(values)
table = htable(len(values))
uniques = table.unique(values)
uniques = _reconstruct_data(uniques, original.dtype, original)
return uniques
unique1d = unique
def isin(comps: AnyArrayLike, values: AnyArrayLike) -> npt.NDArray[np.bool_]:
"""
Compute the isin boolean array.
Parameters
----------
comps : array-like
values : array-like
Returns
-------
ndarray[bool]
Same length as `comps`.
"""
if not is_list_like(comps):
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a [{type(comps).__name__}]"
)
if not is_list_like(values):
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a [{type(values).__name__}]"
)
if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)):
values = _ensure_arraylike(list(values))
elif isinstance(values, ABCMultiIndex):
# Avoid raising in extract_array
values = np.array(values)
else:
values = extract_array(values, extract_numpy=True, extract_range=True)
comps = _ensure_arraylike(comps)
comps = extract_array(comps, extract_numpy=True)
if not isinstance(comps, np.ndarray):
# i.e. Extension Array
return comps.isin(values)
elif needs_i8_conversion(comps.dtype):
# Dispatch to DatetimeLikeArrayMixin.isin
return pd_array(comps).isin(values)
elif needs_i8_conversion(values.dtype) and not is_object_dtype(comps.dtype):
# e.g. comps are integers and values are datetime64s
return np.zeros(comps.shape, dtype=bool)
# TODO: not quite right ... Sparse/Categorical
elif needs_i8_conversion(values.dtype):
return isin(comps, values.astype(object))
elif is_extension_array_dtype(values.dtype):
return isin(np.asarray(comps), np.asarray(values))
# GH16012
# Ensure np.in1d doesn't get object types or it *may* throw an exception
# Albeit hashmap has O(1) look-up (vs. O(logn) in sorted array),
# in1d is faster for small sizes
if len(comps) > 1_000_000 and len(values) <= 26 and not is_object_dtype(comps):
# If the values include nan we need to check for nan explicitly
# since np.nan it not equal to np.nan
if isna(values).any():
def f(c, v):
return np.logical_or(np.in1d(c, v), np.isnan(c))
else:
f = np.in1d
else:
# error: List item 0 has incompatible type "Union[Any, dtype[Any],
# ExtensionDtype]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
# Tuple[Any, Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any,
# Any]]"
# error: List item 1 has incompatible type "Union[Any, ExtensionDtype]";
# expected "Union[dtype[Any], None, type, _SupportsDType, str, Tuple[Any,
# Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]"
# error: List item 1 has incompatible type "Union[dtype[Any], ExtensionDtype]";
# expected "Union[dtype[Any], None, type, _SupportsDType, str, Tuple[Any,
# Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]"
common = np.find_common_type(
[values.dtype, comps.dtype], [] # type: ignore[list-item]
)
values = values.astype(common, copy=False)
comps = comps.astype(common, copy=False)
f = htable.ismember
return f(comps, values)
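# Illustrative example (added for clarity, not part of pandas):
# isin(np.array([1, 2, 3]), [2, 4]) -> array([False,  True, False])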
def factorize_array(
values: np.ndarray,
na_sentinel: int = -1,
size_hint: int | None = None,
na_value=None,
mask: np.ndarray | None = None,
) -> tuple[npt.NDArray[np.intp], np.ndarray]:
"""
Factorize a numpy array to codes and uniques.
This doesn't do any coercion of types or unboxing before factorization.
Parameters
----------
values : ndarray
na_sentinel : int, default -1
size_hint : int, optional
Passed through to the hashtable's 'get_labels' method
na_value : object, optional
A value in `values` to consider missing. Note: only use this
parameter when you know that you don't have any values pandas would
consider missing in the array (NaN for float data, iNaT for
datetimes, etc.).
mask : ndarray[bool], optional
If not None, the mask is used as indicator for missing values
(True = missing, False = valid) instead of `na_value` or
condition "val != val".
Returns
-------
codes : ndarray[np.intp]
uniques : ndarray
"""
hash_klass, values = get_data_algo(values)
table = hash_klass(size_hint or len(values))
uniques, codes = table.factorize(
values, na_sentinel=na_sentinel, na_value=na_value, mask=mask
)
codes = ensure_platform_int(codes)
return codes, uniques
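# Illustrative example (added for clarity, not part of pandas):
# factorize_array(np.array(["b", "b", "a"], dtype=object))
# -> (array([0, 0, 1]), array(['b', 'a'], dtype=object))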
@doc(
values=dedent(
"""\
values : sequence
A 1-D sequence. Sequences that aren't pandas objects are
coerced to ndarrays before factorization.
"""
),
sort=dedent(
"""\
sort : bool, default False
Sort `uniques` and shuffle `codes` to maintain the
relationship.
"""
),
size_hint=dedent(
"""\
size_hint : int, optional
Hint to the hashtable sizer.
"""
),
)
def factorize(
values,
sort: bool = False,
na_sentinel: int | None = -1,
size_hint: int | None = None,
) -> tuple[np.ndarray, np.ndarray | Index]:
"""
Encode the object as an enumerated type or categorical variable.
This method is useful for obtaining a numeric representation of an
array when all that matters is identifying distinct values. `factorize`
is available as both a top-level function :func:`pandas.factorize`,
and as a method :meth:`Series.factorize` and :meth:`Index.factorize`.
Parameters
----------
{values}{sort}
na_sentinel : int or None, default -1
Value to mark "not found". If None, will not drop the NaN
from the uniques of the values.
.. versionchanged:: 1.1.2
{size_hint}\
Returns
-------
codes : ndarray
An integer ndarray that's an indexer into `uniques`.
``uniques.take(codes)`` will have the same values as `values`.
uniques : ndarray, Index, or Categorical
The unique valid values. When `values` is Categorical, `uniques`
is a Categorical. When `values` is some other pandas object, an
`Index` is returned. Otherwise, a 1-D ndarray is returned.
.. note ::
Even if there's a missing value in `values`, `uniques` will
*not* contain an entry for it.
See Also
--------
cut : Discretize continuous-valued array.
unique : Find the unique value in an array.
Examples
--------
These examples all show factorize as a top-level method like
``pd.factorize(values)``. The results are identical for methods like
:meth:`Series.factorize`.
>>> codes, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'])
>>> codes
array([0, 0, 1, 2, 0]...)
>>> uniques
array(['b', 'a', 'c'], dtype=object)
With ``sort=True``, the `uniques` will be sorted, and `codes` will be
    shuffled so that the relationship is maintained.
>>> codes, uniques = pd.factorize(['b', 'b', 'a', 'c', 'b'], sort=True)
>>> codes
array([1, 1, 0, 2, 1]...)
>>> uniques
array(['a', 'b', 'c'], dtype=object)
Missing values are indicated in `codes` with `na_sentinel`
(``-1`` by default). Note that missing values are never
included in `uniques`.
>>> codes, uniques = pd.factorize(['b', None, 'a', 'c', 'b'])
>>> codes
array([ 0, -1, 1, 2, 0]...)
>>> uniques
array(['b', 'a', 'c'], dtype=object)
Thus far, we've only factorized lists (which are internally coerced to
NumPy arrays). When factorizing pandas objects, the type of `uniques`
will differ. For Categoricals, a `Categorical` is returned.
>>> cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c'])
>>> codes, uniques = pd.factorize(cat)
>>> codes
array([0, 0, 1]...)
>>> uniques
['a', 'c']
Categories (3, object): ['a', 'b', 'c']
Notice that ``'b'`` is in ``uniques.categories``, despite not being
present in ``cat.values``.
For all other pandas objects, an Index of the appropriate type is
returned.
>>> cat = pd.Series(['a', 'a', 'c'])
>>> codes, uniques = pd.factorize(cat)
>>> codes
array([0, 0, 1]...)
>>> uniques
Index(['a', 'c'], dtype='object')
If NaN is in the values, and we want to include NaN in the uniques of the
values, it can be achieved by setting ``na_sentinel=None``.
>>> values = np.array([1, 2, 1, np.nan])
>>> codes, uniques = pd.factorize(values) # default: na_sentinel=-1
>>> codes
array([ 0, 1, 0, -1])
>>> uniques
array([1., 2.])
>>> codes, uniques = pd.factorize(values, na_sentinel=None)
>>> codes
array([0, 1, 0, 2])
>>> uniques
array([ 1., 2., nan])
"""
# Implementation notes: This method is responsible for 3 things
# 1.) coercing data to array-like (ndarray, Index, extension array)
# 2.) factorizing codes and uniques
# 3.) Maybe boxing the uniques in an Index
#
# Step 2 is dispatched to extension types (like Categorical). They are
# responsible only for factorization. All data coercion, sorting and boxing
# should happen here.
if isinstance(values, ABCRangeIndex):
return values.factorize(sort=sort)
values = _ensure_arraylike(values)
original = values
if not isinstance(values, ABCMultiIndex):
values = extract_array(values, extract_numpy=True)
# GH35667, if na_sentinel=None, we will not dropna NaNs from the uniques
# of values, assign na_sentinel=-1 to replace code value for NaN.
dropna = True
if na_sentinel is None:
na_sentinel = -1
dropna = False
if (
isinstance(values, (ABCDatetimeArray, ABCTimedeltaArray))
and values.freq is not None
):
codes, uniques = values.factorize(sort=sort)
if isinstance(original, ABCIndex):
uniques = original._shallow_copy(uniques, name=None)
elif isinstance(original, ABCSeries):
from pandas import Index
uniques = Index(uniques)
return codes, uniques
if not isinstance(values.dtype, np.dtype):
# i.e. ExtensionDtype
codes, uniques = values.factorize(na_sentinel=na_sentinel)
dtype = original.dtype
else:
dtype = values.dtype
values = _ensure_data(values)
na_value: Scalar
if original.dtype.kind in ["m", "M"]:
# Note: factorize_array will cast NaT bc it has a __int__
# method, but will not cast the more-correct dtype.type("nat")
na_value = iNaT
else:
na_value = None
codes, uniques = factorize_array(
values, na_sentinel=na_sentinel, size_hint=size_hint, na_value=na_value
)
if sort and len(uniques) > 0:
uniques, codes = safe_sort(
uniques, codes, na_sentinel=na_sentinel, assume_unique=True, verify=False
)
code_is_na = codes == na_sentinel
if not dropna and code_is_na.any():
# na_value is set based on the dtype of uniques, and compat set to False is
# because we do not want na_value to be 0 for integers
na_value = na_value_for_dtype(uniques.dtype, compat=False)
uniques = np.append(uniques, [na_value])
codes = np.where(code_is_na, len(uniques) - 1, codes)
uniques = _reconstruct_data(uniques, dtype, original)
# return original tenor
if isinstance(original, ABCIndex):
if original.dtype.kind in ["m", "M"] and isinstance(uniques, np.ndarray):
original._data = cast(
"Union[DatetimeArray, TimedeltaArray]", original._data
)
uniques = type(original._data)._simple_new(uniques, dtype=original.dtype)
uniques = original._shallow_copy(uniques, name=None)
elif isinstance(original, ABCSeries):
from pandas import Index
uniques = Index(uniques)
return codes, uniques
def value_counts(
values,
sort: bool = True,
ascending: bool = False,
normalize: bool = False,
bins=None,
dropna: bool = True,
) -> Series:
"""
Compute a histogram of the counts of non-null values.
Parameters
----------
values : ndarray (1-d)
sort : bool, default True
Sort by values
ascending : bool, default False
Sort in ascending order
    normalize : bool, default False
If True then compute a relative histogram
bins : integer, optional
Rather than count values, group them into half-open bins,
convenience for pd.cut, only works with numeric data
dropna : bool, default True
Don't include counts of NaN
Returns
-------
Series
"""
from pandas.core.series import Series
name = getattr(values, "name", None)
if bins is not None:
from pandas.core.reshape.tile import cut
values = Series(values)
try:
ii = cut(values, bins, include_lowest=True)
except TypeError as err:
raise TypeError("bins argument only works with numeric data.") from err
        # count, remove nulls (from the index), and cast the index to interval bins
result = ii.value_counts(dropna=dropna)
result = result[result.index.notna()]
result.index = result.index.astype("interval")
result = result.sort_index()
# if we are dropna and we have NO values
if dropna and (result._values == 0).all():
result = result.iloc[0:0]
# normalizing is by len of all (regardless of dropna)
counts = np.array([len(ii)])
else:
if is_extension_array_dtype(values):
# handle Categorical and sparse,
result = Series(values)._values.value_counts(dropna=dropna)
result.name = name
counts = result._values
else:
keys, counts = value_counts_arraylike(values, dropna)
result = Series(counts, index=keys, name=name)
if sort:
result = result.sort_values(ascending=ascending)
if normalize:
result = result / counts.sum()
return result
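# Illustrative example (added for clarity, not part of pandas): with the defaults,
# value_counts(np.array([1, 1, 2])) returns a Series with index [1, 2] and
# counts [2, 1], sorted by count in descending order.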
# Called once from SparseArray, otherwise could be private
def value_counts_arraylike(values, dropna: bool):
"""
Parameters
----------
values : arraylike
dropna : bool
Returns
-------
uniques : np.ndarray or ExtensionArray
counts : np.ndarray
"""
values = _ensure_arraylike(values)
original = values
values = _ensure_data(values)
# TODO: handle uint8
keys, counts = htable.value_count(values, dropna)
if needs_i8_conversion(original.dtype):
# datetime, timedelta, or period
if dropna:
msk = keys != iNaT
keys, counts = keys[msk], counts[msk]
res_keys = _reconstruct_data(keys, original.dtype, original)
return res_keys, counts
def duplicated(
values: ArrayLike, keep: Literal["first", "last", False] = "first"
) -> npt.NDArray[np.bool_]:
"""
Return boolean ndarray denoting duplicate values.
Parameters
----------
values : nd.array, ExtensionArray or Series
Array over which to check for duplicate values.
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last
occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : ndarray[bool]
"""
values = _ensure_data(values)
return htable.duplicated(values, keep=keep)
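# Illustrative example (added for clarity, not part of pandas):
# duplicated(np.array([1, 2, 1, 3])) -> array([False, False,  True, False])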
def mode(values, dropna: bool = True) -> Series:
"""
Returns the mode(s) of an array.
Parameters
----------
values : array-like
Array over which to check for duplicate values.
dropna : bool, default True
Don't consider counts of NaN/NaT.
Returns
-------
mode : Series
"""
from pandas import Series
from pandas.core.indexes.api import default_index
values = _ensure_arraylike(values)
original = values
# categorical is a fast-path
if is_categorical_dtype(values):
if isinstance(values, Series):
# TODO: should we be passing `name` below?
return Series(values._values.mode(dropna=dropna), name=values.name)
return values.mode(dropna=dropna)
if dropna and needs_i8_conversion(values.dtype):
mask = values.isnull()
values = values[~mask]
values = _ensure_data(values)
npresult = htable.mode(values, dropna=dropna)
try:
npresult = np.sort(npresult)
except TypeError as err:
warn(f"Unable to sort modes: {err}")
result = _reconstruct_data(npresult, original.dtype, original)
# Ensure index is type stable (should always use int index)
return Series(result, index=default_index(len(result)))
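# Illustrative example (added for clarity, not part of pandas):
# mode(np.array([1, 2, 2, 3])) -> Series([2]) with a default integer index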
def rank(
values: ArrayLike,
axis: int = 0,
method: str = "average",
na_option: str = "keep",
ascending: bool = True,
pct: bool = False,
) -> np.ndarray:
"""
Rank the values along a given axis.
Parameters
----------
values : array-like
Array whose values will be ranked. The number of dimensions in this
array must not exceed 2.
axis : int, default 0
Axis over which to perform rankings.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
The method by which tiebreaks are broken during the ranking.
na_option : {'keep', 'top'}, default 'keep'
The method by which NaNs are placed in the ranking.
- ``keep``: rank each NaN value with a NaN ranking
        - ``top``: replace each NaN with either +/- inf so that they
          are ranked at the top
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
        Whether or not to display the returned rankings in integer form
(e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
"""
is_datetimelike = needs_i8_conversion(values.dtype)
values = _get_values_for_rank(values)
if values.ndim == 1:
ranks = algos.rank_1d(
values,
is_datetimelike=is_datetimelike,
ties_method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
elif values.ndim == 2:
ranks = algos.rank_2d(
values,
axis=axis,
is_datetimelike=is_datetimelike,
ties_method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
else:
raise TypeError("Array with ndim > 2 are not supported.")
return ranks
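# Illustrative example (added for clarity, not part of pandas): with the default
# method='average' and ascending=True,
# rank(np.array([3., 1., 2.])) -> array([3., 1., 2.])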
def checked_add_with_arr(
arr: np.ndarray,
b,
arr_mask: npt.NDArray[np.bool_] | None = None,
b_mask: npt.NDArray[np.bool_] | None = None,
) -> np.ndarray:
"""
Perform array addition that checks for underflow and overflow.
Performs the addition of an int64 array and an int64 integer (or array)
but checks that they do not result in overflow first. For elements that
are indicated to be NaN, whether or not there is overflow for that element
is automatically ignored.
Parameters
----------
arr : array addend.
b : array or scalar addend.
arr_mask : np.ndarray[bool] or None, default None
array indicating which elements to exclude from checking
b_mask : np.ndarray[bool] or None, default None
array or scalar indicating which element(s) to exclude from checking
Returns
-------
sum : An array for elements x + b for each element x in arr if b is
a scalar or an array for elements x + y for each element pair
(x, y) in (arr, b).
Raises
------
OverflowError if any x + y exceeds the maximum or minimum int64 value.
"""
# For performance reasons, we broadcast 'b' to the new array 'b2'
# so that it has the same size as 'arr'.
b2 = np.broadcast_to(b, arr.shape)
if b_mask is not None:
# We do the same broadcasting for b_mask as well.
b2_mask = np.broadcast_to(b_mask, arr.shape)
else:
b2_mask = None
# For elements that are NaN, regardless of their value, we should
# ignore whether they overflow or not when doing the checked add.
if arr_mask is not None and b2_mask is not None:
not_nan = np.logical_not(arr_mask | b2_mask)
elif arr_mask is not None:
not_nan = np.logical_not(arr_mask)
elif b_mask is not None:
not_nan = np.logical_not(b2_mask)
else:
not_nan = np.empty(arr.shape, dtype=bool)
not_nan.fill(True)
# gh-14324: For each element in 'arr' and its corresponding element
# in 'b2', we check the sign of the element in 'b2'. If it is positive,
# we then check whether its sum with the element in 'arr' exceeds
    # np.iinfo(np.int64).max. If so, we have an overflow error. If
# it is negative, we then check whether its sum with the element in
# 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
# error as well.
i8max = lib.i8max
i8min = iNaT
mask1 = b2 > 0
mask2 = b2 < 0
if not mask1.any():
to_raise = ((i8min - b2 > arr) & not_nan).any()
elif not mask2.any():
to_raise = ((i8max - b2 < arr) & not_nan).any()
else:
to_raise = ((i8max - b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or (
(i8min - b2[mask2] > arr[mask2]) & not_nan[mask2]
).any()
if to_raise:
raise OverflowError("Overflow in int64 addition")
return arr + b
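# Illustrative example (added for clarity, not part of pandas):
# checked_add_with_arr(np.array([1, 2], dtype="int64"), 3) -> array([4, 5]);
# the same call raises OverflowError if any sum would leave the int64 range.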
def quantile(x, q, interpolation_method="fraction"):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
    `fraction` (default), `lower` and `higher`. Interpolation is done only
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
x : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
        - lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
x = np.asarray(x)
mask = isna(x)
x = x[~mask]
values = np.sort(x)
def _interpolate(a, b, fraction):
"""
Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def _get_score(at):
if len(values) == 0:
return np.nan
idx = at * (len(values) - 1)
if idx % 1 == 0:
score = values[int(idx)]
else:
if interpolation_method == "fraction":
score = _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1)
elif interpolation_method == "lower":
score = values[np.floor(idx)]
elif interpolation_method == "higher":
score = values[np.ceil(idx)]
else:
raise ValueError(
"interpolation_method can only be 'fraction' "
", 'lower' or 'higher'"
)
return score
if is_scalar(q):
return _get_score(q)
q = np.asarray(q, np.float64)
result = [_get_score(x) for x in q]
return np.array(result, dtype=np.float64)
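# Illustrative example (added for clarity, not part of pandas): for x = np.arange(10),
# quantile(x, 0.5) interpolates between values[4] and values[5] and returns 4.5.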
# --------------- #
# select n #
# --------------- #
class SelectN:
def __init__(self, obj, n: int, keep: str):
self.obj = obj
self.n = n
self.keep = keep
if self.keep not in ("first", "last", "all"):
raise ValueError('keep must be either "first", "last" or "all"')
def compute(self, method: str) -> DataFrame | Series:
raise NotImplementedError
@final
def nlargest(self):
return self.compute("nlargest")
@final
def nsmallest(self):
return self.compute("nsmallest")
@final
@staticmethod
def is_valid_dtype_n_method(dtype: DtypeObj) -> bool:
"""
Helper function to determine if dtype is valid for
nsmallest/nlargest methods
"""
return (
is_numeric_dtype(dtype) and not is_complex_dtype(dtype)
) or needs_i8_conversion(dtype)
class SelectNSeries(SelectN):
"""
Implement n largest/smallest for Series
Parameters
----------
obj : Series
n : int
keep : {'first', 'last'}, default 'first'
Returns
-------
nordered : Series
"""
def compute(self, method: str) -> Series:
from pandas.core.reshape.concat import concat
n = self.n
dtype = self.obj.dtype
if not self.is_valid_dtype_n_method(dtype):
raise TypeError(f"Cannot use method '{method}' with dtype {dtype}")
if n <= 0:
return self.obj[[]]
dropped = self.obj.dropna()
nan_index = self.obj.drop(dropped.index)
if is_extension_array_dtype(dropped.dtype):
# GH#41816 bc we have dropped NAs above, MaskedArrays can use the
# numpy logic.
from pandas.core.arrays import BaseMaskedArray
arr = dropped._values
if isinstance(arr, BaseMaskedArray):
ser = type(dropped)(arr._data, index=dropped.index, name=dropped.name)
result = type(self)(ser, n=self.n, keep=self.keep).compute(method)
return result.astype(arr.dtype)
# slow method
if n >= len(self.obj):
ascending = method == "nsmallest"
return self.obj.sort_values(ascending=ascending).head(n)
# fast method
new_dtype = dropped.dtype
arr = _ensure_data(dropped.values)
if method == "nlargest":
arr = -arr
if is_integer_dtype(new_dtype):
# GH 21426: ensure reverse ordering at boundaries
arr -= 1
elif is_bool_dtype(new_dtype):
# GH 26154: ensure False is smaller than True
arr = 1 - (-arr)
if self.keep == "last":
arr = arr[::-1]
nbase = n
findex = len(self.obj)
narr = len(arr)
n = min(n, narr)
# arr passed into kth_smallest must be contiguous. We copy
# here because kth_smallest will modify its input
kth_val = algos.kth_smallest(arr.copy(order="C"), n - 1)
(ns,) = np.nonzero(arr <= kth_val)
inds = ns[arr[ns].argsort(kind="mergesort")]
if self.keep != "all":
inds = inds[:n]
findex = nbase
if self.keep == "last":
# reverse indices
inds = narr - 1 - inds
return concat([dropped.iloc[inds], nan_index]).iloc[:findex]
class SelectNFrame(SelectN):
"""
Implement n largest/smallest for DataFrame
Parameters
----------
obj : DataFrame
n : int
keep : {'first', 'last'}, default 'first'
columns : list or str
Returns
-------
nordered : DataFrame
"""
def __init__(self, obj, n: int, keep: str, columns):
super().__init__(obj, n, keep)
if not is_list_like(columns) or isinstance(columns, tuple):
columns = [columns]
columns = list(columns)
self.columns = columns
def compute(self, method: str) -> DataFrame:
from pandas.core.api import Int64Index
n = self.n
frame = self.obj
columns = self.columns
for column in columns:
dtype = frame[column].dtype
if not self.is_valid_dtype_n_method(dtype):
raise TypeError(
f"Column {repr(column)} has dtype {dtype}, "
f"cannot use method {repr(method)} with this dtype"
)
def get_indexer(current_indexer, other_indexer):
"""
Helper function to concat `current_indexer` and `other_indexer`
depending on `method`
"""
if method == "nsmallest":
return current_indexer.append(other_indexer)
else:
return other_indexer.append(current_indexer)
# Below we save and reset the index in case index contains duplicates
original_index = frame.index
cur_frame = frame = frame.reset_index(drop=True)
cur_n = n
indexer = Int64Index([])
for i, column in enumerate(columns):
# For each column we apply method to cur_frame[column].
# If it's the last column or if we have the number of
# results desired we are done.
# Otherwise there are duplicates of the largest/smallest
# value and we need to look at the rest of the columns
# to determine which of the rows with the largest/smallest
# value in the column to keep.
series = cur_frame[column]
is_last_column = len(columns) - 1 == i
values = getattr(series, method)(
cur_n, keep=self.keep if is_last_column else "all"
)
if is_last_column or len(values) <= cur_n:
indexer = get_indexer(indexer, values.index)
break
# Now find all values which are equal to
# the (nsmallest: largest)/(nlargest: smallest)
# from our series.
border_value = values == values[values.index[-1]]
# Some of these values are among the top-n
# some aren't.
unsafe_values = values[border_value]
# These values are definitely among the top-n
safe_values = values[~border_value]
indexer = get_indexer(indexer, safe_values.index)
# Go on and separate the unsafe_values on the remaining
# columns.
cur_frame = cur_frame.loc[unsafe_values.index]
cur_n = n - len(indexer)
frame = frame.take(indexer)
# Restore the index on frame
frame.index = original_index.take(indexer)
# If there is only one column, the frame is already sorted.
if len(columns) == 1:
return frame
ascending = method == "nsmallest"
return frame.sort_values(columns, ascending=ascending, kind="mergesort")
# ---- #
# take #
# ---- #
def take(
arr,
indices: TakeIndexer,
axis: int = 0,
allow_fill: bool = False,
fill_value=None,
):
"""
Take elements from an array.
Parameters
----------
arr : array-like or scalar value
Non array-likes (sequences/scalars without a dtype) are coerced
to an ndarray.
indices : sequence of int or one-dimensional np.ndarray of int
Indices to be taken.
axis : int, default 0
The axis over which to select values.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to :func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
negative values raise a ``ValueError``.
fill_value : any, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type (``self.dtype.na_value``) is used.
For multi-dimensional `arr`, each *element* is filled with
`fill_value`.
Returns
-------
ndarray or ExtensionArray
Same type as the input.
Raises
------
IndexError
When `indices` is out of bounds for the array.
ValueError
When the indexer contains negative values other than ``-1``
and `allow_fill` is True.
Notes
-----
When `allow_fill` is False, `indices` may be whatever dimensionality
is accepted by NumPy for `arr`.
When `allow_fill` is True, `indices` should be 1-D.
See Also
--------
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> from pandas.api.extensions import take
With the default ``allow_fill=False``, negative numbers indicate
positional indices from the right.
>>> take(np.array([10, 20, 30]), [0, 0, -1])
array([10, 10, 30])
Setting ``allow_fill=True`` will place `fill_value` in those positions.
>>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
array([10., 10., nan])
>>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
... fill_value=-10)
array([ 10, 10, -10])
"""
if not | is_array_like(arr) | pandas.core.dtypes.common.is_array_like |
import glob
import os
import hashlib
import gc
import numpy as np
import pandas as pd
from PIL import Image
import skimage.color as skcolor
from skimage.transform import resize
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from collections import OrderedDict
from scipy.stats.mstats import gmean
from miso.data.download import download_images
from numpy.lib.format import open_memmap
import lxml.etree as ET
from pathlib import Path
class DataSource:
def __init__(self):
self.num_classes = 0
self.cls_labels = []
self.cls_counts = []
self.source_name = ""
self.data_df = None
self.train_df = None
self.test_df = None
self.random_idx = None
self.random_idx_init = None
self.images = None
self.cls = None
self.onehots = None
self.vectors = None
self.train_images = None
self.train_cls = None
self.train_onehots = None
self.train_vectors = None
self.test_images = None
self.test_cls = None
self.test_onehots = None
self.test_vectors = None
self.use_mmap = False
self.images_mmap_filename = None
self.mmap_directory = None
def get_class_weights(self):
count = np.bincount(self.data_df['cls']).astype(np.float64)
weights = gmean(count) / count
weights[weights < 0.1] = 0.1
weights[weights > 10] = 10
return weights
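# Illustrative weighting (example added for clarity, not in the original class):
# for class counts [100, 10, 1000] the geometric mean is 100, giving weights
# [1.0, 10.0, 0.1]; the clipping keeps every weight inside [0.1, 10].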
def get_short_filenames(self):
return pd.concat((self.train_df.short_filenames, self.test_df.short_filenames))
@staticmethod
def preprocess_image(im, prepro_method='rescale', prepro_params=(255, 0, 1)):
# TODO divide according to image depth (8 or 16 bits etc)
if prepro_method == 'rescale':
im = np.divide(im, prepro_params[0])
im = np.subtract(im, prepro_params[1])
im = np.multiply(im, prepro_params[2])
return im
@staticmethod
def load_image(filename, img_size, img_type):
if img_type == 'rgb':
im = Image.open(filename)
            im = np.asarray(im, dtype=float)
if im.ndim == 2:
im = np.expand_dims(im, -1)
im = np.repeat(im, repeats=3, axis=-1)
elif img_type == 'greyscale':
im = Image.open(filename).convert('L')
            im = np.asarray(im, dtype=float)
im = np.expand_dims(im, -1)
elif img_type == 'greyscale3':
im = Image.open(filename).convert('L')
            im = np.asarray(im, dtype=float)
im = np.expand_dims(im, -1)
im = np.repeat(im, repeats=3, axis=-1)
elif img_type == 'greyscaled':
ims = DataSource.read_tiff(filename, [0, 2])
g = skcolor.rgb2grey(ims[0]) * 255 # Scales to 0 - 1 for some reason
if ims[1].ndim == 3:
d = skcolor.rgb2grey(ims[1])
else:
d = ims[1].astype(float)
im = np.concatenate((g[:, :, np.newaxis], d[:, :, np.newaxis]), 2)
elif img_type == 'greyscaledm':
ims = DataSource.read_tiff(filename, [0, 2, 4])
g = skcolor.rgb2grey(ims[0]) * 255 # Scales to 0 - 1 for some reason
if ims[1].ndim == 3:
d = skcolor.rgb2grey(ims[1])
else:
d = ims[1].astype(float)
if ims[2].ndim == 3:
m = skcolor.rgb2grey(ims[2])
else:
m = ims[2].astype(float)
im = np.concatenate((g[:, :, np.newaxis], d[:, :, np.newaxis], m[:, :, np.newaxis]), 2)
elif img_type == 'rgbd':
ims = DataSource.read_tiff(filename, [0, 2])
rgb = ims[0]
# print("rgbd {}".format(filename))
# print(rgb.shape)
# print(ims[1].shape)
if rgb.ndim == 2:
rgb = np.expand_dims(rgb, -1)
rgb = np.repeat(rgb, repeats=3, axis=-1)
d = skcolor.rgb2grey(ims[1])
im = np.concatenate((rgb, d[:,:, np.newaxis]), 2)
# print(im.shape)
im = DataSource.make_image_square(im)
# print(im.shape)
im = resize(im, (img_size[0], img_size[1]), order=1)
# print(im.shape)
return im
@staticmethod
def read_tiff(filename, indices):
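        # Read the frames at the requested indices from a multi-frame TIFF.
        # The first loop only seeks through every frame; in the second loop,
        # frames whose array shape cannot be inferred are forced to mode 'L'
        # before being converted to numpy arrays.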
img = Image.open(filename)
images = []
# print("num frames: {}".format(img.n_frames))
for i in range(img.n_frames):
img.seek(i)
# print("- frame {} shape {}".format(i, np.array(img).shape))
# print(img)
for i, idx in enumerate(indices):
img.seek(idx)
if len(np.array(img).shape) == 0:
#print("Bad")
img.mode = 'L'
images.append(np.array(img))
return images
def load_dataset(self,
img_size,
img_type='rgb',
dtype=np.float16):
# Image filenames
filenames = self.data_df['filenames']
image_count = len(filenames)
# Color mode:
# - rgb: normal RGB
# - greyscale: convert to greyscale (single channel) if necessary
# - greyscale3: convert to greyscale then repeat across 3 channels
# (for inputting greyscale images into networks that take three channels)
if img_type == 'rgb' or img_type == 'greyscale3':
channels = 3
elif img_type == 'greyscale':
channels = 1
elif img_type == 'rgbd':
channels = 4
elif img_type == 'greyscaled':
channels = 2
elif img_type == 'greyscaledm':
channels = 3
else:
raise ValueError("Unknown image type")
# float16 is used be default to save memory
if dtype is np.float16:
byte_count = 2
elif dtype is np.float32:
byte_count = 4
elif dtype is np.float64:
byte_count = 8
else:
byte_count = 'X'
# Sometimes the data set is too big to be saved into memory.
# In this case, we can memory map the numpy array onto disk.
# Make sure to delete the files afterwards
if self.use_mmap:
# Unique hash id is used for the filename
hashstr = hashlib.sha256(pd.util.hash_pandas_object(self.data_df, index=True).values).hexdigest()[0:16]
unique_id = "{}_{}_{}_{}_{}.npy".format(hashstr, img_size[0], img_size[1], img_type, byte_count)
self.images_mmap_filename = os.path.join(self.mmap_directory, unique_id)
print(self.images_mmap_filename)
# If the memmap file already exists, simply load it
if os.path.exists(self.images_mmap_filename):
self.images = open_memmap(self.images_mmap_filename, dtype=dtype, mode='r+', shape=(image_count, img_size[0], img_size[1], channels))
return
self.images = open_memmap(self.images_mmap_filename, dtype=dtype, mode='w+', shape=(image_count, img_size[0], img_size[1], channels))
else:
self.images = np.zeros(shape=(image_count, img_size[0], img_size[1], channels), dtype=dtype)
# Load each image
idx = 0
print("@ Loading images... ")
for filename in filenames:
try:
im = self.load_image(filename, img_size, img_type)
im = self.preprocess_image(im)
# Convert to format
im = im.astype(dtype)
if im.ndim == 2:
im = im[:, :, np.newaxis]
self.images[idx] = im
except:
print("@ Error loading image {}".format(filename))
idx += 1
if idx % 100 == 0:
print("\r@ Loading images {}%".format((int)(idx / len(filenames) * 100)))
if self.use_mmap:
self.images.flush()
def delete_memmap_files(self, del_split=True, del_source=True):
if self.use_mmap is False:
return
if self.mmap_directory is None:
return
if del_split:
train_filename = os.path.join(self.mmap_directory, "train.npy")
test_filename = os.path.join(self.mmap_directory, "test.npy")
if os.path.exists(train_filename):
if self.train_images is not None:
self.train_images._mmap.close()
del self.train_images
gc.collect()
os.remove(train_filename)
if os.path.exists(test_filename):
if self.test_images is not None:
self.test_images._mmap.close()
del self.test_images
gc.collect()
os.remove(test_filename)
if del_source:
if os.path.exists(self.images_mmap_filename):
if self.images is not None:
self.images._mmap.close()
del self.images
gc.collect()
os.remove(self.images_mmap_filename)
def split(self, split=0.20, seed=None):
dtype=self.images.dtype
if split > 0.0:
# Split with stratify
train_idx, test_idx = train_test_split(range(len(self.images)), test_size=split, random_state=seed, shuffle=True, stratify=self.cls)
self.random_idx = train_idx + test_idx
else:
train_idx = np.random.permutation(range(len(self.images)))
test_idx = []
self.random_idx = train_idx
print("@ Split mapping...")
img_size = self.images.shape[1:]
# Memmap splitting
if self.use_mmap:
print("@ Split mapping - deleting old memmap files")
train_filename = os.path.join(self.mmap_directory, "train.npy")
test_filename = os.path.join(self.mmap_directory, "test.npy")
self.delete_memmap_files(del_split=True, del_source=False)
print("@ Split mapping - creating new memmap files")
self.train_images = open_memmap(train_filename, dtype=dtype, mode='w+', shape=(len(train_idx), ) + img_size)
self.test_images = open_memmap(test_filename, dtype=dtype, mode='w+', shape=(len(test_idx), ) + img_size)
print("@ Split mapping - copying train images")
for i in range(len(train_idx)):
self.train_images[i] = self.images[train_idx[i]]
print("@ Split mapping - copying test images")
for i in range(len(test_idx)):
self.test_images[i] = self.images[test_idx[i]]
# Normal splitting
else:
self.train_images = self.images[train_idx]
self.test_images = self.images[test_idx]
# Remainder
self.train_cls = self.cls[train_idx]
self.test_cls = self.cls[test_idx]
self.train_onehots = self.onehots[train_idx]
self.test_onehots = self.onehots[test_idx]
self.train_df = self.data_df.iloc[train_idx,:]
self.test_df = self.data_df.iloc[test_idx,:]
print("@ Split mapping - done")
def set_source(self,
source,
min_count,
max_count=None,
min_count_to_others=False,
extension=None,
mapping: dict = None,
map_others=True,
must_contain: str = None,
ignore_list: list = None,
mmap_directory = None):
"""
Loads images from from a directory where each sub-directories contains images for a single class, e.g.:
directory
|-- class 1 directory
|-- class 2 directory
|-- class 3 directory
`-- ...
The cls for the class are taken as the sub-directory names
:param source: Path to the directory containing sub-directories of classes
:param extension: Extension of the images in directory (e.g. "jpg"). If `None`, it looks for jpg, png and tiff
:param min_count: Minimum number of images in a sub-directory for that class to be included
:param max_count: Maximum number of images in a sub-directory to be used (If `None` all images are used)
:param mapping: Dictionary mapping classes to final classes. E.g. {"cat": "animal", "dog",:"animal"} maps "cat" and "dog" both to animal.
If mapping is `None`, the original classes are used. If mapping is not `None` then only the classes in the map are used.
:param map_others: If `True` then classes not in the mapping will be mapped to an "Others" class
:param must_contain: The image filenames must contain this string
:param ignore_list: List of classes that will be ignored, and their images not loaded
:return:
"""
self.mmap_directory = mmap_directory
if source.startswith("http"):
print("@ Downloading dataset " + source + "...")
dir_for_download = os.path.join(os.getcwd(), 'datasets')
os.makedirs(dir_for_download, exist_ok=True)
dir_path = download_images(source, dir_for_download)
self.source_name = dir_path
if mmap_directory is None:
self.mmap_directory = dir_for_download
else:
self.source_name = source
if mmap_directory is None:
self.mmap_directory = str(Path(self.source_name).parent)
if self.source_name.endswith("xml"):
print("@ Parsing project file " + self.source_name)
filenames = self.parse_xml(self.source_name)
if mmap_directory is None:
self.mmap_directory = str(Path(self.source_name).parent)
else:
print("@ Parsing image directory...")
# Get alphabetically sorted list of class directories
class_dirs = sorted(glob.glob(os.path.join(self.source_name, "*")))
# Load images from each class and place into a dictionary
filenames = OrderedDict()
for class_dir in class_dirs:
if os.path.isdir(class_dir) is False:
continue
# Get the class name
class_name = os.path.basename(class_dir)
# Skip directories starting with ~
if class_name.startswith('~'):
continue
# Get the files
files = []
if extension is None:
for ext in ["*.jpg", "*.jpeg", "*.png", "*.tif", "*.tiff", "*.bmp"]:
if must_contain is not None:
files.extend(sorted(glob.glob(os.path.join(class_dir, "*" + must_contain + ext))))
else:
files.extend(sorted(glob.glob(os.path.join(class_dir, ext))))
else:
if must_contain is not None:
files = sorted(glob.glob(os.path.join(class_dir, "*" + must_contain + "*." + extension)))
else:
files = sorted(glob.glob(os.path.join(class_dir, "*." + extension)))
# Add to dictionary
filenames[class_name] = files
# Map the classes into overall classes if mapping is enabled
if mapping is not None:
print("@ Applying mapping...")
mapped_filenames = OrderedDict()
# Sort the map
sorted_map = OrderedDict()
for key in sorted(mapping.keys()):
sorted_map[key] = mapping[key]
# Keep a record of which classes have already been mapped
already_mapped_cls = list()
# Iterate through the map
for cls, sub_cls_list in sorted_map.items():
print(" - {} <= ".format(cls), end='')
# Create list entry for this class
mapped_filenames[cls] = list()
# Add all the component classes
for sub_cls in sub_cls_list:
if sub_cls in filenames and sub_cls not in already_mapped_cls:
print("{} ".format(sub_cls), end='')
mapped_filenames[cls].extend(filenames[sub_cls])
already_mapped_cls.append(sub_cls)
# Number of classes
print("({} images)".format(len(mapped_filenames[cls])))
# Add others
if map_others is True:
mapped_filenames['other'] = list()
# Iterate though the filenames dictionary and add any classes not already mapped to others
for cls in filenames.keys():
if cls not in already_mapped_cls:
mapped_filenames['other'].extend(filenames[cls])
# Save the mapped filenames as the current filenames dictionary
filenames = mapped_filenames
# Remove any classes that do not have enough images and put in 'other'
print("@ Moving classes with not enough images to 'other'...")
not_enough_list = list()
enough_dict = OrderedDict()
for cls, cls_filenames in filenames.items():
print(" - ({:5d} images) {}".format(len(cls_filenames), cls), end='')
if len(cls_filenames) < min_count:
not_enough_list.extend(cls_filenames)
print(" => other".format(len(cls_filenames)))
else:
enough_dict[cls] = cls_filenames
print()
# Put the others in the list if there is also enough for them
if min_count_to_others is True:
if len(not_enough_list) > min_count:
if 'other' in enough_dict:
enough_dict['other'].extend(not_enough_list)
else:
enough_dict['other'] = not_enough_list
print(" - {} images in other".format(len(not_enough_list)))
else:
print(" - other not included ({} images)".format(len(not_enough_list)))
filenames = enough_dict
# print(enough_dict.keys())
# Finally, create a list for each (make sure 'other' is last class
cls_index = []
cls_labels = []
cls_counts = []
long_filenames = []
short_filenames = []
self.cls_labels = []
if 'other' in filenames:
other_index = len(filenames) - 1
else:
other_index = len(filenames)
index = 0
for cls, cls_filenames in filenames.items():
for filename in cls_filenames:
if cls != 'other':
cls_index.append(index)
else:
cls_index.append(other_index)
cls_labels.append(cls)
long_filenames.append(filename)
short_filenames.append(os.path.basename(filename))
if cls != 'other':
self.cls_labels.append(cls)
index += 1
if 'other' in filenames:
self.cls_labels.append('other')
df = {"filenames": long_filenames, "short_filenames": short_filenames, "cls": cls_labels, "cls": cls_index}
self.data_df = pd.DataFrame(df)
self.num_classes = len(self.cls_labels)
print("@ {} images in {} classes".format(len(cls_index), self.num_classes))
for idx in range(self.num_classes):
cls_counts.append(len(self.data_df[self.data_df['cls'] == idx]))
# print(cls_counts)
self.cls_counts = cls_counts
self.cls = self.data_df['cls'].to_numpy()
self.onehots = to_categorical(self.data_df['cls'])
# print(self.data_df)
@staticmethod
def make_image_square(im):
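        # Pad the shorter dimension symmetrically until the image is square.
        # Each channel is padded with the median of its border pixel values
        # rather than with zeros.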
if im.shape[0] == im.shape[1]:
return im
height = im.shape[0]
width = im.shape[1]
half = max(height, width)
height_pad_start = int(abs(np.floor((height - half) / 2)))
height_pad_end = int(abs(np.ceil((height - half) / 2)))
width_pad_start = int(abs(np.floor((width - half) / 2)))
width_pad_end = int(abs(np.ceil((width - half) / 2)))
consts = [np.median(np.concatenate((im[0, :, i], im[-1, :, i], im[:, 0, i], im[:, -1, i]))) for i in
range(im.shape[2])]
im = np.stack(
[np.pad(im[:, :, c],
((height_pad_start, height_pad_end), (width_pad_start, width_pad_end)),
mode='constant',
constant_values=consts[c])
for c in range(im.shape[2])], axis=2)
return im
@staticmethod
def parse_xml(xml_filename):
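        # Parse a project XML file: each image is assigned the classification
        # code with the highest value, and taxon codes flagged with isClass
        # become the class labels.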
project = ET.parse(xml_filename).getroot()
filenames = []
cls = []
cls_labels = []
filenames_dict = OrderedDict()
images_xml = project.find('images')
for i, image_xml in enumerate(images_xml.iter('image')):
relfile = image_xml.find('source').find('filename').text
if os.path.isabs(relfile):
absfile = relfile
else:
absfile = os.path.abspath(os.path.join(os.path.dirname(xml_filename), relfile))
if os.path.isfile(absfile) is False:
continue
filenames.append(absfile)
cls_names = []
cls_scores = []
cls_base = image_xml.find('classifications')
for cls_val in cls_base.iter('classification'):
cls_names.append(cls_val.find('code').text)
cls_scores.append(float(cls_val.find('value').text))
cls.append(cls_names[np.argmax(cls_scores)])
for taxon_xml in project.find('taxons').iter('taxon'):
if taxon_xml.find('isClass').text == 'true':
cls_labels.append(taxon_xml.find('code').text)
cls_labels = sorted(cls_labels)
        df = pd.DataFrame({'filenames': filenames, 'cls': cls})
# -*- coding: utf-8 -*-
'''
Module with preprocessing methods to prepare a data from utterances
to an excel table with features for classification and labels
'''
__author__ = "<NAME>"
'''import os
import glob
import re
import numpy as np
import nltk
from collections import Counter
from nltk.corpus import stopwords'''
import pandas as pd
import numpy as np
#
def get_topic_distribution_utterance(utterance,ldamodel,dictionary):
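    # Convert the tokenised utterance to a bag-of-words and return the full
    # topic probability vector (minimum_probability=0 keeps every topic).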
bow = dictionary.doc2bow(utterance)
T = ldamodel.get_document_topics(bow,minimum_probability=0,minimum_phi_value=0.001)
return [x[1] for x in T]
def get_data_next_utterances(dict_utterances,groups_to_filter,lda_model,dictionary):
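    # Features are the concatenated topic distributions of the previous,
    # current and next utterance within each group; at the group boundaries
    # the current utterance is reused as its own neighbour.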
#X = get_topic_distribution_by_list(doc_clean)
phases = []
ut_order = []
n_words = []
X = []
utterances = []
for g in dict_utterances:
if g in groups_to_filter:
n_utterances = len(dict_utterances[g]['clean_utterances'])
for i,phrase in enumerate(dict_utterances[g]['clean_utterances']):
if i == 0:
before_phrase = phrase
else:
before_phrase = dict_utterances[g]['clean_utterances'][i-1]
if i+1 == n_utterances:
after_phrase = phrase
else:
after_phrase = dict_utterances[g]['clean_utterances'][i+1]
T_before = get_topic_distribution_utterance(before_phrase,lda_model,dictionary)
T_phrase = get_topic_distribution_utterance(phrase,lda_model,dictionary)
T_after = get_topic_distribution_utterance(after_phrase,lda_model,dictionary)
X.append(T_before+T_phrase+T_after)
phases.append(dict_utterances[g]['phases'][i])
ut_order.append(dict_utterances[g]['ut_order'][i])
n_words.append(len(dict_utterances[g]['clean_utterances'][i]))
utterance = dict_utterances[g]['clean_utterances'][i]
utterances.append(" ".join(utterance))
return X,phases,utterances,n_words,ut_order
def get_data(dict_utterances,groups_to_filter,lda_model,dictionary):
phases = []
ut_order = []
n_words = []
X = []
utterances = []
for g in dict_utterances:
if g in groups_to_filter:
for i,v in enumerate(dict_utterances[g]['phases']):
phases.append(v)
ut_order.append(dict_utterances[g]['ut_order'][i])
n_words.append(len(dict_utterances[g]['clean_utterances'][i]))
utterance = dict_utterances[g]['clean_utterances'][i]
utterances.append(" ".join(utterance))
X.append(get_topic_distribution_utterance(utterance,lda_model,dictionary))
return X,phases,utterances,n_words,ut_order
def get_data_window(dict_utterances,groups_to_filter,lda_model,dictionary,size_window):
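    # Same as get_data, except that the returned utterance text accumulates
    # a window of preceding utterances; the topic features in X are still
    # computed from the current utterance only.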
phases = []
ut_order = []
n_words = []
X = []
utterances = []
for g in dict_utterances:
if g in groups_to_filter:
utterances_window = []
for i,v in enumerate(dict_utterances[g]['phases']):
phases.append(v)
ut_order.append(dict_utterances[g]['ut_order'][i])
n_words.append(len(dict_utterances[g]['clean_utterances'][i]))
utterance = dict_utterances[g]['clean_utterances'][i]
if len(utterances_window)>=size_window:
utterances_window.pop()
utterances_window = utterance + utterances_window
utterances.append(" ".join(utterances_window))
X.append(get_topic_distribution_utterance(utterance,lda_model,dictionary))
return X,phases,utterances,n_words,ut_order
def build_simple_df(dict_utterances,groups_to_filter,lda_model,dictionary):
labels = list(map(lambda x:'Topic {}'.format(x+1),range(lda_model.num_topics)))
X,phases,utterance,n_words,ut_order = get_data(
dict_utterances,groups_to_filter,lda_model,dictionary)
df = pd.DataFrame(X,columns=labels)
df['phase'] = phases
df['phase_1'] = list(map(lambda x: 1 if x==1 else 0,phases))
df['phase_2'] = list(map(lambda x: 1 if x==2 else 0,phases))
df['phase_3'] = list(map(lambda x: 1 if x==3 else 0,phases))
df['phase_4'] = list(map(lambda x: 1 if x==4 else 0,phases))
df['phase_5'] = list(map(lambda x: 1 if x==5 else 0,phases))
df['utterance'] = utterance
df['length utterance'] = normalize_values(n_words)
df['utterance_relative_time'] = ut_order
return df
def build_simplest_df(dict_utterances,groups_to_filter,lda_model,dictionary):
labels = list(map(lambda x:'Topic {}'.format(x+1),range(lda_model.num_topics)))
X,phases,utterance,n_words,ut_order = get_data(
dict_utterances,groups_to_filter,lda_model,dictionary)
    df = pd.DataFrame(X,columns=labels)
import pandas as pd
def get_rolling_mean(df: pd.DataFrame, column_name: str, window: int):
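    # Thin wrapper around pandas' rolling mean; the first `window - 1` rows
    # of the result are NaN because the window is not yet full.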
return df[column_name].rolling(window=window).mean()
def get_move_value(df: pd.DataFrame, column_name: str, down: int, up: int, equal: int):
    df2 = pd.DataFrame(index=df.index)
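    # The original body is truncated here. A minimal sketch of what the
    # signature suggests (an assumption, not the original implementation):
    # encode each value's change versus its predecessor as `down`, `up` or
    # `equal`.
    diff = df[column_name].diff()
    df2[column_name] = diff.apply(
        lambda d: up if d > 0 else (down if d < 0 else equal))
    return df2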
# -*- coding: utf-8 -*-
#
# License: This module is released under the terms of the LICENSE file
# contained within this applications INSTALL directory
"""
Defines the ForecastModel class, which encapsulates model functions used in
forecast model fitting, as well as their number of parameters and
initialisation parameters.
"""
# -- Coding Conventions
# http://www.python.org/dev/peps/pep-0008/ - Use the Python style guide
# http://sphinx.pocoo.org/rest.html - Use Restructured Text for
# docstrings
# -- Public Imports
import itertools
import logging
import numpy as np
import pandas as pd
from pandas.tseries.holiday import Holiday, AbstractHolidayCalendar, \
MO, nearest_workday, next_monday, next_monday_or_tuesday, \
GoodFriday, EasterMonday, USFederalHolidayCalendar
from pandas.tseries.offsets import DateOffset
from datetime import datetime
# -- Private Imports
from anticipy import model_utils
# -- Globals
logger = logging.getLogger(__name__)
# Fourier model configuration
_dict_fourier_config = { # Default configuration for fourier-based models
'period': 365.25, # days in year
'harmonics': 10 # TODO: evaluate different harmonics values
}
_FOURIER_PERIOD = 365.25
_FOURIER_HARMONICS = 10 # TODO: evaluate different harmonics values
_FOURIER_K = (2.0 * np.pi / _FOURIER_PERIOD)
_FOURIER_I = np.arange(1, _FOURIER_HARMONICS + 1)
_FOURIER_DATE_ORIGIN = datetime(1970, 1, 1)
# -- Functions
# ---- Utility functions
def logger_info(msg, data):
# Convenience function for easier log typing
logger.info(msg + '\n%s', data)
def _get_f_init_params_default(n_params):
# Generate a default function for initialising model parameters: use
# random values between 0 and 1
return lambda a_x=None, a_y=None, a_date=None, is_mult=False:\
np.random.uniform(low=0.001, high=1, size=n_params)
def _get_f_bounds_default(n_params):
# Generate a default function for model parameter boundaries. Default
# boundaries are (-inf, inf)
return lambda a_x=None, a_y=None, a_date=None: (
n_params * [-np.inf], n_params * [np.inf])
def _get_f_add_2_f_models(forecast_model1, forecast_model2):
# Add model functions of 2 ForecastModels
def f_add_2_f_models(a_x, a_date, params, is_mult=False, **kwargs):
params1 = params[0:forecast_model1.n_params]
params2 = params[forecast_model1.n_params:]
return (
forecast_model1.f_model(
a_x,
a_date,
params1,
is_mult=False,
**kwargs) +
forecast_model2.f_model(
a_x,
a_date,
params2,
is_mult=False,
**kwargs))
return f_add_2_f_models
def _get_f_mult_2_f_models(forecast_model1, forecast_model2):
# Multiply model functions of 2 ForecastModels
def f_mult_2_f_models(a_x, a_date, params, is_mult=False, **kwargs):
params1 = params[0:forecast_model1.n_params]
params2 = params[forecast_model1.n_params:]
return (
forecast_model1.f_model(
a_x,
a_date,
params1,
is_mult=True,
**kwargs) *
forecast_model2.f_model(
a_x,
a_date,
params2,
is_mult=True,
**kwargs))
return f_mult_2_f_models
def _get_f_add_2_f_init_params(f_init_params1, f_init_params2):
# Compose parameter initialisation functions of 2 ForecastModels, using
# addition
def f_add_2_f_init_params(a_x, a_y, a_date=None, is_mult=False):
return np.concatenate(
[f_init_params1(a_x, a_y, a_date, is_mult=False),
f_init_params2(a_x, a_y, a_date, is_mult=False)])
return f_add_2_f_init_params
def _get_f_mult_2_f_init_params(f_init_params1, f_init_params2):
# Compose parameter initialisation functions of 2 ForecastModels, using
# multiplication
def f_mult_2_f_init_params(a_x, a_y, a_date=None, is_mult=False):
return np.concatenate(
[f_init_params1(a_x, a_y, a_date, is_mult=True),
f_init_params2(a_x, a_y, a_date, is_mult=True)])
return f_mult_2_f_init_params
def _get_f_concat_2_bounds(forecast_model1, forecast_model2):
# Compose parameter boundary functions of 2 ForecastModels
def f_add_2_f_bounds(a_x, a_y, a_date=None):
return np.concatenate(
(forecast_model1.f_bounds(
a_x, a_y, a_date), forecast_model2.f_bounds(
a_x, a_y, a_date)), axis=1)
return f_add_2_f_bounds
def _f_validate_input_default(a_x, a_y, a_date):
# Default input validation function for a ForecastModel. Always returns
# True
return True
def _as_list(l):
return l if isinstance(l, (list,)) else [l]
# Functions used to initialize cache variables in a ForecastModel
def _f_init_cache_a_month(a_x, a_date):
return a_date.month - 1
def _f_init_cache_a_weekday(a_x, a_date):
return a_date.weekday
def _f_init_cache_a_t_fourier(a_x, a_date):
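    # Precompute the yearly Fourier design matrix: sin and cos terms for each
    # harmonic of the 365.25-day period, evaluated at every input date. The
    # result has shape (2 * harmonics, n_samples).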
# convert to days since epoch
t = (a_date - _FOURIER_DATE_ORIGIN).days.values
i = np.arange(1, _FOURIER_HARMONICS + 1)
a_tmp = _FOURIER_K * i.reshape(i.size, 1) * t
y = np.concatenate([np.sin(a_tmp), np.cos(a_tmp)])
return y
# Dictionary to store functions used to initialize cache variables
# in a ForecastModel
# This is shared across all ForecastModel instances
_dict_f_cache = dict(
a_month=_f_init_cache_a_month,
a_weekday=_f_init_cache_a_weekday,
a_t_fourier=_f_init_cache_a_t_fourier
)
# -- Classes
class ForecastModel:
"""
Class that encapsulates model functions for use in forecasting, as well as
their number of parameters and functions for parameter initialisation.
A ForecastModel instance is initialized with a model name, a number of
model parameters, and a model function. Class instances are
callable - when called as a function, their internal model function is
used. The main purpose of ForecastModel objects is to generate predicted
values for a time series, given a set of parameters. These values can be
compared to the original series to get an array of residuals::
y_predicted = model(a_x, a_date, params)
residuals = (a_y - y_predicted)
This is used in an optimization loop to obtain the optimal parameters for
the model.
The reason for using this class instead of raw model functions is that
ForecastModel supports function composition::
model_sum = fcast_model1 + fcast_model2
# fcast_model 1 and 2 are ForecastModel instances, and so is model_sum
a_y1 = fcast_model1(
a_x, a_date, params1) + fcast_model2(a_x, a_date, params2)
params = np.concatenate([params1, params2])
a_y2 = model_sum(a_x, a_date, params)
a_y1 == a_y2 # True
Forecast models can be added or multiplied, with the + and * operators.
Multiple levels of composition are supported::
model = (model1 + model2) * model3
Model composition is used to aggregate trend and seasonality model
components, among other uses.
Model functions have the following signature:
- f(a_x, a_date, params, is_mult)
- a_x : array of floats
- a_date: array of dates, same length as a_x. Only required for date-aware
models, e.g. for weekly seasonality.
- params: array of floats - model parameters - the optimisation loop
updates this to fit our actual values. Each
model function uses a fixed number of parameters.
- is_mult: boolean. True if the model is being used with multiplicative
composition. Required because
some model functions (e.g. steps) have different behaviour
when added to other models than when multiplying them.
- returns an array of floats - with same length as a_x - output of the
model defined by this object's modelling function f_model and the
current set of parameters
By default, model parameters are initialized as random values between
0 and 1. It is possible to define a parameter initialization function
that picks initial values based on the original time series.
This is passed during ForecastModel creation with the argument
f_init_params. Parameter initialization is compatible with model
composition: the initialization function of each component will be used
for that component's parameters.
Parameter initialisation functions have the following signature:
- f_init_params(a_x, a_y, is_mult)
- a_x: array of floats - same length as time series
- a_y: array of floats - time series values
- returns an array of floats - with length equal to this object's n_params
value
By default, model parameters have no boundaries. However, it is possible
to define a boundary function for a model, that sets boundaries for each
model parameter, based on the input time series. This is passed during
ForecastModel creation with the argument f_bounds.
Boundary definition is compatible with model composition:
the boundary function of each component will be used for that component's
parameters.
Boundary functions have the following signature:
- f_bounds(a_x, a_y, a_date)
- a_x: array of floats - same length as time series
- a_y: array of floats - time series values
- a_date: array of dates, same length as a_x. Only required for date-aware
models, e.g. for weekly seasonality.
- returns a tuple of 2 arrays of floats. The first defines minimum
parameter boundaries, and the second the maximum parameter boundaries.
As an option, we can assign a list of input validation functions to a
model. These functions analyse the inputs that will be used for fitting a
model, returning True if valid, and False otherwise. The forecast logic
will skip a model from fitting if any of the validation functions for that
model returns False.
Input validation functions have the following signature:
- f_validate_input(a_x, a_y, a_date)
- See the description of model functions above for more details on these
parameters.
Our input time series should meet the following constraints:
- Minimum required samples depends on number of model parameters
- May include null values
- May include multiple values per sample
- A date array is only required if the model is date-aware
Class Usage::
model_x = ForecastModel(name, n_params, f_model, f_init_params,
l_f_validate_input)
# Get model name
model_name = model_x.name
# Get number of model parameters
n_params = model_x.n_params
# Get parameter initialisation function
f_init_params = model_x.f_init_params
# Get initial parameters
init_params = f_init_params(t_values, y_values)
# Get model fitting function
f_model = model_x.f_model
# Get model output
y = f_model(a_x, a_date, parameters)
The following pre-generated models are available. They are available as attributes from this module: # noqa
.. csv-table:: Forecast models
:header: "name", "params", "formula","notes"
:widths: 20, 10, 20, 40
"model_null",0, "y=0", "Does nothing.
Used to disable components (e.g. seasonality)"
"model_constant",1, "y=A", "Constant model"
"model_linear",2, "y=Ax + B", "Linear model"
"model_linear_nondec",2, "y=Ax + B", "Non decreasing linear model.
With boundaries to ensure model slope >=0"
"model_quasilinear",3, "y=A*(x^B) + C", "Quasilinear model"
"model_exp",2, "y=A * B^x", "Exponential model"
"model_decay",4, "Y = A * e^(B*(x-C)) + D", "Exponential decay model"
"model_step",2, "y=0 if x<A, y=B if x>=A", "Step model"
"model_two_steps",4, "see model_step", "2 step models.
Parameter initialization is aware of # of steps."
"model_sigmoid_step",3, "y = A + (B - A) / (1 + np.exp(- D * (x - C)))
", "Sigmoid step model"
"model_sigmoid",3, "y = A + (B - A) / (1 + np.exp(- D * (x - C)))", "
Sigmoid model"
"model_season_wday",7, "see desc.", "Weekday seasonality model.
Assigns a constant value to each weekday"
"model_season_wday",6, "see desc.", "6-param weekday seasonality model.
As above, with one constant set to 0."
"model_season_wday_2",2, "see desc.", "Weekend seasonality model.
Assigns a constant to each of weekday/weekend"
"model_season_month",12, "see desc.", "Month seasonality model.
Assigns a constant value to each month"
"model_season_fourier_yearly",10, "see desc", "Fourier
yearly seasonality model"
"""
def __init__(
self,
name,
n_params,
f_model,
f_init_params=None,
f_bounds=None,
l_f_validate_input=None,
l_cache_vars=None,
dict_f_cache=None,
):
"""
Create ForecastModel
:param name: Model name
:type name: basestring
:param n_params: Number of parameters for model function
:type n_params: int
:param f_model: Model function
:type f_model: function
:param f_init_params: Parameter initialisation function
:type f_init_params: function
:param f_bounds: Boundary function
:type f_bounds: function
"""
self.name = name
self.n_params = n_params
self.f_model = f_model
if f_init_params is not None:
self.f_init_params = f_init_params
else:
# Default initial parameters: random values between 0 and 1
self.f_init_params = _get_f_init_params_default(n_params)
if f_bounds is not None:
self.f_bounds = f_bounds
else:
self.f_bounds = _get_f_bounds_default(n_params)
if l_f_validate_input is None:
self.l_f_validate_input = [_f_validate_input_default]
else:
self.l_f_validate_input = _as_list(l_f_validate_input)
if l_cache_vars is None:
self.l_cache_vars = []
else:
self.l_cache_vars = _as_list(l_cache_vars)
if dict_f_cache is None:
self.dict_f_cache = dict()
else:
self.dict_f_cache = dict_f_cache
# TODO - REMOVE THIS - ASSUME NORMALIZED INPUT
def _get_f_init_params_validated(f_init_params):
# Adds argument validation to a parameter initialisation function
def f_init_params_validated(
a_x=None, a_y=None, a_date=None, is_mult=False):
if a_x is not None and pd.isnull(a_x).any():
raise ValueError('a_x cannot have null values')
return f_init_params(a_x, a_y, a_date, is_mult)
return f_init_params_validated
# Add logic to f_init_params that validates input
self.f_init_params = _get_f_init_params_validated(self.f_init_params)
def __call__(self, a_x, a_date, params, is_mult=False, **kwargs):
# assert len(params)==self.n_params
return self.f_model(a_x, a_date, params, is_mult, **kwargs)
def __str__(self):
return self.name
def __repr__(self):
return 'ForecastModel:{}'.format(self.name)
def __add__(self, forecast_model):
# Check for nulls
if self.name == 'null':
return forecast_model
if forecast_model.name == 'null':
return self
name = '({}+{})'.format(self.name, forecast_model.name)
n_params = self.n_params + forecast_model.n_params
f_model = _get_f_add_2_f_models(self, forecast_model)
f_init_params = _get_f_add_2_f_init_params(
self.f_init_params, forecast_model.f_init_params)
f_bounds = _get_f_concat_2_bounds(self, forecast_model)
l_f_validate_input = list(
set(self.l_f_validate_input + forecast_model.l_f_validate_input))
# Combine both dicts
dict_f_cache = self.dict_f_cache.copy()
dict_f_cache.update(forecast_model.dict_f_cache)
l_cache_vars = list(
set(self.l_cache_vars + forecast_model.l_cache_vars))
return ForecastModel(
name,
n_params,
f_model,
f_init_params,
f_bounds=f_bounds,
l_f_validate_input=l_f_validate_input,
l_cache_vars=l_cache_vars,
dict_f_cache=dict_f_cache
)
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, forecast_model):
if self.name == 'null':
return forecast_model
if forecast_model.name == 'null':
return self
name = '({}*{})'.format(self.name, forecast_model.name)
n_params = self.n_params + forecast_model.n_params
f_model = _get_f_mult_2_f_models(self, forecast_model)
f_init_params = _get_f_mult_2_f_init_params(
self.f_init_params, forecast_model.f_init_params)
f_bounds = _get_f_concat_2_bounds(self, forecast_model)
l_f_validate_input = list(
set(self.l_f_validate_input + forecast_model.l_f_validate_input))
# Combine both dicts
dict_f_cache = self.dict_f_cache.copy()
dict_f_cache.update(forecast_model.dict_f_cache)
l_cache_vars = list(
set(self.l_cache_vars + forecast_model.l_cache_vars))
return ForecastModel(
name,
n_params,
f_model,
f_init_params,
f_bounds=f_bounds,
l_f_validate_input=l_f_validate_input,
l_cache_vars=l_cache_vars,
dict_f_cache=dict_f_cache
)
def __rmul__(self, other):
return self.__mul__(other)
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.name == other.name
return NotImplemented
def __ne__(self, other):
x = self.__eq__(other)
if x is not NotImplemented:
return not x
return NotImplemented
def __hash__(self):
return hash(self.name)
def __lt__(self, other):
return self.name < other.name
def validate_input(self, a_x, a_y, a_date):
try:
l_result = [f_validate_input(a_x, a_y, a_date)
for f_validate_input in self.l_f_validate_input]
except AssertionError:
return False
return True
def init_cache(self, a_x, a_date):
dict_cache_vars = dict()
for k in self.l_cache_vars:
f = _dict_f_cache.get(k)
if f:
dict_cache_vars[k] = f(a_x, a_date)
else:
logger.warning('Cache function not found: %s', k)
# Search vars defined in internal cache function dictionary
for k in self.dict_f_cache:
f = self.dict_f_cache.get(k)
if f:
dict_cache_vars[k] = f(a_x, a_date)
else:
logger.warning('Cache function not found: %s', k)
return dict_cache_vars
# - Null model: 0
def _f_model_null(a_x, a_date, params, is_mult=False, **kwargs):
# This model does nothing - used to disable model components
# (e.g. seasonality) when adding/multiplying multiple functions
return float(is_mult) # Returns 1 if multiplying, 0 if adding
model_null = ForecastModel('null', 0, _f_model_null)
# - Constant model: :math:`Y = A`
def _f_model_constant(a_x, a_date, params, is_mult=False, **kwargs):
[A] = params
y = np.full(len(a_x), A)
return y
def _f_init_params_constant(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 1)
else:
return np.nanmean(a_y) + np.random.uniform(0, 1, 1)
model_constant = ForecastModel(
'constant',
1,
_f_model_constant,
_f_init_params_constant)
# - Naive model: Y = Y(x-1)
# Note: This model requires passing the actuals data - it is not fitted by
# regression. We still pass it to forecast.fit_model() for consistency
# with the rest of the library
def _f_model_naive(a_x, a_date, params, is_mult=False, df_actuals=None):
if df_actuals is None:
raise ValueError('model_naive requires a df_actuals argument')
df_out_tmp = pd.DataFrame({'date': a_date, 'x': a_x})
df_out = (
# This is not really intended to work with multiple values per sample
df_actuals.drop_duplicates('x')
.merge(df_out_tmp, how='outer')
.sort_values('x')
)
df_out['y'] = (
df_out.y.shift(1)
.fillna(method='ffill')
.fillna(method='bfill')
)
df_out = df_out.loc[df_out.x.isin(a_x)]
# df_out = df_out_tmp.merge(df_out, how='left')
# TODO: CHECK THAT X,DATE order is preserved
# TODO: df_out = df_out.merge(df_out_tmp, how='right')
return df_out.y.values
model_naive = ForecastModel('naive', 0, _f_model_naive)
# - Seasonal naive model
# Note: This model requires passing the actuals data - it is not fitted by
# regression. We still pass it to forecast.fit_model() for consistency
# with the rest of the library
def _fillna_wday(df):
"""
In a time series, shift samples by 1 week
and fill gaps with data from same weekday
"""
def add_col_y_out(df):
df = df.assign(y_out=df.y.shift(1).fillna(method='ffill'))
return df
df_out = (
df
.assign(wday=df.date.dt.weekday)
.groupby('wday', as_index=False).apply(add_col_y_out)
.sort_values(['x'])
.reset_index(drop=True)
)
return df_out
def _f_model_snaive_wday(a_x, a_date, params, is_mult=False, df_actuals=None):
"""Naive model - takes last valid weekly sample"""
if df_actuals is None:
raise ValueError('model_snaive_wday requires a df_actuals argument')
# df_actuals_model - table with actuals samples,
# adding y_out column with naive model values
df_actuals_model = _fillna_wday(df_actuals.drop_duplicates('x'))
# df_last_week - table with naive model values from last actuals week,
# to use in extrapolation
df_last_week = (
df_actuals_model
# Fill null actual values with data from previous weeks
.assign(y=df_actuals_model.y.fillna(df_actuals_model.y_out))
.drop_duplicates('wday', keep='last')
[['wday', 'y']]
.rename(columns=dict(y='y_out'))
)
# Generate table with extrapolated samples
df_out_tmp = pd.DataFrame({'date': a_date, 'x': a_x})
df_out_tmp['wday'] = df_out_tmp.date.dt.weekday
df_out_extrapolated = (
df_out_tmp
.loc[~df_out_tmp.date.isin(df_actuals_model.date)]
.merge(df_last_week, how='left')
.sort_values('x')
)
# Filter actuals table - only samples in a_x, a_date
df_out_actuals_filtered = (
# df_actuals_model.loc[df_actuals_model.x.isin(a_x)]
# Using merge rather than simple filtering to account for
# dates with multiple samples
df_actuals_model.merge(df_out_tmp, how='inner')
.sort_values('x')
)
df_out = (
pd.concat(
[df_out_actuals_filtered, df_out_extrapolated],
sort=False, ignore_index=True)
)
return df_out.y_out.values
model_snaive_wday = ForecastModel('snaive_wday', 0, _f_model_snaive_wday)
# - Spike model: :math:`Y = A`, when x_min <= X < x_max
def _f_model_spike(a_x, a_date, params, is_mult=False, **kwargs):
[A, x_min, x_max] = params
if is_mult:
c = 1
else:
c = 0
y = np.concatenate((
np.full(int(x_min), c),
np.full(int(x_max - x_min), A),
np.full(len(a_x) - int(x_max), c)
))
return y
def _f_init_params_spike(a_x=None, a_y=None, a_date=None, is_mult=False):
""" params are spike height, x start, x end """
# if not a_y.any():
if a_y is None:
return [1] + np.random.uniform(0, 1, 1) + [2]
else:
diffs = np.diff(a_y)
# if diffs:
if True:
diff = max(diffs)
x_start = np.argmax(diffs)
x_end = x_start + 1
return np.array([diff, x_start, x_end])
model_spike = ForecastModel('spike', 3, _f_model_spike, _f_init_params_spike)
# - Spike model for dates - dates are fixed for each model
def _f_model_spike_date(
a_x,
a_date,
params,
date_start,
date_end,
is_mult=False):
[A] = params
mask_spike = (a_date >= date_start) * (a_date < date_end)
if is_mult:
y = mask_spike * A + ~mask_spike
else:
y = mask_spike * A
return y
def _f_init_params_spike(a_x=None, a_y=None, a_date=None, is_mult=False):
""" params are spike height, x start, x end """
if a_y is None:
return np.concatenate([np.array([1]) + np.random.uniform(0, 1, 1)])
else:
diffs = np.diff(a_y)
# if diffs:
if True:
diff = max(diffs)
return np.array([diff])
# else:
# rand = np.random.randint(1, len(a_y) - 1)
# return [1]
def get_model_spike_date(date_start, date_end):
f_model = (
lambda a_x, a_date, params, is_mult=False, **kwargs:
_f_model_spike_date(a_x, a_date, params, date_start, date_end, is_mult)
)
model_spike_date = ForecastModel(
'spike_date[{},{}]'.format(
pd.to_datetime(date_start).date(),
pd.to_datetime(date_end).date()),
1,
f_model,
_f_init_params_spike)
return model_spike_date
# - Linear model: :math:`Y = A*x + B`
def _f_model_linear(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
y = A * a_x + B
return y
def _f_init_params_linear(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(low=0, high=1, size=2)
else: # TODO: Improve this
if a_x is not None:
a_x_size = np.unique(a_x).size - 1
else:
a_x_size = a_y.size - 1
A = (a_y[-1] - a_y[0]) / a_x_size
B = a_y[0]
# Uniform low= 0*m, high = 1*m
return np.array([A, B])
model_linear = ForecastModel(
'linear',
2,
_f_model_linear,
_f_init_params_linear)
def f_init_params_linear_nondec(
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
params = _f_init_params_linear(a_x, a_y, a_date)
if params[0] < 0:
params[0] = 0
return params
def f_bounds_linear_nondec(a_x=None, a_y=None, a_date=None):
# first param should be between 0 and inf
return [0, -np.inf], [np.inf, np.inf]
model_linear_nondec = ForecastModel('linear_nondec', 2, _f_model_linear,
f_init_params=f_init_params_linear_nondec,
f_bounds=f_bounds_linear_nondec)
# - QuasiLinear model: :math:`Y = A t^{B} + C`
def _f_model_quasilinear(a_x, a_date, params, is_mult=False, **kwargs):
(A, B, C) = params
y = A * np.power(a_x, B) + C
return y
model_quasilinear = ForecastModel('quasilinear', 3, _f_model_quasilinear)
# - Exponential model: math:: Y = A * B^t
# TODO: Deprecate - not safe to use
def _f_model_exp(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
y = A * np.power(B, a_x)
return y
model_exp = ForecastModel('exponential', 2, _f_model_exp)
# - Exponential decay model: math:: Y = A * e^(B*x) + D
def _f_model_decay(a_x, a_date, params, is_mult=False, **kwargs):
(A, B, D) = params
y = A * np.exp(B * (a_x)) + D
return y
def _f_validate_input_decay(a_x, a_y, a_date):
assert (a_y > 0).all()
def f_init_params_decay(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.array([0, 0, 0])
A = a_y[0] - a_y[-1]
B = np.log(np.min(a_y) / np.max(a_y)) / (len(a_y) - 1)
if B > 0 or B == -np.inf:
B = -0.5
C = a_y[-1]
return np.array([A, B, C])
def f_bounds_decay(a_x=None, a_y=None, a_date=None):
return [-np.inf, -np.inf, -np.inf], [np.inf, 0, np.inf]
model_decay = ForecastModel('decay', 3, _f_model_decay,
f_init_params=f_init_params_decay,
f_bounds=f_bounds_decay,
l_f_validate_input=_f_validate_input_decay)
# - Step function: :math:`Y = {0, if x < A | B, if x >= A}`
# A is the time of step, and B is the step
def _f_step(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
if is_mult:
y = 1 + (B - 1) * np.heaviside(a_x - A, 1)
else:
y = B * np.heaviside(a_x - A, 1)
return y
# TODO: Implement initialisation for multiplicative composition
def _f_init_params_step(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 2)
else:
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'b': a_y})
# max difference between consecutive values
df['diff'] = df.diff().abs()
# if is_mult, replace above line with something like
# np.concatenate([[np.NaN],a_y[:-1]/a_y[1:]])
a = df.nlargest(1, 'diff').index[0]
b = df['diff'].iloc[a]
return np.array([a, b * 2])
# TODO: Add boundaries for X axis
model_step = ForecastModel('step', 2, _f_step, _f_init_params_step)
# - Spike model for dates - dates are fixed for each model
def _f_model_step_date(a_x, a_date, params, date_start, is_mult=False):
[A] = params
mask_step = (a_date >= date_start).astype(float)
if is_mult:
# y = mask_step*A + ~mask_step
y = mask_step * (A - 1) + 1
else:
y = mask_step * A
return y
# TODO: Implement initialisation for multiplicative composition
def _f_init_params_step_date(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 1)
else:
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'b': a_y})
# max difference between consecutive values
df['diff'] = df.diff().abs()
# if is_mult, replace above line with something like
# np.concatenate([[np.NaN],a_y[:-1]/a_y[1:]])
a = df.nlargest(1, 'diff').index[0]
b = df['diff'].iloc[a]
return np.array([b * 2])
def get_model_step_date(date_start):
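    # Build a one-parameter step model anchored at a fixed calendar date: the
    # parameter is the step height, applied to every sample on or after
    # date_start (around a baseline of 1 when composed multiplicatively).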
date_start = pd.to_datetime(date_start)
f_model = (
lambda a_x, a_date, params, is_mult=False, **kwargs:
_f_model_step_date(a_x, a_date, params, date_start, is_mult)
)
model_step_date = ForecastModel('step_date[{}]'.format(date_start.date()),
1, f_model, _f_init_params_step_date)
return model_step_date
# Two step functions
def _f_n_steps(n, a_x, a_date, params, is_mult=False):
if is_mult:
y = 1
else:
y = 0
for i in range(0, n + 1, 2):
A, B = params[i: i + 2]
if is_mult:
y = y * _f_step(a_x, a_date, (A, B), is_mult)
else:
y = y + _f_step(a_x, a_date, (A, B), is_mult)
return y
def _f_two_steps(a_x, a_date, params, is_mult=False, **kwargs):
return _f_n_steps(
n=2,
a_x=a_x,
a_date=a_date,
params=params,
is_mult=is_mult)
def _f_init_params_n_steps(
n=2,
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, n * 2)
else:
# max difference between consecutive values
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'b': a_y})
df['diff'] = df.diff().abs()
# if is_mult, replace above line with something like
# np.concatenate([[np.NaN],a_y[:-1]/a_y[1:]])
a = df.nlargest(n, 'diff').index[0:n].values
b = df['diff'].iloc[a].values
params = []
for i in range(0, n):
params += [a[i], b[i]]
return np.array(params)
def _f_init_params_two_steps(a_x=None, a_y=None, a_date=None, is_mult=False):
return _f_init_params_n_steps(
n=2,
a_x=a_x,
a_y=a_y,
a_date=a_date,
is_mult=is_mult)
model_two_steps = ForecastModel(
'two_steps',
2 * 2,
_f_two_steps,
_f_init_params_two_steps)
# - Sigmoid step function: `Y = {A + (B - A) / (1 + np.exp(- D * (a_x - C)))}`
# Spans from A to B, C is the position of the step in x axis
# and D is how steep the increase is
def _f_sigmoid(a_x, a_date, params, is_mult=False, **kwargs):
(B, C, D) = params
if is_mult:
A = 1
else:
A = 0
# TODO check if a_x is negative
y = A + (B - A) / (1 + np.exp(- D * (a_x - C)))
return y
def _f_init_params_sigmoid_step(
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 3)
else:
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'y': a_y})
# max difference between consecutive values
df['diff'] = df.diff().abs()
c = df.nlargest(1, 'diff').index[0]
b = df.loc[c, 'y']
d = b * b
return b, c, d
def _f_init_bounds_sigmoid_step(a_x=None, a_y=None, a_date=None):
if a_y is None:
return [-np.inf, -np.inf, 0.], 3 * [np.inf]
if a_y.ndim > 1:
a_y = a_y[:, 0]
if a_x.ndim > 1:
a_x = a_x[:, 0]
diff = max(a_y) - min(a_y)
b_min = -2 * diff
b_max = 2 * diff
c_min = min(a_x)
c_max = max(a_x)
d_min = 0.
d_max = np.inf
return [b_min, c_min, d_min], [b_max, c_max, d_max]
# In this model, parameter initialization is aware of number of steps
model_sigmoid_step = ForecastModel(
'sigmoid_step',
3,
_f_sigmoid,
_f_init_params_sigmoid_step,
f_bounds=_f_init_bounds_sigmoid_step)
model_sigmoid = ForecastModel('sigmoid', 3, _f_sigmoid)
# Ramp functions - used for piecewise linear models
# example : model_linear_pw2 = model_linear + model_ramp
# example 2: model_linear_p23 = model_linear + model_ramp + model_ramp
# - Ramp function: :math:`Y = {0, if x < A | B, if x >= A}`
# A is the time of step, and B is the step
def _f_ramp(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
if is_mult:
y = 1 + (a_x - A) * (B) * np.heaviside(a_x - A, 1)
else:
y = (a_x - A) * B * np.heaviside(a_x - A, 1)
return y
def _f_init_params_ramp(a_x=None, a_y=None, a_date=None, is_mult=False):
# TODO: set boundaries: a_x (0.2, 0.8)
if a_y is None:
if a_x is not None:
nfirst_last = int(np.ceil(0.15 * a_x.size))
a = np.random.uniform(a_x[nfirst_last], a_x[-nfirst_last - 1], 1)
else:
a = np.random.uniform(0, 1, 1)
b = np.random.uniform(0, 1, 1)
return np.concatenate([a,
b])
else:
# TODO: FILTER A_Y BY 20-80 PERCENTILE IN A_X
df = pd.DataFrame({'b': a_y})
if a_x is not None:
#
df['x'] = a_x
# Required because we support input with multiple samples per x
# value
df = df.drop_duplicates('x')
df = df.set_index('x')
# max difference between consecutive values -- this assumes no null
# values in series
df['diff2'] = df.diff().diff().abs()
# We ignore the last 15% of the time series
skip_samples = int(np.ceil(df.index.size * 0.15))
a = (df.head(-skip_samples).tail(
-skip_samples).nlargest(1, 'diff2').index[0]
)
b = df['diff2'].loc[a]
# TODO: replace b with estimation of slope in segment 2
# minus slope in segment 1 - see init_params_linear
return np.array([a, b])
def _f_init_bounds_ramp(a_x=None, a_y=None, a_date=None):
if a_x is None:
a_min = -np.inf
a_max = np.inf
else:
# a_min = np.min(a_x)
nfirst_last = int(np.ceil(0.15 * a_x.size))
a_min = a_x[nfirst_last]
a_max = a_x[-nfirst_last]
# a_min = np.percentile(a_x, 15)
# a_max = np.percentile(a_x,85)
if a_y is None:
b_min = -np.inf
b_max = np.inf
else:
# TODO: FILTER A_Y BY 20-80 PERCENTILE IN A_X
# df = pd.DataFrame({'b': a_y})
# #max_diff2 = np.max(df.diff().diff().abs())
# max_diff2 = np.max(np.abs(np.diff(np.diff(a_y))))
#
# b_min = -2*max_diff2
# b_max = 2*max_diff2
b_min = -np.inf
b_max = np.inf
# logger_info('DEBUG: BOUNDS:',(a_min, b_min,a_max, b_max))
return ([a_min, b_min], [a_max, b_max])
model_ramp = ForecastModel(
'ramp',
2,
_f_ramp,
_f_init_params_ramp,
_f_init_bounds_ramp)
# - Weekday seasonality
def _f_model_season_wday(
a_x, a_date, params, is_mult=False,
# cache variables
a_weekday=None,
**kwargs):
# Weekday seasonality model, 6 params
# params_long[0] is default series value,
params_long = np.concatenate([[float(is_mult)], params])
if a_weekday is None:
a_weekday = _f_init_cache_a_weekday(a_x, a_date)
return params_long[a_weekday]
def _f_validate_input_season_wday(a_x, a_y, a_date):
assert a_date is not None
assert a_date.weekday.drop_duplicates().size == 7
model_season_wday = ForecastModel(
'season_wday',
6,
_f_model_season_wday,
l_f_validate_input=_f_validate_input_season_wday,
l_cache_vars=['a_weekday']
)
# - Month seasonality
def _f_init_params_season_month(
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None or a_date is None:
return np.random.uniform(low=-1, high=1, size=11)
else: # TODO: Improve this
l_params_long = [np.mean(a_y[a_date.month == i])
for i in np.arange(1, 13)]
l_baseline = l_params_long[-1]
l_params = l_params_long[:-1]
if not is_mult:
l_params_add = l_params - l_baseline
return l_params_add
else:
l_params_mult = l_params / l_baseline
return l_params_mult
def _f_model_season_month(
a_x, a_date, params, is_mult=False,
# cache variables
a_month=None,
**kwargs):
# Month of December is taken as default level, has no parameter
# params_long[0] is default series value
params_long = np.concatenate([[float(is_mult)], params])
if a_month is None:
a_month = _f_init_cache_a_month(a_x, a_date)
return params_long[a_month]
model_season_month = ForecastModel(
'season_month',
11,
_f_model_season_month,
_f_init_params_season_month,
l_cache_vars=['a_month']
)
model_season_month_old = ForecastModel(
'season_month_old', 11, _f_model_season_month)
def _f_model_yearly_season_fourier(
a_x,
a_date,
params,
is_mult=False,
# cache params
a_t_fourier=None,
**kwargs):
if a_t_fourier is None:
a_t_fourier = _f_init_cache_a_t_fourier(None, a_date)
y = np.matmul(params, a_t_fourier)
return y
def _f_init_params_fourier_n_params(
n_params,
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None:
params = np.random.uniform(0.001, 1, n_params)
else:
# max difference in time series
diff = a_y.max() - a_y.min()
params = diff * np.random.uniform(0.001, 1, n_params)
return params
def _f_init_params_fourier(a_x=None, a_y=None, a_date=None, is_mult=False):
n_params = 2 * _dict_fourier_config['harmonics']
return _f_init_params_fourier_n_params(
n_params, a_x=a_x, a_y=a_y, a_date=a_date, is_mult=is_mult)
def _f_init_bounds_fourier_nparams(n_params, a_x=None, a_y=None, a_date=None):
return n_params * [-np.inf], n_params * [np.inf]
def _f_init_bounds_fourier_yearly(a_x=None, a_y=None, a_date=None):
n_params = 2 * _dict_fourier_config['harmonics']
return _f_init_bounds_fourier_nparams(n_params, a_x, a_y, a_date)
model_season_fourier_yearly = ForecastModel(
name='season_fourier_yearly',
n_params=2 * _dict_fourier_config.get('harmonics'),
f_model=_f_model_yearly_season_fourier,
f_init_params=_f_init_params_fourier,
f_bounds=_f_init_bounds_fourier_yearly,
l_cache_vars='a_t_fourier'
)
def get_fixed_model(forecast_model, params_fixed, is_mult=False):
# Generate model with some fixed parameters
if forecast_model.n_params == 0: # Nothing to do
return forecast_model
if len(params_fixed) != forecast_model.n_params:
err = 'Wrong number of fixed parameters'
raise ValueError(err)
return ForecastModel(
forecast_model.name + '_fixed', 0,
f_model=lambda a_x, a_date, params, is_mult=is_mult, **kwargs:
forecast_model.f_model(
a_x=a_x, a_date=a_date, params=params_fixed, is_mult=is_mult))
def get_iqr_thresholds(s_diff, low=0.25, high=0.75):
# Get thresholds based on inter quantile range
q1 = s_diff.quantile(low)
q3 = s_diff.quantile(high)
iqr = q3 - q1
thr_low = q1 - 1.5 * iqr
thr_hi = q3 + 1.5 * iqr
return thr_low, thr_hi
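# Worked example for the thresholds above (illustrative only): if the series
# of differences has q1 = 10 and q3 = 20, then IQR = 10, so differences below
# 10 - 1.5 * 10 = -5 or above 20 + 1.5 * 10 = 35 are treated as outliers.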
# TODO: Add option - estimate_outl_size
# TODO: Add option - sigmoid steps
# TODO: ADD option - gaussian spikes
def get_model_outliers(df, window=3):
"""
Identify outlier samples in a time series
:param df: Input time series
:type df: pandas.DataFrame
:param window: The x-axis window to aggregate multiple steps/spikes
:type window: int
:return:
| tuple (mask_step, mask_spike)
| mask_step: True if sample contains a step
| mask_spike: True if sample contains a spike
:rtype: tuple of 2 numpy arrays of booleans
TODO: require minimum number of samples to find an outlier
"""
dfo = df.copy() # dfo - df for outliers
# If df has datetime index, use date logic in steps/spikes
with_dates = 'date' in df.columns
x_col = 'date' if with_dates else 'x'
if df[x_col].duplicated().any():
raise ValueError('Input cannot have multiple values per sample')
# Get the differences
dfo['dif'] = dfo.y.diff()
# We consider as outliers the values that are
# 1.5 * IQR (interquartile range) beyond the quartiles.
# These thresholds are obtained here
thr_low, thr_hi = get_iqr_thresholds(dfo.dif)
# Now identify the changes
dfo['ischange'] = ((dfo.dif < thr_low) | (dfo.dif > thr_hi)).astype(int)
# Whenever there are two or more consecutive changes
# (that is, within `window` samples), we group them together
dfo['ischange_group'] = (
dfo.ischange.rolling(window, win_type=None, center=True).max().fillna(
0).astype(int)
)
# We now have to calculate the difference within the
# same group in order to identify if the consecutive changes
# result in a step, a spike, or both.
# We get the filtered difference
dfo['dif_filt'] = (dfo.dif * dfo.ischange).fillna(0)
# And the absolute value of that
dfo['dif_filt_abs'] = dfo.dif_filt.abs()
dfo['change_group'] = dfo.ischange_group.diff(
).abs().fillna(0).astype(int).cumsum()
# this gets us the average difference of the outliers within each change
# group
df_mean_gdiff = (
dfo.loc[dfo.ischange.astype(bool)].groupby('change_group')[
'dif_filt'].mean().rename('mean_group_diff').reset_index())
# this gets us the average absolute difference of the outliers within each
# change group
df_mean_gdiff_abs = (
dfo.loc[dfo.ischange.astype(bool)].groupby('change_group')[
'dif_filt_abs'].mean().rename(
'mean_group_diff_abs').reset_index()
)
# Merge the differences with the original dfo
dfo = dfo.merge(
df_mean_gdiff,
how='left').merge(
df_mean_gdiff_abs,
how='left')
# Fill missing values with zero -> no change
dfo.mean_group_diff = dfo.mean_group_diff.fillna(0)
dfo.mean_group_diff_abs = dfo.mean_group_diff_abs.fillna(0)
# the change group is a step if the mean_group_diff exceeds the thresholds
dfo['is_step'] = dfo['ischange_group'] & (
((dfo.mean_group_diff < thr_low) | (dfo.mean_group_diff > thr_hi)))
# the change group is a spike if the difference between the
# mean_group_diff_abs and the average mean_group_diff exceeds
# the average threshold value
dfo['is_spike'] = (dfo.mean_group_diff_abs -
dfo.mean_group_diff.abs()) > (thr_hi - thr_low) / 2
# Get the outlier start and end points for each group
df_outl = (
dfo.loc[dfo.ischange.astype(bool)].groupby('change_group').apply(
lambda x: pd.Series(
{'outl_start': x[x_col].iloc[0],
'outl_end': x[x_col].iloc[-1]})).reset_index()
)
if df_outl.empty: # No outliers - nothing to do
return np.full(dfo.index.size, False), np.full(dfo.index.size, False)
dfo = dfo.merge(df_outl, how='left')
# Get the start and end points in dfo
if with_dates:
# Convert to datetime, if we are using dates
dfo['outl_start'] = pd.to_datetime(dfo.outl_start)
dfo['outl_end'] = pd.to_datetime(dfo.outl_end)
# Create the mask for spikes and steps
dfo['mask_spike'] = (dfo['is_spike'] &
(dfo.date >= pd.to_datetime(dfo.outl_start)) &
(dfo.date < pd.to_datetime(dfo.outl_end)))
dfo['mask_step'] = (dfo['is_step'] &
(dfo.date >= pd.to_datetime(dfo.outl_start)) &
(dfo.date <= pd.to_datetime(dfo.outl_end)))
import unittest
import os
import shutil
import numpy as np
import pandas as pd
from aistac import ConnectorContract
from ds_discovery import Wrangle, SyntheticBuilder
from ds_discovery.intent.wrangle_intent import WrangleIntentModel
from aistac.properties.property_manager import PropertyManager
class WrangleIntentCorrelateTest(unittest.TestCase):
def setUp(self):
os.environ['HADRON_PM_PATH'] = os.path.join('work', 'config')
os.environ['HADRON_DEFAULT_PATH'] = os.path.join('work', 'data')
try:
os.makedirs(os.environ['HADRON_PM_PATH'])
os.makedirs(os.environ['HADRON_DEFAULT_PATH'])
except:
pass
PropertyManager._remove_all()
def tearDown(self):
try:
shutil.rmtree('work')
except:
pass
@property
def tools(self) -> WrangleIntentModel:
return Wrangle.scratch_pad()
def test_runs(self):
"""Basic smoke test"""
im = Wrangle.from_env('tester', default_save=False, default_save_intent=False,
reset_templates=False, has_contract=False).intent_model
self.assertTrue(WrangleIntentModel, type(im))
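# The next test exercises the code_str mini-syntax of correlate_custom: '@'
# refers to the DataFrame passed in, and '$v1' is substituted from the
# matching keyword argument (v1=2 in the second call).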
def test_correlate_custom(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [1, 2, 3]
result = tools.correlate_custom(df, code_str="[x + 2 for x in @['A']]")
self.assertEqual([3, 4, 5], result)
result = tools.correlate_custom(df, code_str="[True if x == $v1 else False for x in @['A']]", v1=2)
self.assertEqual([False, True, False], result)
def test_correlate_choice(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [[1,2,4,6], [1], [2,4,8,1], [2,4]]
result = tools.correlate_choice(df, header='A', list_size=2)
control = [[1, 2], [1], [2, 4], [2, 4]]
self.assertEqual(control, result)
result = tools.correlate_choice(df, header='A', list_size=1)
self.assertEqual([1, 1, 2, 2], result)
def test_correlate_coefficient(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [1,2,3]
result = tools.correlate_polynomial(df, header='A', coefficient=[2,1])
self.assertEqual([3, 4, 5], result)
result = tools.correlate_polynomial(df, header='A', coefficient=[0, 0, 1])
self.assertEqual([1, 4, 9], result)
def test_correlate_join(self):
tools = self.tools
df = pd.DataFrame()
df['A'] = [1,2,3]
df['B'] = list('XYZ')
df['C'] = [4.2,7.1,4.1]
result = tools.correlate_join(df, header='B', action="values", sep='_')
self.assertEqual(['X_values', 'Y_values', 'Z_values'], result)
result = tools.correlate_join(df, header='A', action=tools.action2dict(method='correlate_numbers', header='C'))
self.assertEqual(['14.2', '27.1', '34.1'], result)
def test_correlate_columns(self):
tools = self.tools
df = pd.DataFrame({'A': [1,1,1,1,None], 'B': [1,None,2,3,None], 'C': [2,2,2,2,None], 'D': [5,5,5,5,None]})
result = tools.correlate_aggregate(df, headers=list('ABC'), agg='sum')
control = [4.0, 3.0, 5.0, 6.0, 0.0]
self.assertEqual(result, control)
for action in ['sum', 'prod', 'count', 'min', 'max', 'mean']:
print(action)
result = tools.correlate_aggregate(df, headers=list('ABC'), agg=action)
self.assertEqual(5, len(result))
def test_correlate_number(self):
tools = self.tools
df = pd.DataFrame(data=[1,2,3,4.0,5,6,7,8,9,0], columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', precision=0)
self.assertCountEqual([1,2,3,4,5,6,7,8,9,0], result)
# Offset
df = pd.DataFrame(data=[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', offset=1, precision=0)
self.assertEqual([2,3,4,5,6,7,8,9,10,1], result)
# str offset
df = pd.DataFrame(data=[1, 2, 3, 4], columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', offset='1-@', precision=0)
self.assertEqual([0,-1,-2,-3], result)
# complex str offset
result = tools.correlate_numbers(df, 'numbers', offset='x + 2 if x <= 2 else x', precision=0)
self.assertEqual([3, 4, 3, 4], result)
# jitter
df = pd.DataFrame(data=[2] * 1000, columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', jitter=5, precision=0)
self.assertLessEqual(max(result), 4)
self.assertGreaterEqual(min(result), 0)
df = pd.DataFrame(data=tools._get_number(99999, size=5000), columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', jitter=5, precision=1)
self.assertNotEqual(df['numbers'].to_list(), result)
self.assertEqual(5000, len(result))
for index in range(len(result)):
loss = abs(df['numbers'][index] - result[index])
self.assertLessEqual(loss, 5)
df = pd.DataFrame(data=tools._get_number(99999, size=5000), columns=['numbers'])
result = tools.correlate_numbers(df, 'numbers', jitter=1, precision=1)
self.assertNotEqual(df['numbers'].to_list(), result)
self.assertEqual(5000, len(result))
for index in range(len(result)):
loss = abs(df['numbers'][index] - result[index])
self.assertLessEqual(loss, 1)
def test_correlate_normalize(self):
tools = self.tools
df = pd.DataFrame(data=[1,2,2,3,3,2,2,1], columns=['numbers'])
result = tools.correlate_numbers(df, header='numbers', normalize=(0, 1))
self.assertEqual([0.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, 0.0], result)
result = tools.correlate_numbers(df, header='numbers', normalize=(-1, 1))
self.assertEqual([-1.0, 0, 0, 1.0, 1.0, 0, 0, -1.0], result)
def test_correlate_standardise(self):
tools = self.tools
df = pd.DataFrame(data=[1,2,2,3,3,2,2,1], columns=['numbers'])
result = tools.correlate_numbers(df, header='numbers', standardize=True, precision=1)
self.assertEqual([-1.4, 0.0, 0.0, 1.4, 1.4, 0.0, 0.0, -1.4], result)
def test_correlate_number_to_numeric(self):
tools = self.tools
df = pd.DataFrame(data=list("123") + ['4-5'], columns=['numbers'])
with self.assertRaises(ValueError) as context:
result = tools.correlate_numbers(df, header='numbers')
self.assertTrue("The header column is of type" in str(context.exception))
result = tools.correlate_numbers(df, header='numbers', to_numeric=True)
self.assertEqual([1.0, 2.0, 3.0], result[:3])
result = tools.correlate_numbers(df, header='numbers', to_numeric=True, replace_nulls=0, rtn_type='int')
self.assertEqual([1, 2, 3, 0], result.to_list())
def test_correlate_number_extras(self):
tools = self.tools
# weighting
df = pd.DataFrame(columns=['numbers'], data=[2] * 1000)
result = tools.correlate_numbers(df, 'numbers', jitter=5, precision=0, jitter_freq=[0, 0, 1, 1])
self.assertCountEqual([2,3,4], list(pd.Series(result).value_counts().index))
result = tools.correlate_numbers(df, 'numbers', jitter=5, precision=0, jitter_freq=[1, 1, 0, 0])
self.assertCountEqual([0,1,2], list(pd.Series(result).value_counts().index))
# fill nan
df = pd.DataFrame(columns=['numbers'], data=[1,1,2,np.nan,3,1,np.nan,3,5,np.nan,7])
result = tools.correlate_numbers(df, 'numbers', replace_nulls=1, precision=0)
self.assertEqual([1,1,2,1,3,1,1,3,5,1,7], result)
df = pd.DataFrame(columns=['numbers'], data=[2] * 1000)
# jitter, offset and fillna
result = tools.correlate_numbers(df, 'numbers', offset=2, jitter=5, replace_nulls=2, precision=0)
self.assertCountEqual([2,3,4,5,6], list(pd.Series(result).value_counts().index))
# min
df = pd.DataFrame(columns=['numbers'], data=[2] * 100)
result = tools.correlate_numbers(df, 'numbers', offset=2, jitter=5, min_value=4, precision=0)
self.assertCountEqual([4, 5, 6], list(pd.Series(result).value_counts().index))
result = tools.correlate_numbers(df, 'numbers', offset=2, jitter=5, min_value=6, precision=0)
self.assertCountEqual([6], list(pd.Series(result).value_counts().index))
with self.assertRaises(ValueError) as context:
result = tools.correlate_numbers(df, 'numbers', offset=2, jitter=5, min_value=7, precision=0)
self.assertTrue("The min value 7 is greater than the max result value" in str(context.exception))
# max
result = tools.correlate_numbers(df, 'numbers', offset=2, jitter=5, max_value=4, precision=0)
self.assertCountEqual([2, 3, 4], list(pd.Series(result).value_counts().index))
result = tools.correlate_numbers(df, 'numbers', offset=2, jitter=5, max_value=2, precision=0)
self.assertCountEqual([2], list(pd.Series(result).value_counts().index))
with self.assertRaises(ValueError) as context:
result = tools.correlate_numbers(df, 'numbers', offset=2, jitter=5, max_value=1, precision=0)
self.assertTrue("The max value 1 is less than the min result value" in str(context.exception))
def test_correlate_categories(self):
tools = self.tools
df = pd.DataFrame(columns=['cat'], data=list("ABCDE"))
correlation = ['A', 'D']
action = {0: 'F', 1: 'G'}
result = tools.correlate_categories(df, 'cat', correlations=correlation, actions=action, default_action=tools.action2dict(method='@header', header='cat'))
self.assertEqual(['F', 'B', 'C', 'G', 'E'], result)
correlation = ['A', 'D']
action = {0: {'method': 'get_category', 'selection': list("HIJ")}, 1: {'method': 'get_number', 'to_value': 10}}
result = tools.correlate_categories(df, 'cat', correlations=correlation, actions=action)
self.assertIn(result[0], list("HIJ"))
self.assertTrue(0 <= result[3] < 10)
df = pd.DataFrame(columns=['cat'], data=tools._get_category(selection=list("ABCDE"), size=5000))
result = tools.correlate_categories(df, 'cat', correlations=correlation, actions=action)
self.assertEqual(5000, len(result))
def test_correlate_categories_selection(self):
tools = self.tools
df = pd.DataFrame(columns=['cat'], data=list("ABACDBA"))
correlation = [[tools.select2dict(column='cat', condition="@=='A'")], [tools.select2dict(column='cat', condition="@=='B'")]]
action = {0: 'F', 1: 'G'}
default = 'H'
result = tools.correlate_categories(df, 'cat', correlations=correlation, actions=action, default_action=default)
self.assertEqual(['F', 'G', 'F', 'H', 'H', 'G', 'F'], result)
correlation = [[tools.select2dict(column='cat', condition="@=='A'")], ['B', 'C'], 'D']
result = tools.correlate_categories(df, 'cat', correlations=correlation, actions=action, default_action=default)
self.assertEqual(['F', 'G', 'F', 'G', 'H', 'G', 'F'], result)
# use with numbers
df = pd.DataFrame(columns=['cat'], data=[1,2,3,4,2,1])
correlation = [[tools.select2dict(column='cat', condition="@<=2")],
[tools.select2dict(column='cat', condition="@==3")]]
result = tools.correlate_categories(df, 'cat', correlations=correlation, actions=action, default_action=default)
self.assertEqual(['F', 'F', 'G', 'H', 'F', 'F'], result)
def test_correlate_categories_builder(self):
builder = Wrangle.from_env('test', has_contract=False)
builder.set_persist_contract(ConnectorContract(uri="eb://synthetic_members", module_name='ds_engines.handlers.event_handlers', handler='EventPersistHandler'))
df = pd.DataFrame()
df['pcp_tax_id'] = [993406113, 133757370, 260089066, 448512481, 546434723] * 2
correlations = [993406113, 133757370, 260089066, 448512481, 546434723]
actions = {0: 'LABCORP OF AMERICA', 1: 'LPCH MEDICAL GROUP', 2: 'ST JOSEPH HERITAGE MEDICAL',
3: 'MONARCH HEALTHCARE', 4: 'PRIVIA MEICAL GROUP'}
df['pcp_name'] = builder.tools.correlate_categories(df, header='pcp_tax_id', correlations=correlations,
actions=actions, column_name='pcp_name')
result = builder.tools.run_intent_pipeline(df)
self.assertEqual((10, 2), result.shape)
def test_correlate_categories_multi(self):
tools = self.tools
df = pd.DataFrame(columns=['cat'], data=list("ABCDEFGH"))
df['cat'] = df['cat'].astype('category')
correlation = [list("ABC"), list("DEFGH")]
action = {0: False, 1: True}
result = tools.correlate_categories(df, 'cat', correlations=correlation, actions=action)
self.assertEqual([False, False, False, True, True, True, True, True], result)
def test_correlate_categories_nulls(self):
tools = self.tools
builder = SyntheticBuilder.from_memory().tools
df = pd.DataFrame()
df['pcp_tax_id'] = builder.get_category(selection=['993406113', '133757370', '260089066', '448512481', '546434723'],
quantity=0.9, size=100, column_name='pcp_tax_id')
correlations = ['993406113', '133757370', '260089066', '448512481', '546434723']
actions = {0: 'LABCORP OF AMERICA', 1: 'LPCH MEDICAL GROUP', 2: 'ST JOSEPH HERITAGE MEDICAL',
3: 'MONARCH HEALTHCARE', 4: 'PRIVIA MEICAL GROUP'}
df['pcp_name'] = tools.correlate_categories(df, header='pcp_tax_id', correlations=correlations,
actions=actions, column_name='pcp_name')
print(df.head())
def test_expit(self):
tools = self.tools
df = pd.DataFrame(columns=['num'], data=[-2, 1, 0, -2, 2, 0])
result = tools.correlate_sigmoid(df, header='num')
self.assertEqual([0.119, 0.731, 0.5, 0.119, 0.881, 0.5], result)
def test_correlate_date(self):
tools = self.tools
df = pd.DataFrame(columns=['dates'], data=['2019/01/30', '2019/02/12', '2019/03/07', '2019/03/07'])
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from functools import wraps
import pandas as pd
from ..common import (
_get,
_getAsync,
_raiseIfNotStr,
_reindex,
_strOrDate,
_strToList,
_toDatetime,
json_normalize,
)
def iexTops(symbols=None, token="", version="stable", format="json"):
"""TOPS provides IEX’s aggregated best quoted bid and offer position in near real time for all securities on IEX’s displayed limit order book.
TOPS is ideal for developers needing both quote and trade data.
https://iexcloud.io/docs/api/#tops
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
symbols = _strToList(symbols)
if symbols:
return _get(
"tops?symbols=" + ",".join(symbols) + "%2b",
token=token,
version=version,
format=format,
)
return _get("tops", token=token, version=version, format=format)
@wraps(iexTops)
async def iexTopsAsync(symbols=None, token="", version="stable", format="json"):
symbols = _strToList(symbols)
if symbols:
return await _getAsync(
"tops?symbols=" + ",".join(symbols) + "%2b",
token=token,
version=version,
format=format,
)
return await _getAsync("tops", token=token, version=version, format=format)
@wraps(iexTops)
def iexTopsDF(*args, **kwargs):
return _reindex(_toDatetime(json_normalize(iexTops(*args, **kwargs))), "symbol")
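# Example usage (illustrative sketch; the symbols and token are placeholder
# assumptions, not values taken from this module):
#   quotes = iexTops(symbols=["AAPL", "IEX"], token="pk_...")  # raw JSON
#   df = iexTopsDF(symbols=["AAPL", "IEX"], token="pk_...")    # symbol-indexed DataFrame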
def iexLast(symbols=None, token="", version="stable", format="json"):
"""Last provides trade data for executions on IEX. It is a near real time, intraday API that provides IEX last sale price, size and time.
Last is ideal for developers that need a lightweight stock quote.
https://iexcloud.io/docs/api/#last
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
symbols = _strToList(symbols)
if symbols:
return _get(
"tops/last?symbols=" + ",".join(symbols) + "%2b",
token=token,
version=version,
format=format,
)
return _get("tops/last", token=token, version=version, format=format)
@wraps(iexLast)
async def iexLastAsync(symbols=None, token="", version="stable", format="json"):
symbols = _strToList(symbols)
if symbols:
return await _getAsync(
"tops/last?symbols=" + ",".join(symbols) + "%2b",
token=token,
version=version,
format=format,
)
return await _getAsync("tops/last", token=token, version=version, format=format)
@wraps(iexLast)
def iexLastDF(*args, **kwargs):
return _reindex(_toDatetime(json_normalize(iexLast(*args, **kwargs))), "symbol")
def iexDeep(symbol=None, token="", version="stable", format="json"):
"""DEEP is used to receive real-time depth of book quotations direct from IEX.
The depth of book quotations received via DEEP provide an aggregated size of resting displayed orders at a price and side,
and do not indicate the size or number of individual orders at any price level.
Non-displayed orders and non-displayed portions of reserve orders are not represented in DEEP.
DEEP also provides last trade price and size information. Trades resulting from either displayed or non-displayed orders matching on IEX will be reported. Routed executions will not be reported.
https://iexcloud.io/docs/api/#deep
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep?symbols=" + symbol, token=token, version=version, format=format
)
return _get("deep", token=token, version=version, format=format)
@wraps(iexDeep)
async def iexDeepAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep?symbols=" + symbol, token=token, version=version, format=format
)
return await _getAsync("deep", token=token, version=version, format=format)
@wraps(iexDeep)
def iexDeepDF(*args, **kwargs):
return _toDatetime(json_normalize(iexDeep(*args, **kwargs)))
def iexAuction(symbol=None, token="", version="stable", format="json"):
"""DEEP broadcasts an Auction Information Message every one second between the Lock-in Time and the auction match for Opening and Closing Auctions,
and during the Display Only Period for IPO, Halt, and Volatility Auctions. Only IEX listed securities are eligible for IEX Auctions.
https://iexcloud.io/docs/api/#deep-auction
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/auction?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return _get("deep/auction", token=token, version=version, format=format)
@wraps(iexAuction)
async def iexAuctionAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/auction?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return await _getAsync("deep/auction", token=token, version=version, format=format)
@wraps(iexAuction)
def iexAuctionDF(*args, **kwargs):
return _toDatetime(json_normalize(iexAuction(*args, **kwargs)))
def iexBook(symbol=None, token="", version="stable", format="json"):
"""Book shows IEX’s bids and asks for given symbols.
https://iexcloud.io/docs/api/#deep-book
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/book?symbols=" + symbol, token=token, version=version, format=format
)
return _get("deep/book", token=token, version=version, format=format)
@wraps(iexBook)
async def iexBookAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/book?symbols=" + symbol, token=token, version=version, format=format
)
return await _getAsync("deep/book", token=token, version=version, format=format)
@wraps(iexBook)
def iexBookDF(*args, **kwargs):
x = iexBook(*args, **kwargs)
data = []
for key in x:
d = x[key]
d["symbol"] = key
data.append(d)
return _toDatetime(json_normalize(data))
def iexOpHaltStatus(symbol=None, token="", version="stable", format="json"):
"""The Exchange may suspend trading of one or more securities on IEX for operational reasons and indicates such operational halt using the Operational halt status message.
IEX disseminates a full pre-market spin of Operational halt status messages indicating the operational halt status of all securities.
In the spin, IEX will send out an Operational Halt Message with “N” (Not operationally halted on IEX) for all securities that are eligible for trading at the start of the Pre-Market Session.
If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System at the start of the Pre-Market Session.
After the pre-market spin, IEX will use the Operational halt status message to relay changes in operational halt status for an individual security.
https://iexcloud.io/docs/api/#deep-operational-halt-status
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/op-halt-status?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return _get("deep/op-halt-status", token=token, version=version, format=format)
@wraps(iexOpHaltStatus)
async def iexOpHaltStatusAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/op-halt-status?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return await _getAsync(
"deep/op-halt-status", token=token, version=version, format=format
)
@wraps(iexOpHaltStatus)
def iexOpHaltStatusDF(*args, **kwargs):
x = iexOpHaltStatus(*args, **kwargs)
data = []
for key in x:
d = x[key]
d["symbol"] = key
data.append(d)
return _toDatetime(pd.DataFrame(data))
def iexOfficialPrice(symbol=None, token="", version="stable", format="json"):
"""The Official Price message is used to disseminate the IEX Official Opening and Closing Prices.
These messages will be provided only for IEX Listed Securities.
https://iexcloud.io/docs/api/#deep-official-price
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/official-price?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return _get("deep/official-price", token=token, version=version, format=format)
@wraps(iexOfficialPrice)
async def iexOfficialPriceAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/official-price?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return await _getAsync(
"deep/official-price", token=token, version=version, format=format
)
@wraps(iexOfficialPrice)
def iexOfficialPriceDF(*args, **kwargs):
return _toDatetime(json_normalize(iexOfficialPrice(*args, **kwargs)))
def iexSecurityEvent(symbol=None, token="", version="stable", format="json"):
"""The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs
https://iexcloud.io/docs/api/#deep-security-event
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/security-event?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return _get("deep/security-event", token=token, version=version, format=format)
@wraps(iexSecurityEvent)
async def iexSecurityEventAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/security-event?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return await _getAsync(
"deep/security-event", token=token, version=version, format=format
)
@wraps(iexSecurityEvent)
def iexSecurityEventDF(*args, **kwargs):
x = iexSecurityEvent(*args, **kwargs)
data = []
for key in x:
d = x[key]
d["symbol"] = key
data.append(d)
return _toDatetime(pd.DataFrame(data))
def iexSsrStatus(symbol=None, token="", version="stable", format="json"):
"""In association with Rule 201 of Regulation SHO, the Short Sale Price Test Message is used to indicate when a short sale price test restriction is in effect for a security.
IEX disseminates a full pre-market spin of Short sale price test status messages indicating the Rule 201 status of all securities.
After the pre-market spin, IEX will use the Short sale price test status message in the event of an intraday status change.
The IEX Trading System will process orders based on the latest short sale price test restriction status.
https://iexcloud.io/docs/api/#deep-short-sale-price-test-status
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/ssr-status?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return _get("deep/ssr-status", token=token, version=version, format=format)
@wraps(iexSsrStatus)
async def iexSsrStatusAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/ssr-status?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return await _getAsync(
"deep/ssr-status", token=token, version=version, format=format
)
@wraps(iexSsrStatus)
def iexSsrStatusDF(*args, **kwargs):
x = iexSsrStatus(*args, **kwargs)
data = []
for key in x:
d = x[key]
d["symbol"] = key
data.append(d)
return _toDatetime(pd.DataFrame(data))
def iexSystemEvent(token="", version="stable", format="json"):
"""The System event message is used to indicate events that apply to the market or the data feed.
There will be a single message disseminated per channel for each System Event type within a given trading session.
https://iexcloud.io/docs/api/#deep-system-event
Args:
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
return _get("deep/system-event", token=token, version=version, format=format)
@wraps(iexSystemEvent)
async def iexSystemEventAsync(token="", version="stable", format="json"):
return await _getAsync(
"deep/system-event", token=token, version=version, format=format
)
@wraps(iexSystemEvent)
def iexSystemEventDF(*args, **kwargs):
return _toDatetime(json_normalize(iexSystemEvent(*args, **kwargs)))
def iexTrades(symbol=None, token="", version="stable", format="json"):
"""Trade report messages are sent when an order on the IEX Order Book is executed in whole or in part. DEEP sends a Trade report message for every individual fill.
https://iexcloud.io/docs/api/#deep-trades
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/trades?symbols=" + symbol, token=token, version=version, format=format
)
return _get("deep/trades", token=token, version=version, format=format)
@wraps(iexTrades)
async def iexTradesAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/trades?symbols=" + symbol, token=token, version=version, format=format
)
return await _getAsync("deep/trades", token=token, version=version, format=format)
@wraps(iexTrades)
def iexTradesDF(*args, **kwargs):
x = iexTrades(*args, **kwargs)
data = []
for key in x:
dat = x[key]
for d in dat:
d["symbol"] = key
data.append(d)
return _toDatetime(pd.DataFrame(data))
def iexTradeBreak(symbol=None, token="", version="stable", format="json"):
"""Trade break messages are sent when an execution on IEX is broken on that same trading day. Trade breaks are rare and only affect applications that rely upon IEX execution based data.
https://iexcloud.io/docs/api/#deep-trade-break
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/trade-breaks?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return _get("deep/trade-breaks", token=token, version=version, format=format)
@wraps(iexTradeBreak)
async def iexTradeBreakAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/trade-breaks?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return await _getAsync(
"deep/trade-breaks", token=token, version=version, format=format
)
@wraps(iexTradeBreak)
def iexTradeBreakDF(*args, **kwargs):
return _toDatetime(json_normalize(iexTradeBreak(*args, **kwargs)))
def iexTradingStatus(symbol=None, token="", version="stable", format="json"):
"""The Trading status message is used to indicate the current trading status of a security.
For IEX-listed securities, IEX acts as the primary market and has the authority to institute a trading halt or trading pause in a security due to news dissemination or regulatory reasons.
For non-IEX-listed securities, IEX abides by any regulatory trading halts and trading pauses instituted by the primary or listing market, as applicable.
IEX disseminates a full pre-market spin of Trading status messages indicating the trading status of all securities.
In the spin, IEX will send out a Trading status message with “T” (Trading) for all securities that are eligible for trading at the start of the Pre-Market Session.
If a security is absent from the dissemination, firms should assume that the security is being treated as operationally halted in the IEX Trading System.
After the pre-market spin, IEX will use the Trading status message to relay changes in trading status for an individual security. Messages will be sent when a security is:
Halted
Paused*
Released into an Order Acceptance Period*
Released for trading
*The paused and released into an Order Acceptance Period status will be disseminated for IEX-listed securities only. Trading pauses on non-IEX-listed securities will be treated simply as a halt.
https://iexcloud.io/docs/api/#deep-trading-status
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if symbol:
return _get(
"deep/trading-status?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return _get("deep/trading-status", token=token, version=version, format=format)
@wraps(iexTradingStatus)
async def iexTradingStatusAsync(symbol=None, token="", version="stable", format="json"):
_raiseIfNotStr(symbol)
if symbol:
return await _getAsync(
"deep/trading-status?symbols=" + symbol,
token=token,
version=version,
format=format,
)
return await _get(
"deep/trading-status", token=token, version=version, format=format
)
@wraps(iexTradingStatus)
def iexTradingStatusDF(*args, **kwargs):
x = iexTradingStatus(*args, **kwargs)
data = []
for key in x:
d = x[key]
d["symbol"] = key
data.append(d)
return _toDatetime(pd.DataFrame(data))
def iexHist(date=None, token="", version="stable", format="json"):
"""
Args:
date (datetime): Effective date
token (str): Access token
version (str): API version
format (str): return format, defaults to json
Returns:
dict: result
"""
if date is None:
return _get("hist", token=token, version=version, format=format)
else:
date = _strOrDate(date)
return _get("hist?date=" + date, token=token, version=version, format=format)
@wraps(iexHist)
async def iexHistAsync(date=None, token="", version="stable", format="json"):
if date is None:
return await _get("hist", token=token, version=version, format=format)
else:
date = _strOrDate(date)
return await _getAsync(
"hist?date=" + date, token=token, version=version, format=format
)
@wraps(iexHist)
def iexHistDF(*args, **kwargs):
x = iexHist(*args, **kwargs)
data = []
for key in x:
dat = x[key]
for item in dat:
item["date"] = key
data.append(item)
return _reindex(_toDatetime(pd.DataFrame(data)), "date")  # reindex key assumed from the "date" field set above
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
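# For example (arguments assumed for illustration):
#   get_upcast_box(pd.Index, pd.Series([1]))  # -> Series, since a Series
#   operand takes priority over an Index box.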
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
import pandas as pd
import matplotlib.pyplot as plt
# import seaborn as sns
# sns.set(rc={'figure.figsize':(11, 4)})
dataDirectory = '../data/'
graphsDirectory = 'graphs/'
def visDay(dfs,sensors,day):
plt.clf()
fig, axs = plt.subplots(len(dfs),sharex=True,sharey=True,gridspec_kw={'hspace': 0.5},figsize=(20, 10))
fig.suptitle('Measurements for day {0}'.format(day))
for i in range(len(dfs)):
axs[i].plot(dfs[i]['measurement'],marker='.', alpha=0.5, linestyle='None')
axs[i].set_title('Sensor {0}'.format(sensors[i]))
axs[i].set_ylabel('Temperature in °C')
plt.ylim([15,30])
plt.savefig(graphsDirectory+"day_{0}_sensors_{1}.pdf".format(day,str(sensors).replace(' ','')))
def visSingleSensor(df,day,sensor):
# print("sensor {0} on day {1}".format(sensor,day))
print(day,'&',sensor,'&',len(df),'&',df['measurement'].max(),"&", df['measurement'].min(),"&",df['measurement'].mean(),'&',df['measurement'][0],'&',df['measurement'][-1])
plt.clf()
plt.figure(figsize=(10, 5))
plt.plot(df['measurement'],marker='.', alpha=0.5, linestyle='None')
plt.title('Temperature for sensor {0} on day {1}'.format(sensor,day))
plt.ylabel('Temperature in °C')
# plt.show()
plt.savefig(graphsDirectory+"day_{0}_sensor_{1}.pdf".format(day,sensor))
def createGraphsDayOne():
firstDate = '2017-02-28'
for sens in [sensors1,sensors24]:
sensorDfs = []
for i in sens:
df = pd.read_csv(dataDirectory + firstDate + '_sensor_{0}.csv'.format(i), dtype={"measurement": float, "voltage": float})
df['time'] = pd.to_datetime(df['time'])
df.set_index('time',inplace=True)
df.index = df.index.time
visSingleSensor(df,1,i)
sensorDfs.append(df)
visDay(sensorDfs,sens,1)
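# The routine below counts, per sensor, readings beyond the expected two
# measurements per 60-second window and reports the surplus as anomalous.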
def anomaliesDayOne():
firstDate = '2017-02-28'
for i in [1,24]:
df = pd.read_csv(dataDirectory + firstDate + '_sensor_{0}.csv'.format(i),
dtype={"measurement": float, "voltage": float})
df['time'] = pd.to_datetime(df['time'])
df.set_index('time', inplace=True)
# df.index = df.index.time
groups = df.groupby(pd.Grouper(freq='60s'))
count = 0
for group in groups:
if len(group[1]) > 2:
count += len(group[1]) - 2
print(i,count,len(df)-count)
def createGraphsAllWeek():
for day in range(1,8,1):
date = '2017-03-0{0}'.format(day)
for sens in [sensors1, sensors24]:
sensorDfs = []
for sensor in sens:
df = pd.read_csv(dataDirectory + date + '_sensor_{0}.csv'.format(sensor),
dtype={"measurement": float, "voltage": float})
df['time'] = pd.to_datetime(df['time'])
# PyLS-PM Library
# Author: <NAME>
# Creation: November 2016
# Description: Library based on <NAME>'s simplePLS,
# <NAME>'s plspm and <NAME>'s matrixpls, all written in R
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats
from .qpLRlib4 import otimiza, plotaIC
import scipy.linalg
from collections import Counter
from .pca import *
from pandas.plotting import scatter_matrix
from .adequacy import *
class PyLSpm(object):
def PCA(self):
for i in range(self.lenlatent):
print(self.latent[i])
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
PCAdo(block, self.latent[i])
print('KMO')
print(KMO(block))
print('BTS')
print(BTS(block))
def scatterMatrix(self):
for i in range(1, self.lenlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
scatter_matrix(block, diagonal='kde')
plt.savefig('imgs/scatter' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
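# sampleSize() below sweeps statistical power from 0.50 to 0.99 and, for each
# level, derives the sample size needed to detect a correlation of r = 0.3 at
# alpha = 0.05 via the Fisher z-transform: C = 0.5 * ln((1 + r) / (1 - r)),
# N = ((Za - Zb) / C)**2 + 3.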
def sampleSize(self):
r = 0.3
alpha = 0.05
# power=0.9
C = 0.5 * np.log((1 + r) / (1 - r))
Za = scipy.stats.norm.ppf(1 - (0.05 / 2))
sizeArray = []
powerArray = []
power = 0.5
for i in range(50, 100, 1):
power = i / 100
powerArray.append(power)
Zb = scipy.stats.norm.ppf(1 - power)
N = abs((Za - Zb) / C)**2 + 3
sizeArray.append(N)
return [powerArray, sizeArray]
def normaliza(self, X):
correction = np.sqrt((len(X) - 1) / len(X)) # std factor corretion
mean_ = np.mean(X, 0)
scale_ = np.std(X, 0)
X = X - mean_
X = X / (scale_ * correction)
return X
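# gof() below implements the global goodness-of-fit index:
# GoF = sqrt(mean(AVE weighted by block size) * mean(R^2 of endogenous LVs)).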
def gof(self):
r2mean = np.mean(self.r2.T[self.endoexo()[0]].values)
AVEmean = self.AVE().copy()
totalblock = 0
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = len(block.columns.values)
totalblock += block
AVEmean[self.latent[i]] = AVEmean[self.latent[i]] * block
AVEmean = np.sum(AVEmean) / totalblock
return np.sqrt(AVEmean * r2mean)
def endoexo(self):
exoVar = []
endoVar = []
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
return endoVar, exoVar
def residuals(self):
exoVar = []
endoVar = []
outer_residuals = self.data.copy()
# comun_ = self.data.copy()
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = block.columns.values
loadings = self.outer_loadings.ix[
block][self.latent[i]].values
outer_ = self.fscores.ix[:, i].values
outer_ = outer_.reshape(len(outer_), 1)
loadings = loadings.reshape(len(loadings), 1)
outer_ = np.dot(outer_, loadings.T)
outer_residuals.ix[:, block] = self.data_.ix[
:, block] - outer_
# comun_.ix[:, block] = outer_
inner_residuals = self.fscores[endoVar]
inner_ = pd.DataFrame.dot(self.fscores, self.path_matrix.ix[endoVar].T)
inner_residuals = self.fscores[endoVar] - inner_
residuals = pd.concat([outer_residuals, inner_residuals], axis=1)
mean_ = np.mean(self.data, 0)
# comun_ = comun_.apply(lambda row: row + mean_, axis=1)
sumOuterResid = pd.DataFrame.sum(
pd.DataFrame.sum(outer_residuals**2))
sumInnerResid = pd.DataFrame.sum(
pd.DataFrame.sum(inner_residuals**2))
divFun = sumOuterResid + sumInnerResid
return residuals, outer_residuals, inner_residuals, divFun
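# srmr() below is the standardized root mean square residual: the square root
# of the mean squared difference between the empirical correlation matrix and
# the model-implied one.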
def srmr(self):
srmr = (self.empirical() - self.implied())
srmr = np.sqrt(((srmr.values) ** 2).mean())
return srmr
def implied(self):
corLVs = pd.DataFrame.cov(self.fscores)
implied_ = pd.DataFrame.dot(self.outer_loadings, corLVs)
implied = pd.DataFrame.dot(implied_, self.outer_loadings.T)
implied.values[[np.arange(len(self.manifests))] * 2] = 1
return implied
def empirical(self):
empirical = self.data_
return pd.DataFrame.corr(empirical)
def frequency(self, data=None, manifests=None):
if data is None:
data = self.data
if manifests is None:
manifests = self.manifests
frequencia = pd.DataFrame(0, index=range(1, 6), columns=manifests)
for i in range(len(manifests)):
frequencia[manifests[i]] = data[
manifests[i]].value_counts()
frequencia = frequencia / len(data) * 100
frequencia = frequencia.reindex_axis(
sorted(frequencia.columns), axis=1)
frequencia = frequencia.fillna(0).T
frequencia = frequencia[(frequencia.T != 0).any()]
maximo = pd.DataFrame.max(pd.DataFrame.max(data, axis=0))
if int(maximo) & 1:
neg = np.sum(frequencia.ix[:, 1: ((maximo - 1) / 2)], axis=1)
ind = frequencia.ix[:, ((maximo + 1) / 2)]
pos = np.sum(
frequencia.ix[:, (((maximo + 1) / 2) + 1):maximo], axis=1)
else:
neg = np.sum(frequencia.ix[:, 1:((maximo) / 2)], axis=1)
ind = 0
pos = np.sum(frequencia.ix[:, (((maximo) / 2) + 1):maximo], axis=1)
frequencia['Neg.'] = pd.Series(
neg, index=frequencia.index)
frequencia['Ind.'] = pd.Series(
ind, index=frequencia.index)
frequencia['Pos.'] = pd.Series(
pos, index=frequencia.index)
return frequencia
def frequencyPlot(self, data_, SEM=None):
segmento = 'SEM'
SEMmax = pd.DataFrame.max(SEM)
ok = None
for i in range(1, self.lenlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = pd.concat([block, SEM], axis=1)
for j in range(SEMmax + 1):
dataSEM = (block.loc[data_[segmento] == j]
).drop(segmento, axis=1)
block_val = dataSEM.columns.values
dataSEM = self.frequency(dataSEM, block_val)['Pos.']
dataSEM = dataSEM.rename(j + 1)
ok = dataSEM if ok is None else pd.concat(
[ok, dataSEM], axis=1)
for i in range(1, self.lenlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
plotando = ok.ix[block_val].dropna(axis=1)
plotando.plot.bar()
plt.legend(loc='upper center',
bbox_to_anchor=(0.5, -.08), ncol=6)
plt.savefig('imgs/frequency' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
# plt.show()
# block.plot.bar()
# plt.show()
'''for i in range(1, self.lenlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
block = self.frequency(block, block_val)
block.plot.bar()
plt.show()'''
def dataInfo(self):
sd_ = np.std(self.data, 0)
mean_ = np.mean(self.data, 0)
skew = scipy.stats.skew(self.data)
kurtosis = scipy.stats.kurtosis(self.data)
w = [scipy.stats.shapiro(self.data.ix[:, i])[0]
for i in range(len(self.data.columns))]
return [mean_, sd_, skew, kurtosis, w]
def predict(self, method='redundancy'):
exoVar = []
endoVar = []
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
if (method == 'exogenous'):
Beta = self.path_matrix.ix[endoVar][endoVar]
Gamma = self.path_matrix.ix[endoVar][exoVar]
beta = [1 if (self.latent[i] in exoVar)
else 0 for i in range(self.lenlatent)]
beta = np.diag(beta)
beta_ = [1 for i in range(len(Beta))]
beta_ = np.diag(beta_)
beta = pd.DataFrame(beta, index=self.latent, columns=self.latent)
mid = pd.DataFrame.dot(Gamma.T, np.linalg.inv(beta_ - Beta.T))
mid = (mid.T.values).flatten('F')
k = 0
for j in range(len(exoVar)):
for i in range(len(endoVar)):
beta.ix[endoVar[i], exoVar[j]] = mid[k]
k += 1
elif (method == 'redundancy'):
beta = self.path_matrix.copy()
beta_ = pd.DataFrame(1, index=np.arange(
len(exoVar)), columns=np.arange(len(exoVar)))
beta.ix[exoVar, exoVar] = np.diag(np.diag(beta_.values))
elif (method == 'communality'):
beta = np.diag(np.ones(len(self.path_matrix)))
beta = pd.DataFrame(beta)
partial_ = pd.DataFrame.dot(self.outer_weights, beta.T.values)
prediction = pd.DataFrame.dot(partial_, self.outer_loadings.T.values)
predicted = pd.DataFrame.dot(self.data, prediction)
predicted.columns = self.manifests
mean_ = np.mean(self.data, 0)
intercept = mean_ - np.dot(mean_, prediction)
predictedData = predicted.apply(lambda row: row + intercept, axis=1)
return predictedData
def cr(self):
# Composite Reliability
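# For each block: CR = (sum|loading|)^2 / ((sum|loading|)^2 + sum(1 - loading^2)),
# with loadings taken from the first principal component of the block covariance matrix.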
composite = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = len(block.columns)
if(p != 1):
cor_mat = np.cov(block.T)
evals, evecs = np.linalg.eig(cor_mat)
U, S, V = np.linalg.svd(cor_mat, full_matrices=False)
indices = np.argsort(evals)
indices = indices[::-1]
evecs = evecs[:, indices]
evals = evals[indices]
loadings = V[0, :] * np.sqrt(evals[0])
numerador = np.sum(abs(loadings))**2
denominador = numerador + (p - np.sum(loadings ** 2))
cr = numerador / denominador
composite[self.latent[i]] = cr
else:
composite[self.latent[i]] = 1
composite = composite.T
return(composite)
def r2adjusted(self):
n = len(self.data_)
r2 = self.r2.values
r2adjusted = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
p = sum(self.LVariables['target'] == self.latent[i])
r2adjusted[self.latent[i]] = r2[i] - \
(p * (1 - r2[i])) / (n - p - 1)
return r2adjusted.T
def htmt(self):
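# Heterotrait-Monotrait ratio: the mean correlation between items of two different blocks,
# divided by the geometric mean of the two within-block (monotrait) mean correlations.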
htmt_ = pd.DataFrame(pd.DataFrame.corr(self.data_),
index=self.manifests, columns=self.manifests)
mean = []
allBlocks = []
for i in range(self.lenlatent):
block_ = self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]
allBlocks.append(list(block_.values))
block = htmt_.ix[block_, block_]
mean_ = (block - np.diag(np.diag(block))).values
mean_[mean_ == 0] = np.nan
mean.append(np.nanmean(mean_))
comb = [[k, j] for k in range(self.lenlatent)
for j in range(self.lenlatent)]
comb_ = [(np.sqrt(mean[comb[i][1]] * mean[comb[i][0]]))
for i in range(self.lenlatent ** 2)]
comb__ = []
for i in range(self.lenlatent ** 2):
block = (htmt_.ix[allBlocks[comb[i][1]],
allBlocks[comb[i][0]]]).values
# block[block == 1] = np.nan
comb__.append(np.nanmean(block))
htmt__ = np.divide(comb__, comb_)
where_are_NaNs = np.isnan(htmt__)
htmt__[where_are_NaNs] = 0
htmt = pd.DataFrame(np.tril(htmt__.reshape(
(self.lenlatent, self.lenlatent)), k=-1), index=self.latent, columns=self.latent)
return htmt
def comunalidades(self):
# Comunalidades
return self.outer_loadings**2
def AVE(self):
# AVE
return self.comunalidades().apply(lambda column: column.sum() / (column != 0).sum())
def fornell(self):
cor_ = pd.DataFrame.corr(self.fscores)**2
AVE = self.comunalidades().apply(lambda column: column.sum() / (column != 0).sum())
for i in range(len(cor_)):
cor_.ix[i, i] = AVE[i]
return(cor_)
def rhoA(self):
# rhoA
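# Presumably Dijkstra-Henseler's rho_A reliability, built from the outer weights and
# each block's empirical covariance (S'S below).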
rhoA = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
weights = pd.DataFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).any()]
result = pd.DataFrame.dot(weights.T, weights)
result_ = pd.DataFrame.dot(weights, weights.T)
S = self.data_[self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]]
S = | pd.DataFrame.dot(S.T, S) | pandas.DataFrame.dot |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
import string
from collections import OrderedDict
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.uuid import gen_uuid
from kartothek.io_components.metapartition import MetaPartition
from kartothek.serialization import DataFrameSerializer
def test_file_structure_dataset_v4(store_factory, bound_store_dataframes):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, dataset_uuid="dataset_uuid", metadata_version=4
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
# TODO: json -> msgpack
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/cluster_1.parquet",
"dataset_uuid/helper/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/cluster_1.parquet",
"dataset_uuid/core/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on(store_factory, bound_store_dataframes):
store = store_factory()
assert set(store.keys()) == set()
df = pd.DataFrame(
{"P": [1, 2, 3, 1, 2, 3], "L": [1, 1, 1, 2, 2, 2], "TARGET": np.arange(10, 16)}
)
df_helper = pd.DataFrame(
{
"P": [1, 2, 3, 1, 2, 3],
"L": [1, 1, 1, 2, 2, 2],
"info": string.ascii_lowercase[:2],
}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P", "L"]
assert len(dataset.partitions) == 12
store = store_factory()
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/P=1/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/P=1/L=1/cluster_1.parquet",
"dataset_uuid/core/P=1/L=1/cluster_2.parquet",
"dataset_uuid/core/P=1/L=2/cluster_1.parquet",
"dataset_uuid/core/P=1/L=2/cluster_2.parquet",
"dataset_uuid/core/P=2/L=1/cluster_1.parquet",
"dataset_uuid/core/P=2/L=1/cluster_2.parquet",
"dataset_uuid/core/P=2/L=2/cluster_1.parquet",
"dataset_uuid/core/P=2/L=2/cluster_2.parquet",
"dataset_uuid/core/P=3/L=1/cluster_1.parquet",
"dataset_uuid/core/P=3/L=1/cluster_2.parquet",
"dataset_uuid/core/P=3/L=2/cluster_1.parquet",
"dataset_uuid/core/P=3/L=2/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col(
store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col_simple_group(
store_factory, bound_store_dataframes
):
"""
Pandas seems to stop evaluating the groupby expression if the dataframe produced after the
first column split has length 1. This appears to be an optimization, but it should still raise a KeyError
"""
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_store_dataframes_as_dataset(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
secondary_indices=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
assert "P" in dataset.indices
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
index_dct = stored_dataset.indices["P"].load(store).index_dct
assert sorted(index_dct.keys()) == list(range(0, 10))
assert any([sorted(p) == ["cluster_1", "cluster_2"] for p in index_dct.values()])
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
def test_store_dataframes_as_dataset_empty_dataframe(
store_factory, metadata_version, df_all_types, bound_store_dataframes
):
"""
Test that writing an empty column succeeds.
In particular, this may fail due to too strict schema validation.
"""
df_empty = df_all_types.drop(0)
# Store a second table with shared columns. All shared columns must be of the same type
# This may fail in the presence of empty partitions if the schema validation doesn't account for it
df_shared_cols = df_all_types.loc[:, df_all_types.columns[:3]]
df_shared_cols["different_col"] = "a"
assert df_empty.empty
df_list = [
{
"label": "cluster_1",
"data": [("tableA", df_empty), ("tableB", df_shared_cols.copy(deep=True))],
},
{
"label": "cluster_2",
"data": [
("tableA", df_all_types),
("tableB", df_shared_cols.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableA"], store=store
)
pdt.assert_frame_equal(df_empty, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableA"], store=store
)
# Roundtrips for type date are not type preserving
df_stored["date"] = df_stored["date"].dt.date
pdt.assert_frame_equal(df_all_types, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
def test_store_dataframes_as_dataset_batch_mode(
store_factory, metadata_version, bound_store_dataframes
):
values_p1 = [1, 2, 3]
values_p2 = [4, 5, 6]
df = pd.DataFrame({"P": values_p1})
df2 = pd.DataFrame({"P": values_p2})
df_list = [
[
{
"label": "cluster_1",
"data": [("core", df)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_1"] for v in values_p1}
)
},
},
{
"label": "cluster_2",
"data": [("core", df2)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_2"] for v in values_p2}
)
},
},
]
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store(
"dataset_uuid", store
).load_all_indices(store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df2, df_stored)
assert stored_dataset.indices["P"].to_dict() == {
1: np.array(["cluster_1"], dtype=object),
2: np.array(["cluster_1"], dtype=object),
3: np.array(["cluster_1"], dtype=object),
4: np.array(["cluster_2"], dtype=object),
5: np.array(["cluster_2"], dtype=object),
6: np.array(["cluster_2"], dtype=object),
}
def test_store_dataframes_as_dataset_auto_uuid(
store_factory, metadata_version, mock_uuid, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
{
"label": "cluster_2",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, metadata_version=metadata_version
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store(
"auto_dataset_uuid", store_factory()
)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
def test_store_dataframes_as_dataset_list_input(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame(
{
"P": np.arange(100, 110),
"L": np.arange(100, 110),
"TARGET": np.arange(10, 20),
}
)
df_list = [df, df2]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store_factory())
assert dataset == stored_dataset
def test_store_dataframes_as_dataset_mp_partition_on_none(
metadata_version, store, store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame({"P": np.arange(0, 10), "info": np.arange(100, 110)})
mp = MetaPartition(
label=gen_uuid(),
data={"core": df, "helper": df2},
metadata_version=metadata_version,
)
df_list = [None, mp]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
partition_on=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P"]
assert len(dataset.partitions) == 10
assert dataset.metadata_version == metadata_version
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset == stored_dataset
def test_store_dataframes_partition_on(store_factory, bound_store_dataframes):
df = pd.DataFrame(
OrderedDict([("location", ["0", "1", "2"]), ("other", ["a", "a", "a"])])
)
# First partition is empty, test this edgecase
input_ = [
{
"label": "label",
"data": [("order_proposals", df.head(0))],
"indices": {"location": {}},
},
{
"label": "label",
"data": [("order_proposals", df)],
"indices": {"location": {k: ["label"] for k in df["location"].unique()}},
},
]
dataset = bound_store_dataframes(
input_,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=4,
partition_on=["other"],
)
assert len(dataset.partitions) == 1
assert len(dataset.indices) == 1
assert dataset.partition_keys == ["other"]
def _exception_str(exception):
"""
Extract the exception message, even if this is a re-throw of an exception
in distributed.
"""
if isinstance(exception, ValueError) and exception.args[0] == "Long error message":
return exception.args[1]
return str(exception)
@pytest.mark.parametrize(
"dfs,ok",
[
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int32),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int16),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int16),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": | pd.Series([2], dtype=np.int32) | pandas.Series |
#scikit learn ensemble workflow for binary probability
import time; start_time = time.time()
import numpy as np
import pandas as pd
from sklearn import ensemble
import xgboost as xgb
from sklearn.metrics import log_loss, make_scorer
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import train_test_split
import random; random.seed(2016)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
num_train = train.shape[0]
y_train = train['target']
train = train.drop(['target'],axis=1)
id_test = test['ID']
def fill_nan_null(val):
ret_fill_nan_null = 0.0
if val == True:
ret_fill_nan_null = 1.0
return ret_fill_nan_null
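# Used below to turn pd.isnull() results into 0.0/1.0 indicator columns ('<col>_nan_').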
df_all = pd.concat((train, test), axis=0, ignore_index=True)
df_all['null_count'] = df_all.isnull().sum(axis=1).tolist()
df_all_temp = df_all['ID']
df_all = df_all.drop(['ID'],axis=1)
df_data_types = df_all.dtypes[:] #{'object':0,'int64':0,'float64':0,'datetime64':0}
d_col_drops = []
for i in range(len(df_data_types)):
df_all[str(df_data_types.index[i])+'_nan_'] = df_all[str(df_data_types.index[i])].map(lambda x:fill_nan_null( | pd.isnull(x) | pandas.isnull |
import os
import requests
import pandas as pd
from random import randint
from django.db.models import Q
from .models import Account
api_key = os.environ.get('IEX_API_KEYS')
TEST_OR_PROD = 'cloud'
def make_position_request(tickers):
data = []
for x in tickers:
response = requests.get("https://{}.iexapis.com/stable/stock/{}/quote?displayPercent=true&token={}".format(TEST_OR_PROD, x, api_key)).json()
data.append(response)
df = | pd.DataFrame(data) | pandas.DataFrame |
import ast
import importlib
import re
from inspect import isclass
from mimetypes import add_type, guess_type
import numpy as np
import pandas as pd
import woodwork as ww
from woodwork.pandas_backport import guess_datetime_format
# Dictionary mapping formats/content types to the appropriate pandas read function
type_to_read_func_map = {
"csv": pd.read_csv,
"text/csv": pd.read_csv,
"parquet": pd.read_parquet,
"application/parquet": pd.read_parquet,
"arrow": pd.read_feather,
"application/arrow": pd.read_feather,
"feather": pd.read_feather,
"application/feather": pd.read_feather,
"orc": pd.read_orc,
"application/orc": pd.read_orc,
}
PYARROW_ERR_MSG = (
"The pyarrow library is required to read from parquet/arrow/feather files.\n"
"Install via pip:\n"
" pip install 'pyarrow>=3.0.0'\n"
"Install via conda:\n"
" conda install 'pyarrow>=3.0.0'"
)
# Add new mimetypes
add_type("application/parquet", ".parquet")
add_type("application/arrow", ".arrow")
add_type("application/feather", ".feather")
add_type("application/orc", ".orc")
def import_or_none(library):
"""Attempts to import the requested library.
Args:
library (str): the name of the library
Returns: the library if it is installed, else None
"""
try:
return importlib.import_module(library)
except ImportError:
return None
def camel_to_snake(s):
s = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", s)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s).lower()
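# e.g. camel_to_snake("PostalCode") -> "postal_code", camel_to_snake("HTTPResponse2xx") -> "http_response2xx"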
def _convert_input_to_set(semantic_tags, error_language="semantic_tags", validate=True):
"""Takes input as a single string, a list of strings, or a set of strings
and returns a set with the supplied values. If no values are supplied,
an empty set will be returned."""
if not semantic_tags:
return set()
if validate:
_validate_tags_input_type(semantic_tags, error_language)
if isinstance(semantic_tags, str):
return {semantic_tags}
if isinstance(semantic_tags, list):
semantic_tags = set(semantic_tags)
if validate:
_validate_string_tags(semantic_tags, error_language)
return semantic_tags
def _validate_tags_input_type(semantic_tags, error_language):
if type(semantic_tags) not in [list, set, str]:
raise TypeError(f"{error_language} must be a string, set or list")
def _validate_string_tags(semantic_tags, error_language):
if not all([isinstance(tag, str) for tag in semantic_tags]):
raise TypeError(f"{error_language} must contain only strings")
def read_file(
filepath=None,
content_type=None,
name=None,
index=None,
time_index=None,
semantic_tags=None,
logical_types=None,
use_standard_tags=True,
column_origins=None,
replace_nan=False,
validate=True,
**kwargs,
):
"""Read data from the specified file and return a DataFrame with initialized Woodwork typing information.
Note:
As the engine `fastparquet` cannot handle nullable pandas dtypes, `pyarrow` will be used
for reading from parquet and arrow.
Args:
filepath (str): A valid string path to the file to read
content_type (str): Content type of file to read
name (str, optional): Name used to identify the DataFrame.
index (str, optional): Name of the index column.
time_index (str, optional): Name of the time index column.
semantic_tags (dict, optional): Dictionary mapping column names in the dataframe to the
semantic tags for the column. The keys in the dictionary should be strings
that correspond to columns in the underlying dataframe. There are two options for
specifying the dictionary values:
(str): If only one semantic tag is being set, a single string can be used as a value.
(list[str] or set[str]): If multiple tags are being set, a list or set of strings can be
used as the value.
Semantic tags will be set to an empty set for any column not included in the
dictionary.
logical_types (dict[str -> LogicalType], optional): Dictionary mapping column names in
the dataframe to the LogicalType for the column. LogicalTypes will be inferred
for any columns not present in the dictionary.
use_standard_tags (bool, optional): If True, will add standard semantic tags to columns based
on the inferred or specified logical type for the column. Defaults to True.
column_origins (str or dict[str -> str], optional): Origin of each column. If a string is supplied, it is
used as the origin for all columns. A dictionary can be used to set origins for individual columns.
replace_nan (bool, optional): Whether to replace empty string values and string representations of
NaN values ("nan", "<NA>") with np.nan or pd.NA values based on column dtype. Defaults to False.
validate (bool, optional): Whether parameter and data validation should occur. Defaults to True. Warning:
Should be set to False only when parameters and data are known to be valid.
Any errors resulting from skipping validation with invalid inputs may not be easily understood.
**kwargs: Additional keyword arguments to pass to the underlying pandas read file function. For more
information on available keywords refer to the pandas documentation.
Returns:
pd.DataFrame: DataFrame created from the specified file with Woodwork typing information initialized.
"""
if content_type is None:
inferred_type, _ = guess_type(filepath)
if inferred_type is None:
raise RuntimeError(
"Content type could not be inferred. Please specify content_type and try again."
)
content_type = inferred_type
if content_type not in type_to_read_func_map:
raise RuntimeError(
"Reading from content type {} is not currently supported".format(
content_type
)
)
pyarrow_types = [
"parquet",
"application/parquet",
"arrow",
"application/arrow",
"feather",
"application/feather",
"orc",
"application/orc",
]
if content_type in pyarrow_types:
import_or_raise("pyarrow", PYARROW_ERR_MSG)
if content_type in ["parquet", "application/parquet"]:
kwargs["engine"] = "pyarrow"
dataframe = type_to_read_func_map[content_type](filepath, **kwargs)
if replace_nan:
dataframe = _replace_nan_strings(dataframe)
dataframe.ww.init(
name=name,
index=index,
time_index=time_index,
semantic_tags=semantic_tags,
logical_types=logical_types,
use_standard_tags=use_standard_tags,
column_origins=column_origins,
validate=validate,
)
return dataframe
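# Example usage (illustrative; the file name and column names below are assumptions):
#   df = read_file("orders.csv", index="order_id", logical_types={"order_date": "Datetime"})
#   df.ww.schema  # Woodwork typing information is already initialized on the result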
def import_or_raise(library, error_msg):
"""Attempts to import the requested library. If the import fails, raises an
ImportError with the supplied error message.
Args:
library (str): the name of the library
error_msg (str): error message to return if the import fails
"""
try:
return importlib.import_module(library)
except ImportError:
raise ImportError(error_msg)
def _is_s3(string):
"""Checks if the given string is a s3 path. Returns a boolean."""
return "s3://" in string
def _is_url(string):
"""Checks if the given string is an url path. Returns a boolean."""
return "http" in string
def _reformat_to_latlong(latlong, use_list=False):
"""Reformats LatLong columns to be tuples of floats. Uses np.nan for null values."""
if _is_null_latlong(latlong):
return np.nan
if isinstance(latlong, str):
try:
# Serialized latlong columns from csv or parquet will be strings, so null values will be
# read as the string 'nan' in pandas and Dask and 'NaN' in Koalas
# neither of which is interpretable as a null value
if "nan" in latlong:
latlong = latlong.replace("nan", "None")
if "NaN" in latlong:
latlong = latlong.replace("NaN", "None")
latlong = ast.literal_eval(latlong)
except ValueError:
pass
if isinstance(latlong, (tuple, list)):
if len(latlong) != 2:
raise ValueError(
f"LatLong values must have exactly two values. {latlong} does not have two values."
)
latitude, longitude = map(_to_latlong_float, latlong)
# (np.nan, np.nan) should be counted as a single null value
if | pd.isnull(latitude) | pandas.isnull |
from hashlib import sha256
import time
import random
import uuid
import pandas as pd
import logging
import os
import gnupg
from tempfile import TemporaryDirectory
from datetime import datetime
CSV_SEPARATOR = ";"
PAN_UNENROLLED_PREFIX = "pan_unknown_"
SECONDS_IN_DAY = 86400
MAX_DAYS_BACK = 3
TRANSACTION_FILE_EXTENSION = ".csv"
ENCRYPTED_FILE_EXTENSION = ".pgp"
APPLICATION_PREFIX_FILE_NAME = "CSTAR"
TRANSACTION_LOG_FIXED_SEGMENT = "TRNLOG"
CHECKSUM_PREFIX = "#sha256sum:"
PAYMENT_REVERSAL_RATIO = 100
POS_PHYSICAL_ECOMMERCE_RATIO = 5
PERSON_NATURAL_LEGAL_RATIO = 3
PAR_RATIO = 7
ACQUIRER_CODE = "99999"
CURRENCY_ISO4217 = "978"
PAYMENT_CIRCUITS = [f"{i:02}" for i in range(11)]
OFFSETS = [
".000Z",
".000+01:00",
".000+0200",
".500+01:30"
]
ACQUIRER_ID = "09509"
MERCHANT_ID = "400000080205"
TERMINAL_ID = "80205005"
BIN = "40236010"
MCC = "4900"
FISCAL_CODE = "RSSMRA80A01H501U"
VAT = "12345678903"
class Transactionfilter:
"""Utilities related to the rtd-ms-transaction-filter service, a.k.a. Batch Acquirer"""
def __init__(self, args):
self.args = args
def synthetic_hashpans(self):
"""Produces a synthetic version of the CSV file obtainable from the RTD /hashed-pans endpoint
Parameters:
--pans-prefix: synthetic PANs will be generated as "{PREFIX}{NUMBER}"
--haspans-qty: the number of hashpans to generate
--salt: the salt to use when performing PAN hashing
"""
if not self.args.pans_prefix:
raise ValueError("--pans-prefix is mandatory")
if not self.args.hashpans_qty:
raise ValueError("--hashpans-qty is mandatory")
if not self.args.salt:
raise ValueError("--salt is mandatory")
synthetic_pans = [
f"{self.args.pans_prefix}{i}" for i in range(self.args.hashpans_qty)
]
hpans = [
sha256(f"{pan}{self.args.salt}".encode()).hexdigest()
for pan in synthetic_pans
]
hashpans_df = | pd.DataFrame(hpans, columns=["hashed_pan"]) | pandas.DataFrame |
import re
import sys
import numpy as np
import pytest
from pandas.compat import PYPY
from pandas import Categorical, Index, NaT, Series, date_range
import pandas._testing as tm
from pandas.api.types import is_scalar
class TestCategoricalAnalytics:
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_not_ordered_raises(self, aggregation):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
msg = f"Categorical is not ordered for operation {aggregation}"
agg_func = getattr(cat, aggregation)
with pytest.raises(TypeError, match=msg):
agg_func()
def test_min_max_ordered(self):
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
assert _min == "a"
assert _max == "d"
cat = Categorical(
["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True
)
_min = cat.min()
_max = cat.max()
assert _min == "d"
assert _max == "a"
@pytest.mark.parametrize(
"categories,expected",
[
(list("ABC"), np.NaN),
([1, 2, 3], np.NaN),
pytest.param(
Series(date_range("2020-01-01", periods=3), dtype="category"),
NaT,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/29962"
),
),
],
)
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_ordered_empty(self, categories, expected, aggregation):
# GH 30227
cat = Categorical([], categories=categories, ordered=True)
agg_func = getattr(cat, aggregation)
result = agg_func()
assert result is expected
@pytest.mark.parametrize(
"values, categories",
[(["a", "b", "c", np.nan], list("cba")), ([1, 2, 3, np.nan], [3, 2, 1])],
)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_with_nan(self, values, categories, function, skipna):
# GH 25303
cat = Categorical(values, categories=categories, ordered=True)
result = getattr(cat, function)(skipna=skipna)
if skipna is False:
assert result is np.nan
else:
expected = categories[0] if function == "min" else categories[2]
assert result == expected
@pytest.mark.parametrize("function", ["min", "max"])
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max_only_nan(self, function, skipna):
# https://github.com/pandas-dev/pandas/issues/33450
cat = Categorical([np.nan], categories=[1, 2], ordered=True)
result = getattr(cat, function)(skipna=skipna)
assert result is np.nan
@pytest.mark.parametrize("method", ["min", "max"])
def test_deprecate_numeric_only_min_max(self, method):
# GH 25303
cat = Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
)
with tm.assert_produces_warning(expected_warning=FutureWarning):
getattr(cat, method)(numeric_only=True)
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_raises(self, method):
cat = Categorical(["a", "b", "c", "b"], ordered=False)
msg = (
f"Categorical is not ordered for operation {method}\n"
"you can use .as_ordered() to change the Categorical to an ordered one"
)
method = getattr(np, method)
with pytest.raises(TypeError, match=re.escape(msg)):
method(cat)
@pytest.mark.parametrize("kwarg", ["axis", "out", "keepdims"])
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
msg = (
f"the '{kwarg}' parameter is not supported in the pandas implementation "
f"of {method}"
)
if kwarg == "axis":
msg = r"`axis` must be fewer than the number of dimensions \(1\)"
kwargs = {kwarg: 42}
method = getattr(np, method)
with pytest.raises(ValueError, match=msg):
method(cat, **kwargs)
@pytest.mark.parametrize("method, expected", [("min", "a"), ("max", "c")])
def test_numpy_min_max_axis_equals_none(self, method, expected):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
method = getattr(np, method)
result = method(cat, axis=None)
assert result == expected
@pytest.mark.parametrize(
"values,categories,exp_mode",
[
([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]),
([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]),
([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]),
([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]),
([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
],
)
def test_mode(self, values, categories, exp_mode):
s = Categorical(values, categories=categories, ordered=True)
res = s.mode()
exp = Categorical(exp_mode, categories=categories, ordered=True)
tm.assert_categorical_equal(res, exp)
def test_searchsorted(self, ordered):
# https://github.com/pandas-dev/pandas/issues/8420
# https://github.com/pandas-dev/pandas/issues/14522
cat = Categorical(
["cheese", "milk", "apple", "bread", "bread"],
categories=["cheese", "milk", "apple", "bread"],
ordered=ordered,
)
ser = Series(cat)
# Searching for single item argument, side='left' (default)
res_cat = cat.searchsorted("apple")
assert res_cat == 2
assert is_scalar(res_cat)
res_ser = ser.searchsorted("apple")
assert res_ser == 2
assert is_scalar(res_ser)
# Searching for single item array, side='left' (default)
res_cat = cat.searchsorted(["bread"])
res_ser = ser.searchsorted(["bread"])
exp = np.array([3], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for several items array, side='right'
res_cat = cat.searchsorted(["apple", "bread"], side="right")
res_ser = ser.searchsorted(["apple", "bread"], side="right")
exp = np.array([3, 5], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for a single value that is not from the Categorical
with pytest.raises(KeyError, match="cucumber"):
cat.searchsorted("cucumber")
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted("cucumber")
# Searching for multiple values one of each is not from the Categorical
with pytest.raises(KeyError, match="cucumber"):
cat.searchsorted(["bread", "cucumber"])
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted(["bread", "cucumber"])
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = Index(["a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, cat)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"], categories=["a", "b", "c"])
exp = Index(["c", "a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(exp, categories=["c", "a", "b"])
tm.assert_categorical_equal(res, exp_cat)
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"], categories=["a", "b", "c"])
res = cat.unique()
exp = Index(["b", "a"])
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(["b", np.nan, "a"], categories=["b", "a"])
tm.assert_categorical_equal(res, exp_cat)
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(["b", "a", "b"], categories=["a", "b"], ordered=True)
res = cat.unique()
exp_cat = Categorical(["b", "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(
["c", "b", "a", "a"], categories=["a", "b", "c"], ordered=True
)
res = cat.unique()
exp_cat = Categorical(["c", "b", "a"], categories=["a", "b", "c"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(["b", "a", "a"], categories=["a", "b", "c"], ordered=True)
res = cat.unique()
exp_cat = Categorical(["b", "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(
["b", "b", np.nan, "a"], categories=["a", "b", "c"], ordered=True
)
res = cat.unique()
exp_cat = Categorical(["b", np.nan, "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
def test_unique_index_series(self):
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1])
# Categorical.unique sorts categories by appearance order
# if ordered=False
exp = Categorical([3, 1, 2], categories=[3, 1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
c = Categorical([1, 1, 2, 2], categories=[3, 2, 1])
exp = Categorical([1, 2], categories=[1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), | Index(exp) | pandas.Index |
"""
This module does some post-processing of the stats results
and writes out the results to file
ResultsWriter is subclassed for each data type.
Currently just the *_write* method is overridden in subclasses
which take into account the differences in output between voxel based data (3D volume) and organ volume results (CSV file)
"""
from pathlib import Path
from typing import Tuple
import logzero
from logzero import logger as logging
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from lama.common import write_array
from lama.stats.standard_stats.stats_objects import Stats
MINMAX_TSCORE = 50
FDR_CUTOFF = 0.05
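# t-statistics are clipped to +/-MINMAX_TSCORE when volumes are rebuilt, and any score whose
# BH q-value exceeds FDR_CUTOFF is zeroed before the filtered heatmaps are written.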
# 041219
# The stats files lose the header information and are written with an incorrect LPS header without flipping the spaces
# For now override this behaviour by adding RAS header. But this assumes the input is in RAS. Fix ASAP
class ResultsWriter:
def __init__(self,
results: Stats,
mask: np.ndarray,
out_dir: Path,
stats_name: str,
label_map: np.ndarray,
label_info_path: Path):
"""
TODO: map organ names back onto results
Parameters
----------
results
The object containing all the stats results
mask
Mask. Not needed for organ volumes
out_dir
The root directory to create a subdirectory in to store output.
stats_name
The name of the type of analysis (eg intensity)
label_map
for creating filtered labelmap overlays
label_info_path
Label map information
Returns
-------
"""
self.label_info_path = label_info_path
self.label_map = label_map
self.out_dir = out_dir
self.results = results
self.mask = mask
self.shape = results.input_.shape
self.stats_name = stats_name
self.line = results.input_.line
# Write out the line-level results
line_tstats = results.line_tstats
line_qvals = results.line_qvals
line_pvals = results.line_pvalues # Need to get thse into organ volumes
line_threshold_file = self.out_dir / f'Qvals_{stats_name}_{self.line}.csv'
write_threshold_file(line_qvals, line_tstats, line_threshold_file)
# this is for the lama_stats to know where the heatmaps for inversion are
self.line_heatmap = self._write(line_tstats, line_pvals, line_qvals, self.out_dir, self.line) # Bodge. Change!
# 140620: I think this is where it's dying
print('#finished Writing line results')
# pvalue_fdr_plot(results.line_pvalues, results.line_qvals, out_dir)
specimen_out_dir = out_dir / 'specimen-level'
specimen_out_dir.mkdir(exist_ok=True)
# For specimen-level results
for spec_id, spec_res in results.specimen_results.items():
spec_threshold_file = specimen_out_dir / f'Qvals_{stats_name}_{spec_id}.csv'
spec_t = spec_res['t']
spec_q = spec_res['q']
spec_p = spec_res['p']
write_threshold_file(spec_q, spec_t, spec_threshold_file)
self._write(spec_t, spec_p, spec_q, specimen_out_dir, spec_id)
# self.log(self.out_dir, 'Organ_volume stats', results.input_)
@staticmethod
def factory(data_type):
return {'jacobians': VoxelWriter,
'intensity': VoxelWriter,
'organ_volumes': OrganVolumeWriter
}[data_type]
def _write(self):
"""
Write the results to file
"""
raise NotImplementedError
class VoxelWriter(ResultsWriter):
def __init__(self, *args):
"""
Write the line and specimen-level results.
Remove any Nans
Threshold the t-statstistics based on q-value
Write nrrds to file.
Parameters
----------
results
out_dir
The root directory to put the results in
stats_name
The stats type (eg intensity)
label_info:
Not currently used
"""
self.line_heatmap = None
super().__init__(*args)
def _write(self, t_stats, pvals, qvals, outdir, name):
filtered_tstats = result_cutoff_filter(t_stats, qvals)
filtered_result = self.rebuild_array(filtered_tstats, self.shape, self.mask)
unfiltered_result = self.rebuild_array(t_stats, self.shape, self.mask)
heatmap_path = outdir / f'{name}_{self.stats_name}_t_fdr5.nrrd'
heatmap_path_unfiltered = outdir / f'{name}_{self.stats_name}_t.nrrd'
# Write qval-filtered t-stats
write_array(filtered_result, heatmap_path, ras=True)
# Write raw t-stats
write_array(unfiltered_result, heatmap_path_unfiltered, ras=True)
return heatmap_path
@staticmethod
def rebuild_array(array: np.ndarray, shape: Tuple, mask: np.ndarray) -> np.ndarray:
"""
The stats pipeline uses masked data throughout to save on resources
This function rebuilds the output files to the original 3D sizes of the input volumes
Parameters
----------
array
1d masked array to rebuild
shape
shape of input volume
mask
3D mask
Returns
-------
3d rebuilt array
"""
array[array > MINMAX_TSCORE] = MINMAX_TSCORE
array[array < -MINMAX_TSCORE] = - MINMAX_TSCORE
full_output = np.zeros(shape)
full_output[mask != False] = array
return full_output.reshape(shape)
class OrganVolumeWriter(ResultsWriter):
def __init__(self, *args):
super().__init__(*args)
self.line_heatmap = None
# Expose the results for clustering
self.organ_volume_results: pd.DataFrame = None#????
def _write(self, t_stats, pvals, qvals, out_dir, name):
# write_csv(self.line_tstats, self.line_qvals, line_out_path, list(results.input_.data.columns), label_info)
out_path = out_dir / f'{name}_{self.stats_name}.csv'
df = pd.DataFrame.from_dict(dict(t=t_stats, p=pvals, q=qvals))
label_info = pd.read_csv(self.label_info_path)
# Merge the results from each label to the label info
# The labels are stored in the InputData
labels = list(self.results.input_.data.columns)
df.index = labels
df.index = df.index.astype(np.int64)
df = df.merge(right=label_info, right_on='label', left_index=True)
df['significant_bh_q_5'] = df['q'] < 0.05
df.sort_values(by='q', inplace=True)
df.to_csv(out_path)
hit_labels = df[df['significant_bh_q_5'] == True]['label']
thresh_labels_out = out_dir / f'{name}_hit_organs.nrrd'
# self._write_thresholded_label_map(self.label_map, hit_labels, thresh_labels_out)
def _write_thresholded_label_map(self, label_map: np.ndarray, hits, out: Path):
"""
Write a label map with only the 'hit' organs in it
"""
if len(hits) > 0:
# Make a copy as it may be being used elsewhere
l = np.copy(label_map)
# Clear any non-hits
l[~np.isin(l, hits)] = 0
write_array(l, out, ras=True)
def result_cutoff_filter(t: np.ndarray, q: np.ndarray) -> np.ndarray:
"""
Zero out any t-score whose corresponding BH q-value exceeds FDR_CUTOFF (0.05)
Parameters
----------
"""
if len(t) != len(q):
raise ValueError
else:
mask = q > FDR_CUTOFF
masked = np.copy(t)
masked[mask] = 0
return masked
def pvalue_fdr_plot(pvals, qvals, outdir: Path):
"""
Write out an FDR correction plot.
Got the idea from: https://www.unc.edu/courses/2007spring/biol/145/001/docs/lectures/Nov12.html
"""
# Make p-value plot
line_fdr_fig = outdir / 'fdr_correction.png'
# debug.
df = | pd.DataFrame.from_dict({'p': pvals, 'q': qvals}) | pandas.DataFrame.from_dict |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
# cannot infer dtype, the inferred is int,
# but actually it is float
# just due to nan
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
pd.testing.assert_series_equal(result, expected)
r = series.apply(lambda x: [x, x + 1], convert_dtype=False)
result = r.execute().fetch()
expected = s_raw.apply(lambda x: [x, x + 1], convert_dtype=False)
pd.testing.assert_series_equal(result, expected)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
result = r.execute().fetch()
expected = s_raw2.apply(pd.Series)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_apply_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.apply(lambda row: str(row[0]) + row[1], axis=1)
result = r.execute().fetch()
expected = df1.apply(lambda row: str(row[0]) + row[1], axis=1)
pd.testing.assert_series_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.apply(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.apply(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_transform_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
idx_vals = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idx_vals)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
df = from_pandas_df(df_raw, chunk_size=5)
# test transform scenarios on data frames
r = df.transform(lambda x: list(range(len(x))))
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))))
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: list(range(len(x))), axis=1)
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(['cumsum', 'cummax', lambda x: x + 1])
result = r.execute().fetch()
expected = df_raw.transform(['cumsum', 'cummax', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
fn_dict = OrderedDict([
('A', 'cumsum'),
('D', ['cumsum', 'cummax']),
('F', lambda x: x + 1),
])
r = df.transform(fn_dict)
result = r.execute().fetch()
expected = df_raw.transform(fn_dict)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1])
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1], axis=1)
pd.testing.assert_frame_equal(result, expected)
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = df.transform(fn_list, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_list)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.sum(), _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.sum())
pd.testing.assert_series_equal(result, expected)
fn_dict = OrderedDict([
('A', rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1')),
('D', [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]),
('F', lambda x: x.iloc[:-1].reset_index(drop=True)),
])
r = df.transform(fn_dict, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_dict)
pd.testing.assert_frame_equal(result, expected)
"""
Test indicators.py functions for common indicators to be extracted from an OHLC dataset
Author: <NAME>
"""
import unittest
import indicators
import pandas as pd
class TestIndicators(unittest.TestCase):
def test_checkGreenCandle(self):
candleGreen = {"Open": 1.2, "Close": 1.5}
candleRed = {"Open": 3.4, "Close": 2}
self.assertEqual(indicators.checkGreenCandle(candleGreen),True)
self.assertEqual(indicators.checkGreenCandle(candleRed),False)
def test_checkEngulfingCandleOverPeriod(self):
candleSet = []
candleSet.append({"Open": 1, "Close": 2})
candleSet.append({"Open": 3, "Close": 0.5})
candleSet = pd.DataFrame(candleSet)
self.assertEqual(indicators.checkEngulfingCandleOverPeriod(candleSet), [0,-1])
candleSet = []
candleSet.append({"Open": 5, "Close": 4})
candleSet.append({"Open": 3, "Close": 6})
candleSet = pd.DataFrame(candleSet)
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 13 12:31:33 2017
@author: Astrid
"""
import os
import pandas as pd
import numpy as np
from collections import Counter
import re
import multiprocessing
def getFileList(dir_name, ext=''):
file_dir_list = list()
file_list = list()
for file in os.listdir(dir_name):
# If no extension is specified, create list with all files
if not ext:
file_dir_list.append(os.path.join(dir_name, file))
file_list.append(file)
# If extension is specified, create list with only ext files
elif file.endswith(ext):
file_dir_list.append(os.path.join(dir_name, file))
file_list.append(file)
return file_list, file_dir_list
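# Illustrative usage (hypothetical directory and extension, for illustration only):
# file_list, file_dir_list = getFileList('C:/climate_data', ext='.ccd')
# 'file_list' holds the bare file names, 'file_dir_list' the corresponding full paths.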
def string2vec(string):
vec = []
for t in string.split():
try:
vec.append(float(t))
except ValueError:
pass
return vec
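# Minimal sketch of what string2vec does: tokens that cannot be parsed as floats are
# silently skipped, so a mixed line reduces to its numeric values (illustrative input):
# string2vec('STEPS = 0.001 0.002 0.005') -> [0.001, 0.002, 0.005]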
def readDPJ(filename):
#Read .dpj file line by line
file_obj = open(filename, 'r', encoding='utf8')
file = file_obj.readlines()
file_obj.close()
del file_obj
# Search in file for lines that need to be changed, save those lines in a dataframe
# Create an array with the x-discretisation grid, an array with the y-discretisation grid and an array with the assignments
x_discretisation = list()
y_discretisation = list()
assignments = pd.DataFrame(columns=['line','type','range','name'])
parameters = pd.DataFrame(columns = ['line','parameter'])
l=23 #start looking on 24th line
# INITIALISATION SETTINGS
# Find start year and start time
while l < len(file):
if 'START_YEAR' in file[l]:
parameters = parameters.append({'line': l,'parameter':'start year'},ignore_index=True)
parameters = parameters.append({'line': l+1,'parameter':'start time'},ignore_index=True)
l=l+4;
break
# If the parameter is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: start year and start time not found')
l=l+1
# MATERIAL PARAMETERS
k=l
# Find air layer properties - included only when using an air layer to implement an interior climate dependent on V, n, HIR and exterior climate
while l < len(file):
if 'air room' in file[l].lower():
while file[l].strip() != '[MATERIAL]' and '; **' not in file[l]:
if 'CE' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air thermal capacity'},ignore_index=True)
l=l+1
continue
elif 'THETA_POR' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air theta_por'},ignore_index=True)
l=l+1
continue
elif 'THETA_EFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air theta_eff'},ignore_index=True)
l=l+1
continue
elif 'THETA_80' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air theta_80'},ignore_index=True)
l=l+1
continue
elif 'Theta_l(RH)' in file[l]:
parameters = parameters.append({'line': l,'parameter':'room air sorption curve'},ignore_index=True)
l=l+1
continue
l=l+1
l=l+5
break
# If the parameter is not found at the end of the file, there is no air layer. We must start looking for the next parameter from the same starting line so that we don't skip part of the file.
elif l == len(file)-2:
l=k
break
l=l+1
# WALLS
# Find wall conditions
while l < len(file):
if '[WALL_DATA]' in file[l]:
parameters = parameters.append({'line': l+2,'parameter':'wall orientation'},ignore_index=True)
parameters = parameters.append({'line': l+3,'parameter':'wall inclination'},ignore_index=True)
parameters = parameters.append({'line': l+4,'parameter':'latitude'},ignore_index=True)
l=l+9
break
# If the parameter is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: wall orientation and inclination not found')
l=l+1
# CLIMATE CONDITIONS
while l < len(file):
if '[CLIMATE_CONDITIONS]' in file[l]:
k=l
break
elif l == len(file)-1:
print('Error: climate conditions section not found')
l=l+1
# Find climatic conditions
# Interior temperature
l=k # start at beginning of climate conditions
while l < len(file):
if 'TEMPER' in file[l] and 'inside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'interior temperature'},ignore_index=True)
break
# If the parameter is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: interior temperature not found')
l=l+1
# Exterior temperature
l=k # start at beginning of climate conditions
while l < len(file):
if 'TEMPER' in file[l] and 'outside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'exterior temperature'},ignore_index=True)
break
# If the parameter is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: exterior temperature not found')
l=l+1
# Interior relative humidity
l=k # start at beginning of climate conditions
while l < len(file):
if 'RELHUM' in file[l] and 'inside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'interior relative humidity'},ignore_index=True)
break
l=l+1
# Exterior relative humidity
l=k # start at beginning of climate conditions
while l < len(file):
if 'RELHUM' in file[l] and 'outside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'exterior relative humidity'},ignore_index=True)
break
l=l+1
# Interior vapour pressure
l=k # start at beginning of climate conditions
while l < len(file):
if 'VAPPRES' in file[l] and 'inside' in file[l+1].lower():
parameters = parameters.append({'line': l+3,'parameter':'interior vapour pressure'},ignore_index=True)
break
l=l+1
# Rain load - imposed flux on vertical surface
l=k # start at beginning of climate conditions
while l < len(file):
if 'NORRAIN' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'rain vertical surface'},ignore_index=True)
break
l=l+1
# Rain load - flux on horizontal surface
l=k # start at beginning of climate conditions
while l < len(file):
if 'HORRAIN' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'rain horizontal surface'},ignore_index=True)
break
l=l+1
# Wind direction
l=k # start at beginning of climate conditions
while l < len(file):
if 'WINDDIR' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'wind direction'},ignore_index=True)
break
l=l+1
# Wind velocity
l=k # start at beginning of climate conditions
while l < len(file):
if 'WINDVEL' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'wind velocity'},ignore_index=True)
break
l=l+1
# Direct sun radiation
l=k # start at beginning of climate conditions
while l < len(file):
if 'DIRRAD' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'direct radiation'},ignore_index=True)
break
l=l+1
# Diffuse sun radiation
l=k # start at beginning of climate conditions
while l < len(file):
if 'DIFRAD' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'diffuse radiation'},ignore_index=True)
break
l=l+1
# Cloud covering
l=k # start at beginning of climate conditions
while l < len(file):
if 'CLOUDCOV' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'cloud cover'},ignore_index=True)
break
l=l+1
# Sky radiation
l=k # start at beginning of climate conditions
while l < len(file):
if 'SKYEMISS' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'sky radiation'},ignore_index=True)
break
l=l+1
# Sky temperature
l=k # start at beginning of climate conditions
while l < len(file):
if 'SKYTEMP' in file[l]:
parameters = parameters.append({'line': l+3,'parameter':'sky temperature'},ignore_index=True)
break
l=l+1
# BOUNDARY CONDITIONS
while l < len(file):
if '[BOUNDARY_CONDITIONS]' in file[l]:
k=l
break
elif l == len(file)-1:
print('Error: boundary conditions section not found')
l=l+1
# Find exterior heat transfer coefficient
l=k; # start at beginning of boundary conditions
while l < len(file):
if 'HEATCOND' in file[l] and 'outside' in file[l+1].lower():
while file[l].strip() != '[BOUND_COND]':
if 'EXCOEFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'exterior heat transfer coefficient'},ignore_index=True)
if 'EXCH_SLOPE' in file[l+1].strip():
l=l+1
parameters = parameters.append({'line': l,'parameter':'exterior heat transfer coefficient slope'},ignore_index=True)
break
l=l+1
break
l=l+1
# Find interior vapour surface resistance coefficient
l=k # start at beginning of boundary conditions
while l < len(file):
if 'VAPDIFF' in file[l] and 'inside' in file[l+1].lower():
while file[l].strip() != '[BOUND_COND]':
if 'EXCOEFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'interior vapour diffusion transfer coefficient'},ignore_index=True)
break
l=l+1
break
l=l+1
# Find exterior vapour surface resistance coefficient
l=k # start at beginning of boundary conditions
while l < len(file):
if 'VAPDIFF' in file[l] and 'outside' in file[l+1].lower():
while file[l].strip() != '[BOUND_COND]':
if 'EXCOEFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'exterior vapour diffusion transfer coefficient'},ignore_index=True)
if 'EXCH_SLOPE' in file[l+1].strip():
l=l+1
parameters = parameters.append({'line': l,'parameter':'exterior vapour diffusion transfer coefficient slope'},ignore_index=True)
break
l=l+1
break
l=l+1
# Find solar absorption
l=k #start at beginning of boundary conditions
while l < len(file):
if 'SURABSOR' in file[l]:
parameters = parameters.append({'line': l,'parameter':'solar absorption'},ignore_index=True)
break
l=l+1
# Find scale factor catch ratio
l=k #start at beginning of boundary conditions
while l < len(file):
if 'EXPCOEFF' in file[l]:
parameters = parameters.append({'line': l,'parameter':'scale factor catch ratio'},ignore_index=True)
break
l=l+1
# DISCRETISATION
while l < len(file):
if '[DISCRETISATION]' in file[l]:
k=l
break
elif l == len(file)-1:
print('Error: discretisation section not found')
l=l+1
# Find discretisation
l=k #start at beginning of discretisation
while l < len(file):
if '[DISCRETISATION]' in file[l]:
x_discr_str = file[l+3]
parameters = parameters.append({'line': l+3,'parameter':'x-discretisation'},ignore_index=True)
y_discr_str = file[l+4]
parameters = parameters.append({'line': l+4,'parameter':'y-discretisation'},ignore_index=True)
# remove characters and convert to vector
x_discretisation = string2vec(x_discr_str)
y_discretisation = string2vec(y_discr_str)
break
# If the discretisation is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: discretisation not found')
l=l+1
# OUTPUTS
while l < len(file):
if '[OUTPUTS]' in file[l]:
k=l
break
elif l == len(file)-1:
print('Error: outputs section not found')
l=l+1
# Find output folder
l=k # start at beginning of outputs
while l < len(file):
if 'OUTPUT_FOLDER' in file[l]:
parameters = parameters.append({'line': l,'parameter':'output folder'},ignore_index=True)
break
#If the output folder is not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: output folder not found')
l=l+1
# Find output files
while l < len(file):
if '[FILES]' in file[l]:
l=l+3
while '; **' not in file[l]:
if 'NAME' in file[l]:
output_file = file[l]
parameters = parameters.append({'line': l,'parameter':output_file[33:]},ignore_index=True)
l=l+5
continue
l=l+1
break
# If the output files are not found at the end of the file, there is a problem in the code
elif l == len(file)-1:
print('Error: output files not found')
l=l+1
# ASSIGNMENTS
while l < len(file):
if '[ASSIGNMENTS]' in file[l]:
k=l
break
elif l == len(file)-1:
print('Error: assignments section not found')
l=l+1
# Find assignments
l=k # start at beginning of assignments
while l < len(file):
if 'RANGE' in file[l]:
assignments = assignments.append({'line': l, 'type': file[l-1][30:-1].strip(),'range': [int(i) for i in string2vec(file[l])],'name': file[l+1][30:-1].strip()},ignore_index=True)
l=l+4
continue
l=l+1
# If the assignments are not found at the end of the file, there is a problem in the code
if assignments.empty:
print('Error: assignments not found')
return file, x_discretisation, y_discretisation, assignments, parameters
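# Illustrative usage of readDPJ (hypothetical file name, assuming a DELPHIN .dpj project file):
# file, x_discr, y_discr, assignments, parameters = readDPJ('wall_model.dpj')
# 'parameters' lists the line numbers of the parameters located above, 'assignments' the
# material/output assignments, and x_discr/y_discr the discretisation grids.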
def readccd(ccdfile, date=False):
# Find header
with open(ccdfile, 'r') as f:
l = 0
for line in f:
if '0:00:00' in line:
header = l
break
l = l+1
# Read ccd
value = np.loadtxt(ccdfile,skiprows=header,usecols=2,dtype='f').tolist()
if date:
day = np.loadtxt(ccdfile,skiprows=header,usecols=0,dtype='i').tolist()
hour = np.loadtxt(ccdfile,skiprows=header,usecols=1,dtype='U').tolist()
return value, day, hour
else:
return value
def saveccd(path, value):
days = int(len(value)/24)
df = pd.DataFrame()
df['day'] = np.repeat(list(range(days)),24).tolist()
df['hour'] = ['%02d:00:00' % x for x in range(24)]*days
df['value'] = value
climateparam = re.sub('[0-9_]', '', os.path.basename(path)[:-4])
df.to_csv(path, header=[headerccd(climateparam),'',''], sep=' ', index=False, quotechar=' ')
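# Illustrative round trip (hypothetical file names; assumes hourly values covering a whole
# number of days, since saveccd rebuilds the day/hour columns from the value count):
# temps = readccd('Temperature_1.ccd')
# saveccd('Temperature_2.ccd', temps)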
def headerccd(climateparam):
param_header = pd.DataFrame([{'parameter':'CloudCover', 'header': 'CLOUDCOV ---'},
{'parameter':'DiffuseRadiation', 'header': 'DIFRAD W/m2'},
{'parameter':'DirectRadiation', 'header': 'DIRRAD W/m2'},
{'parameter':'ShortWaveRadiation', 'header': 'SHWRAD W/m2'},
{'parameter':'GlobalRadiation', 'header': 'SKYEMISS W/m2'},
{'parameter':'RelativeHumidity', 'header': 'RELHUM %'},
{'parameter':'VapourPressure', 'header': 'VAPPRES Pa'},
{'parameter':'SkyRadiation', 'header': 'SKYEMISS W/m2'},
{'parameter':'Temperature', 'header': 'TEMPER C'},
{'parameter':'VerticalRain', 'header': 'NORRAIN l/m2h'},
{'parameter':'HorizontalRain', 'header': 'HORRAIN l/m2h'},
{'parameter':'WindDirection', 'header': 'WINDDIR Deg'},
{'parameter':'WindVelocity', 'header': 'WINDVEL m/s'},
])
header = param_header.loc[param_header['parameter'] == climateparam,'header'].tolist()[0]
if len(header) == 0:
print('Error: could not find climate parameter header')
return header
def marker(param):
param_mark = pd.DataFrame([{'parameter': 'wall orientation','marker': 'Deg'},
{'parameter': 'wall inclination', 'marker': 'Deg'},
{'parameter': 'interior temperature', 'marker': 'C'},
{'parameter': 'exterior temperature', 'marker': 'C'},
{'parameter': 'exterior heat transfer coefficient', 'marker': 'W/m2K'},
{'parameter': 'exterior heat transfer coefficient slope', 'marker': 'J/m3K'},
{'parameter': 'interior relative humidity', 'marker': '%'},
{'parameter': 'exterior relative humidity', 'marker': '%'},
{'parameter': 'interior vapour pressure', 'marker': 'Pa'},
{'parameter': 'exterior vapour pressure', 'marker': 'Pa'},
{'parameter': 'interior vapour diffusion transfer coefficient', 'marker': 's/m'},
{'parameter': 'exterior vapour diffusion transfer coefficient', 'marker': 's/m'},
{'parameter': 'exterior vapour diffusion transfer coefficient slope', 'marker': 's2/m2'},
{'parameter': 'solar absorption', 'marker': '-'},
{'parameter': 'scale factor catch ratio', 'marker': '-'},
{'parameter': 'output folder', 'marker': ''},
{'parameter': 'x-discretisation', 'marker': 'm'},
{'parameter': 'y-discretisation', 'marker': 'm'},
{'parameter': 'start year', 'marker': ''},
{'parameter': 'start time', 'marker': ''}
])
mark = param_mark.loc[param_mark['parameter'] == param,'marker'].tolist()[0]
if len(mark) == 0:
print('Error: could not find parameter marker')
return mark
def nameccd(climateparam):
param_name = pd.DataFrame([{'parameter': 'cloud cover','name':'CloudCover'},
{'parameter': 'diffuse radiation','name':'DiffuseRadiation'},
{'parameter': 'direct radiation','name':'DirectRadiation'},
{'parameter': 'sky radiation','name':'SkyRadiation'},
{'parameter': 'interior relative humidity','name':'RelativeHumidity'},
{'parameter': 'exterior relative humidity','name':'RelativeHumidity'},
{'parameter': 'interior vapour pressure','name':'VapourPressure'},
{'parameter': 'sky radiation','name':'SkyRadiation'},
{'parameter': 'interior temperature','name':'Temperature'},
{'parameter': 'exterior temperature','name':'Temperature'},
{'parameter': 'rain vertical surface','name':'VerticalRain'},
{'parameter': 'rain horizontal surface','name':'HorizontalRain'},
{'parameter': 'wind direction','name':'WindDirection'},
{'parameter': 'wind velocity','name':'WindVelocity'},
])
name = param_name.loc[param_name['parameter'] == climateparam,'name'].tolist()[0]
if len(name) == 0:
print('Error: could not find climate parameter name')
return name
def readData(args):
n, files_num, delete = args
output_fn, geometry_fn, elements_fn = dict(), dict(), dict()
geom_x, geom_y = None, None
for file in files_num:
p = re.sub('[0-9_-]', '', file.split('\\')[-1][:-4])
with open(file, 'r', encoding='utf8') as f:
l = 0
for line in f:
# Find geometry line
if 'TABLE GRID' in line:
geom_x = string2vec(f.readline())
geom_y = string2vec(f.readline())
l += 2
# Find output start line
if 'ELEMENTS' in line or 'SIDES ' in line:
elem_f = string2vec(line)
output_f = np.loadtxt(file,skiprows=l+1,usecols=tuple(range(1,len(elem_f)+1)),dtype='f')
break
l +=1
# Combine in dictionary
geometry_fn['geom_x'], geometry_fn['geom_y'], = geom_x, geom_y
elements_fn[p] = elem_f
output_fn[p] = output_f
if delete:
os.remove(file)
return output_fn, geometry_fn, elements_fn
def readOutput(path, exclude=None, delete=False):
# Get list of all files that need to be read
files_all = getFileList(path, ext='.out')[1]
files = list()
if exclude:
for f in files_all:
if not any(e in f for e in exclude):
files.append(f)
else:
files = files_all
if not files:
return [], [], []
else:
# Extract output parameters from list
param = list(Counter([re.sub('[0-9_-]', '', x.split('\\')[-1][:-4]) for x in files]).keys())
# Extract numbers from list
num = list(Counter([re.sub('[a-zA-Z]', '', x[:-4]).split('_')[-1] for x in files]).keys())
do = [[int(x.split('-')[0]) for x in num], [int(x.split('-')[1]) for x in num]]
tuples = list(zip(*do))
# Read files
num_cores = multiprocessing.cpu_count()-1
pool = multiprocessing.Pool(num_cores)
args = [(n, [x for x in files if re.sub('[a-zA-Z]', '', x[:-4]).split('_')[-1] == n], delete) for n in num]
results = pool.map(readData, args)
pool.close()
pool.join()
output = pd.DataFrame([x[0] for x in results], columns=param, index=pd.MultiIndex.from_tuples(tuples))
geometry = pd.DataFrame([x[1] for x in results], columns=param, index=pd.MultiIndex.from_tuples(tuples))
# author: <NAME>, <NAME>, <NAME>, <NAME>
# date: 2020-06-12
'''This script reads ministries' comments data and the predicted labels for question 1 from the interim
directory, joins the two datasets, and saves the result in the specified directory.
It takes two parameters: the input directory and the output directory where the merged data is written.
Usage: merge_ministry_pred.py --input_dir=<input_dir_path> --output_dir=<destination_dir_path>
Example:
python src/data/merge_ministry_pred.py --input_dir=data/ --output_dir=data/interim/
Options:
--input_dir=<input_dir_path> Location of data Directory
--output_dir=<destination_dir_path> Directory for saving ministries files
'''
import numpy as np
import pandas as pd
import os
from docopt import docopt
opt = docopt(__doc__)
def main(input_dir, output_dir):
assert os.path.exists(input_dir), "The path entered for input_dir does not exist. Make sure to enter correct path \n"
assert os.path.exists(output_dir), "The path entered for output_dir does not exist. Make sure to enter correct path \n"
print("\n--- START: merge_ministry_pred.py ---")
### Question 1 - Predictions on 2015 dataset ###
# Ministries data
print("Loading Q1 ministries' data and predictions into memory.")
# QUAN 2015
ministries_q1 = pd.read_excel(input_dir + "/interim/question1_models/advance/ministries_Q1.xlsx")
ministries_2015 = pd.read_excel(input_dir + "/interim/question1_models/advance/ministries_2015.xlsx")
pred_2015 = np.load(input_dir + "/output/theme_predictions/theme_question1_2015.npy")
assert len(ministries_q1) > 0, 'no records in ministries_q1.xlsx'
assert len(ministries_2015) > 0, 'no records in ministries_2015.xlsx'
columns_basic = ['Telkey', 'Comment', 'Year', 'Ministry', 'Ministry_id']
columns_labels = ['CPD', 'CB', 'EWC', 'Exec', 'FEW', 'SP', 'RE', 'Sup',
'SW', 'TEPE', 'VMG', 'OTH']
columns_order = columns_basic + columns_labels
pred_2015 = pd.DataFrame(pred_2015, columns=columns_labels)
ministries_q1 = ministries_q1[columns_order]
ministries_pred_2015 = pd.concat([ministries_2015, pred_2015], axis=1)
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1))
import pandas as pd
import re
from collections import OrderedDict
#
# This file includes functions used in the training procedure. The functions are simple and self-explanatory.
# Please refer to the README, which describes the sequence of steps.
#
def helper_sentence_to_tokens(snt):
step1 = []
for token in snt.split(' '):
handled = False
if '-' in token:
subkns = token.split('-')
for i in range(0, len(subkns) - 1):
step1.append(subkns[i])
step1.append('-')
step1.append(subkns[len(subkns) - 1])
handled = True
if not handled:
step1.append(token)
step2 = []
for token in step1:
m = re.search("^([0-9:\.,½¼¾⅛⅔⅓$¥€£]+)([/А-яа-яA-Za-z²³2\"\'\.\,]+)$", token)
if m:
num = m.group(1)
suffix = m.group(2)
step2.append(num)
step2.append(suffix)
else:
step2.append(token)
return step2
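# Rough sketch of the tokenisation (illustrative input; output not guaranteed verbatim):
# helper_sentence_to_tokens('12kg x-ray') splits the number from its unit suffix and the
# hyphenated word into parts, giving something like ['12', 'kg', 'x', '-', 'ray'].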
def sentence_to_words_and_chars(input_file, output_file):
sentences = open(input_file, "r+", encoding='utf-8').readlines()
processed_sentences = []
for snt in sentences:
new_snt = []
for token in helper_sentence_to_tokens(snt):
if not re.match("\<T[0-9]*\>", token) and not re.match("\</T[0-9]*\>", token) and \
re.match("^[A-Za-z0-9+-г\./]*$", token) or re.match(
"^[A-Za-z#0+-9½¼¾⅛⅔⅓_—\-\,\.\$¥€£\:\%\(\)\\\/]*$",
token) or re.match("^[А-Я]*$", token) or \
(re.match("^[А-Яа-я]*$", token) and (sum(1 for c in token if c.isupper()) > 2)) or \
re.match("^[А-Я]\.[А-Я]\.$", token) or re.match("^[А-Я]\.[А-Я]\.[А-Я]\.$", token):
new_snt = new_snt + list(token)
else:
new_snt = new_snt + [token]
processed_sentences.append(" ".join(new_snt))
res_out = open(output_file, "w+", encoding='utf-8')
res_out.writelines(processed_sentences)
res_out.close()
def disjoin_source_target(data_file, src_file, tgt_file):
data = pd.read_csv(data_file, encoding='utf-8')
in_df_l = [] # pd.DataFrame(index=False, columns=["sentence_id","token_id","before"])
out_df_l = [] # pd.DataFrame(index=False, columns=["id","after"])
for (sid, tid, before, after) in data[['sentence_id', 'token_id', 'before', 'after']].values:
in_df_l.append([sid, tid, before])
out_df_l.append(["%s_%s" % (sid, tid), after])
in_df = pd.DataFrame(data=in_df_l, columns=["sentence_id", "token_id", "before"])
out_df = pd.DataFrame(data=out_df_l, columns=["id", "after"])
in_df.to_csv(src_file, encoding='utf-8', index=False)
out_df.to_csv(tgt_file, encoding='utf-8', index=False)
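# Illustrative usage (hypothetical file names): split a labelled normalisation dataset with
# columns sentence_id/token_id/before/after into separate source and target CSVs.
# disjoin_source_target('train.csv', 'train_source.csv', 'train_target.csv')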
def source_to_sentences(input_file, output_file):
# Processing 'in' file first - writing it into sentences.
source_data = pd.read_csv(input_file, encoding='utf-8')
sentences_dataset = OrderedDict()
for (sid, tid, before) in source_data[['sentence_id', 'token_id', 'before']].values:
if (not isinstance(before, str)):
before = str(before)
if sid not in sentences_dataset:
sentences_dataset[sid] = [];
wrap = True
if wrap: sentences_dataset[sid].append("<T%d>" % tid)
for key in before.split(" "):
sentences_dataset[sid].append(key)
if wrap: sentences_dataset[sid].append("</T%d>" % tid)
out_f = open(output_file, "w+", encoding='utf-8')
for snt in sentences_dataset.values():
out_f.write("%s\n" % " ".join(snt))
out_f.close()
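# Illustrative output line (assuming wrap=True): every token of a sentence is wrapped in
# <T{token_id}> ... </T{token_id}> markers, e.g. "<T0> Hello </T0> <T1> world </T1>".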
def target_to_sentences(input_file, output_file):
# Processing 'out' file now - writing it into sentences.
target_data = pd.read_csv(input_file, encoding='utf-8')
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import io
import os
import math
import copy
import pickle
import zipfile
from textwrap import wrap
from pathlib import Path
from itertools import zip_longest
from collections import defaultdict
from urllib.error import URLError
from urllib.request import urlopen
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
from torch.optim.lr_scheduler import _LRScheduler
plt.style.use('ggplot')
def set_random_seed(state=1):
gens = (np.random.seed, torch.manual_seed, torch.cuda.manual_seed)
for set_state in gens:
set_state(state)
RANDOM_STATE = 1
set_random_seed(RANDOM_STATE)
def try_download(url, download_path):
archive_name = url.split('/')[-1]
folder_name, _ = os.path.splitext(archive_name)
try:
r = urlopen(url)
except URLError as e:
print('Cannot download the data. Error: %s' % url)
return
assert r.status == 200
data = r.read()
with zipfile.ZipFile(io.BytesIO(data)) as arch:
arch.extractall(download_path)
print('The archive is extracted into folder: %s' % download_path)
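# Illustrative usage (hypothetical URL and target folder, for illustration only):
# try_download('https://example.com/dataset.zip', './data')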
def read_data(path):
files = {}
for filename in glob.glob(path+'\*'):
bname = os.path.basename(filename)
suffix = os.path.splitext(bname)[-1]
stem = os.path.splitext(bname)[0]
if suffix == '.csv':
files[stem] = pd.read_csv(filename)
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
result = aa.groupby("nn").max()
assert "ss" in result
result = aa.groupby("nn").max(numeric_only=False)
assert "ss" in result
result = aa.groupby("nn").min()
assert "ss" in result
result = aa.groupby("nn").min(numeric_only=False)
assert "ss" in result
def test_min_date_with_nans():
# GH26321
dates = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
).dt.date
df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
result = df.groupby("b", as_index=False)["c"].min()["c"]
expected = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
).dt.date
tm.assert_series_equal(result, expected)
result = df.groupby("b")["c"].min()
expected.index.name = "b"
tm.assert_series_equal(result, expected)
def test_intercept_builtin_sum():
s = Series([1.0, 2.0, np.nan, 3.0])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(
result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)),
)
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{
"group": [1, 1, 2],
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
"category_string": pd.Series(list("abc")).astype("category"),
"category_int": [7, 8, 9],
"datetime": pd.date_range("20130101", periods=3),
"datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
},
columns=[
"group",
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
],
)
expected_columns_numeric = Index(["int", "float", "category_int"])
# mean / median
expected = pd.DataFrame(
{
"category_int": [7.5, 9],
"float": [4.5, 6.0],
"timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
"int": [1.5, 3],
"datetime": [
pd.Timestamp("2013-01-01 12:00:00"),
pd.Timestamp("2013-01-03 00:00:00"),
],
"datetimetz": [
pd.Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
pd.Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
],
},
index=Index([1, 2], name="group"),
columns=["int", "float", "category_int", "datetime", "datetimetz", "timedelta"],
)
for attr in ["mean", "median"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(
[
"int",
"float",
"string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["min", "max"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(
[
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["first", "last"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "string", "category_int", "timedelta"])
result = df.groupby("group").sum()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = df.groupby("group").sum(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int"])
for attr in ["prod", "cumprod"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(
["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
)
for attr in ["cummin", "cummax"]:
result = getattr(df.groupby("group"), attr)()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int", "timedelta"])
result = getattr(df.groupby("group"), "cumsum")()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), "cumsum")(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]], columns=["A", "B", "C"]
)
g = df.groupby("A")
gni = df.groupby("A", as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[1, 0.0], [3, np.nan]], columns=["A", "B"], index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name="A")
expected_col = pd.MultiIndex(
levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]],
codes=[[0] * 8, list(range(8))],
)
expected = pd.DataFrame(
[
[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
],
index=expected_index,
columns=expected_col,
)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat(
[
df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T,
]
)
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame(
[[True, True], [False, True]], columns=["B", "C"], index=[1, 3]
)
expected.index.name = "A"
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"])
expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"])
result = df.groupby("A").cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby("A", as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby("A").cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby("A").cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
# -*- coding: utf-8 -*-
""" bokeh_warnings_graphs.py
Usage:
bokeh_warnings_graphs.py <project_code> [options]
Arguments:
project_code unique project code consisting of 'projectnumber_projectModelPart'
like 456_11 , 416_T99 or 123_N
Options:
-h, --help Show this help screen.
--html_path=<html> path to store html bokeh graphs, default in /commands/qc/*.html
"""
from docopt import docopt
import os.path as op
from collections import defaultdict
from bs4 import BeautifulSoup
import re
import pandas as pd
import datetime
import colorful
from bokeh.plotting import figure, output_file, save, ColumnDataSource
from bokeh.layouts import column
from bokeh.models import Legend, Plot, Square, Range1d, Text, HoverTool
from bokeh.palettes import viridis
from bokeh.models import DatetimeTickFormatter
# TODO categorize warnings
# TODO adjust bokeh graph accordingly
def command_get_paths(verbose=False):
path_dict = defaultdict()
current_dir = op.dirname(op.abspath(__file__))
commands_dir = op.dirname(current_dir)
root_dir = op.dirname(commands_dir)
journals_dir = op.join(root_dir, "journals")
logs_dir = op.join(root_dir, "logs")
warnings_dir = op.join(root_dir, "warnings")
path_dict["current_file"] = __file__
path_dict["current_dir"] = current_dir
path_dict["commands_dir"] = commands_dir
path_dict["root_dir"] = root_dir
path_dict["journals_dir"] = journals_dir
path_dict["logs_dir"] = logs_dir
path_dict["warnings_dir"] = warnings_dir
if verbose:
for pathname in path_dict.keys():
print("{} - {}".format(pathname, path_dict[pathname]))
return path_dict
def timestamp_now():
return datetime.datetime.now().strftime("%Y%m%dT%H%M%SZ")
def read_warning_html(html_warnings_file):
warning_ids = defaultdict(list)
warning_count = defaultdict(int)
with open(html_warnings_file, 'r', encoding="utf-16") as html_warnings:
data = html_warnings.read()
soup = BeautifulSoup(data, "html.parser")
h1s = soup.findAll("h1")
re_h1_date = re.compile(r"\((.*)\)")
p_date = re_h1_date.findall(h1s[0].text)
warn_time_stamp = datetime.datetime.strptime(p_date[0], "%d.%m.%Y %H:%M:%S")
iso_time_stamp = warn_time_stamp.strftime("%Y-%m-%d %H:%M:%S")
warning_table = soup.findAll("td")
pattern = re.compile(r"\bid\s*?(\d+)")
id_line = re.compile(r" : ")
last_seen_err_type = ""
for line in warning_table:
err_ids = []
if id_line.findall(line.text):
for elem_id in pattern.findall(line.text):
err_ids.append(elem_id)
warning_ids[last_seen_err_type] = err_ids
else:
err_type = line.text.strip()
last_seen_err_type = err_type
warning_count[err_type] += 1
html_warnings_df = pd.DataFrame({iso_time_stamp: warning_count})
warnings_id_df = pd.DataFrame({iso_time_stamp: warning_ids})
import pandas as pd
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error
import matplotlib.pyplot as plt
import os
count = 0
reg = SGDRegressor()
predict_for = "NANOUSD.csv"
batch_size = "30T"
stop = pd.to_datetime("2020-08-01", format="%Y-%m-%d")
for pair_csv in os.listdir("../../../bots/bitsurfer/data"):
pair_path = "../../../bots/bitsurfer/data/" + pair_csv
df = pd.read_csv(pair_path, sep=",", names=["ts", "price", "vol"])
df["datetime"] = pd.to_datetime(df["ts"], unit="s")
df = df.set_index("datetime")
df_test = df[df.index >= stop]
df = df[df.index < stop]
if df.empty:
continue
# df = df_test
# train df
ts_df = df[["ts"]].resample(batch_size).mean()
price_df = df[["price"]].resample(batch_size).mean().fillna(0)
vol_df = df[["vol"]].resample(batch_size).sum().fillna(0)
resampled_df = pd.DataFrame(index=ts_df.index)
resampled_df["price"] = price_df["price"].values / max(price_df["price"].values)
resampled_df["vol"] = vol_df["vol"].values / max(vol_df["vol"].values)
resampled_df["price_t-1"] = resampled_df.shift(1)["price"]
resampled_df["price_t-2"] = resampled_df.shift(2)["price"]
resampled_df["vol_t-1"] = resampled_df.shift(1)["vol"]
resampled_df["vol_t-2"] = resampled_df.shift(2)["vol"]
resampled_df["target"] = resampled_df.shift(-1)["price"]
resampled_df = resampled_df.loc[(resampled_df[["price", "vol"]] != 0).any(axis=1)]
resampled_df = resampled_df.loc[
(resampled_df[["price_t-1", "vol_t-1"]] != 0).any(axis=1)
]
resampled_df = resampled_df.loc[
(resampled_df[["price_t-2", "vol_t-2"]] != 0).any(axis=1)
]
resampled_df = resampled_df.loc[(resampled_df[["target"]] != 0).any(axis=1)]
resampled_df = resampled_df.dropna()
# test df
if pair_csv == predict_for:
ts_df = df_test[["ts"]].resample(batch_size).mean()
price_df = df_test[["price"]].resample(batch_size).mean().fillna(0)
vol_df = df_test[["vol"]].resample(batch_size).sum().fillna(0)
resampled_test_df = pd.DataFrame(index=ts_df.index)
resampled_test_df["price"] = price_df["price"].values / max(
price_df["price"].values
)
resampled_test_df["vol"] = vol_df["vol"].values / max(vol_df["vol"].values)
resampled_test_df = resampled_test_df.loc[
(resampled_test_df[["price", "vol"]] != 0).any(axis=1)
]
resampled_test_df["price_t-1"] = resampled_test_df.shift(1)["price"]
resampled_test_df["price_t-2"] = resampled_test_df.shift(2)["price"]
resampled_test_df["vol_t-1"] = resampled_test_df.shift(1)["vol"]
resampled_test_df["vol_t-2"] = resampled_test_df.shift(2)["vol"]
# resampled_test_df["target"] = resampled_test_df.shift(-1)["price"]
actual_df = resampled_test_df[["price"]]
resampled_test_df = resampled_test_df.dropna()
features = resampled_test_df[
["price", "vol", "price_t-1", "price_t-2", "vol_t-1", "vol_t-2"]
]
predict_df = features
# TRAINING
features = resampled_df[
["price", "vol", "price_t-1", "price_t-2", "vol_t-1", "vol_t-2"]
]
target = resampled_df["target"]
reg.partial_fit(X=features, y=target)
print(resampled_df.tail())
results = pd.DataFrame(index=predict_df.index)
import sys
import numpy.random
import pandas as pd
import numpy as np
from numpy.random import normal
from pandarallel import pandarallel
pandarallel.initialize(nb_workers=8, progress_bar=True)
def create_cross_table(pandas_df):
cross_table = pd.crosstab(pandas_df.iloc[:, 2], pandas_df.iloc[:, 1], margins=True, margins_name='Total_Reports')
import librosa
import numpy as np
import pandas as pd
from os import listdir
from os.path import isfile, join
from audioread import NoBackendError
def extract_features(path, label, emotionId, startid):
"""
Extract features from the audio files under `path`, using the librosa library.
:param path: directory containing the audio files
:param label: emotion label
:param emotionId: numeric id of the emotion
:param startid: starting sequence number
:return: feature matrix as a pandas.DataFrame
"""
id = startid # sequence number
feature_set = pd.DataFrame() # feature matrix
# individual feature vectors
labels = pd.Series()
emotion_vector = pd.Series()
songname_vector = pd.Series()
tempo_vector = pd.Series()
total_beats = pd.Series()
average_beats = pd.Series()
chroma_stft_mean = pd.Series()
# chroma_stft_std = pd.Series()
chroma_stft_var = pd.Series()
# chroma_cq_mean = pd.Series()
# chroma_cq_std = pd.Series()
# chroma_cq_var = pd.Series()
# chroma_cens_mean = pd.Series()
# chroma_cens_std = pd.Series()
# chroma_cens_var = pd.Series()
mel_mean = pd.Series()
# mel_std = pd.Series()
mel_var = pd.Series()
mfcc_mean = pd.Series()
# mfcc_std = pd.Series()
mfcc_var = pd.Series()
mfcc_delta_mean = pd.Series()
# mfcc_delta_std = pd.Series()
mfcc_delta_var = pd.Series()
rmse_mean = pd.Series()
# rmse_std = pd.Series()
rmse_var = pd.Series()
cent_mean = pd.Series()
# cent_std = pd.Series()
cent_var = pd.Series()
spec_bw_mean = pd.Series()
# spec_bw_std = pd.Series()
spec_bw_var = pd.Series()
contrast_mean = pd.Series()
# contrast_std = pd.Series()
contrast_var = pd.Series()
rolloff_mean = pd.Series()
# rolloff_std = pd.Series()
rolloff_var = pd.Series()
poly_mean = pd.Series()
# poly_std = pd.Series()
poly_var = pd.Series()
tonnetz_mean = pd.Series()
# tonnetz_std = pd.Series()
tonnetz_var = pd.Series()
zcr_mean = pd.Series()
# zcr_std = pd.Series()
zcr_var = pd.Series()
harm_mean = pd.Series()
# harm_std = pd.Series()
harm_var = pd.Series()
perc_mean = pd.Series()
# perc_std = pd.Series()
perc_var = pd.Series()
frame_mean = pd.Series()
# frame_std = pd.Series()
frame_var = | pd.Series() | pandas.Series |
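    # For reference, a compact sketch of how a single file's features could be
    # computed with librosa before being appended to the Series above (the calls
    # follow librosa's public API; "example.wav" is a hypothetical file name):
    #
    #   y, sr = librosa.load("example.wav")
    #   tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
    #   chroma = librosa.feature.chroma_stft(y=y, sr=sr)
    #   mfcc = librosa.feature.mfcc(y=y, sr=sr)
    #   row = pd.Series({"tempo": tempo, "total_beats": len(beats),
    #                    "chroma_stft_mean": chroma.mean(), "mfcc_mean": mfcc.mean()})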
import pandas as pd
import sys
# To edit for dev
if sys.platform == 'linux':
path_data = "/n/groups/patel/uk_biobank/project_52887_41230/ukb41230.csv"
path_dictionary = "/n/groups/patel/samuel/HMS-Aging/Data_Dictionary_Showcase.csv"
path_features = "/n/groups/patel/samuel/data_final/page3_featureImp/FeatureImp/"
#path_features = "/n/groups/patel/samuel/feature_importances_final/"
path_predictions = "/n/groups/patel/samuel/predictions_final_2/"
path_inputs = "/n/groups/patel/samuel/final_inputs/"
path_input = "/n/groups/patel/samuel/Biomarkers_raw.csv"
path_HC_features="/n/groups/patel/samuel/HC_features/"
path_clusters = "/n/groups/patel/samuel/AutomaticClusters/"
path_output_linear_study = "/n/groups/patel/samuel/LinearOutput/"
path_data2 = "/n/groups/patel/uk_biobank/project_52887_42640/ukb42640.csv"
def read_ethnicity_data(**kwargs):
dict_ethnicity_codes = {'1': 'White', '1001': 'British', '1002': 'Irish',
'1003': 'White_Other',
'2': 'Mixed', '2001': 'White_and_Black_Caribbean', '2002': 'White_and_Black_African',
'2003': 'White_and_Asian', '2004': 'Mixed_Other',
'3': 'Asian', '3001': 'Indian', '3002': 'Pakistani', '3003': 'Bangladeshi',
'3004': 'Asian_Other',
'4': 'Black', '4001': 'Caribbean', '4002': 'African', '4003': 'Black_Other',
'5': 'Chinese',
'6': 'Other_ethnicity',
'-1': 'Do_not_know',
'-3': 'Prefer_not_to_answer',
'-5': 'NA'}
df = pd.read_csv(path_data, usecols = ['21000-0.0', '21000-1.0', '21000-2.0', 'eid'], **kwargs).set_index('eid')
df.columns = ['Ethnicity', 'Ethnicity_1', 'Ethnicity_2']
eids_missing_ethnicity = df.index[df['Ethnicity'].isna()]
#print(eids_missing_ethnicity)
for eid in eids_missing_ethnicity:
sample = df.loc[eid, :]
if not | pd.isna(sample['Ethnicity_1']) | pandas.isna |
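# Equivalent vectorized back-fill of a missing baseline ethnicity from the later
# visits (toy data; this mirrors the intent of the per-eid loop above):
import pandas as pd
visits = pd.DataFrame({'Ethnicity': [None, '1001'],
                       'Ethnicity_1': ['3001', None],
                       'Ethnicity_2': [None, None]})
visits['Ethnicity'] = visits['Ethnicity'].fillna(visits['Ethnicity_1']).fillna(visits['Ethnicity_2'])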
'''
Created on Feb. 9, 2021
@author: cefect
'''
#===============================================================================
# imports
#===============================================================================
import os, datetime
start = datetime.datetime.now()
import pandas as pd
import numpy as np
from pandas import IndexSlice as idx
from hlpr.basic import view, force_open_dir
from vfunc_conv.vcoms import VfConv
#from model.modcom import DFunc
mod_name = 'misc.jrc_global'
today_str = datetime.datetime.today().strftime('%Y%m%d')
class JRConv(VfConv):
def __init__(self,
libName = 'Huzinga_2017',
prec=5, #precision
**kwargs):
self.libName = libName
super().__init__(
prec=prec,
            **kwargs)  # initialize the base class
def load(self,
fp = r'C:\LS\02_WORK\IBI\202011_CanFlood\04_CALC\vfunc\lib\Huzinga_2017\copy_of_global_flood_depth-damage_functions__30102017.xlsx',
):
#===============================================================================
# inputs
#===============================================================================
dx_raw = pd.read_excel(fp, sheet_name = 'Damage functions', header=[1,2], index_col=[0,1])
#clean it
df = dx_raw.drop('Standard deviation', axis=1, level=0)
dxind = df.droplevel(level=0, axis=1)
dxind.index = dxind.index.set_names(['cat', 'depth_m'])
#get our series
boolcol = dxind.columns.str.contains('North AMERICA')
"""
no Transport or Infrastructure curves for North America
"""
dxind = dxind.loc[:, boolcol]
#handle nulls
dxind = dxind.replace({'-':np.nan}).dropna(axis=0, how='any')
self.dxind = dxind
return self.dxind
def convert(self,
dxind=None,
metac_d = {
'desc':'JRC Global curves',
'location':'USA',
'date':'2010',
'source':'(Huizinga et al. 2017)',
'impact_var':'loss',
'impact_units':'pct',
'exposure_var':'flood depth',
'exposure_units':'m',
'scale_var':'maximum damage (national average)',
'scale_units':'pct',
},
):
#=======================================================================
# defaults
#=======================================================================
if dxind is None: dxind=self.dxind
#=======================================================================
# setup meta
#=======================================================================
crve_d = self.crve_d.copy() #start with a copy
crve_d['file_conversion']='CanFlood.%s_%s'%(mod_name, today_str)
#check keys
miss_l = set(metac_d.keys()).difference(crve_d.keys())
assert len(miss_l)==0, 'requesting new keys: %s'%miss_l
#crve_d = {**metac_d, **crve_d}
crve_d.update(metac_d) #preserves order
"""
crve_d.keys()
"""
#check it
        assert list(crve_d.keys())[-1]=='exposure', 'need last entry to be exposure'
#=======================================================================
# curve loop
#=======================================================================
cLib_d = dict()
#loop and collect
for cval, cdf_raw in dxind.groupby('cat', axis=0, level=0):
#===================================================================
# get the tag
#===================================================================
tag = cval.strip().replace(' ','')
for k,v in self.tag_cln_d.items():
tag = tag.replace(k, v).strip()
#===================================================================
# depth-damage
#===================================================================
            ddf = cdf_raw.droplevel(level=0, axis=0).astype(float).round(self.prec)
dd_d = ddf.iloc[:,0].to_dict()
#===================================================================
# meta
#===================================================================
dcurve_d = crve_d.copy()
dcurve_d['tag'] = tag
#assemble
dcurve_d = {**dcurve_d, **dd_d}
self.check_crvd(dcurve_d)
cLib_d[tag] = dcurve_d
#=======================================================================
# convert and summarize
#=======================================================================
rd = dict()
for k, sd in cLib_d.items():
"""need this to ensure index is formatted for plotters"""
df = | pd.Series(sd) | pandas.Series |
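# Sketch of how the per-tag curve dictionaries collected above are typically
# assembled into one summary table (the keys shown are assumptions for the example):
import pandas as pd
example_cLib_d = {'Residential': {'tag': 'Residential', 0.0: 0.0, 0.5: 0.38},
                  'Commercial': {'tag': 'Commercial', 0.0: 0.0, 0.5: 0.15}}
summary = pd.concat({k: pd.Series(v) for k, v in example_cLib_d.items()}, axis=1)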
from sklearn import linear_model
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
node1 = | pd.read_csv("../Data/Node1.csv", index_col="AbsT") | pandas.read_csv |
# Copyright 2020 The GenoML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Import the necessary packages
import subprocess
from typing import List, Optional, Tuple
import numpy as np
import pandas as pd
from pandas_plink import read_plink1_bin
from scipy import stats
# Define the munging class
import genoml.dependencies
class Munging(object):
def __init__(
self,
pheno_path: str,
run_prefix="GenoML_data",
impute_type="median",
skip_prune="no",
p_gwas: float = 0.001,
addit_path: Optional[str] = None,
gwas_path: Optional[str] = None,
geno_path: Optional[str] = None,
refColsHarmonize=None,
r2_cutoff="0.5",
):
self.pheno_path = pheno_path
self.run_prefix = run_prefix
if impute_type not in ["mean", "median"]:
# Currently only supports mean and median
raise ValueError(
"The 2 types of imputation currently supported are 'mean' and 'median'"
)
self.impute_type = impute_type
self.p_gwas = p_gwas
self.r2 = r2_cutoff
if skip_prune == "no":
self.skip_prune = False
elif skip_prune == "yes":
self.skip_prune = True
else:
raise ValueError(
f'`skip_prune` should be one of "yes" or "no", not {skip_prune}'
)
self.refColsHarmonize = refColsHarmonize
# Reading in the phenotype file
self.pheno_df = pd.read_csv(pheno_path, engine="c")
# Raise an error and exit if the phenotype file is not properly formatted
if not {"ID", "PHENO"}.issubset(self.pheno_df.columns):
raise ValueError(
"Error: It doesn't look as though your phenotype file is properly "
"formatted.\n"
"Did you check that the columns are 'ID' and 'PHENO' and that "
"controls=0 and cases=1?"
)
        # Typecast the ID column to string and the PHENO column to integer
self.pheno_df["ID"] = self.pheno_df["ID"].astype(str)
self.pheno_df["PHENO"] = self.pheno_df["PHENO"].astype(int)
self.addit_path = addit_path
self.gwas_path = gwas_path
self.geno_path = geno_path
if not addit_path:
print("No additional features as predictors? No problem, we'll stick to genotypes.")
else:
self.addit_df = pd.read_csv(addit_path, engine="c")
if not gwas_path:
print("So you don't want to filter on P values from external GWAS? No worries, we don't usually either (if the dataset is large enough).")
else:
self.gwas_df = pd.read_csv(gwas_path, engine="c")
if not geno_path:
print(
"So no genotypes? Okay, we'll just use additional features provided for the predictions."
)
else:
print("Exporting genotype data")
self.output_datafile = self.run_prefix + ".dataForML.h5"
self.merged: Optional[pd.DataFrame] = None
def plink_inputs(self):
# Initializing some variables
self.pheno_df.to_hdf(self.output_datafile, key="pheno", mode="w")
addit_df = None
genotype_df = self.load_genotypes()
# Checking the imputation of non-genotype features
if self.addit_path:
addit_df = self.munge_additional_features()
merged = _merge_dfs([self.pheno_df, genotype_df, addit_df], col_id="ID")
del addit_df, genotype_df # We dont need these copies anymore
merged = self.harmonize_refs(merged)
merged.to_hdf(self.output_datafile, key="dataForML")
features_list = merged.columns.tolist()
features_listpath = f"{self.run_prefix}.list_features.txt"
with open(features_listpath, "w") as f:
f.write("\n".join([str(feature) for feature in features_list]))
print(
f"An updated list of {len(features_list)} features, including ID and PHENO,"
f" that is in your munged dataForML.h5 file can be found here "
f"{features_listpath}"
"\n"
f"Your .dataForML file that has been fully munged can be found here: "
f"{self.output_datafile}"
)
self.merged = merged
return self.merged
def load_genotypes(self) -> Optional[pd.DataFrame]:
if not self.geno_path:
return None
self._run_external_plink_commands()
# read_plink1_bin reads in non-reference alleles. When `ref="a1"` (default),
# it counts "a0" alleles. We want to read homozygous minor allele as 2 and
# homozygous for major allele as 0. ref="a0" ensures we are counting the
# homozygous minor alleles. This matches the .raw labels.
g = read_plink1_bin("temp_genos.bed", ref="a0")
g = g.drop(
["fid", "father", "mother", "gender", "trait", "chrom", "cm", "pos","a1"]
)
g = g.set_index({"sample": "iid", "variant": "snp"})
genotype_df = g.to_pandas()
del g # We dont need this copy anymore
genotype_df.reset_index(inplace=True)
genotype_df.rename(columns={"sample": "ID"}, inplace=True)
# now, remove temp_genos
bash_rm_temp = "rm temp_genos.*"
print(bash_rm_temp)
subprocess.run(bash_rm_temp, shell=True)
# Checking the impute flag and execute
# Currently only supports mean and median
merged = _fill_impute_na(self.impute_type, genotype_df)
return merged
def _run_external_plink_commands(self) -> None:
"""Runs the external plink commands from the command line."""
if not self.geno_path:
return
cmds_a, cmds_b = get_plink_bash_scripts_options(
self.skip_prune, self.geno_path, self.run_prefix, self.r2
)
if self.gwas_path:
p_thresh = self.p_gwas
gwas_df_reduced = self.gwas_df[["SNP", "p"]]
snps_to_keep = gwas_df_reduced.loc[(gwas_df_reduced["p"] <= p_thresh)]
outfile = self.run_prefix + ".p_threshold_variants.tab"
snps_to_keep.to_csv(outfile, index=False, sep="\t")
prune_str = "" if self.skip_prune else "prior to pruning "
print(f"Your candidate variant list {prune_str}is right here: {outfile}.")
cmds = cmds_b
else:
cmds = cmds_a
pruned = "" if self.skip_prune else "pruned "
print(
f"A list of {pruned}variants and the allele being counted in the dosages "
"(usually the minor allele) can be found here: "
f"{self.run_prefix}.variants_and_alleles.tab"
)
for cmd in cmds:
subprocess.run(cmd, shell=True)
def munge_additional_features(self) -> Optional[pd.DataFrame]:
"""Munges additional features and cleans up statistically insignificant data.
* Z-Scales the features.
* Remove any columns with a standard deviation of zero
"""
if not self.addit_path:
return None
addit_df = _fill_impute_na(self.impute_type, self.addit_df)
# Remove the ID column
cols = [col for col in addit_df.columns if not col == "ID"]
addit_df.drop(labels="ID", axis=0, inplace=True)
# Remove any columns with a standard deviation of zero
print(
"Removing any columns that have a standard deviation of 0 prior to "
"Z-scaling..."
)
std = addit_df.std()
if any(std == 0.0):
print(
"\n"
"Looks like there's at least one column with a standard deviation "
"of 0. Let's remove that for you..."
"\n"
)
addit_keep = addit_df.drop(std[std == 0.0].index.values, axis=1)
addit_keep_list = list(addit_keep.columns.values)
addit_df = addit_df[addit_keep_list]
addit_keep_list.remove("ID")
            removed_list = np.setdiff1d(cols, addit_keep_list)
            for removed_column in removed_list:
                print(f"The column {removed_column} was removed")
cols = addit_keep_list
# Z-scale the features
print("Now Z-scaling your non-genotype features...\n")
for col in cols:
if (addit_df[col].min() == 0.0) and (addit_df[col].max() == 1.0):
print(
f"{col} is likely a binary indicator or a proportion and will not "
"be scaled, just + 1 all the values of this variable and rerun to "
"flag this column to be scaled.",
)
else:
addit_df[col] = stats.zscore(addit_df[col], ddof=0)
print(
"\n"
"You have just Z-scaled your non-genotype features, putting everything on "
"a numeric scale similar to genotypes.\n"
"Now your non-genotype features might look a little closer to zero "
"(showing the first few lines of the left-most and right-most columns)..."
)
print("#" * 70)
print(addit_df.describe())
print("#" * 70)
return addit_df
def harmonize_refs(self, merged_df: pd.DataFrame) -> pd.DataFrame:
"""Harmonizes data columns with an external reference file.
> Checking the reference column names flag
> If this is a step that comes after harmonize, then a .txt file with columns
> to keep should have been produced. This is a list of column names from the
> reference dataset that the test dataset was harmonized against. We want to
> compare apples to apples, so we will only keep the column names that match.
"""
if not self.refColsHarmonize:
return merged_df
print(
"\n"
f"Looks like you are munging after the harmonization step. Great! We will "
f"keep the columns generated from your reference dataset from that "
f"harmonize step that was exported to this file: {self.refColsHarmonize}\n"
)
with open(self.refColsHarmonize, "r") as refCols_file:
ref_column_names_list = refCols_file.read().splitlines()
# Keep the reference columns from the test dataset if found in test data
matching_cols = merged_df[
np.intersect1d(merged_df.columns, ref_column_names_list)
]
# Make a list of final features that will be included in the model
# This will be used again when remunging the reference dataset
matching_cols_list = matching_cols.columns.values.tolist()
# Save out the final list
intersecting_cols_outfile = f"{self.run_prefix}.finalHarmonizedCols_toKeep.txt"
with open(intersecting_cols_outfile, "w") as f:
for col in matching_cols_list:
f.write(f"{col}\n")
print(
"A final list of harmonized columns between your reference and test "
f"dataset has been generated here: {intersecting_cols_outfile}\n"
"Use this to re-train your reference dataset in order to move on to "
"testing."
)
return matching_cols
def get_plink_bash_scripts_options(
skip_prune: bool, geno_path: str, run_prefix: str, r2: Optional[str] = None
) -> Tuple[List, List]:
"""Gets the PLINK bash scripts to be run from CLI."""
plink_exec = genoml.dependencies.check_plink()
if not skip_prune:
# Set the bashes
bash1a = f"{plink_exec} --bfile {geno_path} --indep-pairwise 1000 50 {r2}"
bash1b = (
f"{plink_exec} --bfile {geno_path} --extract "
f"{run_prefix}.p_threshold_variants.tab --indep-pairwise 1000 50 {r2}"
)
# may want to consider outputting temp_genos to dir in run_prefix
bash2 = (
f"{plink_exec} --bfile {geno_path} --extract plink.prune.in --make-bed"
f" --out temp_genos"
)
bash3 = f"cut -f 2,5 temp_genos.bim > {run_prefix}.variants_and_alleles.tab"
bash4 = "rm plink.log"
bash5 = "rm plink.prune.*"
# bash6 = "rm " + self.run_prefix + ".log"
# Set the bash command groups
cmds_a = [bash1a, bash2, bash3, bash4, bash5]
cmds_b = [bash1b, bash2, bash3, bash4, bash5]
else:
bash1a = f"{plink_exec} --bfile {geno_path}"
bash1b = (
f"{plink_exec} --bfile {geno_path} --extract "
f"{run_prefix}.p_threshold_variants.tab --make-bed --out temp_genos"
)
# may want to consider outputting temp_genos to dir in run_prefix
bash2 = f"{plink_exec} --bfile {geno_path} --make-bed --out temp_genos"
bash3 = f"cut -f 2,5 temp_genos.bim > {run_prefix}.variants_and_alleles.tab"
bash4 = "rm plink.log"
# Set the bash command groups
cmds_a = [bash1a, bash2, bash3, bash4]
cmds_b = [bash1b, bash3, bash4]
return cmds_a, cmds_b
def _fill_impute_na(impute_type: str, df: pd.DataFrame) -> pd.DataFrame:
"""Imputes the NA fields for a dataframe."""
if impute_type.lower() == "mean":
df = df.fillna(df.mean())
elif impute_type.lower() == "median":
df = df.fillna(df.median())
print(
"\n"
"You have just imputed your genotype features, covering up NAs with "
f"the column {impute_type} so that analyses don't crash due to "
"missing data.\n"
"Now your genotype features might look a little better (showing the "
"first few lines of the left-most and right-most columns)..."
)
print("#" * 70)
print(df.describe())
print("#" * 70)
print("")
return df
def _merge_dfs(dfs: List[pd.DataFrame], col_id: str) -> pd.DataFrame:
merged = None
for df in dfs:
if df is None:
continue
if merged is None:
merged = df
else:
merged = | pd.merge(merged, df, on=col_id, how="inner") | pandas.merge |
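# _merge_dfs above is an inner-join reduction keyed on "ID"; the same result can
# be expressed with functools.reduce (sketch on toy frames):
from functools import reduce
import pandas as pd
frames = [pd.DataFrame({"ID": ["a", "b"], "x": [1, 2]}),
          pd.DataFrame({"ID": ["a", "b"], "y": [3, 4]})]
reduced = reduce(lambda left, right: pd.merge(left, right, on="ID", how="inner"), frames)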
#%%
import numpy as np
import pandas as pd
import altair as alt
import anthro.io
# Generate a plot for global atmospheric SF6 concentration from NOAA GML data
data = | pd.read_csv('../processed/monthly_global_sf6_data_processed.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""Supports OMNI Combined, Definitive, IMF and Plasma Data, and Energetic
Proton Fluxes, Time-Shifted to the Nose of the Earth's Bow Shock, plus Solar
and Magnetic Indices. Downloads data from the NASA Coordinated Data Analysis
Web (CDAWeb). Supports both 5 and 1 minute files.
Properties
----------
platform
'omni'
name
'hro'
tag
Select time between samples, one of {'1min', '5min'}
inst_id
None supported
Note
----
Files are stored by the first day of each month. When downloading use
omni.download(start, stop, freq='MS') to only download days that could possibly
have data. 'MS' gives a monthly start frequency.
This material is based upon work supported by the
National Science Foundation under Grant Number 1259508.
Any opinions, findings, and conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views
of the National Science Foundation.
Warnings
--------
- Currently no cleaning routine. Though the CDAWEB description indicates that
these level-2 products are expected to be ok.
- Module not written by OMNI team.
Custom Functions
----------------
time_shift_to_magnetic_poles
Shift time from bowshock to intersection with one of the magnetic poles
calculate_clock_angle
Calculate the clock angle and IMF mag in the YZ plane
calculate_imf_steadiness
Calculate the IMF steadiness using clock angle and magnitude in the YZ plane
calculate_dayside_reconnection
Calculate the dayside reconnection rate
"""
import datetime as dt
import functools
import numpy as np
import pandas as pds
import scipy.stats as stats
import warnings
from pysat import logger
from pysat.instruments.methods import general as mm_gen
from pysatNASA.instruments.methods import cdaweb as cdw
# ----------------------------------------------------------------------------
# Instrument attributes
platform = 'omni'
name = 'hro'
tags = {'1min': '1-minute time averaged data',
'5min': '5-minute time averaged data'}
inst_ids = {'': [tag for tag in tags.keys()]}
# ----------------------------------------------------------------------------
# Instrument test attributes
_test_dates = {'': {'1min': dt.datetime(2009, 1, 1),
'5min': dt.datetime(2009, 1, 1)}}
# ----------------------------------------------------------------------------
# Instrument methods
def init(self):
"""Initializes the Instrument object with instrument specific values.
Runs once upon instantiation.
"""
ackn_str = ''.join(('For full acknowledgement info, please see: ',
'https://omniweb.gsfc.nasa.gov/html/citing.html'))
self.acknowledgements = ackn_str
self.references = ' '.join(('<NAME> and <NAME>, Solar',
'wind spatial scales in and comparisons',
'of hourly Wind and ACE plasma and',
'magnetic field data, J. Geophys. Res.,',
'Vol. 110, No. A2, A02209,',
'10.1029/2004JA010649.'))
logger.info(ackn_str)
return
def clean(self):
""" Cleaning function for OMNI data
Note
----
'clean' - Replace default fill values with NaN
'dusty' - Same as clean
'dirty' - Same as clean
'none' - Preserve original fill values
"""
for key in self.data.columns:
if key != 'Epoch':
fill = self.meta[key, self.meta.labels.fill_val][0]
idx, = np.where(self[key] == fill)
# Set the fill values to NaN
self[idx, key] = np.nan
# Replace the old fill value with NaN and add this to the notes
fill_notes = "".join(["Replaced standard fill value with NaN. ",
"Standard value was: {:}".format(
self.meta[key,
self.meta.labels.fill_val])])
notes = '\n'.join([str(self.meta[key, self.meta.labels.notes]),
fill_notes])
self.meta[key, self.meta.labels.notes] = notes
self.meta[key, self.meta.labels.fill_val] = np.nan
return
# ----------------------------------------------------------------------------
# Instrument functions
#
# Use the default CDAWeb and pysat methods
# Set the list_files routine
fname = ''.join(['omni_hro_{tag:s}_{{year:4d}}{{month:02d}}{{day:02d}}_',
'v{{version:02d}}.cdf'])
supported_tags = {inst_id: {tag: fname.format(tag=tag) for tag in tags.keys()}
for inst_id in inst_ids.keys()}
list_files = functools.partial(mm_gen.list_files,
supported_tags=supported_tags,
file_cadence=pds.DateOffset(months=1))
# Set the load routine
load = functools.partial(cdw.load, file_cadence= | pds.DateOffset(months=1) | pandas.DateOffset |
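# A minimal download-and-load sketch for this instrument (assumes pysat >= 3.0 and
# pysatNASA are installed and a pysat data directory is configured; 'MS' keeps the
# download loop to one request per monthly file, as noted in the module docstring):
#
#   import datetime as dt
#   import pysat
#   from pysatNASA.instruments import omni_hro
#
#   omni = pysat.Instrument(inst_module=omni_hro, tag='1min')
#   omni.download(start=dt.datetime(2009, 1, 1), stop=dt.datetime(2009, 2, 1), freq='MS')
#   omni.load(date=dt.datetime(2009, 1, 1))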
from pandas_datareader import data as pdr
import time
import yfinance as yf
import json
import sys
import pandas as pd
import numpy as np
yf.pdr_override() # <== that's all it takes :-)
# download dataframe
# data = pdr.get_data_yahoo("2317.TW", start="2019-01-01", end="2020-03-18")
data = pdr.get_data_yahoo((sys.argv[1]), start=sys.argv[2], end=sys.argv[3])
def upbreak(tsLine, tsRefLine):
# tsLine=data.Close[data.Close.index[20:][0]:]#tsLine=Close[boundDC.index[0]:]
#tsRefLine=boundDC.upboundDC#
n = min(len(tsLine), len(tsRefLine))
tsLine = tsLine[-n:]
    tsRefLine = tsRefLine[-n:]  # keep only the last n values
signal = pd.Series(0, index=tsLine.index)
for i in range(1, len(tsLine)):
if all([tsLine[i] > tsRefLine[i], tsLine[i-1] < tsRefLine[i-1]]):
signal[i] = 1
return(signal)
# downward-breakout function
def downbreak(tsLine, tsRefLine):
n = min(len(tsLine), len(tsRefLine))
tsLine = tsLine[-n:]
tsRefLine = tsRefLine[-n:]
signal = pd.Series(0, index=tsLine.index)
for i in range(1, len(tsLine)):
if all([tsLine[i] < tsRefLine[i], tsLine[i-1] > tsRefLine[i-1]]):
signal[i] = 1
return(signal)
# Bollinger Bands
def bbands(tsPrice, period=20, times=2):
upBBand = pd.Series(0.0, index=tsPrice.index)
midBBand = pd.Series(0.0, index=tsPrice.index)
downBBand = pd.Series(0.0, index=tsPrice.index)
sigma = pd.Series(0.0, index=tsPrice.index)
for i in range(period-1, len(tsPrice)):
midBBand[i] = np.nanmean(tsPrice[i-(period-1):(i+1)])
sigma[i] = np.nanstd(tsPrice[i-(period-1):(i+1)])
upBBand[i] = midBBand[i]+times*sigma[i]
downBBand[i] = midBBand[i]-times*sigma[i]
BBands = pd.DataFrame({'upBBand': upBBand[(period-1):],
'midBBand': midBBand[(period-1):],
'downBBand': downBBand[(period-1):],
'sigma': sigma[(period-1):]})
return(BBands)
# ----------------------------------------------------- trading variables
# number of trades
timesOfTradeArr = []  # number of trades
principalArr = []  # principal for each trade
#optimalprofit = []
# variable settings (change the variables here)
# mean = 20
# std = 1.2
# principal = 1000000  # principal
mean = int(sys.argv[4])
std = float(sys.argv[5])
principal = int(sys.argv[6])  # principal
# data = pdr.get_data_yahoo(sys.argv[1]+".TW", start=sys.argv[2], end=sys.argv[3])
# trading strategy settings
tsmcBBands = bbands(data.Close, mean, std)  # trading strategy (Bollinger Bands)
buy = 0
sell = 0
cost = 0  # amount spent so far
count = 0  # number of winning trades
profit = [0]  # realized profits
date = []  # dates of realized profits
# 2019-5-4 updated: corresponding values
buyArr = []  # record every buy price
sellArr = []  # record every sell price
for i in range((len(tsmcBBands)-1)): # (len(tsmcBBands)-1)
# if(data.Close[i+mean] < data.Close[i+mean-1] and data.Close[i+mean] > tsmcBBands.upBBand[i]):
    # sell when above 2 standard deviations (the upper band)
if(data.Close[i+mean] > tsmcBBands.upBBand[i]):
        if(i == len(tsmcBBands)-2):  # if a sell signal still appears at expiry, skip it
continue
        # sell condition (when the price drops below the lower band, sell at the next day's open after the sell signal)
if(buy != 0):
            profit.append((data.Open[i+mean+1] * buy - cost))  # sell at the next day's opening price
            principal = principal + profit[-1] * 1000  # profit[-1] is the latest profit
            #print("appended to profit:", (data.Open[i+mean+1] * buy - cost ), "current cost: ", cost)
            date.append(data.index[i+mean+1])  # also record the sell date
for x in range(0, buy):
sellArr.append(data.Open[i+mean+1])
buy = 0
            cost = 0  # 2019-5-07 v1 reset
            #print("sold everything", "buy", buy, "shares", "price", data.Open[i+mean+1], "added to sellArr")
            #print("current sellArr:", sellArr)
            #print("current buyArr: ", buyArr)
    if(i == len(tsmcBBands)-2):  # at expiry (the last day)
        if(buy != 0):  # if still holding a position, close it the same day
sell = buy
profit.append((data.Close[i+mean] * buy - cost))
principal = principal + profit[-1] * 1000
            #print("appended to profit:", (data.Close[i+mean] * buy - cost ))
date.append(data.index[i+mean])
for x in range(0, sell):
sellArr.append(data.Close[i+mean])
            #print("[final] sold everything", "buy", buy, "sell price ", data.Close[i+mean], "added to sellArr ")
            # reset after closing the position
            cost = 0  # 2019-5-07 v1 reset
buy = 0
sell = 0
            #print("[final] current sellArr:", sellArr)
            #print("[final] current buyArr: ", buyArr)
    # ------------------------ buy conditions
    # buy after two consecutive up days and below one standard deviation
    # buy when below 2 standard deviations (the lower band)
    if(data.Close[i+mean] < tsmcBBands.downBBand[i]):
        # if a buy signal still appears at expiry, don't buy
if(i == len(tsmcBBands)-2):
continue
cost = cost + data.Open[i+mean+1]
        # cannot buy if the principal is less than the cost
if (principal < cost * 1000):
cost = cost - data.Open[i+mean+1]
continue
else:
buy = buy + 1
buyArr.append(data.Open[i+mean+1])
            #print("bought", "buy", buy, "added to buyArr", data.Open[i+mean+1])
# -------------------------------- performance -------------------------------------------
profit = profit[1:]  # drop the leading 0
# income per trade
rate = []
for k in range(0, len(sellArr)):
# rate.append((sellArr[i]-buyArr[i])/buyArr[i])
rate.append((sellArr[k]-buyArr[k])*0.994)
# return per trade
profit2 = pd.Series(rate)
# average gain per winning trade
if(len(profit2[profit2 > 0]) == 0):
meanWin = 0
else:
# sum(profit2[profit2>0])/len(profit2[profit2>0])
meanWin = sum(profit2[profit2 > 0])/len(profit2[profit2 > 0])
# average loss per losing trade
if(len(profit2[profit2 < 0]) == 0):
meanLoss = 0
else:
meanLoss = sum(profit2[profit2 < 0])/len(profit2[profit2 < 0])
# win rate
winRate = len(profit2[profit2 > 0])/len(profit2[profit2 != 0])*100
# performance summary
perform = {'winRate': winRate, 'meanWin': meanWin, 'meanLoss': meanLoss}
# cumulative return
title = ['profit']
df = | pd.DataFrame(0, index=data.index, columns=title) | pandas.DataFrame |
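# For intuition: the bbands() helper defined above mirrors pandas' rolling
# mean/std (ddof=0 to match np.nanstd). A quick synthetic-price sketch:
import numpy as np
import pandas as pd
example_px = pd.Series(np.linspace(100, 110, 40))
example_mid = example_px.rolling(20).mean()
example_sigma = example_px.rolling(20).std(ddof=0)
example_upper = example_mid + 2 * example_sigma
example_lower = example_mid - 2 * example_sigma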
# standard libraries
import os
# third-party libraries
import pandas as pd
# local imports
from .. import count_data
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class TestCsvToDf:
"""
Tests converting a csv with various headers into a processible DataFrame
"""
def test_timestamp(self):
"""
Check if a csv w/ a timestamp is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_timestamp.csv')
element_id = 'tagID'
timestamp = 'timestamp'
lat = 'lat'
lon = 'lon'
test_df = count_data.csv_to_df(data, element_id=element_id, timestamp=timestamp, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == -6761865716520410554
def test_timestamp_ba(self):
"""
Check if a csv w/ a timestamp and grouped counts is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_timestamp_ba.csv')
element_id = 'tagID'
timestamp = 'timestamp'
boardings = 'boardings'
alightings = 'alightings'
lat = 'lat'
lon = 'lon'
test_df = count_data.csv_to_df(data, element_id=element_id, timestamp=timestamp,
boardings=boardings, alightings=alightings, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == 7008548250528393651
def test_session(self):
"""
Check if a csv w/ session times is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_session.csv')
element_id = 'MacPIN'
session_start = 'SessionStart_Epoch'
session_end = 'SessionEnd_Epoch'
lat = 'GPS_LAT'
lon = 'GPS_LONG'
test_df = count_data.csv_to_df(data, element_id=element_id, session_start=session_start, session_end=session_end, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == 7098407329788286247
def test_session_ba(self):
"""
Check if a csv w/ session times and grouped counts is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_session_ba.csv')
element_id = 'MacPIN'
session_start = 'SessionStart_Epoch'
session_end = 'SessionEnd_Epoch'
boardings = 'boardings'
alightings = 'alightings'
lat = 'GPS_LAT'
lon = 'GPS_LONG'
test_df = count_data.csv_to_df(data, element_id=element_id, session_start=session_start, session_end=session_end,
boardings=boardings, alightings=alightings, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == 2589903708124850504
class TestStandardizeDatetime:
"""
Tests ensuring all times are datetime format
"""
def test_no_change_needed(self):
"""
Tests if all timestamps are already datetime and no change is needed
"""
test_times = ['2018-02-22 20:08:00', '2018-02-09 18:05:00', '2018-02-09 18:26:00']
test_df = pd.DataFrame(test_times, columns=['timestamp'])
test_df['timestamp'] = pd.to_datetime(test_df['timestamp'])
processed_df = count_data.standardize_datetime(test_df)
assert processed_df['timestamp'].dtype == 'datetime64[ns]'
def test_timestamp_epoch(self):
"""
Tests if timestamp is an epoch time
"""
test_times = ['1519330080', '1518199500', '1518200760']
test_df = pd.DataFrame(test_times, columns=['timestamp'])
processed_df = count_data.standardize_datetime(test_df)
assert processed_df['timestamp'].dtype == 'datetime64[ns]'
def test_session_epoch(self):
"""
Tests if session times are epoch times
"""
test_times = [['1519330080', '1518199500'], ['1518200760', '1519330080'], ['1518199500', '1518200760']]
test_df = pd.DataFrame(test_times, columns=['session_start', 'session_end'])
processed_df = count_data.standardize_datetime(test_df)
assert processed_df['session_start'].dtype == 'datetime64[ns]'
assert processed_df['session_end'].dtype == 'datetime64[ns]'
class TestStandardizeEpoch:
"""
Tests ensuring all times are unix epoch
"""
def test_no_change_needed(self):
"""
Tests if all timestamps are already epochs and no change is needed
"""
test_times = [1519330080, 1518199500, 1518200760]
test_df = pd.DataFrame(test_times, columns=['timestamp'])
processed_df = count_data.standardize_epoch(test_df)
assert processed_df['timestamp'].dtype == 'int64'
def test_timestamp_datetime(self):
"""
Tests if timestamp is a datetime
"""
test_times = ['2018-02-22 20:08:00', '2018-02-09 18:05:00', '2018-02-09 18:26:00']
test_df = pd.DataFrame(test_times, columns=['timestamp'])
test_df['timestamp'] = pd.to_datetime(test_df['timestamp'])
processed_df = count_data.standardize_epoch(test_df)
assert processed_df['timestamp'].dtype == 'int64'
def test_session_datetime(self):
"""
Tests if session times are datetimes
"""
test_times = [['2018-02-22 20:08:00', '2018-02-09 18:05:00'], ['2018-02-09 18:26:00', '2018-02-22 20:08:00'],
['2018-02-09 18:05:00', '2018-02-09 18:26:00']]
test_df = pd.DataFrame(test_times, columns=['session_start', 'session_end'])
test_df['session_start'] = pd.to_datetime(test_df['session_start'])
test_df['session_end'] = | pd.to_datetime(test_df['session_end']) | pandas.to_datetime |
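# The conversions exercised by these tests reduce to two pandas idioms (sketch):
import pandas as pd
epoch_strings = pd.Series(['1519330080', '1518199500'])
as_datetime = pd.to_datetime(epoch_strings.astype('int64'), unit='s')   # epoch seconds -> datetime64[ns]
back_to_epoch = as_datetime.astype('int64') // 10**9                    # datetime64[ns] -> epoch seconds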
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# ffill
result = ser.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = ser.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
def test_datetime64_fillna_backfill(self):
# GH#6587
# make sure that we are treating as integer when filling
msg = "containing strings is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# this also tests inference of a datetime-like with NaT's
ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"])
expected = Series(
[
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
],
dtype="M8[ns]",
)
result = ser.fillna(method="backfill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_datetime64_tz_fillna(self, tz):
# DatetimeLikeBlock
ser = Series(
[
Timestamp("2011-01-01 10:00"),
NaT,
Timestamp("2011-01-03 10:00"),
NaT,
]
)
null_loc = Series([False, True, False, True])
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
# check s is not changed
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00"),
"AAA",
Timestamp("2011-01-03 10:00"),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{1: Timestamp("2011-01-02 10:00"), 3: Timestamp("2011-01-04 10:00")}
)
expected = Series(
[
Timestamp("2011-01-01 10:00"),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00"),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# DatetimeTZBlock
idx = DatetimeIndex(["2011-01-01 10:00", NaT, "2011-01-03 10:00", NaT], tz=tz)
ser = Series(idx)
assert ser.dtype == f"datetime64[ns, {tz}]"
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-02 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz))
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(Timestamp("2011-01-02 10:00", tz=tz).to_pydatetime())
idx = DatetimeIndex(
[
"2011-01-01 10:00",
"2011-01-02 10:00",
"2011-01-03 10:00",
"2011-01-02 10:00",
],
tz=tz,
)
expected = Series(idx)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna("AAA")
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
"AAA",
Timestamp("2011-01-03 10:00", tz=tz),
"AAA",
],
dtype=object,
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00"),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
result = ser.fillna(
{
1: Timestamp("2011-01-02 10:00", tz=tz),
3: Timestamp("2011-01-04 10:00", tz=tz),
}
)
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2011-01-02 10:00", tz=tz),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2011-01-04 10:00", tz=tz),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
# filling with a naive/other zone, coerce to object
result = ser.fillna(Timestamp("20130101"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
with tm.assert_produces_warning(FutureWarning, match="mismatched timezone"):
result = ser.fillna(Timestamp("20130101", tz="US/Pacific"))
expected = Series(
[
Timestamp("2011-01-01 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
Timestamp("2011-01-03 10:00", tz=tz),
Timestamp("2013-01-01", tz="US/Pacific"),
]
)
tm.assert_series_equal(expected, result)
tm.assert_series_equal(isna(ser), null_loc)
def test_fillna_dt64tz_with_method(self):
# with timezone
# GH#15855
ser = Series([Timestamp("2012-11-11 00:00:00+01:00"), NaT])
exp = Series(
[
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
)
tm.assert_series_equal(ser.fillna(method="pad"), exp)
ser = Series([NaT, Timestamp("2012-11-11 00:00:00+01:00")])
exp = Series(
[
Timestamp("2012-11-11 00:00:00+01:00"),
Timestamp("2012-11-11 00:00:00+01:00"),
]
)
tm.assert_series_equal(ser.fillna(method="bfill"), exp)
def test_fillna_pytimedelta(self):
# GH#8209
ser = Series([np.nan, Timedelta("1 days")], index=["A", "B"])
result = ser.fillna(timedelta(1))
expected = Series(Timedelta("1 days"), index=["A", "B"])
tm.assert_series_equal(result, expected)
def test_fillna_period(self):
# GH#13737
ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])
res = ser.fillna(Period("2012-01", freq="M"))
exp = Series([Period("2011-01", freq="M"), Period("2012-01", freq="M")])
tm.assert_series_equal(res, exp)
assert res.dtype == "Period[M]"
def test_fillna_dt64_timestamp(self, frame_or_series):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
obj = frame_or_series(ser)
# reg fillna
result = obj.fillna(Timestamp("20130104"))
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130104"),
Timestamp("20130103 9:01:01"),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = obj
tm.assert_equal(result, expected)
def test_fillna_dt64_non_nao(self):
# GH#27419
ser = Series([ | Timestamp("2010-01-01") | pandas.Timestamp |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.linear_model import LinearRegression
import pandas as pd
import rolldecayestimators.filters
import rolldecayestimators.measure as measure
from sklearn.metrics import r2_score
class CutTransformer(BaseEstimator, TransformerMixin):
""" Rolldecay transformer that cut time series from roll decay test for estimator.
Parameters
----------
phi_max : float, default=np.deg2rad(90)
Start cutting value is below this value [rad]
phi_min : float, default=0
Stop cutting value is when below this value [rad]
Attributes
----------
n_features_ : int
The number of features of the data passed to :meth:`fit`.
"""
def __init__(self, phi_max=np.deg2rad(90), phi_min=0, phi1d_start_tolerance=0.005):
self.phi_max = phi_max # Maximum Roll angle [rad]
self.phi_min = phi_min # Minimum Roll angle [rad]
self.phi_key = 'phi' # Roll angle [rad]
self.remove_end_samples = 200 # Remove this many samples from end (funky stuff may happen during end of tests)
self.phi1d_start_tolerance = phi1d_start_tolerance
def fit(self, X, y=None):
"""Do the cut
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples.
y : None
There is no need of a target in a transformer, yet the pipeline API
requires this parameter.
Returns
-------
self : object
Returns self.
"""
#X = check_array(X, accept_sparse=True)
self.n_features_ = X.shape[1]
phi = X[self.phi_key]
if (self.phi_max < phi.abs().min()):
raise ValueError('"phi_max" is too small')
if (self.phi_min > phi.abs().max()):
raise ValueError('"phi_min" is too large')
if not isinstance(self.remove_end_samples,int):
raise ValueError('"remove_end_samples" should be integer')
if self.remove_end_samples<1:
raise ValueError('"remove_end_samples" > 1')
# Return the transformer
return self
def transform(self, X):
""" A reference implementation of a transform function.
Parameters
----------
X : {array-like, sparse-matrix}, shape (n_samples, n_features)
The input samples.
Returns
-------
X_transformed : array, shape (n_samples, n_features)
The array containing the element-wise square roots of the values
in ``X``.
"""
# Check is fit had been called
check_is_fitted(self, 'n_features_')
# Input validation
#X = check_array(X, accept_sparse=True)
# Check that the input is of the same shape as the one passed
# during fit.
#if X.shape[1] != self.n_features_:
# raise ValueError('Shape of input is different from what was seen'
# 'in `fit`')
#Remove initial part (by removing first to maximums):
phi = X[self.phi_key]
index = phi.abs().idxmax()
X_cut = X.loc[index:].copy()
if (len(X_cut) > 10*self.remove_end_samples):
X_cut = X_cut.iloc[0:-self.remove_end_samples]
phi = X_cut[self.phi_key]
phi_max_sign = np.sign(phi.loc[index])
if phi_max_sign == 1:
index2 = phi.idxmin()
else:
index2 = phi.idxmax()
X_cut = X_cut.loc[index2:].copy()
X_interpolated = measure.sample_increase(X=X_cut, increase=5)
X_zerocrossings = measure.get_peaks(X=X_interpolated)
mask = X_interpolated.index >= X_zerocrossings.index[0]
X_interpolated = X_interpolated.loc[mask]
# Remove some large angles at start
mask = X_zerocrossings['phi'].abs() < self.phi_max
X_zerocrossings2 = X_zerocrossings.loc[mask].copy()
if len(X_zerocrossings2) > 0:
mask2 = X_interpolated.index > X_zerocrossings2.index[0]
X_interpolated = X_interpolated.loc[mask2]
# Remove some small angles at end
mask = X_zerocrossings2['phi'].abs() < self.phi_min
X_zerocrossings3 = X_zerocrossings2.loc[mask].copy()
if len(X_zerocrossings3) > 0:
mask3 = X_interpolated.index < X_zerocrossings3.index[0]
X_interpolated = X_interpolated.loc[mask3]
if 'phi1d' in X_cut:
phi1d_start = np.abs(X_interpolated.iloc[0]['phi1d'])
if phi1d_start > self.phi1d_start_tolerance:
raise ValueError('Start phi1d exceeds phi1d_start_tolerance (%f > %f)' % (phi1d_start, self.phi1d_start_tolerance) )
mask = ((X_cut.index >= X_interpolated.index[0]) & (X_cut.index <= X_interpolated.index[-1]))
X_cut=X_cut.loc[mask].copy()
return X_cut
class LowpassFilterDerivatorTransformer(BaseEstimator, TransformerMixin):
""" Rolldecay transformer that lowpass filters the roll signal for estimator.
Parameters
----------
    cutoff : float, default=0.5
        Cutoff frequency passed to the lowpass filter
    order : int, default=5
        Order of the lowpass filter
    minimum_score : float, default=0.999
        Minimum R^2 between the raw and filtered roll signal required by :meth:`fit`
Attributes
----------
n_features_ : int
The number of features of the data passed to :meth:`fit`.
"""
def __init__(self, cutoff=0.5, order=5, minimum_score=0.999):
self.cutoff = cutoff
self.order = order
self.phi_key = 'phi' # Roll angle [rad]
self.phi_filtered_key = 'phi_filtered' # Filtered roll angle [rad]
self.phi1d_key = 'phi1d' # Roll velocity [rad/s]
self.phi2d_key = 'phi2d' # Roll acceleration [rad/s2]
self.minimum_score = minimum_score
def fit(self, X, y=None):
"""Do the cut
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples.
y : None
There is no need of a target in a transformer, yet the pipeline API
requires this parameter.
Returns
-------
self : object
Returns self.
"""
#X = check_array(X, accept_sparse=True)
self.n_features_ = X.shape[1]
assert self.score(X=X) > self.minimum_score
# Return the transformer
return self
def transform(self, X):
""" A reference implementation of a transform function.
Parameters
----------
X : {array-like, sparse-matrix}, shape (n_samples, n_features)
The input samples.
Returns
-------
X_transformed : array, shape (n_samples, n_features)
The array containing the element-wise square roots of the values
in ``X``.
"""
# Check is fit had been called
check_is_fitted(self, 'n_features_')
# Input validation
#X = check_array(X, accept_sparse=True)
# Check that the input is of the same shape as the one passed
# during fit.
#if X.shape[1] != self.n_features_:
# raise ValueError('Shape of input is different from what was seen'
# 'in `fit`')
# Lowpass filter the signal:
self.X = X.copy()
self.X_filter = X.copy()
ts = np.mean(np.diff(self.X_filter.index))
fs = 1 / ts
self.X_filter[self.phi_filtered_key] = rolldecayestimators.filters.lowpass_filter(data=self.X_filter['phi'],
cutoff=self.cutoff, fs=fs,
order=self.order)
self.X_filter = self.add_derivatives(X=self.X_filter)
return self.X_filter
def plot_filtering(self):
fig, axes = plt.subplots(nrows=3)
ax = axes[0]
self.X.plot(y='phi', ax=ax)
self.X_filter.plot(y='phi_filtered', ax=ax, style='--')
ax.legend();
ax = axes[1]
self.X_filter.plot(y='phi1d', ax=ax, style='--')
ax.legend();
ax = axes[2]
self.X_filter.plot(y='phi2d', ax=ax, style='--')
ax.legend();
def add_derivatives(self, X):
# Add accelerations:
assert self.phi_key in X
X = X.copy()
X[self.phi1d_key] = np.gradient(X[self.phi_filtered_key].values, X.index.values)
X[self.phi2d_key] = np.gradient(X[self.phi1d_key].values, X.index.values)
return X
def score(self, X, y=None, sample_weight=None):
"""
        Return the coefficient of determination R^2 of the prediction.
        The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares
        ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum().
        The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse).
        A constant model that always predicts the expected value of y, disregarding the input features,
        would get an R^2 score of 0.0.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples. For some estimators this may be a precomputed kernel matrix or a list of generic
objects instead, shape = (n_samples, n_samples_fitted), where n_samples_fitted is the number of samples
used in the fitting for the estimator.
y : Dummy not used
sample_weight : Dummy
Returns
-------
score : float
            R^2 of self.predict(X) wrt. y.
"""
X_filter = self.transform(X)
y_true = X[self.phi_key]
y_pred = X_filter[self.phi_filtered_key]
return r2_score(y_true=y_true, y_pred=y_pred)
class ScaleFactorTransformer(BaseEstimator, TransformerMixin):
""" Rolldecay to full scale using scale factor
Parameters
----------
    scale_factor : float
        Linear scale factor used to bring the model-scale signal to full scale
Attributes
----------
n_features_ : int
The number of features of the data passed to :meth:`fit`.
"""
def __init__(self, scale_factor):
self.scale_factor = scale_factor
self.phi1d_key = 'phi1d' # Roll velocity [rad/s]
self.phi2d_key = 'phi2d' # Roll acceleration [rad/s2]
def fit(self, X, y=None):
"""Do the cut
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples.
y : None
There is no need of a target in a transformer, yet the pipeline API
requires this parameter.
Returns
-------
self : object
Returns self.
"""
#X = check_array(X, accept_sparse=True)
self.n_features_ = X.shape[1]
if | pd.isnull(self.scale_factor) | pandas.isnull |
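        # These transformers follow the scikit-learn fit/transform API, so a typical
        # (hypothetical) preprocessing chain could be composed as a Pipeline, e.g.:
        #
        #   from sklearn.pipeline import Pipeline
        #   pipe = Pipeline([
        #       ('filter', LowpassFilterDerivatorTransformer(cutoff=0.5, order=5)),
        #       ('cut', CutTransformer(phi_max=np.deg2rad(8), phi_min=np.deg2rad(0.5))),
        #       ('scale', ScaleFactorTransformer(scale_factor=68.0)),
        #   ])
        #   X_processed = pipe.fit_transform(X_raw)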
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys, pickle, os
import pymc3 as pm
import nipymc
from nipymc import *
import pandas as pd
from theano import shared
# 1st argument = which region to analyze
region = str(sys.argv[1])
# global variables...
SAMPLES = 3000
BURN = 1000
# get data
activation = pd.read_csv('../data/extra/IAPS_activation.csv', index_col=0)
ratings = pd.read_csv('../data/extra/IAPS_ratings.csv')
data = ratings.merge(activation, left_on='Files', how='inner', right_index=True)
X = data.copy()
recode_vars = ['Valence', 'Subject', 'Picture', 'SEX', 'RACE']
for v in recode_vars:
print("Processing %s... (%d values)" % (v, len(X[v].unique())))
vals = X[v].unique()
repl_dict = dict(zip(vals, list(range(len(vals)))))
col = [repl_dict[k] for k in list(X[v].values)]
X[v] = col
# Standardize ratings within-subject
X['rating'] = X.groupby('Subject')['Rating'].apply(lambda x: (x - x.mean())/x.std()).values
# random stimulus model
model = pm.Model()
with model:
# Intercept
mu = pm.Normal('intercept', 0, 10)
# mu = 0
# Categorical fixed effects
betas = {}
# cat_fe = ['Valence', 'RACE']
cat_fe = ['Valence']
for cfe in cat_fe:
dummies = pd.get_dummies(X[cfe], drop_first=True)
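# Hypothetical illustration of the dummy coding used for the categorical fixed
# effects above: with drop_first=True the first (alphabetically sorted) level
# becomes the reference category absorbed by the intercept. The series below is
# invented.
def _example_dummy_coding():
    demo = pd.Series(["neg", "neu", "pos", "neg"])
    return pd.get_dummies(demo, drop_first=True)  # 0/1 columns for "neu" and "pos"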
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
@timer
def alpha1(self):
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
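# Hypothetical usage sketch (dates invented; requires the SQL-backed helpers
# imported from `stats` to be reachable):
# factors = stAlpha('2017-01-01', '2017-12-31')
# a1 = factors.alpha1()   # one-column DataFrame named 'alpha1'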
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
data_temp = (data['Close'] - data['Low']) -(data['High'] - data['Close'])\
/(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
this one is similar to alpha14
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
temp_delay = Delay(temp,3)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])
alpha = SMA(temp2,12,1)
alpha.columns = ['alpha22']
return alpha
@timer
def alpha23(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['Close','close_std','close_delay']
data['temp'] = data['close_std']
data['temp'][data['Close'] <= data['close_delay']] = 0
temp = pd.DataFrame(data['temp'])
sma1 = SMA(temp,20,1)
sma2 = SMA(pd.DataFrame(data['close_std']),20,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'])
alpha.columns = ['alpha23']
return alpha
@timer
def alpha24(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis=1 ,join = 'inner' )
temp = data['Close'] - data['close_delay']
temp = pd.DataFrame(temp)
alpha = SMA(temp,5,1)
alpha.columns = ['alpha24']
return alpha
@timer
def alpha25(self):
close = self.close
close_delta = Delta(close,7)
ret = self.ret
r1 = Rank(close_delta)
r3 = Rank(Sum(ret,250))
volume = self.volume
volume_mean = Mean(pd.DataFrame(volume['Vol']),20)
volume_mean.columns = ['volume_mean']
data = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
temp0 = pd.DataFrame(data['Vol']/data['volume_mean'])
temp = DecayLinear(temp0,9)
r2 = Rank(temp)
rank = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = pd.DataFrame(-1 * rank['r1'] * (1 - rank['r2']) * rank['r3'])
alpha.columns = ['alpha25']
return alpha
@timer
def alpha26(self):
close = self.close
vwap = self.vwap
close_mean7 = Mean(close,7)
close_mean7.columns = ['close_mean7']
close_delay5 = Delay(close,5)
close_delay5.columns = ['close_delay5']
data = pd.concat([vwap,close_delay5],axis = 1,join = 'inner')
corr = Corr(data,230)
corr.columns = ['corr']
data_temp = pd.concat([corr,close_mean7,close],axis = 1,join = 'inner')
alpha = data_temp['close_mean7'] - data_temp['Close'] + data_temp['corr']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha26']
return alpha
@timer
def alpha27(self):
"""
incomplete implementation
"""
close = self.close
close_delay3 = Delay(close,3)
close_delay6 = Delay(close,6)
data = pd.concat([close,close_delay3,close_delay6],axis = 1,join = 'inner')
data.columns = ['close','close_delay3','close_delay6']
temp1 = pd.DataFrame((data['close'] - data['close_delay3'])/data['close_delay3'] * 100)
temp2 = pd.DataFrame((data['close'] - data['close_delay6'])/data['close_delay6'] * 100)
data_temp = pd.concat([temp1,temp2],axis = 1,join = 'inner')
data_temp.columns = ['temp1','temp2']
temp = pd.DataFrame(data_temp['temp1'] + data_temp['temp2'])
alpha = DecayLinear(temp,12)
alpha.columns = ['alpha27']
return alpha
@timer
def alpha28(self):
close = self.close
low = self.low
high = self.high
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['Close','low_min','high_max']
temp1 = pd.DataFrame((data['Close'] - data['low_min']) /(data['high_max'] - data['low_min']))
sma1 = SMA(temp1,3,1)
sma2 = SMA(sma1,3,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] * 2 - sma['sma2'] * 3)
alpha.columns = ['alpha28']
return alpha
@timer
def alpha29(self):
close = self.close
volume = self.volume
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay'] * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha29']
return alpha
@timer
def alpha30(self):
close = self.close
close_delay = Delay(close,1)
# NOTE: unfinished - the remaining steps of the original alpha30 (a regression of
# returns on market/Fama-French factors, cf. the get_fama import) are not
# implemented here, so this method currently returns None
@timer
def alpha31(self):
close = self.close
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha31']
return alpha
@timer
def alpha32(self):
volume = self.volume
high = self.high
r1 = Rank(volume)
r2 = Rank(high)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,3)
r = Rank(corr)
alpha = -1 * Sum(r,3)
alpha.columns = ['alpha32']
return alpha
@timer
def alpha33(self):
low = self.low
volume = self.volume
ret = self.ret
low_min = TsMin(low,5)
low_min_delay = Delay(low_min,5)
data1 = pd.concat([low_min,low_min_delay],axis = 1,join = 'inner')
data1.columns = ['low_min','low_min_delay']
ret_sum240 = Sum(ret,240)
ret_sum20 = Sum(ret,20)
ret_temp = pd.concat([ret_sum240,ret_sum20],axis = 1, join = 'inner')
ret_temp.columns = ['ret240','ret20']
temp1 = pd.DataFrame(data1['low_min_delay'] - data1['low_min'])
temp2 = pd.DataFrame((ret_temp['ret240'] - ret_temp['ret20'])/220)
r_temp2 = Rank(temp2)
r_volume = TsRank(volume,5)
temp = pd.concat([temp1,r_temp2,r_volume],axis = 1,join = 'inner')
temp.columns = ['temp1','r_temp2','r_volume']
alpha = temp['temp1'] * temp['r_temp2'] * temp['r_volume']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha33']
return alpha
@timer
def alpha34(self):
close = self.close
close_mean = Mean(close,12)
close_mean.columns = ['close_mean']
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
alpha = pd.DataFrame(data['close_mean']/data['Close'])
alpha.columns = ['alpha34']
return alpha
@timer
def alpha35(self):
volume = self.volume
Open = self.open
open_delay = Delay(Open,1)
open_delay.columns = ['open_delay']
open_linear = DecayLinear(Open,17)
open_linear.columns = ['open_linear']
open_delay_temp = DecayLinear(open_delay,15)
r1 = Rank(open_delay_temp)
data = pd.concat([Open,open_linear],axis = 1,join = 'inner')
Open_temp = data['Open'] * 0.65 + 0.35 * data['open_linear']
rank = pd.concat([volume,Open_temp],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,7)
r2 = Rank(-1 * corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = Cross_min(pd.DataFrame(r['r1']),pd.DataFrame(r['r2']))
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha35']
return alpha
@timer
def alpha36(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,6)
temp = Sum(corr,2)
alpha = Rank(temp)
alpha.columns = ['alpha36']
return alpha
@timer
def alpha37(self):
Open = self.open
ret = self.ret
open_sum = Sum(Open,5)
ret_sum = Sum(ret,5)
data = pd.concat([open_sum,ret_sum],axis = 1,join = 'inner')
data.columns = ['open_sum','ret_sum']
temp = data['open_sum'] * data['ret_sum']
temp_delay = Delay(temp,10)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
alpha = -1 * Rank(pd.DataFrame(data_temp['temp'] - data_temp['temp_delay']))
alpha.columns = ['alpha37']
return alpha
@timer
def alpha38(self):
high = self.high
high_mean = Mean(high,20)
high_delta = Delta(high,2)
data = pd.concat([high,high_mean,high_delta],axis = 1,join = 'inner')
data.columns = ['high','high_mean','high_delta']
data['alpha'] = -1 * data['high_delta']
data['alpha'][data['high_mean'] >= data['high']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha38']
return alpha
@timer
def alpha39(self):
close = self.close
Open = self.open
vwap = self.vwap
volume = self.volume
close_delta2 = Delta(close,2)
close_delta2_decay = DecayLinear(close_delta2,8)
r1 = Rank(close_delta2_decay)
price_temp = pd.concat([vwap,Open],axis = 1,join = 'inner')
price = pd.DataFrame(price_temp['Vwap'] * 0.3 + price_temp['Open'] * 0.7)
volume_mean = Mean(volume,180)
volume_mean_sum = Sum(volume_mean,37)
rank = pd.concat([price,volume_mean_sum],axis = 1,join = 'inner')
corr = Corr(rank,14)
corr_decay = DecayLinear(corr,12)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns = ['alpha39']
return alpha
@timer
def alpha40(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
data = pd.concat([close,volume,close_delay],axis = 1, join = 'inner')
data.columns = ['close','volume','close_delay']
data['temp1'] = data['volume']
data['temp2'] = data['volume']
data['temp1'][data['close'] <= data['close_delay']] = 0
data['temp2'][data['close'] > data['close_delay']] = 0
s1 = Sum(pd.DataFrame(data['temp1']),26)
s2 = Sum(pd.DataFrame(data['temp2']),26)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha40']
return alpha
@timer
def alpha41(self):
vwap = self.vwap
vwap_delta = Delta(vwap,3)
vwap_delta_max = TsMax(vwap_delta,5)
alpha = -1 * Rank(vwap_delta_max)
alpha.columns = ['alpha41']
return alpha
@timer
def alpha42(self):
high = self.high
volume = self.volume
high_std = STD(high,10)
r1 = Rank(high_std)
data = pd.concat([high,volume],axis = 1,join = 'inner')
corr = Corr(data,10)
r = pd.concat([r1,corr],axis = 1,join = 'inner')
r.columns = ['r1','corr']
alpha = pd.DataFrame(-1 * r['r1'] * r['corr'])
alpha.columns = ['alpha42']
return alpha
@timer
def alpha43(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,6)
alpha.columns = ['alpha43']
return alpha
@timer
def alpha44(self):
volume = self.volume
vwap = self.vwap
low = self.low
volume_mean = Mean(volume,10)
rank = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(rank,7)
corr_decay = DecayLinear(corr,6)
r1 = TsRank(corr_decay,4)
vwap_delta = Delta(vwap,3)
vwap_delta_decay = DecayLinear(vwap_delta,10)
r2 = TsRank(vwap_delta_decay,15)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha44']
return alpha
@timer
def alpha45(self):
volume = self.volume
vwap = self.vwap
close = self.close
Open = self.open
price = pd.concat([close,Open],axis = 1,join = 'inner')
price['price'] = price['Close'] * 0.6 + price['Open'] * 0.4
price_delta = Delta(pd.DataFrame(price['price']),1)
r1 = Rank(price_delta)
volume_mean = Mean(volume,150)
data = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,15)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha45']
return alpha
@timer
def alpha46(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close,close_mean3,close_mean6,close_mean12,close_mean24],axis = 1,join = 'inner')
data.columns = ['c','c3','c6','c12','c24']
alpha = (data['c3'] + data['c6'] + data['c12'] + data['c24'])/(4 * data['c'])
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha46']
return alpha
@timer
def alpha47(self):
close = self.close
low = self.low
high = self.high
high_max = TsMax(high,6)
low_min = TsMin(low,6)
data = pd.concat([high_max,low_min,close],axis = 1,join = 'inner')
data.columns = ['high_max','low_min','close']
temp = pd.DataFrame((data['high_max'] - data['close'])/(data['high_max'] - \
data['low_min']) * 100)
alpha = SMA(temp,9,1)
alpha.columns = ['alpha47']
return alpha
@timer
def alpha48(self):
close = self.close
volume = self.volume
temp1 = Delta(close,1)
temp1_delay1 = Delay(temp1,1)
temp1_delay2 = Delay(temp1,2)
data = pd.concat([temp1,temp1_delay1,temp1_delay2],axis = 1,join = 'inner')
data.columns = ['temp1','temp1_delay1','temp1_delay2']
temp2 = pd.DataFrame(np.sign(data['temp1']) + np.sign(data['temp1_delay1']) \
+ np.sign(data['temp1_delay2']))
volume_sum5 = Sum(volume,5)
volume_sum20 = Sum(volume,20)
data_temp = pd.concat([temp2,volume_sum5,volume_sum20],axis = 1,join = 'inner')
data_temp.columns = ['temp2','volume_sum5','volume_sum20']
temp3 = pd.DataFrame(data_temp['temp2'] * data_temp['volume_sum5']/\
data_temp['volume_sum20'])
alpha = -1 * Rank(temp3)
alpha.columns = ['alpha48']
return alpha
@timer
def alpha49(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 0
price['temp'][price['sum'] < price['sum_delay']] = 1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha49']
return alpha
@timer
def alpha50(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = -1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha50']
return alpha
@timer
def alpha51(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = 0
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha51']
return alpha
@timer
def alpha52(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
data['sum_delay'] = Delay(pd.DataFrame((data['High'] + data['Low'] + data['Close'])/3),1)
temp1 = pd.DataFrame(data['High'] - data['sum_delay'])
temp1.columns = ['high_diff']
temp2 = pd.DataFrame(data['sum_delay'] - data['Low'])
temp2.columns = ['low_diff']
temp1['max'] = temp1['high_diff']
temp1['max'][temp1['high_diff'] < 0 ] = 0
temp2['max'] = temp2['low_diff']
temp2['max'][temp2['low_diff'] < 0 ] = 0
temp1_sum = Sum(pd.DataFrame(temp1['max']),26)
temp2_sum = Sum(pd.DataFrame(temp2['max']),26)
alpha_temp = pd.concat([temp1_sum,temp2_sum],axis = 1,join = 'inner')
alpha_temp.columns = ['s1','s2']
alpha = pd.DataFrame(alpha_temp['s1']/alpha_temp['s2'] * 100)
alpha.columns = ['alpha52']
return alpha
@timer
def alpha53(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,12)
alpha = count/12.0 * 100
alpha.columns = ['alpha53']
return alpha
@timer
def alpha54(self):
Open = self.open
close = self.close
data = pd.concat([Open,close], axis = 1, join = 'inner')
data.columns = ['close','open']
temp = pd.DataFrame(data['close'] - data['open'])
temp_abs = pd.DataFrame(np.abs(temp))
df = pd.concat([temp,temp_abs], axis = 1, join= 'inner')
df.columns = ['temp','abs']
std = STD(pd.DataFrame(df['temp'] + df['abs']),10)
corr = Corr(data,10)
data1 = pd.concat([corr,std],axis = 1, join = 'inner')
data1.columns = ['corr','std']
alpha = Rank(pd.DataFrame(data1['corr'] + data1['std'])) * -1
alpha.columns = ['alpha54']
return alpha
@timer
def alpha55(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
data1['judge4'][data1['abs3'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
tep = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha = Sum(tep,20)
alpha.columns = ['alpha55']
return alpha
@timer
def alpha56(self):
low = self.low
high = self.high
volume = self.volume
Open = self.open
open_min = TsMin(Open,12)
data1 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data1.columns = ['open','open_min']
r1 = Rank(pd.DataFrame(data1['open'] - data1['open_min']))
volume_mean = Mean(volume,40)
volume_mean_sum= Sum(volume_mean,19)
data2 = pd.concat([high,low],axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
rank = pd.concat([temp,volume_mean_sum],axis = 1 , join = 'inner')
rank.columns = ['temp','volume_mean_sum']
corr = Corr(rank,13)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] >= r['r2']] = 1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha56']
return alpha
@timer
def alpha57(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = pd.DataFrame((data['close'] - data['low_min'])/(data['high_max'] \
- data['low_min']) * 100)
alpha = SMA(temp,3,1)
alpha.columns = ['alpha57']
return alpha
@timer
def alpha58(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,20)
alpha = count/20.0 * 100
alpha.columns = ['alpha58']
return alpha
@timer
def alpha59(self):
low = self.low
high = self.high
close = self.close
close_delay = Delay(close,1)
max_temp = pd.concat([high,close_delay],axis = 1,join = 'inner')
min_temp = pd.concat([low,close_delay],axis = 1,join = 'inner')
max_temp1 = pd.DataFrame(np.max(max_temp,axis = 1))
min_temp1 = pd.DataFrame(np.min(min_temp,axis = 1))
data = pd.concat([close,close_delay,max_temp1,min_temp1],axis = 1,join = 'inner')
data.columns = ['close','close_delay','max','min']
data['max'][data['close'] > data['close_delay']] = 0
data['min'][data['close'] <= data['close_delay']] = 0
alpha = pd.DataFrame(data['max'] + data['min'])
alpha.columns = ['alpha59']
return alpha
@timer
def alpha60(self):
low = self.low
high = self.high
close = self.close
volume = self.volume
data = pd.concat([low,high,close,volume],axis = 1,join = 'inner')
temp = pd.DataFrame((2 * data['Close'] - data['Low'] - data['High'])/(data['Low'] + \
data['High']) * data['Vol'])
alpha = Sum(temp,20)
alpha.columns = ['alpha60']
return alpha
@timer
def alpha61(self):
low = self.low
volume = self.volume
vwap = self.vwap
vwap_delta = Delta(vwap,1)
vwap_delta_decay = DecayLinear(vwap_delta,12)
r1 = Rank(vwap_delta_decay)
volume_mean = Mean(volume,80)
data = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,8)
corr_decay = DecayLinear(corr,17)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) * -1)
alpha.columns = ['alpha61']
return alpha
@timer
def alpha62(self):
high = self.high
volume = self.volume
volume_r = Rank(volume)
data = pd.concat([high,volume_r],axis = 1,join = 'inner')
alpha = -1 * Corr(data,5)
alpha.columns = ['alpha62']
return alpha
@timer
def alpha63(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),6,1)
sma2 = SMA(pd.DataFrame(data['abs']),6,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha63']
return alpha
@timer
def alpha64(self):
vwap = self.vwap
volume = self.volume
close = self.close
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data1 = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr1 = Corr(data1,4)
corr1_decay = DecayLinear(corr1,4)
r1 = Rank(corr1_decay)
close_mean = Mean(close,60)
close_r = Rank(close)
close_mean_r = Rank(close_mean)
data2 = pd.concat([close_r,close_mean_r],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_max = TsMax(corr2,13)
corr2_max_decay = DecayLinear(corr2_max,14)
r2 = Rank(corr2_max_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) *-1)
alpha.columns = ['alpha64']
return alpha
@timer
def alpha65(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = pd.DataFrame(data['close_mean']/data['close'])
alpha.columns = ['alpha65']
return alpha
@timer
def alpha66(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha66']
return alpha
@timer
def alpha67(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),24,1)
sma2 = SMA(pd.DataFrame(data['abs']),24,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha67']
return alpha
@timer
def alpha68(self):
high = self.high
volume = self.volume
low = self.low
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['sum']= (data['High'] + data['Low'])/2
data['sum_delta'] = Delta(pd.DataFrame(data['sum']),1)
temp = data['sum_delta'] * (data['High'] - data['Low'])/data['Vol']
alpha = SMA(pd.DataFrame(temp),15,2)
alpha.columns = ['alpha68']
return alpha
@timer
def alpha69(self):
high = self.high
low = self.low
Open = self.open
dtm = DTM(Open,high)
dbm = DBM(Open,low)
dtm_sum = Sum(dtm,20)
dbm_sum = Sum(dbm,20)
data = pd.concat([dtm_sum,dbm_sum],axis = 1, join = 'inner')
data.columns = ['dtm','dbm']
data['temp1'] = (data['dtm'] - data['dbm'])/data['dtm']
data['temp2'] = (data['dtm'] - data['dbm'])/data['dbm']
data['temp1'][data['dtm'] <= data['dbm']] = 0
data['temp2'][data['dtm'] >= data['dbm']] = 0
alpha = pd.DataFrame(data['temp1'] + data['temp2'])
alpha.columns = ['alpha69']
return alpha
@timer
def alpha70(self):
amount = self.amt
alpha= STD(amount,6)
alpha.columns = ['alpha70']
return alpha
@timer
def alpha71(self):
close = self.close
close_mean = Mean(close,24)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha71']
return alpha
@timer
def alpha72(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),15,1)
alpha.columns = ['alpha72']
return alpha
@timer
def alpha73(self):
vwap = self.vwap
volume = self.volume
close = self.close
data1 = pd.concat([close,volume],axis = 1,join = 'inner')
corr1 = Corr(data1,10)
corr1_decay = DecayLinear(DecayLinear(corr1,16),4)
r1 = TsRank(corr1_decay,5)
volume_mean = Mean(volume,30)
data2 = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1,join ='inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns= ['alpha73']
return alpha
@timer
def alpha74(self):
vwap = self.vwap
volume = self.volume
low = self.low
volume_mean = Mean(volume,40)
volume_mean_sum = Sum(volume_mean,20)
data1 = pd.concat([low,vwap],axis = 1,join = 'inner')
data_sum = Sum(pd.DataFrame(data1['Low'] * 0.35 + data1['Vwap'] * 0.65),20)
data = pd.concat([volume_mean_sum,data_sum],axis = 1,join = 'inner')
corr = Corr(data,7)
r1 = Rank(corr)
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data_temp = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr2 = Corr(data_temp,6)
r2 = Rank(corr2)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha74']
return alpha
@timer
def alpha75(self):
close = self.close
Open = self.open
close_index = self.close_index
open_index = self.open_index
data1 = pd.concat([close,Open], axis = 1, join = 'inner')
data1.columns = ['close','open']
data1['temp'] = 1
data1['temp'][data1['close'] <= data1['open']] = 0
data2 = pd.concat([close_index,open_index], axis = 1, join = 'inner')
data2.columns = ['close','open']
data2['tep'] = 1
data2['tep'][data2['close'] > data2['open']] = 0
temp = data1['temp'].unstack()
tep = data2['tep'].unstack()
tep1 = repmat(tep,1,np.size(temp,1))
data3 = temp * tep1
temp_result = data3.rolling(50,min_periods = 50).sum()
tep_result = tep.rolling(50,min_periods = 50).sum()
tep2_result = np.matlib.repmat(tep_result,1,np.size(temp,1))
result = temp_result/tep2_result
alpha = pd.DataFrame(result.stack())
alpha.columns = ['alpha75']
return alpha
@timer
def alpha76(self):
volume = self.volume
close = self.close
close_delay = Delay(close,1)
data = pd.concat([volume,close,close_delay],axis = 1,join = 'inner')
data.columns = ['volume','close','close_delay']
temp = pd.DataFrame(np.abs((data['close']/data['close_delay'] -1 )/data['volume']))
temp_std = STD(temp,20)
temp_mean = Mean(temp,20)
data_temp = pd.concat([temp_std,temp_mean],axis = 1,join = 'inner')
data_temp.columns = ['std','mean']
alpha = pd.DataFrame(data_temp['std']/data_temp['mean'])
alpha.columns = ['alpha76']
return alpha
@timer
def alpha77(self):
vwap = self.vwap
volume = self.volume
low = self.low
high = self.high
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
temp = pd.DataFrame((data['High'] + data['Low'])/2 - data['Vwap'])
temp_decay = DecayLinear(temp,20)
r1 = Rank(temp_decay)
temp1 = pd.DataFrame((data['High'] + data['Low'])/2)
volume_mean = Mean(volume,40)
data2 = pd.concat([temp1,volume_mean],axis = 1,join = 'inner')
corr = Corr(data2,3)
corr_decay = DecayLinear(corr,6)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.min(r,axis = 1))
alpha.columns = ['alpha77']
return alpha
@timer
def alpha78(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
temp = pd.DataFrame((data['Low'] + data['High'] + data['Close'])/3)
temp.columns = ['temp']
temp_mean = Mean(temp,12)
temp_mean.columns = ['temp_mean']
temp2 = pd.concat([temp,temp_mean],axis = 1,join = 'inner')
tmp = pd.DataFrame(temp2['temp'] - temp2['temp_mean'])
data1 = pd.concat([close,temp_mean],axis = 1,join = 'inner')
temp_abs = pd.DataFrame(np.abs(data1['Close'] - data1['temp_mean']))
temp_abs_mean = Mean(temp_abs,12)
df = pd.concat([tmp,temp_abs_mean],axis = 1,join = 'inner')
df.columns = ['df1','df2']
alpha = pd.DataFrame(df['df1']/(df['df2'] * 0.015))
alpha.columns = ['alpha78']
return alpha
@timer
def alpha79(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),12,1)
sma2 = SMA(pd.DataFrame(data['abs']),12,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha79']
return alpha
@timer
def alpha80(self):
volume = self.volume
volume_delay = Delay(volume,5)
volume_delay.columns = ['volume_delay']
data = pd.concat([volume,volume_delay],axis = 1,join = 'inner')
alpha = (data['Vol'] - data['volume_delay'])/data['volume_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha80']
return alpha
@timer
def alpha81(self):
volume = self.volume
alpha = SMA(volume,21,2)
alpha.columns = ['alpha81']
return alpha
@timer
def alpha82(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),20,1)
alpha.columns = ['alpha82']
return alpha
@timer
def alpha83(self):
high = self.high
volume = self.volume
high_r = Rank(high)
volume_r = Rank(volume)
data = pd.concat([high_r,volume_r],axis = 1,join = 'inner')
corr = Corr(data,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha83']
return alpha
@timer
def alpha84(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,20)
alpha.columns = ['alpha84']
return alpha
@timer
def alpha85(self):
close = self.close
volume = self.volume
volume_mean = Mean(volume,20)
close_delta = Delta(close,7)
data1 = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
data1.columns = ['volume','volume_mean']
temp1 = pd.DataFrame(data1['volume']/data1['volume_mean'])
r1 = TsRank(temp1,20)
r2 = TsRank(-1 * close_delta,8)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha85']
return alpha
@timer
def alpha86(self):
close = self.close
close_delay20 = Delay(close,20)
close_delay10 = Delay(close,10)
data = pd.concat([close,close_delay20,close_delay10],axis = 1,join = 'inner')
data.columns = ['close','close_delay20','close_delay10']
temp = pd.DataFrame((data['close_delay20'] - data['close_delay10'])/10 - \
(data['close_delay10'] - data['close'])/10)
close_delta = Delta(close,1) * -1
data_temp = pd.concat([close_delta,temp],axis = 1,join = 'inner')
data_temp.columns = ['close_delta','temp']
data_temp['close_delta'][data_temp['temp'] > 0.25]= -1
data_temp['close_delta'][data_temp['temp'] < 0]= 1
alpha = pd.DataFrame(data_temp['close_delta'])
alpha.columns = ['alpha86']
return alpha
@timer
def alpha87(self):
vwap = self.vwap
high = self.high
low = self.low
Open = self.open
vwap_delta = Delta(vwap,4)
vwap_delta_decay = DecayLinear(vwap_delta,7)
r1 = Rank(vwap_delta_decay)
data = pd.concat([low,high,vwap,Open], axis = 1, join = 'inner')
temp = pd.DataFrame((data['Low'] * 0.1 + data['High'] * 0.9 - data['Vwap'])/\
(data['Open'] - 0.5 * (data['Low'] + data['High'])))
temp_decay = DecayLinear(temp,11)
r2 = TsRank(temp_decay,7)
r = pd.concat([r1,r2], axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(-1 * (r['r1'] + r['r2']))
alpha.columns = ['alpha87']
return alpha
@timer
def alpha88(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delta']
alpha = (data['close'] - data['close_delta'])/data['close_delta'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha88']
return alpha
@timer
def alpha89(self):
close = self.close
sma1 = SMA(close,13,2)
sma2 = SMA(close,27,2)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3],axis = 1, join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(2 *(data['temp'] - data['sma']))
alpha.columns = ['alpha89']
return alpha
@timer
def alpha90(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2], axis = 1, join = 'inner')
corr = Corr(rank,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha90']
return alpha
@timer
def alpha91(self):
close = self.close
volume = self.volume
low = self.low
close_max = TsMax(close,5)
data1 = pd.concat([close,close_max], axis = 1,join = 'inner')
data1.columns = ['close','close_max']
r1 = Rank(pd.DataFrame(data1['close'] - data1['close_max']))
volume_mean = Mean(volume,40)
data2 = pd.concat([volume_mean,low], axis = 1, join = 'inner')
corr = Corr(data2,5)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha91']
return alpha
@timer
def alpha92(self):
volume = self.volume
vwap = self.vwap
close = self.close
data = pd.concat([close,vwap],axis = 1, join = 'inner')
data['price'] = data['Close'] * 0.35 + data['Vwap'] * 0.65
price_delta = Delta(pd.DataFrame(data['price']),2)
price_delta_decay = DecayLinear(price_delta,3)
r1 = Rank(price_delta_decay)
volume_mean = Mean(volume,180)
rank = pd.concat([volume_mean,close],axis = 1,join = 'inner')
corr = Corr(rank,13)
temp = pd.DataFrame(np.abs(corr))
temp_decay = DecayLinear(temp,5)
r2 = TsRank(temp_decay,15)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
alpha = pd.DataFrame(-1 * np.max(r, axis = 1))
alpha.columns = ['alpha92']
return alpha
@timer
def alpha93(self):
low = self.low
Open = self.open
open_delay = Delay(Open,1)
data = pd.concat([low,Open,open_delay],axis = 1,join = 'inner')
data.columns = ['low','open','open_delay']
temp1 = pd.DataFrame(data['open'] - data['low'])
temp2 = pd.DataFrame(data['open'] - data['open_delay'])
data_temp = pd.concat([temp1,temp2],axis = 1 ,join = 'inner')
temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
temp_max.columns = ['max']
data2 = pd.concat([data,temp_max],axis = 1,join = 'inner')
data2['temp'] = data2['max']
data2['temp'][data2['open'] >= data2['open_delay']] = 0
alpha = Sum(pd.DataFrame(data2['temp']),20)
alpha.columns = ['alpha93']
return alpha
@timer
def alpha94(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,30)
alpha.columns = ['alpha94']
return alpha
@timer
def alpha95(self):
amt = self.amt
alpha = STD(amt,20)
alpha.columns = ['alpha95']
return alpha
@timer
def alpha96(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = ( data['close'] - data['low_min'])/(data['high_max'] - data['low_min']) * 100
alpha_temp = SMA(pd.DataFrame(temp),3,1)
alpha = SMA(alpha_temp,3,1)
alpha.columns = ['alpha96']
return alpha
@timer
def alpha97(self):
volume = self.volume
alpha = STD(volume,10)
alpha.columns = ['alpha97']
return alpha
@timer
def alpha98(self):
close = self.close
close_mean = Mean(close,100)
close_mean_delta = Delta(close_mean,100)
close_delay = Delay(close,100)
data = pd.concat([close_mean_delta,close_delay],axis = 1,join = 'inner')
data.columns = ['delta','delay']
temp = pd.DataFrame(data['delta']/ data['delay'])
close_delta = Delta(close,3)
close_min = TsMin(close,100)
data_temp = pd.concat([close,close_delta,close_min,temp],axis = 1,join = 'inner')
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Functions to reproduce the post-processing of data on text charts.
Some text-based charts (pivot tables and t-test table) perform
post-processing of the data in JavaScript. When sending the data
to users in reports we want to show the same data they would see
on Explore.
In order to do that, we reproduce the post-processing in Python
for these chart types.
"""
from io import StringIO
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING
import pandas as pd
from superset.common.chart_data import ChartDataResultFormat
from superset.utils.core import (
DTTM_ALIAS,
extract_dataframe_dtypes,
get_column_names,
get_metric_names,
)
if TYPE_CHECKING:
from superset.connectors.base.models import BaseDatasource
def get_column_key(label: Tuple[str, ...], metrics: List[str]) -> Tuple[Any, ...]:
"""
Sort columns when combining metrics.
MultiIndex labels have the metric name as the last element in the
tuple. We want to sort these according to the list of passed metrics.
"""
parts: List[Any] = list(label)
metric = parts[-1]
parts[-1] = metrics.index(metric)
return tuple(parts)
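# Hypothetical illustration (labels and metrics invented): sorting MultiIndex
# column labels with this key groups columns by their dimension values first and
# then follows the user-defined metric order instead of alphabetical order.
def _example_get_column_key() -> List[Tuple[Any, ...]]:
    metrics = ["SUM(num)", "AVG(num)"]
    labels = [("boy", "AVG(num)"), ("boy", "SUM(num)"), ("girl", "SUM(num)")]
    # -> [("boy", "SUM(num)"), ("boy", "AVG(num)"), ("girl", "SUM(num)")]
    return sorted(labels, key=lambda label: get_column_key(label, metrics))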
def pivot_df( # pylint: disable=too-many-locals, too-many-arguments, too-many-statements, too-many-branches
df: pd.DataFrame,
rows: List[str],
columns: List[str],
metrics: List[str],
aggfunc: str = "Sum",
transpose_pivot: bool = False,
combine_metrics: bool = False,
show_rows_total: bool = False,
show_columns_total: bool = False,
apply_metrics_on_rows: bool = False,
) -> pd.DataFrame:
metric_name = f"Total ({aggfunc})"
if transpose_pivot:
rows, columns = columns, rows
# to apply the metrics on the rows we pivot the dataframe, apply the
# metrics to the columns, and pivot the dataframe back before
# returning it
if apply_metrics_on_rows:
rows, columns = columns, rows
axis = {"columns": 0, "rows": 1}
else:
axis = {"columns": 1, "rows": 0}
# pivot data; we'll compute totals and subtotals later
if rows or columns:
# pivoting with null values will create an empty df
df = df.fillna("NULL")
df = df.pivot_table(
index=rows,
columns=columns,
values=metrics,
aggfunc=pivot_v2_aggfunc_map[aggfunc],
margins=False,
)
else:
# if there's no rows nor columns we have a single value; update
# the index with the metric name so it shows up in the table
df.index = pd.Index([*df.index[:-1], metric_name], name="metric")
# if no rows were passed the metrics will be in the rows, so we
# need to move them back to columns
if columns and not rows:
df = df.stack()
if not isinstance(df, pd.DataFrame):
df = df.to_frame()
df = df.T
df = df[metrics]
df.index = pd.Index([*df.index[:-1], metric_name], name="metric")
# combining metrics changes the column hierarchy, moving the metric
# from the top to the bottom, eg:
#
# ('SUM(col)', 'age', 'name') => ('age', 'name', 'SUM(col)')
if combine_metrics and isinstance(df.columns, pd.MultiIndex):
# move metrics to the lowest level
new_order = [*range(1, df.columns.nlevels), 0]
df = df.reorder_levels(new_order, axis=1)
# sort columns, combining metrics for each group
decorated_columns = [(col, i) for i, col in enumerate(df.columns)]
grouped_columns = sorted(
decorated_columns, key=lambda t: get_column_key(t[0], metrics)
)
indexes = [i for col, i in grouped_columns]
df = df[df.columns[indexes]]
elif rows:
# if metrics were not combined we sort the dataframe by the list
# of metrics defined by the user
df = df[metrics]
# compute fractions, if needed
if aggfunc.endswith(" as Fraction of Total"):
total = df.sum().sum()
df = df.astype(total.dtypes) / total
elif aggfunc.endswith(" as Fraction of Columns"):
total = df.sum(axis=axis["rows"])
df = df.astype(total.dtypes).div(total, axis=axis["columns"])
elif aggfunc.endswith(" as Fraction of Rows"):
total = df.sum(axis=axis["columns"])
df = df.astype(total.dtypes).div(total, axis=axis["rows"])
# convert to a MultiIndex to simplify logic
if not isinstance(df.index, pd.MultiIndex):
df.index = pd.MultiIndex.from_tuples([(str(i),) for i in df.index])
if not isinstance(df.columns, pd.MultiIndex):
df.columns = pd.MultiIndex.from_tuples([(str(i),) for i in df.columns])
if show_rows_total:
# add subtotal for each group and overall total; we start from the
# overall group, and iterate deeper into subgroups
groups = df.columns
for level in range(df.columns.nlevels):
subgroups = {group[:level] for group in groups}
for subgroup in subgroups:
slice_ = df.columns.get_loc(subgroup)
subtotal = pivot_v2_aggfunc_map[aggfunc](df.iloc[:, slice_], axis=1)
depth = df.columns.nlevels - len(subgroup) - 1
total = metric_name if level == 0 else "Subtotal"
subtotal_name = tuple([*subgroup, total, *([""] * depth)])
# insert column after subgroup
df.insert(int(slice_.stop), subtotal_name, subtotal)
if rows and show_columns_total:
# add subtotal for each group and overall total; we start from the
# overall group, and iterate deeper into subgroups
groups = df.index
for level in range(df.index.nlevels):
subgroups = {group[:level] for group in groups}
for subgroup in subgroups:
slice_ = df.index.get_loc(subgroup)
subtotal = pivot_v2_aggfunc_map[aggfunc](
df.iloc[slice_, :].apply(pd.to_numeric), axis=0
)
depth = df.index.nlevels - len(subgroup) - 1
total = metric_name if level == 0 else "Subtotal"
subtotal.name = tuple([*subgroup, total, *([""] * depth)])
# insert row after subgroup
df = pd.concat(
[df[: slice_.stop], subtotal.to_frame().T, df[slice_.stop :]]
)
# if we want to apply the metrics on the rows we need to pivot the
# dataframe back
if apply_metrics_on_rows:
df = df.T
return df
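# Hypothetical, minimal illustration of pivot_df; the column and metric names are
# invented and totals are disabled to keep the output small.
def _example_pivot_df() -> pd.DataFrame:
    df = pd.DataFrame(
        {"gender": ["girl", "boy"], "state": ["CA", "CA"], "SUM(num)": [5, 7]}
    )
    return pivot_df(
        df,
        rows=["gender"],
        columns=["state"],
        metrics=["SUM(num)"],
        aggfunc="Sum",
        transpose_pivot=False,
        combine_metrics=False,
        show_rows_total=False,
        show_columns_total=False,
        apply_metrics_on_rows=False,
    )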
def list_unique_values(series: pd.Series) -> str:
"""
List unique values in a series.
"""
return ", ".join(set(str(v) for v in pd.Series.unique(series)))
pivot_v2_aggfunc_map = {
"Count": pd.Series.count,
"Count Unique Values": pd.Series.nunique,
"List Unique Values": list_unique_values,
"Sum": pd.Series.sum,
"Average": pd.Series.mean,
"Median": pd.Series.median,
"Sample Variance": lambda series: | pd.series.var(series) | pandas.series.var |
import timeit
import tensorflow as tf
import pandas as pd
from tqdm import tqdm
class DataProcessing():
def __init__(self, in_path, out_path):
if in_path == False and out_path == False:
pass
elif in_path == False:
self.out_path = out_path
elif out_path == False:
self.in_path = in_path
else:
self.in_path = in_path
self.out_path = out_path
def train_txt2tfrecords(self):
"""
Convert the training-set text file into a TFRecords file.
:param in_path:
:param out_path:
:return:
"""
print("\nStart to convert {} to {}\n".format(self.in_path,self.out_path))
start_time = timeit.default_timer()
writer = tf.python_io.TFRecordWriter(self.out_path)
num = 0
with open(self.in_path,mode="r",encoding="utf-8") as rf:
lines = rf.readlines()
for line in tqdm(lines):
num += 1
data = line.split("\t")
try:
txt_id = [bytes(data[0],"utf-8")]
txt_title = [bytes(data[1],"utf-8")]
txt_content = [bytes(data[2],"utf-8")]
txt_label = [bytes(data[3][0:-1],"utf-8")]
except:
txt_id = [bytes(str(data[0])),"utf-8"]
txt_title = [bytes(str(" ").strip()),"utf-8"]
txt_content = [bytes(str(" ").strip(),"utf-8")]
txt_label = [bytes(str(data[3][0:-1]),"utf-8")]
example = tf.train.Example(features=tf.train.Features(feature={
"txt_id":
tf.train.Feature(bytes_list=tf.train.BytesList(value=txt_id)),
"txt_title":
tf.train.Feature(bytes_list=tf.train.BytesList(value=txt_title)),
"txt_content":
tf.train.Feature(bytes_list=tf.train.BytesList(value=txt_content)),
"txt_label":
tf.train.Feature(bytes_list=tf.train.BytesList(value=txt_label))
}))
writer.write(example.SerializeToString()) # serialize the Example to a string
writer.close()
print("Successfully convert {} to {}".format(self.in_path,self.out_path))
end_time = timeit.default_timer()
print("\nThe pretraining process ran for {0} minutes\n".format((end_time - start_time) / 60))
print("the count is: ",num)
return num, self.out_path
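# Hypothetical usage sketch (paths invented). Each input line is expected to be
# tab-separated as "id\ttitle\tcontent\tlabel\n":
# dp = DataProcessing("train.txt", "train.tfrecords")
# count, tfrecords_path = dp.train_txt2tfrecords()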
def test_txt2tfrecords(self):
"""
Convert the test-set text file into a TFRecords file.
:param in_path:
:param out_path:
:return:
"""
print("\nStart to convert {} to {}\n".format(self.in_path,self.out_path))
start_time = timeit.default_timer()
writer = tf.python_io.TFRecordWriter(self.out_path)
num = 0
with open(self.in_path,mode="r",encoding="gbk") as rf:
lines = rf.readlines()
for line in tqdm(lines):
num += 1
data = line.strip().split("\t")
try:
txt_id = [bytes(data[0],"utf-8")]
txt_title = [bytes(data[1],"utf-8")]
txt_content = [bytes(data[2],"utf-8")]
except:
txt_id = [bytes(str(data[0]),"utf-8")]
txt_title = [bytes(str(" "),"utf-8")]
txt_content = [bytes(str(" "),"utf-8")]
# convert the fields to raw bytes
example = tf.train.Example(features=tf.train.Features(feature={
"txt_id":
tf.train.Feature(bytes_list=tf.train.BytesList(value=txt_id)),
"txt_title":
tf.train.Feature(bytes_list=tf.train.BytesList(value=txt_title)),
"txt_content":
tf.train.Feature(bytes_list=tf.train.BytesList(value=txt_content))
}))
writer.write(example.SerializeToString())  # serialize the example to a byte string
writer.close()
print("Successfully convert {} to {}".format(self.in_path,self.out_path))
end_time = timeit.default_timer()
print("\nThe pretraining process ran for {0} minutes\n".format((end_time - start_time) / 60))
print("the count is: ",num)
return num,self.out_path
def parase_tfrecords_to_dataFrame(self, data_shape):
"""
Parse the predicted TFRecords and build the submission file.
:param filename:
:param data_shape:
:return:
"""
data_list = []
with tf.Session() as sess:
filename_queue = tf.train.string_input_producer([self.in_path],shuffle=False)
read = tf.TFRecordReader()
_,serialized_example = read.read(filename_queue)
features = tf.parse_single_example(serialized_example,
features={
"txt_id": tf.FixedLenFeature([],tf.string),
"label": tf.FixedLenFeature([],tf.float32),
})
txt_id = features['txt_id']
label = features["label"]
init_op = tf.group(tf.global_variables_initializer(),tf.local_variables_initializer())
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in tqdm(range(data_shape)):
content_list = sess.run([txt_id,label])
c_l = []
c_l.append(str(content_list[0],"utf-8"))
c_l.append(content_list[1])
data_list.append(c_l)
coord.request_stop()
coord.join(threads)
sess.close()
data_pd = pd.DataFrame(data_list, columns=["txt_id", "label"])
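# --- Usage sketch (not part of the original module; the paths below are hypothetical) ---
# Assumes TensorFlow 1.x (tf.python_io / tf.Session are used above) and a
# tab-separated train.txt whose columns are id, title, content and label.
if __name__ == "__main__":
    converter = DataProcessing(in_path="./data/train.txt",
                               out_path="./data/train.tfrecords")
    record_count, tfrecord_path = converter.train_txt2tfrecords()
    print("wrote {} records to {}".format(record_count, tfrecord_path))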
from math import floor, ceil
import numpy as np
import matplotlib.pyplot as plt
import datetime
import folium
import random
import seaborn as sns
import pandas as pd
import plotly.express as px
import geopandas as gpd
# import movingpandas as mpd
# from statistics import mean
from shapely.geometry import Polygon, MultiPoint
import json
from branca.colormap import linear
# from copy import copy
from sklearn.cluster import DBSCAN
from geopy.distance import great_circle
class Visualiser():
def __init__(self):
print("Initializing visualisation class") # do we need anything?
def st_cube_simple(self, points):
""" To plot a space-time cube of one trajectory. Checks for the start time
and calculates seconds passed from it for every next point
Keyword Arguments:
points {dataframe} -- A Pandas dataframe of a trajectory
Returns:
No Return
"""
def seconds_from_start(x, start):
date_time_obj = datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S')
seconds = (date_time_obj-start).total_seconds()
return int(seconds)
points['lat'] = points['geometry'].apply(lambda coord: coord.y)
points['lng'] = points['geometry'].apply(lambda coord: coord.x)
start_time = datetime.datetime.strptime(
points.time.iloc[0], '%Y-%m-%dT%H:%M:%S')
points['time_seconds'] = np.vectorize(seconds_from_start)(
np.array(points.time.values.tolist()), start_time)
# plot the space-time cube
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(points['lng'], points['lat'], points['time_seconds'])
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.set_zlabel('Seconds since start')
fig.canvas.set_window_title('Space-Time Cube')
plt.show()
def plot_full_correlation(self, points_df):
""" To plot a correlation matrix for all columns that contain word
'.value' in their name
Keyword Arguments:
points_df {dataframe} -- A Pandas dataframe of a trajectory
Returns:
No Return
"""
value_names = [s for s in points_df.columns if
'.value' in s]
value_columns = [np.array(
points_df[column].values.tolist()) for column
in value_names]
values_transposed = np.transpose(value_columns)
values_df = pd.DataFrame(values_transposed)
values_df.columns = value_names
f, ax = plt.subplots(figsize=(10, 8))
corr = values_df.corr()
sns.heatmap(corr, mask=np.zeros_like(corr, dtype=bool),
cmap=sns.diverging_palette(220, 10, as_cmap=True),
square=True, ax=ax)
def plot_pair_correlation(self, points_df, column_1, column_2,
sort_by='id', regression=False):
""" To plot a pairwise relationship in a dataset.
Special case for the Acceleration values to see difference
(if any) between accelerating and braking.
Keyword Arguments:
points_df {dataframe} -- A Pandas dataframe of a trajectory
column_1, column_2 {string} -- names of 2 columns to analyse
sort_by {string} -- 'id' or 'temperature'
regression {boolean} -- defines which kind of plot to plot
Returns:
No Return
"""
if (sort_by == 'temperature'):
bins = [-10, 0, 5, 10, 20, 30, 40]
copied = points_df.copy()
copied['Intake Temperature.value'] = \
copied['Intake Temperature.value'].astype(int)
copied['binned_temp'] = pd.cut(copied['Intake Temperature.value'],
bins)
if (column_2 == "Acceleration.value" or
column_1 == "Acceleration.value"):
df1 = copied[copied["Acceleration.value"] > 0]
df2 = copied[copied["Acceleration.value"] < 0]
if (regression):
sns.lmplot(x=column_1, y=column_2, hue='binned_temp',
data=df1, palette="viridis")
sns.lmplot(x=column_1, y=column_2, hue='binned_temp',
data=df2, palette="viridis")
else:
sns.pairplot(df1, vars=[column_1, column_2],
hue="binned_temp")
sns.pairplot(df2, vars=[column_1, column_2],
hue="binned_temp")
else:
if (regression):
sns.lmplot(x=column_1, y=column_2, hue='binned_temp',
data=copied)
else:
sns.pairplot(copied, vars=[column_1, column_2],
hue="binned_temp")
else:
if (column_2 == "Acceleration.value" or
column_1 == "Acceleration.value"):
df1 = points_df[points_df["Acceleration.value"] > 0]
df2 = points_df[points_df["Acceleration.value"] < 0]
if (regression):
sns.lmplot(x=column_1, y=column_2, hue='track.id',
data=df1, palette="viridis")
sns.lmplot(x=column_1, y=column_2, hue='track.id',
data=df2, palette="viridis")
else:
sns.pairplot(df1, vars=[column_1, column_2],
hue="track.id")
sns.pairplot(df2, vars=[column_1, column_2],
hue="track.id")
else:
if (regression):
sns.lmplot(x=column_1, y=column_2, hue='track.id',
data=points_df, palette="viridis")
else:
sns.pairplot(points_df, vars=[column_1, column_2],
hue="track.id")
def plot_distribution(self, points, column):
fig, (ax1, ax2, ax3) = plt.subplots(
1, 3, figsize=(15, 5), gridspec_kw={'width_ratios': [5, 5, 5]})
sns.boxplot(x=points[column], ax=ax1)
ax1.set_title('Boxplot')
sns.kdeplot(points[column], shade=True, color="r", ax=ax2)
ax2.set_title('Gaussian kernel density estimate')
sns.distplot(points[column], kde=False, ax=ax3)
ax3.set_title('Histogram')
fig.tight_layout()
plt.show()
def create_map(self, trajectories):
""" To create a Folium Map object (in case its not already available)
Keyword Arguments:
trajectories {mpd trajectory collection} -- A Moving Pandas
Trajectory Collection
Returns:
map {folium map} -- Newly created map object
"""
map_zoom_point = []
map_zoom_point.append(trajectories[0].df['geometry'][0].y)
map_zoom_point.append(trajectories[0].df['geometry'][0].x)
map = folium.Map(location=[map_zoom_point[0], map_zoom_point[1]],
zoom_start=12, tiles='cartodbpositron')
return map
def plot_flows(self, flows, flow_map):
""" To plot provided aggregated flows over the provided map
Keyword Arguments:
flows {mpd aggregated flows} -- A Moving Pandas Aggreagtion
function output
flow_map {folium map} -- Map over which trajectories are to be
plotted
Returns:
No Return
"""
index = 0
# to extract coordiantes from "FLOWS"
for row in range(0, len(flows)):
my_poylyline = []
mylng = flows.loc[index, 'geometry'].coords[0][0]
mylat = flows.loc[index, 'geometry'].coords[0][1]
my_poylyline.append([mylat, mylng])
mylng = flows.loc[index, 'geometry'].coords[1][0]
mylat = flows.loc[index, 'geometry'].coords[1][1]
my_poylyline.append([mylat, mylng])
# to plot point's coordinates over the map as polyline based on
# weight
myweight = int(flows.loc[index, 'weight'])
my_line = folium.PolyLine(locations=my_poylyline,
weight=round((myweight/2)))
# weight is halved above to keep very large values readable
flow_map.add_child(my_line)
index += 1
def plot_point_values(self, points, value):
""" To show points on a map
Keyword Arguments:
points {GeoDataFrame} -- points input
value {string} -- column value to use for colouriing
Returns:
No Return
"""
points['lat'] = points['geometry'].apply(lambda coord: coord.y)
points['lng'] = points['geometry'].apply(lambda coord: coord.x)
# Visualizing points by the desired value
fig = px.scatter_mapbox(points, lat="lat", lon="lng", color=value,
title=value + " visualisation", zoom=8)
fig.update_layout(mapbox_style="open-street-map",
margin={"r": 5, "t": 50, "l": 10, "b": 5})
fig.show()
def plot_region(self, region, region_map, region_color, label):
""" To plot provided regions over the provided map
Keyword Arguments:
region {shapely Polygon} -- A shapely based Polygon
region_map {folium map} -- Map over which trajectories are to be
plotted
region_color {string} -- Name of the Color in String
label {String} -- Label for popup
Returns:
No Return
"""
region_coords = []
# to extract coordinates from provided region
index = 0
for value in range(0, len(region.exterior.coords)):
temp = []
temp.append(region.exterior.coords[index][1])
temp.append(region.exterior.coords[index][0])
region_coords.append(temp)
index += 1
# to plot point's coordinates over the map as polygon
region_plot = folium.Polygon(locations=region_coords,
color=region_color, popup=label)
region_map.add_child(region_plot)
def plot_weeks_trajectory(self, weekwise_trajectory_collection,
trajectory_map, marker_radius):
""" To iterate over list with weekwise trajectory collection and plot
each over provided folium map object
Keyword Arguments:
weekwise_trajectory_collection {list of mpd trajectory collection}
-- 7 indices respective of each day of the week
trajectory_map {folium map} -- Map over which trajectories are to
be plotted
marker_radius {integer} -- Radius of each point marker (circle)
Returns:
No Return
"""
# Dictionary to assign color based on a week day
colors = {0: "crimson", 1: "blue", 2: "purple", 3: "yellow",
4: "orange", 5: "black", 6: "green"}
day = 0
for traj_day in weekwise_trajectory_collection:
track_id = -1 # to store track id of each track for Pop Up
trajectory_points = [] # to store coordiante points for each track
traj_row = 0
# if trajectory collection has at least a single trajectory
if(len(traj_day.trajectories) > 0):
for traj in traj_day.trajectories:
point_row = 0
track_id = traj.df['track.id'][0]
for point in range(len(traj_day.trajectories[
traj_row].df)):
temp = []
temp.append(traj.df['geometry'][point_row].y)
temp.append(traj.df['geometry'][point_row].x)
trajectory_points.append(temp)
point_row += 1
traj_row += 1
# Plotting day wise point's coordinate plot with a single
# color and track id as popup
for row in trajectory_points:
folium.Circle(radius=marker_radius, location=row,
color=colors[day], popup=track_id).add_to(
trajectory_map)
day += 1
def get_trajectories_coords(self, trajectories):
""" To iterate over trajectory collection and return individual track points
Keyword Arguments:
trajectories {mpd trajectory collection} -- A Moving Pandas
Trajectory Collection
Returns:
trajectory_list -- A list of two elements at each index,
track_id & array of associated point's coordinates
"""
trajectory_list = []
for traj in trajectories:
track_points = []
# Extracting Point's coordinate for each trajectory
for i in range(len(traj.df)):
temp = []
temp.append(traj.df['geometry'][i].y)
temp.append(traj.df['geometry'][i].x)
track_points.append(temp)
# Extracting Track_Id for each trajectory
track_id = []
track_id.append(traj.df['track.id'][0])
# Creating a list with [id,coordinates] for each individual
# trajectory
traj_temp = [track_id, track_points]
trajectory_list.append(traj_temp)
return trajectory_list
def plot_trajectories(self, trajectory_collection, trajectory_map,
marker_radius):
""" To iterate over trajectory collection and plot each over
provided folium map object
Keyword Arguments:
trajectory_collection {mpd trajectory collection}
-- A Moving Pandas Trajectory Collection
trajectory_map {folium map} -- Map over which trajectories are
to be plotted
marker_radius {integer} -- Radius of each point marker (circle)
Returns:
No Return
"""
# Function to get random hexcode to assign unique color to each
# trajectory
def get_hexcode_color():
random_number = random.randint(0, 16777215)
hex_number = str(hex(random_number))
hex_number = '#' + hex_number[2:]
return hex_number
# Call to function to iterate over trajectory collection
# and return individual track points
traj_list = self.get_trajectories_coords(trajectory_collection)
traj_index = 0
for traj in traj_list:
# Extracting Track_Id and Point's coordinate for each trajectory
track_id = traj[0]
track_points = traj[1]
# Call to function to random color for this trajectory
track_color = get_hexcode_color()
# Plotting points of each trajectory with a single color
point_index = 0
for row in track_points:
# Pop-Up will contain Track Id
folium.Circle(radius=marker_radius, location=row,
color=track_color, popup=track_id).add_to(
trajectory_map)
point_index += 1
traj_index += 1
##################################
# RELATED TO WEEK WISE BAR GRAPH #
def extract_daywise_lengths(self, weekly_trajectories):
""" To iterate over list with weekwise trajectory collection and
extract point's coordinates for day wise trajectories
Keyword Arguments:
weekly_trajectories {list of mpd trajectory collection}
-- 7 indices respective of each day of the week
Returns:
day_length {list} -- list with total length for each day
"""
days = {0: "Monday", 1: "Tuesday", 2: "Wednesday", 3: "Thursday",
4: "Friday", 5: "Saturday", 6: "Sunday"}
day_length = [] # to store total length for each day at each index
day = 0
for traj_day in range(len(weekly_trajectories)):
temp = []
# if trajectory collection has at least a single trajectory
if(len(weekly_trajectories[day].trajectories) > 0):
traj_row = 0
length_sum = 0 # to store total sum of track length for each
# day's collection
for traj in range(len(weekly_trajectories[day].trajectories)):
length_sum += round(weekly_trajectories[day].trajectories[
traj_row].df['track.length'][0], 2)
traj_row += 1
temp.append(days[day]) # storing weekday name like Monday,
# Tuesday etc at first index of list
temp.append(length_sum)  # storing associated total length
# at second index of list
day_length.append(temp)
else:
temp.append(days[day])
temp.append(0)
day_length.append(temp)
day += 1
return day_length
def extract_barplot_info(self, day_length):
""" To extract information for matplotlib plot
Keyword Arguments:
day_length {list} -- list with total length for each day
Returns:
day, height, highest, highest_index, average {strings/integers}
-- attributes required for plots
"""
day = []
height = []
highest = 0
highest_index = -1
total = 0
index = 0
for row in day_length:
day.append(row[0][:3]) # extracting name of day of the week
# in form of Mon, Tue etc.
track_length = round(row[1], 2) # extracting total length
# associated with each day rounded to 2 decimals
height.append(track_length)
# extracting the highest value out of 'total lengths' from all
# weekdays
if(track_length > highest):
highest = track_length
highest_index = index
total += track_length
index += 1
average_value = total/7 # extracting average value out of
# 'total lengths' from all weekdays
average = []
for row in day:
average.append(average_value) # a list of same value at each
# index, just to plot a horizontal line in plot
return day, height, highest, highest_index, average
def plot_daywise_track(self, week_trajectories):
""" To plot bar graphy of week day vs total length of that day
(all tracks combined)
Keyword Arguments:
weekly_trajectories {list of mpd trajectory collection}
-- 7 indices respective of each day of the week
Returns:
No Return
"""
# Call to function to extract daywise lengths
daywise_length = self.extract_daywise_lengths(week_trajectories)
# Call to function to extract attributes for plot
day, height, highest, highest_index, average = \
self.extract_barplot_info(daywise_length)
bar_plot = plt.bar(day, height, color=(0.1, 0.1, 0.1, 0.1),
edgecolor='blue')
bar_plot[highest_index].set_edgecolor('r')
plt.ylabel('Total Distance Travelled (Km)')
axes2 = plt.twinx()
axes2.set_ylim(0, highest+1)
axes2.plot(day, average, color='b', label='Average Distance')
plt.suptitle('Which day has a different movement pattern than others?')
plt.legend()
plt.show()
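# --- Usage sketch (illustrative only, not part of the original class) ---
# Assuming `tc` is a MovingPandas TrajectoryCollection whose point dataframes
# carry 'track.id', 'track.length' and 'geometry' columns as the methods above expect:
# vis = Visualiser()
# traj_map = vis.create_map(tc.trajectories)
# vis.plot_trajectories(tc, traj_map, marker_radius=2)
# vis.plot_daywise_track(weekwise_trajectory_collection)  # list of 7 collections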
def aggregateByGrid(df, field, summary, gridSize):
"""
Aggregates the specified field with chosen summary type and user
defined grid size. returns aggregated grids with summary
Parameters
----------
df : geopandas dataframe
field : string
field to be summarized.
summary : string
type of summary to compute, e.g. min, max, sum, median
gridSize : float
the grid cell size in metres (the data are reprojected to EPSG:3857)
Returns
-------
geodataframe
Aggregated grids with summary on it
"""
def round_down(num, divisor):
return floor(num / divisor) * divisor
def round_up(num, divisor):
return ceil(num / divisor) * divisor
# Get crs from data
sourceCRS = df.crs
targetCRS = {"init": "EPSG:3857"}
# Reproject to Mercator
df = df.to_crs(targetCRS)
# Get bounds
xmin, ymin, xmax, ymax = df.total_bounds
print(xmin, ymin, xmax, ymax)
height, width = gridSize, gridSize
top, left = round_up(ymax, height), round_down(xmin, width)
bottom, right = round_down(ymin, height), round_up(xmax, width)
rows = int((top - bottom) / height)+1
cols = int((right - left) / width)+1
XleftOrigin = left
XrightOrigin = left + width
YtopOrigin = top
YbottomOrigin = top - height
polygons = []
for i in range(cols):
Ytop = YtopOrigin
Ybottom = YbottomOrigin
for j in range(rows):
polygons.append(Polygon([(XleftOrigin, Ytop),
(XrightOrigin, Ytop),
(XrightOrigin, Ybottom),
(XleftOrigin, Ybottom)]))
Ytop = Ytop - height
Ybottom = Ybottom - height
XleftOrigin = XleftOrigin + width
XrightOrigin = XrightOrigin + width
grid = gpd.GeoDataFrame({'geometry': polygons})
grid.crs = df.crs
# Assign gridid
numGrid = len(grid)
grid['gridId'] = list(range(numGrid))
# Identify gridId for each point
points_identified = gpd.sjoin(df, grid, op='within')
# group points by gridId and aggregate the chosen field,
# store it as dataframe
# delete if field already exists
if field in grid.columns:
del grid[field]
grouped = points_identified.groupby('gridId')[field].agg(summary)
grouped_df = pd.DataFrame(grouped)
new_grid = grid.join(grouped_df, on='gridId').fillna(0)
grid = new_grid.to_crs(sourceCRS)
summarized_field = summary+"_"+field
final_grid = grid.rename(columns={field: summarized_field})
final_grid = final_grid[final_grid[summarized_field] > 0].sort_values(
by=summarized_field, ascending=False)
final_grid[summarized_field] = round(final_grid[summarized_field], 1)
final_grid['x_centroid'], final_grid['y_centroid'] = \
final_grid.geometry.centroid.x, final_grid.geometry.centroid.y
return final_grid
def plotAggregate(grid, field):
"""
Plots the aggregated data on grid. Please call aggregateByGrid
function before this step.
Parameters
----------
grid :polygon geodataframe
The grid geodataframe with grid and aggregated data in a column.
Grid should have grid id or equivalent unique ids
field : string
Fieldname with aggregated data
Returns
-------
m : folium map object
Folium map with openstreetmap as base.
"""
# Prepare for grid plotting using folium
grid.columns = [cols.replace('.', '_') for cols in grid.columns]
field = field.replace('.', '_')
# Convert grid id to string
grid['gridId'] = grid['gridId'].astype(str)
# Convert data to geojson and csv
atts = pd.DataFrame(grid)
grid.to_file("grids.geojson", driver='GeoJSON')
atts.to_csv("attributes.csv", index=False)
# load spatial and non-spatial data
data_geojson_source = "grids.geojson"
# data_geojson=gpd.read_file(data_geojson_source)
data_geojson = json.load(open(data_geojson_source))
# Get coordinates for map centre
lat = grid.geometry.centroid.y.mean()
lon = grid.geometry.centroid.x.mean()
# Intialize a new folium map object
m = folium.Map(location=[lat, lon], zoom_start=10,
tiles='OpenStreetMap')
# Configure geojson layer
folium.GeoJson(data_geojson,
lambda feature: {'lineOpacity': 0.4,
'color': 'black',
'fillColor': None,
'weight': 0.5,
'fillOpacity': 0}).add_to(m)
# add attribute data
attribute_pd = pd.read_csv("attributes.csv")
attribute = pd.DataFrame(attribute_pd)
# Convert gridId to string to ensure it matches with gridId
attribute['gridId'] = attribute['gridId'].astype(str)
# construct color map
minvalue = attribute[field].min()
maxvalue = attribute[field].max()
colormap_rn = linear.YlOrRd_09.scale(minvalue, maxvalue)
# Create Dictionary for colormap
population_dict_rn = attribute.set_index('gridId')[field]
# create map
folium.GeoJson(
data_geojson,
name='Choropleth map',
style_function=lambda feature: {
'lineOpacity': 0,
'color': 'green',
'fillColor': colormap_rn(
population_dict_rn[feature['properties']['gridId']]),
'weight': 0,
'fillOpacity': 0.6
},
highlight_function=lambda feature: {'weight': 3, 'color': 'black',
'fillOpacity': 1},
tooltip=folium.features.GeoJsonTooltip(fields=[field],
aliases=[field])
).add_to(m)
# format legend
field = field.replace("_", " ")
# add a legend
colormap_rn.caption = '{value} per grid'.format(value=field)
colormap_rn.add_to(m)
# add a layer control
folium.LayerControl().add_to(m)
return m
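# --- Usage sketch (illustrative; `points_gdf` is an assumed GeoDataFrame of track
# points with a numeric "Speed.value" column and a defined CRS; the grid size is
# in metres because aggregateByGrid reprojects to EPSG:3857) ---
# speed_grid = aggregateByGrid(points_gdf, "Speed.value", "mean", 500)
# grid_map = plotAggregate(speed_grid, "mean_Speed.value")
# grid_map.save("speed_grid.html")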
def spatioTemporalAggregation(df, field, summary, gridSize):
"""
Aggregates the given field on hour and weekday basis.
Prepares data for mosaic plot
Parameters
----------
df : geopandas dataframe
field : string
field to be summarized.
summary : string
type of summary to compute, e.g. min, max, sum, median
gridSize : float
the grid cell size in metres (the data are reprojected to EPSG:3857)
Returns
-------
geodataframes: one each for larger grid and other for subgrids
(for visualization purpose only)
Aggregated grids with summary on it
"""
def round_down(num, divisor):
return floor(num / divisor) * divisor
def round_up(num, divisor):
return ceil(num / divisor) * divisor
# Get crs from data
sourceCRS = df.crs
targetCRS = {'init': "epsg:3857"}
# Reproject to Mercator
df = df.to_crs(targetCRS)
# Get bounds
xmin, ymin, xmax, ymax = df.total_bounds
height, width = gridSize, gridSize
top, left = round_up(ymax, height), round_down(xmin, width)
bottom, right = round_down(ymin, height), round_up(xmax, width)
rows = int((top - bottom) / height)+1
cols = int((right - left) / width)+1
XleftOrigin = left
XrightOrigin = left + width
YtopOrigin = top
YbottomOrigin = top - height
polygons = []
for i in range(cols):
Ytop = YtopOrigin
Ybottom = YbottomOrigin
for j in range(rows):
polygons.append(Polygon(
[(XleftOrigin, Ytop), (XrightOrigin, Ytop),
(XrightOrigin, Ybottom), (XleftOrigin, Ybottom)]))
Ytop = Ytop - height
Ybottom = Ybottom - height
XleftOrigin = XleftOrigin + width
XrightOrigin = XrightOrigin + width
grid = gpd.GeoDataFrame({'geometry': polygons})
grid.crs = (targetCRS)
# Assign gridid
numGrid = len(grid)
grid['gridId'] = list(range(numGrid))
# Identify gridId for each point
df['hour'] = pd.to_datetime(df['time'], format='%Y-%m-%dT%H:%M:%S').dt.hour
df['weekday'] = pd.to_datetime(df['time'], format='%Y-%m-%dT%H:%M:%S').dt.dayofweek
points_identified = gpd.sjoin(df, grid, op='within')
# group points by gridId and aggregate the chosen field,
# store it as dataframe
# delete if field already exists
if field in grid.columns:
del grid[field]
# Aggregate by weekday, hour and grid
grouped = points_identified.groupby(
['gridId', 'weekday', 'hour']).agg({field: [summary]})
grouped = grouped.reset_index()
grouped.columns = grouped.columns.map("_".join)
modified_fieldname = field+"_"+summary
# Create Subgrids
subgrid, mainGrid, rowNum, columnNum, value = [], [], [], [], []
unikGrid = grouped['gridId_'].unique()
for currentGrid in unikGrid:
dataframe = grid[grid['gridId'] == currentGrid]
xmin, ymin, xmax, ymax = dataframe.total_bounds
xminn, xmaxx, yminn, ymaxx = xmin + \
(xmax-xmin)*0.05, xmax-(xmax-xmin)*0.05, ymin + \
(ymax-ymin)*0.05, ymax-(ymax-ymin)*0.05
rowOffset = (ymaxx-yminn)/24.0
colOffset = (xmaxx - xminn)/7.0
for i in range(7):
for j in range(24):
topy, bottomy, leftx, rightx = ymaxx-j*rowOffset, ymaxx - \
(j+1)*rowOffset, xminn+i * \
colOffset, xminn+(i+1)*colOffset
subgrid.append(
Polygon([(leftx, topy), (rightx, topy),
(rightx, bottomy), (leftx, bottomy)]))
mainGrid.append(currentGrid)
rowNum.append(j)
columnNum.append(i)
if len(grouped[(grouped['gridId_'] == currentGrid)
& (grouped['weekday_'] == i)
& (grouped['hour_'] == j)]) != 0:
this_value = grouped[
(grouped['gridId_'] == currentGrid)
& (grouped['weekday_'] == i)
& (grouped['hour_'] == j)].iloc[0][
modified_fieldname]
value.append(this_value)
else:
value.append(np.nan)
subgrid_gpd = gpd.GeoDataFrame({'geometry': subgrid})
subgrid_gpd.crs = targetCRS
# Reproject to Mercator
subgrid_gpd = subgrid_gpd.to_crs(sourceCRS)
subgrid_gpd['gridId'] = mainGrid
subgrid_gpd['Weekday'] = columnNum
subgrid_gpd['hour'] = rowNum
subgrid_gpd['gridId'] = subgrid_gpd.apply(lambda x: str(
x['gridId'])+"_"+str(x['Weekday'])+"_"+str(x['hour']), axis=1)
subgrid_gpd[modified_fieldname] = value
subgrid_gpd = subgrid_gpd.dropna()
grid = grid.to_crs(sourceCRS)
grid = grid[grid['gridId'].isin(unikGrid)]
return grid, subgrid_gpd
# final_subgrid=subgrid_gpd[subgrid_gpd['value'].notnull()]
# return final_subgrid
def MosaicPlot(mainGrid, grid, field):
"""
Performs spatio temporal aggregation of data on weekday and hour,
and prepares mosaicplot.
Parameters
----------
mainGrid :polygon geodataframe
The grid geodataframe with grid and aggregated data in a column.
Grid should have grid id or equivalent unique ids
grid: small subgrids, prepared for visualization purposes only;
each one represents an hour of a weekday
field : string
Fieldname with aggregated data
Returns
-------
m : folium map object
Folium map with openstreetmap as base.
"""
# Prepare for grid plotting using folium
grid.columns = [cols.replace('.', '_') for cols in grid.columns]
field = field.replace('.', '_')
# Convert grid id to string
grid['gridId'] = grid['gridId'].astype(str)
# Convert maingrid,subgrid to geojson and csv
mainGrid.to_file("mainGrids.geojson", driver='GeoJSON')
atts = pd.DataFrame(grid)
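# --- Usage sketch (illustrative; continues the grid workflow with the same assumed
# `points_gdf`, which must also carry a 'time' column formatted '%Y-%m-%dT%H:%M:%S'
# for the weekday/hour split; MosaicPlot is truncated in this excerpt, so the call
# below only shows the intended entry point) ---
# main_grid, sub_grid = spatioTemporalAggregation(points_gdf, "Speed.value", "mean", 3000)
# mosaic_map = MosaicPlot(main_grid, sub_grid, "Speed.value_mean")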
import pandas as pd
import argparse
import gspread
from gspread_dataframe import get_as_dataframe
"""This module is use to access,preporcess & summarize data
from google sheet. Three files (raw_data,clean_data &
summarized_data) are saved to the following directory:
./cow_disease_detection/data/
Example
-------
$ python pull_and_clean_data.py --average_by day --from_date 2021-12-31
Functions
--------
1. argument_parser
2. get_data
3. data_preprocessing
4. summarize_data
"""
def argument_parser():
"""This function captures the comandline arguments for clean_dataset module"""
# passing arguments to average dataset
parse = argparse.ArgumentParser(description="summarize the dataset by average")
# summary option
parse.add_argument(
"-a",
"--average_by",
type=str,
required=False,
default="hour",
dest="average_by",
help="Summarized the data by average",
choices=["month", "week", "day", "hour", "minute"],
)
#
parse.add_argument(
"-f",
"--from_date",
type=str,
required=False,
default="2021-01-01",
dest="pull_data_from_date",
help="To pull historical data from a specific date e.g 2021-12-31",
)
return parse.parse_args()
def get_data() -> pd.DataFrame:
"""This function pull data from google account
Saves the data to:
./cow_disease_detection/data/
as raw_data.csv
TODO: use secrets in the future to hide credentials
Parameters
----------
NULL
Returns
-------
pd.DataFrame
This function return a pandas dataset.
"""
file = "./cow_disease_detection/fetch_data/credentials.json"
key = "<KEY>"
# pull data
gc = gspread.service_account(filename=file)
sh = gc.open_by_key(key)
worksheet = sh.sheet1
df = get_as_dataframe(worksheet)
# save to disk
df.to_csv("./cow_disease_detection/data/raw_data.csv", index=False)
return df
# data preprocessing
def data_preprocessing(input_data: pd.DataFrame) -> pd.DataFrame:
"""This function does the following processing on the fetch data:
1. Remove unnecessary columns.
2. Drop records where any feature is blank.
3. Create a date_time column.
4. Generate more time features such as year,month,week,day,hour,minute.
Output is saved to:
./cow_disease_detection/data/
as clean_data.csv
Parameters
----------
input_data : pd.DataFrame
Raw data produced by the get_data() function.
Returns
-------
DataFrame
Returns a cleaned and preprocessed dataset.
"""
df = get_data() # input data.copy()
# remove 'unnamed' columns
df = df.loc[:, ~df.columns.str.contains("Unnamed:")]
# drop null records
df = df.dropna()
# concatenate date and time column
date_time = pd.to_datetime(df["date"] + " " + df["time"])
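# --- Usage sketch (illustrative; data_preprocessing continues beyond the excerpt
# above, adding the derived time features described in its docstring) ---
# args = argument_parser()
# raw = get_data()                 # pulls the sheet and writes raw_data.csv
# clean = data_preprocessing(raw)  # writes clean_data.csv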
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from Bio.SeqUtils.ProtParam import ProteinAnalysis
import numpy as np
import os
from datetime import datetime
def create_sequence_properties_dataframe(sequences):
print("---- Creating properties for the all data. This may take a few mins depending on data size ----")
params = ['sequence', 'aa_counts', 'aa_percentages', 'molecular_weight', 'aromaticity', 'instability_index',
'isoelectric_point', 'sec_struc', 'helix', 'turn', 'sheet', 'epsilon_prot', 'with_reduced_cysteines',
'with_disulfid_bridges', 'gravy', 'flexibility','net_charge_at_pH7point4', 'length']
seq_properties = pd.DataFrame(columns=params)
for seq in sequences.Sequence:
X = ProteinAnalysis(seq)
aa_counts = X.count_amino_acids()
aa_percentages = X.get_amino_acids_percent()
molecular_weight = X.molecular_weight()
aromaticity = X.aromaticity()
instability_index = X.instability_index()
isoelectric_point = X.isoelectric_point()
sec_struc = X.secondary_structure_fraction()
helix = sec_struc[0]
turn = sec_struc[1]
sheet = sec_struc[2]
epsilon_prot = X.molar_extinction_coefficient()
with_reduced_cysteines = epsilon_prot[0]
with_disulfid_bridges = epsilon_prot[1]
gravy = X.gravy() # hydrophobicity related
flexibility = X.flexibility()
# X.protein_scale()
net_charge_at_pH7point4 = X.charge_at_pH(7.4)
length = X.length
row = pd.DataFrame([[seq, aa_counts, aa_percentages, molecular_weight, aromaticity, instability_index,
isoelectric_point, sec_struc, helix, turn, sheet, epsilon_prot, with_reduced_cysteines,
with_disulfid_bridges, gravy, flexibility, net_charge_at_pH7point4, length]], columns=params)
seq_properties = seq_properties.append(row)
return seq_properties
# ---- Visualizations on seq properties ----
def create_distributions(data, properties, save=False, saving_dir="../reports/figures/distribution_" ):
kwargs = dict(hist_kws={'alpha': .7}, kde_kws={'linewidth': 4}, bins=200)
print("---- Plotting distribution ----")
for property in properties:
print(property)
plt.figure(figsize=(10, 7), dpi=80)
sns.displot(data, x=property, hue="activity")
plt.legend()
plt.title(property)
if save:
plt.savefig(saving_dir + '/distribution_' + property + ".png")
plt.show()
def create_box_plots(data, properties, save=False, saving_dir = "../reports/figures/paper/box_plot_"):
for property in properties:
print(property)
plt.figure(figsize=(10, 7), dpi=80)
ax = sns.boxenplot(x="activity", y=property, hue="activity", data=data)
plt.legend()
# plt.title(property)
if save:
plt.savefig(saving_dir + '/box_plot_' + property + ".png")
plt.show()
def create_iqr_hist(data, properties, save=False, saving_dir="../reports/figures/iqr_hist_"):
print("---- Plotting IQR histogram ----")
kwargs = dict(hist_kws={'alpha': .7}, kde_kws={'linewidth': 4}, bins=100)
for property in properties:
print(property)
plt.figure(figsize=(10, 7), dpi=80)
sns.displot(data, x=property, hue="activity")
Q1 = 999
Q3 = -999
for activity in data.activity:
subset = data[data["activity"] == activity]
Q1_subset = np.percentile(subset[property], 25, interpolation='midpoint')
Q3_subset = np.percentile(subset[property], 75, interpolation='midpoint')
if Q1_subset < Q1:
Q1 = Q1_subset
if Q3_subset > Q3:
Q3 = Q3_subset
plt.xlim([Q1, Q3])
plt.legend()
plt.title(property)
if save:
plt.savefig(saving_dir + '/iqr_hist_' + property + ".png")
plt.show()
def create_aa_propensity_boxplot(data, save=False, saving_dir="../reports/figures/aa_propensity_"):
print("---- Creating amino acid propensity plots ----")
for activity in data.activity.unique():
print(activity)
subset = data[data["activity"] == activity]
df = pd.DataFrame(subset["aa_percentages"].to_list())
plt.figure(figsize=(10, 7), dpi=80)
sns.boxplot(data=df * 100)
plt.ylabel("Amino Acid %")
plt.title("Amino acid propensity - " + activity)
if save:
plt.savefig(saving_dir + '/animo_acid_propensity_' + activity + ".png")
plt.show()
def create_density_plots(data, properties, save=False, saving_dir="../reports/figures/density_"):
"""
Very good explanation of density plots: https://towardsdatascience.com/histograms-and-density-plots-in-python-f6bda88f5ac0
"""
kwargs = dict(hist_kws={'alpha': .7}, kde_kws={'linewidth': 4}, bins=200)
print("---- Plotting distribution ----")
for property in properties:
print(property)
plt.figure(figsize=(10, 7), dpi=80)
sns.displot(data, x=property, hue="activity", kind = 'kde')
plt.legend()
plt.title(property)
if save:
plt.savefig(saving_dir + '/density_' + property + ".png")
plt.show()
pass
def create_properties_and_plots(csv_file_with_location_and_activity='src/features/metadata.csv', directory_to_save_properties_file_and_plots='reports/'):
"""
By default paths are from the root folder: antiviral_peptide_prediction
headers: path, activity
you can give absolute paths
All data should have a column with header Sequence. Does not matter if they have other columns too.
saving all plots by default
"""
save_plots = True
properties_to_plot = ['molecular_weight', 'aromaticity', 'instability_index', 'isoelectric_point', 'helix', 'turn', 'sheet', 'with_reduced_cysteines', 'with_disulfid_bridges', 'gravy', 'net_charge_at_pH7point4']
properties_for_box_plot = ['molecular_weight', 'aromaticity', 'instability_index', 'isoelectric_point', 'helix', 'turn', 'sheet', 'gravy', 'net_charge_at_pH7point4']
dt = datetime.now().__str__()
saving_dir = directory_to_save_properties_file_and_plots #+ dt
os.mkdir(saving_dir)
metadata = pd.read_csv(csv_file_with_location_and_activity)
activities = metadata.shape[0]
all_data = pd.DataFrame()
for row in range(activities):
path = metadata.iloc[row].path
activity = metadata.iloc[row].activity
sequences = pd.read_csv(path)
seq_properties = create_sequence_properties_dataframe(sequences)
seq_properties['activity'] = activity
all_data = all_data.append(seq_properties, ignore_index=True)
all_data.to_csv(saving_dir + '/properties_paperRun.csv')
create_box_plots(all_data, properties_for_box_plot, save_plots, saving_dir)
create_distributions(all_data, properties_to_plot, save_plots, saving_dir)
create_density_plots(all_data, properties_for_box_plot, save_plots, saving_dir)
create_iqr_hist(all_data, properties_to_plot, save_plots, saving_dir)
create_aa_propensity_boxplot(all_data, save_plots, saving_dir)
return
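# --- Illustration (hypothetical contents of the metadata CSV read above) ---
# path,activity
# data/antiviral.csv,antiviral
# data/non_antiviral.csv,non_antiviral
# Each referenced CSV only needs a "Sequence" column of amino-acid strings, e.g.:
# create_properties_and_plots("src/features/metadata.csv", "reports/run1/")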
def get_peptide_composition_each_seq(data_path, kmer):
sequences = pd.read_csv(data_path)
# seq_properties = pd.DataFrame(columns=params)
for seq in sequences.Sequence:
grams = []
for i in range(kmer):
grams.append(zip(*[iter(seq[i:])] * kmer))
str_ngrams = []
for ngrams in grams:
for ngram in ngrams:
str_ngrams.append("".join(ngram))
npeptide = pd.Series(str_ngrams).value_counts()
return npeptide.to_dict()
# row = pd.DataFrame([[seq, aa_counts, aa_percentages, ]], columns=params)
# seq_properties = seq_properties.append(row)
def get_peptide_composition_full_file(seq_list, kmer):
"""
This counts occurrences of each peptide k-mer in the full file. Multiple occurrences within the same peptide are also counted.
"""
str_ngrams = []
for seq in seq_list:
grams = []
for i in range(kmer):
grams.append(zip(*[iter(seq[i:])] * kmer))
for ngrams in grams:
for ngram in ngrams:
str_ngrams.append("".join(ngram))
npeptide = pd.Series(str_ngrams).value_counts()
# print(npeptide)
print(npeptide)
return npeptide
def get_peptide_composition_in_number_of_sequences(seq_list, kmer):
"""
This will return the number of sequences each peptide k-mer has occurred in.
"""
unique_kmers_in_peptide = []
for seq in seq_list:
temp=[]
grams = []
for i in range(kmer):
grams.append(zip(*[iter(seq[i:])] * kmer))  # all k-mers in the sequence have been made
for ngrams in grams:
for ngram in ngrams:
temp.append("".join(ngram)) #flattening it out
unique_kmers_in_peptide.append(list(set(temp)))
flattened = [val for sublist in unique_kmers_in_peptide for val in sublist]
npeptide = pd.Series(flattened)
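if __name__ == "__main__":
    # Toy demonstration (made-up sequences): get_peptide_composition_full_file
    # counts every k-mer occurrence across the whole list, whereas the
    # per-sequence variant above counts how many sequences contain each k-mer.
    demo_seqs = ["ACDACDKL", "ACDKLMNP"]
    print(get_peptide_composition_full_file(demo_seqs, 3))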
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
import pandas as pd, numpy as np, matplotlib.pyplot as plt
import glob, pywt, pyclustering
from mpl_toolkits.basemap import Basemap
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from pyclustering.cluster.elbow import elbow
import xarray as xr
class HyCluster:
def __init__(
self,
data,
projection=Basemap(projection="spstere", lon_0=180, boundinglat=-30),
scale=False,
):
self.data = data
self.projection = projection
self.scale = scale
self.feat = HyWave(data, projection=projection).fit(scale=scale)
def fit(self, kmax=50, method="KMeans", pyclus=True, scale=False):
labels = Trajclustering(self.feat).fit(kmax=kmax, pyclus=pyclus)
self.labels = pd.DataFrame(labels).T
return self.labels
def get_kmeans_cluster(self, n_clusters=4):
kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(self.feat)
labels = pd.Series(kmeans.labels_, index=self.feat.index)
self.labels = pd.DataFrame(labels).T
return self.labels
class HyWave:
def __init__(
self, data, projection=Basemap(projection="spstere", lon_0=180, boundinglat=-30)
):
self.data = data
self.m = projection
self.time = data.time.to_pandas()
def fit(self, scale=True):
ln, lt = self.m(
self.data.sel(geo="lon").values, self.data.sel(geo="lat").values
)
ff = pd.concat([self._wavelet_features(lt), self._wavelet_features(ln)])
ff.index = [
"latmin",
"lat25",
"lat50",
"lat75",
"latmax",
"lonmin",
"lon25",
"lon50",
"lon75",
"lonmax",
]
if scale:
ff = (ff - ff.min()) / (ff.max() - ff.min())
return ff.T
def _wavelet_features(self, data):
wv = pywt.dwt(data.T, "haar")[0]
wv = pd.DataFrame(wv, self.time).T.describe().iloc[3:]
return wv
class Trajclustering:
def __init__(self, data):
self.traj = data
def fit(self, kmax=50, pyclus=False):
n, wce, labels = self.get_kmeans_cluster(kmax, plot=False, pyclus=pyclus)
return labels
def _elbow_method(self, kmax=50):
wce = []
nums = np.arange(1, kmax)
for num in nums:
kmeans = KMeans(n_clusters=num, random_state=0).fit(self.traj)
wce.append(kmeans.inertia_)
x0, y0 = 0.0, wce[0]
x1, y1 = float(len(wce)), wce[-1]
elbows = []
for index_elbow in range(1, len(wce) - 1):
x, y = float(index_elbow), wce[index_elbow]
segment = abs((y0 - y1) * x + (x1 - x0) * y + (x0 * y1 - x1 * y0))
norm = np.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)
distance = segment / norm
elbows.append(distance)
n = nums[np.argmax(elbows) + 1]
return n, wce
def _optimal_cluster(self, kmax=50):
elbow_instance = elbow(self.traj.values, 1, kmax)
elbow_instance.process()
amount_clusters = elbow_instance.get_amount()
wce = elbow_instance.get_wce()
return amount_clusters, wce
def get_kmeans_cluster(self, kmax=50, plot=True, pyclus=False):
if pyclus:
n, wce = self._optimal_cluster(kmax=kmax)
else:
n, wce = self._elbow_method(kmax=kmax)
kmeans = KMeans(n_clusters=n, random_state=0).fit(self.traj)
labels = pd.Series(kmeans.labels_, index=self.traj.index)
return n, wce, labels
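# --- Usage sketch (illustrative; assumes `tracks` is an xarray.DataArray whose
# "geo" coordinate holds "lat"/"lon" and whose "time" coordinate is datetime-like,
# matching what HyWave.fit() selects, and that mpl_toolkits.basemap is available) ---
# hc = HyCluster(tracks)
# labels = hc.fit(kmax=30)                       # elbow search + KMeans labels
# labels4 = hc.get_kmeans_cluster(n_clusters=4)  # or fix the number of clusters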
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
_testing as tm,
bdate_range,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
_default_compressor = "blosc"
pytestmark = pytest.mark.single
def test_conv_read_write(setup_path):
with tm.ensure_clean() as path:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame({"A": range(5), "B": range(5)})
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
def test_long_strings(setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
        df = tm.makeDataFrame()
import os
import numpy
import pandas as pd
import scipy.stats as st
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs')
def summary_cost(int_details,ctrl_m,ctrl_f,trt_m,trt_f, text):
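    """
    Summarise cost-effectiveness results for one scenario.

    Combines sex-specific control and treatment model runs (weighted by male/female
    population shares), adds the discounted programme cost, and computes per-sample
    DALYs averted, incremental hypertension and CVD costs, net cost and the ICER;
    cs counts cost-saving samples and nq counts samples with negative DALY gains.
    Returns three lists: detailed (means, 95% CIs, annual and per-capita costs),
    manuscript (formatted strings for tables) and plot (values for plotting).
    """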
int_dwc = 1 / (1 + discount_rate) ** numpy.array(range(time_horizon))
int_c = numpy.array([[prog_cost] * time_horizon for i in range(1)])
int_cost = numpy.sum(numpy.dot(int_c, int_dwc))
female_pop = 188340000
male_pop = 196604000
pop = female_pop + male_pop
f_prop = female_pop / pop
m_prop = male_pop / pop
samples = ctrl_m.shape[0]
cs = 0
nq = 0
ic = [0.00 for i in range(samples)]
q_gained = [0.00 for i in range(samples)]
q_inc_percent = [0.00 for i in range(samples)]
htn_cost = [0.00 for i in range(samples)]
cvd_cost = [0.00 for i in range(samples)]
net_cost = [0.00 for i in range(samples)]
exp_inc_per = [0.00 for i in range(samples)]
for i in range(samples):
q_gained[i] = (((ctrl_m.loc[i, "Average DALYs"] - trt_m.loc[i, "Average DALYs"])* m_prop) + ((ctrl_f.loc[i, "Average DALYs"] - trt_f.loc[i, "Average DALYs"])* f_prop))
q_inc_percent[i] = q_gained[i] * 100/((ctrl_m.loc[i, "Average DALYs"] * m_prop) + (ctrl_f.loc[i, "Average DALYs"] *f_prop))
htn_cost[i] = int_cost + ((trt_m.loc[i, "Average HTN Cost"] - ctrl_m.loc[i, "Average HTN Cost"]) * m_prop) + ((trt_f.loc[i, "Average HTN Cost"] - ctrl_f.loc[i, "Average HTN Cost"]) * f_prop)
cvd_cost[i] = ((trt_m.loc[i, "Average CVD Cost"] - ctrl_m.loc[i, "Average CVD Cost"] + trt_m.loc[i, "Average Chronic Cost"] - ctrl_m.loc[i, "Average Chronic Cost"]) * m_prop) + ((trt_f.loc[i, "Average CVD Cost"] - ctrl_f.loc[i, "Average CVD Cost"] + trt_f.loc[i, "Average Chronic Cost"] - ctrl_f.loc[i, "Average Chronic Cost"]) * f_prop)
exp_inc_per[i] = (((trt_m.loc[i, "Average Cost"] - ctrl_m.loc[i, "Average Cost"] + int_cost) * m_prop) + ((trt_f.loc[i, "Average Cost"] - ctrl_f.loc[i, "Average Cost"] + int_cost) * f_prop)) * 100 / ((ctrl_m.loc[i, "Average Cost"] * m_prop ) + (ctrl_f.loc[i, "Average Cost"] * f_prop))
net_cost[i] = htn_cost[i] + cvd_cost[i]
ic[i] = net_cost[i] / q_gained[i]
if net_cost[i] < 0:
cs = cs + 1
if q_gained[i] < 0:
nq = nq + 1
budget_impact = numpy.mean(net_cost) * pop / time_horizon
htn_percap = numpy.mean(htn_cost) / time_horizon
cvd_percap = numpy.mean(cvd_cost) / time_horizon
htn_annual = numpy.mean(htn_cost) * pop / time_horizon
cvd_annual = numpy.mean(cvd_cost) * pop / time_horizon
cost_inc = numpy.mean(exp_inc_per)
ICER = numpy.mean(ic)
QALY = numpy.mean(q_inc_percent)
HTN = numpy.mean(htn_cost)
CVD = numpy.mean(cvd_cost)
icer_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(ic), scale=st.sem(ic))
qaly_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(q_inc_percent), scale=st.sem(q_inc_percent))
htn = st.t.interval(0.95, samples - 1, loc=numpy.mean(htn_cost), scale=st.sem(htn_cost))
cvd = st.t.interval(0.95, samples - 1, loc=numpy.mean(cvd_cost), scale=st.sem(cvd_cost))
cost_inc_95 = st.t.interval(0.95, samples - 1, loc=numpy.mean(exp_inc_per), scale=st.sem(exp_inc_per))
if budget_impact < 0:
m_icer = 'Cost Saving'
s_icer = 'CS'
else:
m_icer = numpy.mean(net_cost) / numpy.mean(q_gained)
s_icer = str(numpy.round(m_icer,1))
m_daly = str(numpy.round(QALY,3)) + "\n(" + str(numpy.round(qaly_95[0],3)) + " to " + str(numpy.round(qaly_95[1],3)) + ")"
m_htn = str(numpy.round(HTN,2)) + "\n(" + str(numpy.round(htn[0],2)) + " to " + str(numpy.round(htn[1],2)) + ")"
m_cvd = str(numpy.round(CVD,2)) + "\n(" + str(numpy.round(cvd[0],2)) + " to " + str(numpy.round(cvd[1],2)) + ")"
m_costinc = str(numpy.round(cost_inc, 2)) + "\n(" + str(numpy.round(cost_inc_95[0], 2)) + " to " + str(numpy.round(cost_inc_95[1], 2)) + ")"
m_budget = str(numpy.round(budget_impact,0)/1000)
err_cost = 1.96 * st.sem(exp_inc_per)
err_daly = 1.96 * st.sem(q_inc_percent)
str_icer = text + " (" + s_icer + ")"
detailed = [int_details[2], int_details[0], int_details[1], int_details[3], int_details[4], ICER, icer_95[0],icer_95[1], QALY, qaly_95[0], qaly_95[1], htn[0], htn[1], cvd[0], cvd[1], budget_impact, htn_annual, cvd_annual, htn_percap, cvd_percap, cs, nq]
manuscript = [int_details[2], int_details[0], int_details[1], int_details[3], int_details[4], m_icer, m_daly, m_costinc, m_htn, m_cvd, m_budget, cs]
plot = [text, str_icer, cost_inc, QALY, err_cost, err_daly]
return detailed, manuscript, plot
summary_output = []
appendix_output = []
plot_output = []
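# Illustrative helper (a sketch, not used by the original script): every analysis below
# rebuilds the same "Aspire_<sex>_Cov_..._Per_<...>.csv" string by hand, so a small
# function could centralise that pattern. The parameter meanings are inferred from the
# Cov/Comp/Pro/Ini/Per (and optional CF/RR/TH) tags and are an assumption.
def aspire_filename(sex, fname):
    # sex is "Male" or "Female"; fname is the 5- or 8-element parameter list used below
    name = ("Aspire_" + sex + "_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) +
            "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(fname[4]))
    if len(fname) > 5:
        name += "_CF_" + str(fname[5]) + "_RR_" + str(fname[6]) + "_TH_" + str(fname[7])
    return name + ".csv"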
'''Analysis 0: Baseline'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/15Aug_AWS3')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'Base Case')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 1: Doubled Medication Cost'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PSAFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6, 2, 0, 20]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8, 2, 0, 20]
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'2X Medication Cost')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 2: Increased Programmatic Cost'''
time_horizon = 20
prog_cost = 0.13*4
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/15Aug_AWS3')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'4X Programmatic Cost')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 3: 20% reduction in baseline CVD risk'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PSAFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6, 1, 0.2, 20]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8, 1, 0.2, 20]
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'Reduced Baseline Risk')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 4: NPCDCS Medication Protocol'''
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/15Aug_AWS3')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 0, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_m = pd.read_csv(file_name_m)
treatment_f = pd.read_csv(file_name_f)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'NPCDCS Treatment Guideline')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 5: Private Sector Cost'''
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PvtFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_m = pd.read_csv(file_name_m)
treatment_f = pd.read_csv(file_name_f)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'Private Sector')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 6: PubPvt Mix Cost'''
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PubPvtFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_m = pd.read_csv(file_name_m)
treatment_f = pd.read_csv(file_name_f)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'Public-Private Mix')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 7: 10-year Time Horizon'''
time_horizon = 10
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/10yFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6, 1, 0, 10]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8, 1, 0, 10]
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'10 year Horizon')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 8: 40-year Time Horizon'''
time_horizon = 40
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/40yFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6, 1, 0, 40]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8, 1, 0, 40]
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) +"_CF_"+ str(fname[5]) + "_RR_"+ str(fname[6]) + "_TH_"+ str(fname[7]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'40 year Horizon')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 9: Inclusion of Pill Disutility'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PillDisFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 1, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
res = summary_cost(fname, control_m, control_f, treatment_m, treatment_f,'Pill Disutility')
summary_output.append(res[0])
appendix_output.append(res[1])
plot_output.append(res[2])
'''Analysis 10: Inclusion of Pill Disutility'''
time_horizon = 20
prog_cost = 0.13
discount_rate = 0.03
os.chdir('/Users/jarvis/Dropbox/Apps/HypertensionOutputs/PillDisFinal')
fname = [0.4, 0.3, 0, 0.8, 0.6]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
control_m = pd.read_csv(file_name_m)
control_f = pd.read_csv(file_name_f)
fname = [0.7, 0.7, 0, 0.8, 0.8]
file_name_m = ("Aspire_Male_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
file_name_f = ("Aspire_Female_Cov_" + str(fname[0]) + "_Comp_" + str(fname[1]) + "_Pro_" + str(round(fname[2])) + "_Ini_" + str(fname[3]) + "_Per_" + str(
fname[4]) + ".csv")
treatment_f = pd.read_csv(file_name_f)
treatment_m = pd.read_csv(file_name_m)
#######################################################################################################
# AUTHOR : <EMAIL>
# AIM : Script to create cleaned parallel dataset from uncleaned parallel dataset.
# The input dataset must have only two column and c1 must be in english,
# c2 language code must be specified
# USAGE : python3 exec_jobid_datasetmaker.py -j "ALIGN-JOB-ID" -s "hi" -o "/home/downloads/output/"
#######################################################################################################
import sys
import argparse
import pandas as pd
import urllib.request, json
from parallelcleaner import parallelcleanerfn
msg = "Create a cleaned parallel dataset from an uncleaned two-column parallel dataset (column 1 English, column 2 in the given locale)."
parser = argparse.ArgumentParser(description = msg)
parser.add_argument("-i", "--input", help = "Input txt or Single column csv/tsv file")
parser.add_argument("-o", "--output", help = "Output txt file")
parser.add_argument("-s", "--secondlang", help = "second column language locale")
parser.add_argument("-e", "--encoding", help = "encoding type (utf8 or utf-16)")
args = parser.parse_args()
if args.input is None:
sys.exit("ERROR : input variable missing!")
if args.output is None:
sys.exit("ERROR : output variable missing!")
if args.secondlang is None:
sys.exit("ERROR : language locale missing!")
if args.encoding is None:
sys.exit("ERROR : encoding type missing!")
if args.output[-4:][0] != ".":
sys.exit("ERROR : check output extension")
print("Passed inputs : ")
print("----------------")
print("Input File : " + args.input)
print("Output File : " + args.output)
print("Lang Locale : " + args.secondlang)
print("Enc Type : " + args.encoding)
input_path = args.input
output_path = args.output
secondlang = args.secondlang
enctype = args.encoding
if(args.input[-3:]=="csv"):
seperator=','
else:
seperator='\t'
df1=pd.read_csv(input_path,encoding=enctype, sep=seperator,header=None,error_bad_lines=False,warn_bad_lines=True)
df = pd.DataFrame()
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def postgres_url() -> str:
conn = os.environ["POSTGRES_URL"]
return conn
@pytest.fixture(scope="module") # type: ignore
def postgres_url_tls() -> str:
conn = os.environ["POSTGRES_URL_TLS"]
return conn
@pytest.fixture(scope="module") # type: ignore
def postgres_rootcert() -> str:
cert = os.environ["POSTGRES_ROOTCERT"]
return cert
@pytest.mark.xfail
def test_on_non_select(postgres_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(postgres_url, query)
def test_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_float) FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(postgres_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(postgres_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(postgres_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(postgres_url: str) -> None:
query = "select MAX(test_int), MIN(test_int) from test_table"
df = read_sql(postgres_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(postgres_url: str) -> None:
query = "select increment(test_int) as test_int from test_table ORDER BY test_int"
df = read_sql(postgres_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
},
)
df = df.sort_values("test_int").reset_index(drop=True)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(postgres_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(postgres_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 0, 2, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, 5, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "a", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 3.1, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, None, False, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_without_partition(postgres_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": | pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64") | pandas.Series |
import logging as logger
import re
import regex
import unicodedata
from abc import abstractmethod
from collections import defaultdict
import pandas as pd
import nltk
# noinspection PyPackageRequirements
from iso639 import languages
from langdetect import detect, DetectorFactory
from nltk.corpus import stopwords
# noinspection PyPackageRequirements
from spellchecker import SpellChecker
from textstat import textstat
from langdetect.lang_detect_exception import LangDetectException
from shift_detector.precalculations.precalculation import Precalculation
from shift_detector.precalculations.text_precalculation import TokenizeIntoLowerWordsPrecalculation
from shift_detector.utils import ucb_list
from shift_detector.utils.column_management import ColumnType
from shift_detector.utils.text_metadata_utils import most_common_n_to_string_frequency, \
most_common_n_to_string_alphabetically, delimiters
class GenericTextMetadata(Precalculation):
def __eq__(self, other):
return isinstance(other, self.__class__)
def __hash__(self):
return hash(self.__class__)
def __lt__(self, other):
return self.metadata_name() < other.metadata_name()
def __le__(self, other):
return self.metadata_name() <= other.metadata_name()
def __gt__(self, other):
return self.metadata_name() > other.metadata_name()
def __ge__(self, other):
return self.metadata_name() >= other.metadata_name()
@staticmethod
@abstractmethod
def metadata_name() -> str:
raise NotImplementedError
@abstractmethod
def metadata_return_type(self) -> ColumnType:
raise NotImplementedError
@abstractmethod
def metadata_function(self, text):
raise NotImplementedError
def process(self, store):
metadata1 = pd.DataFrame()
        metadata2 = pd.DataFrame()
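# Illustrative sketch (not part of the original shift_detector module): a minimal concrete
# subclass of GenericTextMetadata, assuming the abstract interface above. The class name,
# its metric and the ColumnType.numerical member used here are assumptions for the example.
class TextLengthMetadata(GenericTextMetadata):

    @staticmethod
    def metadata_name() -> str:
        return 'text_length'

    def metadata_return_type(self) -> ColumnType:
        return ColumnType.numerical  # assumed enum member

    def metadata_function(self, text):
        # number of characters in the raw text value
        return len(str(text))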
# Import libraries
import glob
import pandas as pd
import numpy as np
import pickle
import requests
import json
import fiona
import contextily as ctx
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import seaborn as sns
import geopandas as gpd
from shapely.geometry import Point, LineString, MultiPoint, Polygon
start_year,end_year = 2014,2018 # todo: read from user input
severity_color_dict = {'Fatal Crash':'black','A Injury Crash':'red','B Injury Crash':'orange', 'C Injury Crash':'green', 'No Injuries':'#4f94d4'}
time_sorter_dict = {'crashyear': np.arange(start_year,end_year+1),
'crashmonth': np.arange(1,13),
'dayofweekc': np.arange(1,8),
'crashhour': np.arange(0,25)}
time_label_dict = {'crashyear': 'Year',
'crashmonth': 'Month',
'dayofweekc': 'Day of week',
'crashhour': 'Hour'}
time_xticklabel_dict = {'crashyear': np.arange(start_year,end_year+1),
'crashmonth': ['Jan.','Feb.','Mar.','Apr.','May','Jun.','Jul.','Aug.','Sep.','Oct.','Nov.','Dec.'],
'dayofweekc': ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'],
'crashhour': np.arange(0,25)}
def crash_severity_mapping(SiteCrashes):
    # This function maps crashes at the site, color-coded by severity level
    fig, ax = plt.subplots(figsize = (15,10))
    # 150 square buffer around the intersection sets the map extent
    site_bounds = SiteCrashes.geometry.buffer(150, cap_style = 3).boundary.geometry.total_bounds
    xmin, ymin, xmax, ymax = site_bounds
    ax.set_xlim([xmin-50, xmax+50])
    ax.set_ylim([ymin-50, ymax+50])
    # Location_Rural_Intx.plot(ax=ax,color='blue')
    # SiteCrashes.plot(ax=ax,color='red')
    # Plot each severity level in its own color and build matching legend handles
    legend_elements = []
    for inj in pd.unique(SiteCrashes['crashinjur']):
        df = SiteCrashes[SiteCrashes['crashinjur']==inj]
        df.plot(ax=ax, c=severity_color_dict[inj], markersize=50)
        legend_elements.append(Line2D([0], [0], marker='o', color='w',
                                      markerfacecolor=severity_color_dict[inj],
                                      markersize=10, label=inj))
    # Add the basemap once, after all crash layers are drawn
    ctx.add_basemap(ax, crs = "EPSG:3435", source = ctx.providers.OpenStreetMap.Mapnik)
    ax.text(0.8, 0.92, str(SiteCrashes.shape[0]) + ' crashes in total', verticalalignment='center',
            horizontalalignment='left', transform=ax.transAxes, color='black', fontsize=10)
    ax.legend(handles=legend_elements, loc=[0.8,0.8], prop={'size':20})
def counts_by_index(col,sorter,SiteCrashes):
col_counts = pd.DataFrame(index = sorter)
for i in sorter: # padding for 0
if i in pd.unique(SiteCrashes[col].value_counts().index):
col_counts.loc[i,col] = SiteCrashes[col].value_counts()[i]
else:
col_counts.loc[i,col] = 0
return col_counts
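# Quick check (illustrative, hypothetical data): counts_by_index pads categories that have
# no crashes with zero so every value in `sorter` appears in the output.
_demo = pd.DataFrame({'crashhour': [1, 1, 3]})
_demo_counts = counts_by_index('crashhour', time_sorter_dict['crashhour'], _demo)
assert _demo_counts.loc[1, 'crashhour'] == 2 and _demo_counts.loc[0, 'crashhour'] == 0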
def counts_by_time(col,LocationName,start_year,end_year,SiteCrashes):
"""
This function plot the crash counts by a specific time dimension (col) and save the counts results in a csv file
col can be one of ['crashyear','crashmonth','dayofweekc','crashhour']
"""
SiteCrashes_target = SiteCrashes[(SiteCrashes['crashyear']>=start_year)&(SiteCrashes['crashyear']<=end_year)]
sorter = time_sorter_dict[col]
col_counts = counts_by_index(col,sorter,SiteCrashes_target)
fig, ax = plt.subplots(figsize = (8,6))
ax.bar(col_counts.index,col_counts[col].values)
plt.xlabel(time_label_dict[col], labelpad=20)
plt.ylabel('Number of Crashes', labelpad=20)
ax.yaxis.get_major_locator().set_params(integer=True)
ax.set_xticks(sorter)
ax.set_xticklabels(time_xticklabel_dict[col])
plt.title('Crash Counts by '+ time_label_dict[col]+' at the '+LocationName, y=1.02)
plt.savefig('./analysis_results/Crash Counts by '+time_label_dict[col]+'.png', dpi=600)
col_counts.to_csv('./analysis_results/Crash Counts by '+time_label_dict[col]+'.csv',header=['counts'])
def plot_counts_by_time_statistics(start_year,end_year,SiteCrashes):
"""
This function is a synthetic version of the counts_by_time function
This function plots the crash counts by 4 time dimensions (cols), namely ['crashyear', 'crashmonth', 'dayofweekc', 'crashhour']
and save the counts results in a separate csv file
"""
fig, axs = plt.subplots(2,2, figsize = (16,10))
SiteCrashes_target = SiteCrashes[(SiteCrashes['crashyear']>=start_year)&(SiteCrashes['crashyear']<=end_year)]
for axes,col in zip([(0,0),(0,1),(1,0),(1,1)],['crashyear','crashmonth','dayofweekc','crashhour']):
sorter = time_sorter_dict[col]
col_counts = counts_by_index(col,sorter,SiteCrashes_target)
axs[axes].bar(col_counts.index,col_counts[col].values)
axs[axes].set_ylabel('Number of Crashes', labelpad=10)
axs[axes].yaxis.get_major_locator().set_params(integer=True)
axs[axes].set_xticks(sorter)
axs[axes].set_xticklabels(time_xticklabel_dict[col])
axs[axes].set_title('Crash Counts by '+ time_label_dict[col], y=1.02)
col_counts.to_csv('./analysis_results/Crash Counts by '+time_label_dict[col]+'.csv',header=['counts'])
def counts_by_type(col,LocationName,start_year,end_year,SiteCrashes):
"""
This function plot the crash counts by collision type and save the counts results in a csv file
col: the column that describes the crash type in the dataset, 'typeoffirs'
"""
SiteCrashes_target = SiteCrashes[(SiteCrashes['crashyear']>=start_year)&(SiteCrashes['crashyear']<=end_year)]
sorter = SiteCrashes_target[col].value_counts().index
col_counts = counts_by_index(col,sorter,SiteCrashes_target)
fig, ax = plt.subplots(figsize = (8,6))
ax.bar(col_counts.index,col_counts[col].values)
plt.xlabel('Type', labelpad=20)
plt.ylabel('Number of Crashes', labelpad=20)
ax.yaxis.get_major_locator().set_params(integer=True)
ax.set_xticks(sorter)
ax.set_xticklabels(sorter, rotation = 45)
plt.title('Crash Counts by Crash Type at the '+LocationName, y=1.02)
plt.savefig('./analysis_results/Crash Counts by Crash Type.png', bbox_inches='tight', dpi=600)
col_counts.to_csv('./analysis_results/Crash Counts by Crash Type'+'.csv',header=['counts'])
def counts_by_type_time(col_time,col_main,LocationName,start_year,end_year,SiteCrashes):
"""
This function plot the crash counts by collision type and time, and save the counts results in a csv file
col_time: the column that describes the time dimension in the dataset, ['crashyear','crashmonth','dayofweekc','crashhour']
col_main: the column that describes the crash type in the dataset, 'typeoffirs'
"""
SiteCrashes_target = SiteCrashes[(SiteCrashes['crashyear']>=start_year)&(SiteCrashes['crashyear']<=end_year)]
df = pd.crosstab(SiteCrashes_target[col_time],SiteCrashes_target[col_main])
sorter = time_sorter_dict[col_time]
for i in sorter:
if i not in df.index:
df.loc[i,:] = 0
df = df.reindex(sorter)
df.plot.bar(stacked=True)
plt.ylabel('Number of Crashes', labelpad=20)
plt.title('Number of Crashes by Crash Type and ' + time_label_dict[col_time]+' at the \n'+LocationName, y=1.05)
plt.xticks(rotation=0)
    plt.legend(bbox_to_anchor=(1, 0.8))
plt.savefig('./analysis_results/Number of Crashes by Crash Type and '+time_label_dict[col_time]+'.png', bbox_inches='tight', dpi=1000)
df.to_csv('./analysis_results/Number of Crashes by Crash Type and '+time_label_dict[col_time]+'.csv')
def plot_type_time_statistics(col_main,LocationName,start_year,end_year,SiteCrashes):
"""
This function is a synthetic version of the counts_by_type_time function
This function plots the crash counts by crash type and one of the 4 time dimensions (cols), namely ['crashyear', 'crashmonth', 'dayofweekc', 'crashhour'], and save the counts results in a separate csv file
"""
fig, axs = plt.subplots(2,2, figsize = (16,10))
SiteCrashes_target = SiteCrashes[(SiteCrashes['crashyear']>=start_year)&(SiteCrashes['crashyear']<=end_year)]
for axes,col_time in zip(axs.flat,['crashyear','crashmonth','dayofweekc','crashhour']):
df = pd.crosstab(SiteCrashes_target[col_time],SiteCrashes_target[col_main])
sorter = time_sorter_dict[col_time]
for i in sorter:
if i not in df.index:
df.loc[i,:] = 0
df = df.reindex(sorter)
df.plot.bar(stacked=True,legend=None,ax=axes)
df.to_csv('./analysis_results/Crash Counts by Crash Type and '+time_label_dict[col_time]+'.csv')
axes.set_xlabel(time_label_dict[col_time])
handles, labels = axs.flat[0].get_legend_handles_labels()
fig.legend(handles, labels,loc='upper center', bbox_to_anchor=(0.5, 0.05),
fancybox=True, shadow=True, ncol=len(pd.unique(SiteCrashes_target[col_main])))
fig.suptitle('Crash Counts by Crash Type and Time at the \n'+LocationName,y=0.95,fontsize=15)
plt.savefig('./analysis_results/Number of Crashes by Crash Type and Time.png', dpi=1000)
def counts_by_time_type(col_time,col_main,LocationName,start_year,end_year,SiteCrashes):
"""
This function plot the crash counts by time and collision type using heat map, and save the counts results in a csv file
col_time: the column that describes the time dimension in the dataset, ['crashyear','crashmonth','dayofweekc','crashhour']
col_main: the column that describes the crash type in the dataset, 'typeoffirs'
"""
time = time_sorter_dict[col_time]
SiteCrashes_target = SiteCrashes[(SiteCrashes['crashyear']>=start_year)&(SiteCrashes['crashyear']<=end_year)]
crashtype = pd.unique(SiteCrashes_target[col_main])
    crashtype_count_time = pd.crosstab(SiteCrashes_target[col_time],SiteCrashes_target[col_main])
## Padding 0 counts
if len(list(set(time) - set(crashtype_count_time.index)))>0:
for t in list(set(time) - set(crashtype_count_time.index)):
crashtype_count_time.loc[t,:] = 0
## Re-organize indices
crashtype_count_time = crashtype_count_time.reindex(time)
## Re-organize by crash counts
crashtype_count_time = crashtype_count_time[list(crashtype_count_time.sum().sort_values(ascending=False).index)]
crashtype_count_time.to_csv('./analysis_results/Crash Counts by '+time_label_dict[col_time]+' and Crash Type.csv')
fig = plt.subplots(figsize = (10,6))
ticks=np.arange(crashtype_count_time.values.min(),crashtype_count_time.values.max()+1 )
ranges = np.arange(crashtype_count_time.values.min()-0.5,crashtype_count_time.values.max()+1.5 )
cmap = plt.get_cmap("Reds", crashtype_count_time.values.max()-crashtype_count_time.values.min()+1)
ax = sns.heatmap(crashtype_count_time.T, annot=True, linewidths=0.4, cmap=cmap,
cbar_kws={"ticks":ticks, "boundaries":ranges,'label': 'Crash Counts'})
plt.xticks(rotation=0)
plt.yticks(rotation=0)
plt.xlabel(col_time)
plt.ylabel(' ')
plt.title('Crash counts by ' + time_label_dict[col_time]+' and Crash Type at the \n'+LocationName, y=1.05)
plt.savefig('./analysis_results/Crash Type and '+col_time+'.png', bbox_inches='tight', dpi=600)
def plot_time_type_statistics(col_main,LocationName,start_year,end_year,SiteCrashes):
"""
This function is a synthetic version of the counts_by_time_type function
This function plots the crash counts by crash type and one of the 4 time dimensions (cols), namely ['crashyear', 'crashmonth', 'dayofweekc', 'crashhour'], and save the counts results in a separate csv file
"""
fig, axs = plt.subplots(2,2, figsize = (20,12))
SiteCrashes_target = SiteCrashes[(SiteCrashes['crashyear']>=start_year)&(SiteCrashes['crashyear']<=end_year)]
for axes,col_time in zip(axs.flat,['crashyear','crashmonth','dayofweekc','crashhour']):
time = time_sorter_dict[col_time]
        crashtype = pd.unique(SiteCrashes_target[col_main])
"""Module to test bowline.utils."""
import pandas as pd
import pytest
from bowline.utils import detect_series_type
@pytest.mark.parametrize(
"input_series, expected",
[
        (pd.Series([0, 1, 1, 0]),
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# _get_numeric_data is includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
        # numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
# (currently there is an a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
# Check for stability when receives seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4) == 4))
self.assertTrue(len(o.sample(frac=0.34) == 3))
self.assertTrue(len(o.sample(frac=0.36) == 4))
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have empty values to be filed with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import pytesseract
import platform
import pandas as pd
class TextDetect():
def __init__(self, path_cmd):
if platform.system() == 'Windows':
pytesseract.pytesseract.tesseract_cmd = path_cmd
def get_data(self, image, join=True):
"""
:param image: image for text detection
        :param join: default True: pass the parsed data to join_words
:return: list of dictionaries with information extracted by tesseract
"""
boxes = pytesseract.image_to_data(image, lang='por')
rows = boxes.splitlines()
columns = rows[0].split()
to_int = columns[:-1]
data = []
for r in rows[1:]:
info = r.split()
if len(info) < 12:
continue
data.append(dict(zip(columns, info)))
for k in to_int:
data[-1][k] = int(data[-1][k])
if join:
return self.join_words( | pd.DataFrame(data) | pandas.DataFrame |
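# --- Hedged sketch (not part of the original TextDetect class): join_words is referenced
# --- above but not shown, so this stand-in illustrates one plausible way to group the
# --- word-level dictionaries returned by get_data() into line-level strings, using the
# --- block/paragraph/line numbers that pytesseract's image_to_data output provides.
import pandas as pd

def join_words_sketch(data):
    """Group word-level tesseract rows into one text string per detected line."""
    df = pd.DataFrame(data)
    df = df[df['text'].astype(str).str.strip() != '']   # drop empty tokens
    keys = ['block_num', 'par_num', 'line_num']          # tesseract line identifiers
    return (df.groupby(keys)['text']
              .apply(lambda words: ' '.join(str(w) for w in words))
              .reset_index(name='line_text'))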
import xgboost as xgb
import pandas as pd
import math
def predict_xgb(df_in):
df = df_in.copy()
cols_input = ['Mz', 'Sk', 'Ku', 'Sigma']
dinput = xgb.DMatrix(df[cols_input])
bst = xgb.Booster()
bst.load_model('model/xgb_2.model')
ypred = bst.predict(dinput)
df['code_ng_pred'] = | pd.Series(ypred, index=df.index, dtype='int') | pandas.Series |
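# --- Hedged usage sketch: assumes the truncated function above finishes by returning df
# --- and that 'model/xgb_2.model' exists on disk; neither is shown in this snippet.
if __name__ == '__main__':
    sample = pd.DataFrame({'Mz': [1.2, 0.8], 'Sk': [0.1, -0.3],
                           'Ku': [3.0, 2.7], 'Sigma': [0.5, 0.4]})
    scored = predict_xgb(sample)   # expected to add the 'code_ng_pred' class column
    print(scored)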
# -*- coding: utf-8 -*-
import os
import pandas as pd
##### DEPRECATED? ######
# !!! STILL VERY TIME INEFFICIENT. WORKS FOR NOW BUT NEEDS REWORK LATER ON !!!
def transform_to_longitudinal(df, feats, pat_col, time_col, save_folder):
"""
Transforms a long format (each visit of patient stored in one row) dataframe feature into a longitudinal format
dataframe. The values in time column will give the column names while one row will store all the consecutive
visits of a patient.
:param df: The pandas dataframe storing the features in long format
:param feats: A list of features for which longitudinal dataframes shall be constructed
:param pat_col: The column name listing the patient IDs
:param time_col: The column name listing the time events (visits, months...)
:param save_folder: A folder in which the longitudinal dataframes shall be saved.
:return:
"""
# create dataframe in which longitudinal data is stored
long_df = | pd.DataFrame() | pandas.DataFrame |
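# --- Hedged alternative sketch: the loop-based reshape hinted at above can usually be
# --- replaced by a single pivot, which addresses the "time inefficient" note. Column
# --- names follow the docstring arguments; the aggfunc choice is an assumption.
def transform_to_longitudinal_pivot(df, feat, pat_col, time_col):
    """Return one row per patient and one column per time point for a single feature."""
    wide = df.pivot_table(index=pat_col, columns=time_col, values=feat, aggfunc='first')
    return wide.reset_index()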
import queue
import logging
import datetime
import pandas as pd
from koapy.grpc import KiwoomOpenApiService_pb2
from koapy.grpc.KiwoomOpenApiServiceClientSideDynamicCallable import KiwoomOpenApiServiceClientSideDynamicCallable
from koapy.grpc.KiwoomOpenApiServiceClientSideSignalConnector import KiwoomOpenApiServiceClientSideSignalConnector
from koapy.pyqt5.KiwoomOpenApiControlWrapper import KiwoomOpenApiControlCommonWrapper
from koapy.openapi.RealType import RealType
from koapy.openapi.KiwoomOpenApiError import KiwoomOpenApiError
from koapy.grpc.event.KiwoomOpenApiEventHandlerFunctions import KiwoomOpenApiEventHandlerFunctions
class KiwoomOpenApiServiceClientStubCoreWrapper(KiwoomOpenApiControlCommonWrapper):
def __init__(self, stub):
super().__init__()
self._stub = stub
def __getattr__(self, name):
try:
return getattr(self._stub, name)
except AttributeError:
if name.startswith('On') and name in dir(KiwoomOpenApiEventHandlerFunctions):
return KiwoomOpenApiServiceClientSideSignalConnector(self._stub, name)
else:
return KiwoomOpenApiServiceClientSideDynamicCallable(self._stub, name)
def Call(self, name, *args):
return KiwoomOpenApiServiceClientSideDynamicCallable(self._stub, name)(*args)
def LoginCall(self):
request = KiwoomOpenApiService_pb2.LoginRequest()
for response in self._stub.LoginCall(request):
errcode = response.arguments[0].long_value
return errcode
def TransactionCall(self, rqname, trcode, scrnno, inputs, stop_condition=None):
request = KiwoomOpenApiService_pb2.TransactionRequest()
request.request_name = rqname
request.transaction_code = trcode
request.screen_no = scrnno or ''
for k, v in inputs.items():
request.inputs[k] = v # pylint: disable=no-member
if stop_condition:
request.stop_condition.name = stop_condition.get('name', '') # pylint: disable=no-member
request.stop_condition.value = str(stop_condition.get('value', '')) # pylint: disable=no-member
request.stop_condition.comparator = { # pylint: disable=no-member
'<=': KiwoomOpenApiService_pb2.TransactionStopConditionCompartor.LESS_THAN_OR_EQUAL_TO,
'<': KiwoomOpenApiService_pb2.TransactionStopConditionCompartor.LESS_THAN,
'>=': KiwoomOpenApiService_pb2.TransactionStopConditionCompartor.GREATER_THAN_OR_EQUAL_TO,
'>': KiwoomOpenApiService_pb2.TransactionStopConditionCompartor.GREATER_THAN,
'==': KiwoomOpenApiService_pb2.TransactionStopConditionCompartor.EQUAL_TO,
'!=': KiwoomOpenApiService_pb2.TransactionStopConditionCompartor.NOT_EQUAL_TO,
}.get(stop_condition.get('comparator', '<='))
request.stop_condition.include_equal = stop_condition.get('include_equal', False) # pylint: disable=no-member
return self._stub.TransactionCall(request)
def OrderCall(self, rqname, scrnno, account, order_type, code, quantity, price, quote_type, original_order_no=None):
"""
        [Quote type / 거래구분]
        In simulated (paper) trading, only limit and market orders are available.
        00 : Limit
        03 : Market
        05 : Conditional limit
        06 : Best-price limit
        07 : Top-priority limit
        10 : Limit IOC
        13 : Market IOC
        16 : Best-price IOC
        20 : Limit FOK
        23 : Market FOK
        26 : Best-price FOK
        61 : Pre-market off-hours closing price
        62 : After-hours single-price trading
        81 : Post-market off-hours closing price
        [Order type / 주문유형]
        1: new buy, 2: new sell, 3: cancel buy, 4: cancel sell, 5: modify buy, 6: modify sell
        (See the usage sketch below this method.)
"""
request = KiwoomOpenApiService_pb2.OrderRequest()
request.request_name = rqname
request.screen_no = scrnno or ''
request.account_no = account
request.order_type = int(order_type) if order_type else 0
request.code = code
request.quantity = int(quantity) if quantity else 0
request.price = int(price) if price else 0
request.quote_type = quote_type
request.original_order_no = '' if original_order_no is None else original_order_no
return self._stub.OrderCall(request)
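    # Hedged usage sketch (not from the original koapy sources): a new limit buy of
    # 10 shares of Samsung Electronics (005930) at 50,000 KRW might look like
    #   stub.OrderCall('order_req', '0101', account_no, 1, '005930', 10, 50000, '00')
    # where order_type=1 means "new buy" and quote_type '00' means "limit order".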
def RealCall(self, scrnno, codes, fids, realtype=None, infer_fids=False, readable_names=False, fast_parse=False):
request = KiwoomOpenApiService_pb2.RealRequest()
if scrnno is None:
scrnnos = []
elif isinstance(scrnno, str):
scrnnos = [scrnno]
else:
scrnnos = scrnno
fids = [int(fid) for fid in fids]
if realtype is None:
realtype = '0'
request.screen_no.extend(scrnnos) # pylint: disable=no-member
request.code_list.extend(codes) # pylint: disable=no-member
request.fid_list.extend(fids) # pylint: disable=no-member
request.real_type = realtype
request.flags.infer_fids = infer_fids # pylint: disable=no-member
request.flags.readable_names = readable_names # pylint: disable=no-member
request.flags.fast_parse = fast_parse # pylint: disable=no-member
return self._stub.RealCall(request)
def LoadConditionCall(self):
request = KiwoomOpenApiService_pb2.LoadConditionRequest()
for response in self._stub.LoadConditionCall(request):
ret = response.arguments[0].long_value
msg = response.arguments[1].string_value
return (ret, msg)
def ConditionCall(self, scrnno, condition_name, condition_index, search_type, with_info=False, is_future_option=False, request_name=None):
        request = KiwoomOpenApiService_pb2.ConditionRequest()
request.screen_no = scrnno or ''
request.condition_name = condition_name
request.condition_index = condition_index
request.search_type = search_type
request.flags.with_info = with_info # pylint: disable=no-member
request.flags.is_future_option = is_future_option # pylint: disable=no-member
if request_name is not None:
request.request_name = request_name
return self._stub.ConditionCall(request)
def SetLogLevel(self, level, logger=''):
request = KiwoomOpenApiService_pb2.SetLogLevelRequest()
request.level = level
request.logger = logger
return self._stub.SetLogLevel(request)
def _EnsureConnectedUsingSignalConnector(self):
errcode = 0
if self.GetConnectState() == 0:
q = queue.Queue()
def OnEventConnect(errcode):
q.put(errcode)
self.OnEventConnect.disconnect(OnEventConnect)
self.OnEventConnect.connect(OnEventConnect)
errcode = KiwoomOpenApiError.try_or_raise(self.CommConnect())
errcode = KiwoomOpenApiError.try_or_raise(q.get())
return errcode
def _EnsureConnectedUsingLoginCall(self):
errcode = 0
if self.GetConnectState() == 0:
errcode = self.LoginCall()
return errcode
def _EnsureConnectedUsingCall(self):
return self.Call('EnsureConnected')
def EnsureConnected(self):
return self._EnsureConnectedUsingCall()
def _ConnectUsingCall(self):
return self.Call('Connect')
def Connect(self):
return self._ConnectUsingCall()
def _LoadConditionUsingCall(self):
return self.Call('LoadCondition')
def LoadCondition(self):
return self._LoadConditionUsingCall()
def _EnsureConditionLoadedUsingCall(self, force=False):
return self.Call('EnsureConditionLoaded', force)
def EnsureConditionLoaded(self, force=False):
return self._EnsureConditionLoadedUsingCall(force)
def _RateLimitedCommRqDataUsingCall(self, rqname, trcode, prevnext, scrnno, inputs=None):
return self.Call('RateLimitedCommRqData', rqname, trcode, prevnext, scrnno, inputs)
def RateLimitedCommRqData(self, rqname, trcode, prevnext, scrnno, inputs=None):
self._RateLimitedCommRqDataUsingCall(rqname, trcode, prevnext, scrnno, inputs)
def _RateLimitedSendConditionUsingCall(self, scrnno, condition_name, condition_index, search_type):
return self.Call('RateLimitedSendCondition', scrnno, condition_name, condition_index, search_type)
def RateLimitedSendCondition(self, scrnno, condition_name, condition_index, search_type):
return self._RateLimitedSendConditionUsingCall(scrnno, condition_name, condition_index, search_type)
class KiwoomOpenApiServiceClientStubWrapper(KiwoomOpenApiServiceClientStubCoreWrapper):
def _ParseTransactionCallResponses(self, responses, remove_zeros_width=None):
single_output = None
columns = []
records = []
for response in responses:
if single_output is None:
single_output = dict(zip(
response.single_data.names,
self._RemoveLeadingZerosForNumbersInValues(response.single_data.values, remove_zeros_width)))
if not columns:
columns = response.multi_data.names
for values in response.multi_data.values:
records.append(self._RemoveLeadingZerosForNumbersInValues(values.values, remove_zeros_width))
single = | pd.Series(single_output) | pandas.Series |
import MDAnalysis
import MDAnalysis.analysis.hbonds
import pandas as pd
import numpy as np
import os
from collections import defaultdict
import networkx as nx
import matplotlib.pyplot as plt
import sys
import logging
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger()
#logger.addHandler(logging.FileHandler('test.log', 'a'))
print = logger.info
sys.setrecursionlimit(1000)
print(sys.getrecursionlimit())
class HB_MD:
def __init__(self, frame):
self.direct_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.one_water_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.two_water_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.three_water_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.four_water_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.hb_analysis(frame, self.direct_connection, self.one_water_connection, self.two_water_connection, self.three_water_connection, self.four_water_connection)
return
def addEdge(self, graph,u,v):
graph[u].append(v)
def generate_edges(self, graph):
edges = []
for node in graph:
for neighbour in graph[node]:
edges.append((node, neighbour))
return edges
def find_path(self, graph, start, end, path =[]):
path = path + [start]
if start == end:
return path
for node in graph[start]:
if node not in path:
                newpath = self.find_path(graph, node, end, path)
if newpath:
return newpath
return None
def find_all_path(self, graph, start, path, paths):
if len(path) == 6:
return paths.append(list(path))
if len(graph[start]) == 0:
return paths.append(list(path))
for node in graph[start]:
if node in path:
continue
path.append(node)
self.find_all_path(graph, node, path, paths)
path.pop()
def get_chain(self, frame, chain):
i = 0
pdb = open(frame, 'r')
#os.system('sed -i "s/1H / H1/" hoh.pdb')
for line in pdb:
#line.replace('HOH', 'TIP3')
if line[0:4] != 'ATOM':
continue
chain[i] = line[21:22]
i += 1
return
def MDtraj(self, pdb):
#print('Getting coordinate')
h3 = MDAnalysis.analysis.hbonds.HydrogenBondAnalysis(pdb, 'not resname ALA and not resname GLN and not resname GLY and not resname ILE and not resname LEU and not resname PHE and not resname PRO and not resname VAL',
'not resname ALA and not resname GLN and not resname GLY and not resname ILE and not resname LEU and not resname PHE and not resname PRO and not resname VAL', distance=3.5, angle=90.0, acceptors = {'O1', 'O2'})
#print('Analyzing')
h3.run()
#print('Generating table')
h3.generate_table()
#print('Generating form')
df3 = pd.DataFrame.from_records(h3.table)
return df3
def get_all_connection(self, df3, chain, index_donor, index_accept):
for index2, row2 in df3.iterrows():
if row2['donor_resnm'] == 'TIP3'and row2['acceptor_resnm'] != 'TIP3':
if row2['donor_atom'] == 'H1':
index_donor.append(row2['donor_resnm'] + '_' + str(row2['donor_index']-1))
index_accept.append(row2['acceptor_resnm'] + '_' + chain[row2['acceptor_index']] + '_' + str(row2['acceptor_resid']))
if row2['donor_atom'] == 'H2':
index_donor.append(row2['donor_resnm'] + '_' + str(row2['donor_index']-2))
index_accept.append(row2['acceptor_resnm'] + '_' + chain[row2['acceptor_index']] + '_' + str(row2['acceptor_resid']))
elif row2['acceptor_resnm'] == 'TIP3' and row2['donor_resnm'] != 'TIP3':
index_accept.append(row2['acceptor_resnm'] + '_' + str(row2['acceptor_index']))
index_donor.append(row2['donor_resnm'] + '_' + chain[row2['donor_index']] + '_' + str(row2['donor_resid']))
elif row2['acceptor_resnm'] == 'TIP3' and row2['donor_resnm'] == 'TIP3':
if row2['donor_atom'] == 'H1':
index_donor.append(row2['donor_resnm'] + '_' + str(row2['donor_index']-1))
index_accept.append(row2['acceptor_resnm'] + '_' + str(row2['acceptor_index']))
if row2['donor_atom'] == 'H2':
index_donor.append(row2['donor_resnm'] + '_' + str(row2['donor_index']-2))
index_accept.append(row2['acceptor_resnm'] + '_' + str(row2['acceptor_index']))
else:
index_donor.append(row2['donor_resnm'] + '_' + chain[row2['donor_index']] + '_' + str(row2['donor_resid']))
index_accept.append(row2['acceptor_resnm'] + '_' + chain[row2['acceptor_index']] + '_' + str(row2['acceptor_resid']))
return
def divide_networks(self, hb_two, donor_residue, acceptor_residue, donor_residue2, acceptor_residue2):
#print('Divide networks')
for row in range(len(hb_two)):
if hb_two['donor_residue'][row][0:3] != 'TIP' and hb_two['acceptor_residue'][row][0:3] != 'TIP':
if hb_two['donor_residue'][row] == hb_two['acceptor_residue'][row]:
continue
else:
donor_residue.append(hb_two['donor_residue'][row])
acceptor_residue.append(hb_two['acceptor_residue'][row])
else:
if hb_two['donor_residue'][row] == hb_two['acceptor_residue'][row]:
continue
else:
donor_residue2.append(hb_two['donor_residue'][row])
acceptor_residue2.append(hb_two['acceptor_residue'][row])
return
def count_water_num(self, path, donor, accept, wat_num):
#print('Count number of water in paths')
for item in path:
donor_column = [item[0]]
accpt_column = []
count = 0
for r in range(1, len(item)):
if item[r][0:3] != 'TIP':
donor_column.append(item[r])
accpt_column.append(item[r])
wat_num.append(count)
count = 0
else:
count += 1
if len(donor_column) > len(accpt_column):
donor_column.pop()
else:
accpt_column.pop()
donor.extend(donor_column)
accept.extend(accpt_column)
return
#c = u.select_atoms("protein and prop z > 85 or around 3.0 protein and prop z > 85 ")
#c.write('/Users/zhangyingying/Dropbox (City College)/Yingying/large_file/new_trajectories_PSII_wt/cut_frame32_50_test.pdb')
def hb_analysis(self, frame, direct_connection, one_water_connection, two_water_connection, three_water_connection, four_water_connection):
chain = {}
graph = defaultdict(list)
pdb = MDAnalysis.Universe(frame)
self.get_chain(frame, chain)
df3 = self.MDtraj(pdb)
index_donor = []
index_accept = []
self.get_all_connection(df3, chain, index_donor, index_accept)
df3['donor_residue'] = index_donor
df3['acceptor_residue'] = index_accept
dic_hdonnor = {'ASP':['HD1', 'HD2'], 'ARG': ['HH11', 'HH12', 'HH21', 'HH22', 'HE'], 'GLU':['HE1', 'HE2'], 'HIS':['HD1', 'HE2'], 'HSD':['HD1', 'HE2'], 'HSE':['HD1', 'HE2'], 'HSP':['HD1', 'HE2'],
'SER':['HG'], 'THR':['HG1'], 'ASN':['HD21', 'HD22'], 'GLN':['HE21', 'HE22'], 'CYS':['HG'], 'TYR':['HH'], 'TRP':['HE1'], 'LYS':['HZ1', 'HZ2', 'HZ3'], 'TIP3':['H1', 'H2'], 'HOH':['1H', '2H']}
dic_accept = {'ASP':['OD1', 'OD2'], 'HCO': ['OC1', 'OC2'], 'ARG': ['NE', 'NH1', 'NH2'], 'GLU':['OE1', 'OE2'], 'HSD':['ND1', 'NE2'], 'HSE':['ND1', 'NE2'], 'HSP':['ND1', 'NE2'], 'HIS':['ND1', 'NE2'],
'SER':['OG'], 'THR':['OG1'], 'ASN':['OD1'], 'GLN':['OE1'], 'CYS':['SG'], 'TYR':['OH'], 'LYS':['NZ'], 'MET':['SD'], 'CLX':['CLX'], 'CLA':['CLA'], 'OX2':['OX2'], 'PL9':['O1', 'O2'], 'FX':['FX'], 'TIP3':['OH2'], 'HOH':['O'], 'MQ8':['O1', 'O2']}
donor_residue_pick = []
acceptor_residue_pick = []
for index, row in df3.iterrows():
if row['donor_resnm'] in dic_hdonnor.keys() and row['acceptor_resnm'] in dic_accept.keys():
if row['donor_atom'] in dic_hdonnor[row['donor_resnm']] and row['acceptor_atom'] in dic_accept[row['acceptor_resnm']]:
donor_residue_pick.append(row['donor_residue'])
acceptor_residue_pick.append(row['acceptor_residue'])
else:
continue
hb_two = pd.DataFrame({'donor_residue':donor_residue_pick, 'acceptor_residue':acceptor_residue_pick})
donor_residue = []
acceptor_residue = []
donor_residue2 = []
acceptor_residue2 = []
self.divide_networks(hb_two, donor_residue, acceptor_residue, donor_residue2, acceptor_residue2)
dire_con = pd.DataFrame({'donor_residue': donor_residue, 'acceptor_residue': acceptor_residue, 'wat_num': [0]*len(donor_residue)})
wat_con = pd.DataFrame({'donor_residue': donor_residue2, 'acceptor_residue': acceptor_residue2})
# connection via water
wat_con = wat_con.drop_duplicates()
wat_con.index = range(0, len(wat_con))
# direct connection
dire_con = dire_con.drop_duplicates()
dire_con.index = range(0, len(dire_con))
#wat_con.to_csv('/Users/zhangyingying/Dropbox (City College)/Yingying/PSII/quinone/hb_network/conncetion_hoh_frame32_50.csv')
#print('Generating graph')
for i in range(len(wat_con)):
self.addEdge(graph, wat_con['donor_residue'][i], wat_con['acceptor_residue'][i])
visited = []
path = []
#print('Finding all paths through water')
for res in range(len(wat_con)):
results = []
if wat_con['donor_residue'][res] not in visited and wat_con['donor_residue'][res][0:3] != 'TIP':
self.find_all_path(graph, wat_con['donor_residue'][res], [wat_con['donor_residue'][res]], results)
path = path + results
visited.append(wat_con['donor_residue'][res])
else:
continue
donor = []
accept = []
wat_num = []
self.count_water_num(path, donor, accept, wat_num)
# put all the connection together get the network
res_wat_res = | pd.DataFrame({'donor_residue': donor, 'acceptor_residue': accept, 'wat_num': wat_num}) | pandas.DataFrame |
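        # Hedged sketch of a possible continuation (not the original code): the
        # donor/acceptor pairs collected above could be loaded into networkx, which is
        # already imported in this module, to answer connectivity/path queries, e.g.
        #   g = nx.DiGraph()
        #   for _, row in res_wat_res.iterrows():
        #       g.add_edge(row['donor_residue'], row['acceptor_residue'],
        #                  wat_num=row['wat_num'])
        #   # nx.has_path(g, 'GLU_A_65', 'ASP_A_61')  # residue names here are hypothetical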
import pandas as pd
import matplotlib.pyplot as plt
def plot_results_for_probability_changes():
df1 = pd.read_csv("base.csv")
df2 = pd.read_csv("base_pc_100_pm_80.csv")
df3 = pd.read_csv("base_pc_80_pm_5.csv")
df_iterations = pd.DataFrame({
"90%% crossover, 40%% mutação": df1["iterations"],
"100%% crossover, 80%% mutação": df2["iterations"],
"80%% crossover, 5%% mutação": df3["iterations"]
})
df_avg_fitness = pd.DataFrame({
"90%% crossover, 40%% mutação": df1["average fitness"],
"100%% crossover, 80%% mutação": df2["average fitness"],
"80%% crossover, 5%% mutação": df3["average fitness"]
})
df_iterations.boxplot()
plt.show()
df_avg_fitness.boxplot()
plt.show()
def plot_results_for_pop_size_changes():
df1 = pd.read_csv("base_pc_100_pm_80_pop_20.csv")
df2 = pd.read_csv("base_pc_100_pm_80_pop_50.csv")
df3 = pd.read_csv("base_pc_100_pm_80.csv")
df4 = pd.read_csv("base_pc_100_pm_80_pop_200.csv")
df_iterations = pd.DataFrame({
"20 indivíduos": df1["iterations"],
"50 indivíduos": df2["iterations"],
"100 indivíduos": df3["iterations"],
"200 indivíduos": df4["iterations"]
})
df_avg_fitness = pd.DataFrame({
"20 indivíduos": df1["average fitness"],
"50 indivíduos": df2["average fitness"],
"100 indivíduos": df3["average fitness"],
"200 indivíduos": df4["average fitness"]
})
df_iterations.boxplot()
plt.show()
df_avg_fitness.boxplot()
plt.show()
def plot_results_for_crossover_changes():
df1 = pd.read_csv("base_pc_100_pm_80_pop_200.csv")
df2 = pd.read_csv("pmx_pc_100_pm_80_pop_200.csv")
df3 = pd.read_csv("edge_pc_100_pm_80_pop_200.csv")
df4 = pd.read_csv("cyclic_pc_100_pm_80_pop_200.csv")
df_iterations = pd.DataFrame({
"Cut and crossfill": df1["iterations"],
"PMX": df2["iterations"],
"Edge crossfill": df3["iterations"],
"Ciclos": df4["iterations"]
})
df_avg_fitness = pd.DataFrame({
"Cut and crossfill": df1["average fitness"],
"PMX": df2["average fitness"],
"Edge crossfill": df3["average fitness"],
"Ciclos": df4["average fitness"]
})
df_iterations.boxplot()
plt.show()
df_avg_fitness.boxplot()
plt.show()
def plot_results_for_mutation_changes():
df1 = pd.read_csv("pmx_pc_100_pm_80_pop_200.csv")
df2 = | pd.read_csv("pmx_insert_pc_100_pm_80_pop_200.csv") | pandas.read_csv |
# Import packages
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix
import warnings
warnings.filterwarnings('ignore')
def decode(encode_list):
final_re = []
for i in encode_list:
if i == 0:
final_re.append(89950166)
if i == 1:
final_re.append(89950167)
if i == 2:
final_re.append(89950168)
if i == 3:
final_re.append(99999825)
if i == 4:
final_re.append(99999826)
if i == 5:
final_re.append(99999827)
if i == 6:
final_re.append(99999828)
if i == 7:
final_re.append(99999830)
return final_re
def F1_score(confusion_max):
precision = []
recall = []
F1 = []
class_num = len(confusion_max)
for i in range(class_num):
temp_row = confusion_max[i]
TP = temp_row[i]
FN_sum = sum(temp_row)
temp_column = confusion_max[:, i]
FP_sum = sum(temp_column)
pre = TP / max(FP_sum, 1)
rec = TP / max(FN_sum, 1)
f1 = (2 * pre * rec) / max((pre + rec), 1)
F1.append(f1)
precision.append(pre)
recall.append(rec)
print("F1")
print(F1)
print("precision")
print(precision)
print("recall")
print(recall)
F_score = ((1 / len(F1)) * sum(F1)) ** 2
return F_score
# Basic configuration
# path = '/data/projects/CCFDF_18/data/'
# path = 'E:\\CCFDF\\plansmatching\\data\\raw data\\final_data\\'
path = 'E:\\CCFDF\\plansmatching\\data\\raw data\\final_data\\Chanle_B\\'
n_splits = 10
seed = 42
# LightGBM parameters
params = {
"learning_rate": 0.1,
"lambda_l1": 0.1,
"lambda_l2": 0.2,
"max_depth": 4,
"objective": "multiclass",
"num_class": 15,
"silent": True,
}
# Read the data
# train = pd.read_csv(path + 'train4_feature_2.csv')
# test = pd.read_csv(path + 'test4_feature_2.csv')
train = pd.read_csv(path + 'train_4_feature_select.csv')
test = | pd.read_csv(path + 'test_4_feature_select.csv') | pandas.read_csv |
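# --- Hedged sketch: one way the params above can feed LightGBM's native API. The label
# --- column name is an assumption; the real features/labels live in the CSV files read above.
def train_lgb(train_df, label_col, params, num_boost_round=500):
    feature_cols = [c for c in train_df.columns if c != label_col]
    dtrain = lgb.Dataset(train_df[feature_cols], label=train_df[label_col])
    return lgb.train(params, dtrain, num_boost_round=num_boost_round)
# e.g. booster = train_lgb(train, 'label', params)   # 'label' is a hypothetical column name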
# coding: utf-8
# # Generating OncoPrint Data Files
#
# The script will process all variant files and output files in an ingestible format for the R OncoPrint function.
#
# It will output oncoprint data for both replicate files and the merged variant callsets.
# In[1]:
import os
import pandas as pd
# In[2]:
# Load all cosmic variants called in this dataset
# This file was generated in filter_variants.ipynb
cosmic_all_file = os.path.join('results', 'all_cosmic_variants.tsv')
cosmic_all_df = pd.read_table(cosmic_all_file)
# What are the 50 most commonly altered COSMIC genes?
top_n = 50
paad_genes = cosmic_all_df['Gene.refGene'].value_counts().head(top_n).index.tolist()
# Add in ATM and RNF43 (see https://github.com/greenelab/pdx_exomeseq/issues/68)
paad_genes += ['ATM', 'RNF43']
cosmic_all_df['Gene.refGene'].value_counts().head(20)
# ### Define Functions for Oncoprint Data Processing
# In[3]:
def process_variants(variant_dir, focus_variants, strip_text, process_cosmic=False,
id_updater=None):
"""
Retrieve VCF files from an input directory and determine membership
Arguments:
variant_dir - the directory to search for variant files to load
focus_variants - a list of genes or variants to search for in samples
strip_text - a string of text to strip from variant files
process_cosmic - boolean to determine if cosmic variants are to be processed
id_updater - a dictionary of sample ID mappings (defaults to None)
Output:
A dataframe that is ready for input into oncoprint function
"""
variant_assign = []
case_ids = []
for variant_file in os.listdir(variant_dir):
# Load and subset file to only variants in the COSMIC db
variant_df = pd.read_table(os.path.join(variant_dir, variant_file), index_col=0)
variant_sub_df = variant_df[variant_df['cosmic70'] != '.']
# Define mutated genes or variants if they exist for the given sample
if process_cosmic:
variant_class = [1 if x in variant_sub_df['cosmic70'].tolist() else 0
for x in focus_variants]
else:
variant_class = ['MUT;' if x in variant_sub_df['Gene.refGene'].tolist() else ''
for x in focus_variants]
# Store results
sample_id = variant_file.replace(strip_text, '')
variant_assign.append(variant_class)
if id_updater is not None:
sample_id = variant_file.replace(variant_file.split('_')[0],
id_updater[variant_file.split('_')[0]])
case_ids.append(sample_id)
# Process output variants
output_df = pd.DataFrame(variant_assign,
index=case_ids,
columns=focus_variants).sort_index()
output_df.index.name = 'Case.ID'
return output_df
# ## Generate OncoPrint Data
#
# ### For All Replicates
# In[4]:
# Process each replicate by observed COSMIC mutation
replicate_file_path = os.path.join('results', 'processed_vcfs')
replicate_strip_text = '_001_processed_variants.tsv.bz2'
replicate_oncoprint_df = process_variants(variant_dir=replicate_file_path,
focus_variants=paad_genes,
strip_text=replicate_strip_text,
process_cosmic=False,
id_updater=None)
# Output file
replicate_output_file = os.path.join('results', 'oncoprint_replicates.tsv')
replicate_oncoprint_df.to_csv(replicate_output_file, sep='\t')
# ### For Merged Samples
# In[5]:
# Process each replicate by observed COSMIC mutation
merged_file_path = os.path.join('results', 'processed_merged_vcfs')
merged_strip_text = '_processed_variants.tsv.bz2'
merged_oncoprint_df = process_variants(variant_dir=merged_file_path,
focus_variants=paad_genes,
strip_text=merged_strip_text,
process_cosmic=False,
id_updater=None)
# Output file
merged_output_file = os.path.join('results', 'oncoprint_merged.tsv')
merged_oncoprint_df.to_csv(merged_output_file, sep='\t')
# ## COSMIC Mutational Similarity
#
# Output mutational similarity data for all replicates and consensus samples.
# The COSMIC mutational similarity is built from a (0,1) sample by COSMIC mutation matrix.
# In[6]:
# How many COSMIC mutation IDs are in the entire set and how many are unique?
print('All COSMIC mutations: {}'.format(cosmic_all_df.shape[0]))
unique_cosmic_ids = set(cosmic_all_df['cosmic70'])
print('Unique COSMIC mutations: {}'.format(len(unique_cosmic_ids)))
# ### For All Replicates
# In[7]:
# Obtain replicate cosmic similarity matrix
replicate_cosmic_df = process_variants(variant_dir=replicate_file_path,
focus_variants=unique_cosmic_ids,
strip_text=replicate_strip_text,
process_cosmic=True,
id_updater=None)
replicate_common_file = os.path.join('results', 'cosmic_similarity_replicates.tsv')
replicate_cosmic_df.to_csv(replicate_common_file, sep='\t')
# ### Consensus samples
# In[8]:
# Obtain consensus cosmic similarity matrix
merged_cosmic_df = process_variants(variant_dir=merged_file_path,
focus_variants=unique_cosmic_ids,
strip_text=merged_strip_text,
process_cosmic=True,
id_updater=None)
merged_common_file = os.path.join('results', 'cosmic_similarity_merged.tsv')
merged_cosmic_df.to_csv(merged_common_file, sep='\t')
# ## What about prefiltered variants (i.e. before COSMIC filtering)
#
# Observed merged samples with cosmic similarity only
# In[9]:
# Load all prefiltered cosmic variants called in this dataset
# This file was generated in filter_variants.ipynb
file = os.path.join('results', 'all_cosmic_prefiltered_variants.tsv')
cosmic_prefiltered_df = | pd.read_table(file) | pandas.read_table |
from bokeh.charts import save, output_file, BoxPlot
from bokeh.layouts import column, gridplot
from bokeh.palettes import all_palettes
from bokeh.plotting import figure
from bokeh.models.widgets import Panel, Tabs, Div
from bokeh.models.widgets import DataTable, TableColumn
from bokeh.models import ColumnDataSource
import pandas as pd
import numpy as np
import itertools
def create_colors(count):
c = min(20, max(3, count))
return list(itertools.islice(
itertools.cycle(all_palettes['Category20'][c]), 0, count))
def create_timelines(report):
worker_timelines, group_names = report.get_timelines()
worker_timelines = list(worker_timelines.items())
worker_timelines.sort()
y_labels = []
for worker_id, timelines in worker_timelines:
for i, _ in enumerate(timelines):
if worker_id == -1:
y_labels.append("Server")
else:
y_labels.append("Worker {}.{}".format(worker_id, i))
f = figure(plot_width=1000, plot_height=1000, y_range=y_labels,
x_range=[0, report.end_time],
webgl=True)
line_id = 1
colors = create_colors(len(group_names))
for worker_id, timelines in worker_timelines:
for starts, ends, gids in timelines:
y = line_id
c = [colors[i] for i in gids]
f.quad(starts, ends, y - 0.1, y + 0.1, line_width=0,
fill_color=c, line_color=None)
line_id += 1
for i, group_name in enumerate(group_names):
f.quad([], [], 1, 1,
fill_color=colors[i],
line_color="black",
legend=group_name)
return f
def create_monitoring(report):
result = []
for worker in report.worker_list:
f = figure(plot_width=1000, plot_height=130,
x_range=[0, report.end_time],
title="Worker {}".format(worker.address))
f.line(worker.monitoring.time, worker.monitoring.cpu,
color="blue", legend="CPU %")
f.line(worker.monitoring.time, worker.monitoring.mem,
color="red", legend="mem %")
result.append([f])
return gridplot(result)
def create_transfer(report):
result = []
for worker in report.worker_list:
f = figure(plot_width=1000, plot_height=130,
x_range=[0, report.end_time],
title="Worker {}".format(worker.address))
f.line(worker.sends.time, worker.sends.data_size.cumsum(),
color="green", legend="send")
f.line(worker.recvs.time, worker.recvs.data_size.cumsum(),
color="red", legend="receive")
result.append([f])
return gridplot(result)
def create_ctransfer(report):
sends = report.all_sends()
f = figure(plot_width=1000, plot_height=500,
x_range=[0, report.end_time],
title="Cumulative transfers")
sends = sends.join(report.task_frame["group"], on="id")
names = report.group_names
for color, name in zip(create_colors(len(names)), names):
frame = sends[sends["group"] == name]
frame = frame.sort_values("time")
f.line(frame.time, frame.data_size.cumsum(),
color=color, legend=name, line_width=3)
return f
def create_ctime(report):
ds = report.task_frame
names = ds["group"].unique()
names.sort()
colors = create_colors(len(names))
f1 = figure(plot_width=1000, plot_height=400,
x_range=[0, report.end_time],
title="Cumulative time of finished tasks")
for name, color in zip(names, colors):
frame = ds[ds["group"] == name]
f1.line(frame.end_time, frame.duration.cumsum(),
legend=name, color=color, line_width=2)
f2 = figure(plot_width=1000, plot_height=400,
x_range=[0, report.end_time],
title="Number of finished tasks")
for name, color in zip(names, colors):
frame = ds[ds["group"] == name]
f2.line(frame.end_time, np.arange(1, len(frame) + 1),
legend=name, color=color, line_width=2)
return column([f1, f2])
def create_task_summary(report):
counts = report.task_frame.group.value_counts().sort_index()
groups = counts.index
    counts = counts.reset_index(drop=True)
ds = | pd.DataFrame({"group": groups, "count": counts}) | pandas.DataFrame |
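    # Hedged sketch of a possible continuation (the original is truncated here): wrap the
    # summary frame in a ColumnDataSource and render it with the DataTable/TableColumn
    # widgets imported at the top of this module, e.g.
    #   source = ColumnDataSource(ds)
    #   cols = [TableColumn(field="group", title="Group"),
    #           TableColumn(field="count", title="Count")]
    #   return DataTable(source=source, columns=cols, width=400, height=250)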
import random
import unittest
import numpy as np
import pandas as pd
from numpy import testing as nptest
from examples.project_ENGIE import Project_Engie
from operational_analysis.methods import plant_analysis
def reset_prng():
np.random.seed(42)
random.seed(42)
class TestPandasPrufPlantAnalysis(unittest.TestCase):
def setUp(self):
reset_prng()
# Set up data to use for testing (ENGIE example plant)
self.project = Project_Engie("./examples/data/la_haute_borne")
self.project.prepare()
# Test inputs to the regression model, at monthly time resolution
def test_monthly_inputs(self):
reset_prng()
# ____________________________________________________________________
# Test inputs to the regression model, at monthly time resolution
self.analysis = plant_analysis.MonteCarloAEP(
self.project,
reanal_products=["merra2", "era5"],
time_resolution="M",
reg_temperature=True,
reg_winddirection=True,
)
df = self.analysis._aggregate.df
# Check the pre-processing functions
self.check_process_revenue_meter_energy_monthly(df)
self.check_process_loss_estimates_monthly(df)
self.check_process_reanalysis_data_monthly(df)
def test_monthly_lin(self):
reset_prng()
# ____________________________________________________________________
# Test linear regression model, at monthly time resolution
self.analysis = plant_analysis.MonteCarloAEP(
self.project,
reanal_products=["merra2", "era5"],
time_resolution="M",
reg_model="lin",
reg_temperature=False,
reg_winddirection=False,
)
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=10)
sim_results = self.analysis.results
self.check_simulation_results_lin_monthly(sim_results)
# Test inputs to the regression model, at daily time resolution
def test_daily_inputs(self):
reset_prng()
# ____________________________________________________________________
# Test inputs to the regression model, at monthly time resolution
self.analysis = plant_analysis.MonteCarloAEP(
self.project,
reanal_products=["merra2", "era5"],
time_resolution="D",
reg_temperature=True,
reg_winddirection=True,
)
df = self.analysis._aggregate.df
# Check the pre-processing functions
self.check_process_revenue_meter_energy_daily(df)
self.check_process_loss_estimates_daily(df)
self.check_process_reanalysis_data_daily(df)
def test_daily_gam(self):
reset_prng()
# ____________________________________________________________________
# Test GAM regression model (can be used at daily time resolution only)
self.analysis = plant_analysis.MonteCarloAEP(
self.project,
reanal_products=["merra2", "era5"],
time_resolution="D",
reg_model="gam",
reg_temperature=True,
reg_winddirection=True,
)
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=5)
sim_results = self.analysis.results
self.check_simulation_results_gam_daily(sim_results)
def test_daily_gbm(self):
reset_prng()
# ____________________________________________________________________
# Test GBM regression model (can be used at daily time resolution only)
self.analysis = plant_analysis.MonteCarloAEP(
self.project,
reanal_products=["era5"],
time_resolution="D",
reg_model="gbm",
reg_temperature=True,
reg_winddirection=False,
)
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=5)
sim_results = self.analysis.results
self.check_simulation_results_gbm_daily(sim_results)
def test_daily_etr(self):
reset_prng()
# ____________________________________________________________________
# Test ETR regression model (can be used at daily time resolution only)
self.analysis = plant_analysis.MonteCarloAEP(
self.project,
reanal_products=["merra2"],
time_resolution="D",
reg_model="etr",
reg_temperature=False,
reg_winddirection=False,
)
# Run Monte Carlo AEP analysis, confirm the results are consistent
self.analysis.run(num_sim=5)
sim_results = self.analysis.results
self.check_simulation_results_etr_daily(sim_results)
def check_process_revenue_meter_energy_monthly(self, df):
# Energy Nan flags are all zero
nptest.assert_array_equal(df["energy_nan_perc"].values, np.repeat(0.0, df.shape[0]))
# Expected number of days per month are equal to number of actual days
nptest.assert_array_equal(df["num_days_expected"], df["num_days_actual"])
# Check a few energy values
expected_gwh = pd.Series([0.692400, 1.471730, 0.580035])
actual_gwh = df.loc[
pd.to_datetime(["2014-06-01", "2014-12-01", "2015-10-01"]), "energy_gwh"
]
nptest.assert_array_almost_equal(expected_gwh, actual_gwh)
def check_process_loss_estimates_monthly(self, df):
# Availablity, curtailment nan fields both 0, NaN flag is all False
nptest.assert_array_equal(df["avail_nan_perc"].values, np.repeat(0.0, df.shape[0]))
nptest.assert_array_equal(df["curt_nan_perc"].values, np.repeat(0.0, df.shape[0]))
nptest.assert_array_equal(df["nan_flag"].values, np.repeat(False, df.shape[0]))
# Check a few reported availabilty and curtailment values
expected_avail_gwh = pd.Series([0.029417, 0.021005, 0.000444])
expected_curt_gwh = pd.Series([0.013250, 0.000000, 0.000000])
expected_avail_pct = pd.Series([0.040019, 0.014071, 0.000765])
expected_curt_pct = pd.Series([0.018026, 0.000000, 0.000000])
date_ind = pd.to_datetime(["2014-06-01", "2014-12-01", "2015-10-01"])
nptest.assert_array_almost_equal(expected_avail_gwh, df.loc[date_ind, "availability_gwh"])
nptest.assert_array_almost_equal(expected_curt_gwh, df.loc[date_ind, "curtailment_gwh"])
nptest.assert_array_almost_equal(expected_avail_pct, df.loc[date_ind, "availability_pct"])
nptest.assert_array_almost_equal(expected_curt_pct, df.loc[date_ind, "curtailment_pct"])
def check_process_reanalysis_data_monthly(self, df):
expected = {
"merra2": [5.42523278, 6.86883337, 5.02690892],
"era5": [5.20508049, 6.71586744, 5.23824611],
"merra2_wd": [11.74700241, 250.90081133, 123.70142025],
"era5_wd": [23.4291153, 253.14150601, 121.25886916],
"merra2_temperature_K": [289.87128364, 275.26493716, 281.72562887],
"era5_temperature_K": [290.82110632, 276.62490053, 282.71629935],
}
date_ind = | pd.to_datetime(["2014-06-01", "2014-12-01", "2015-10-01"]) | pandas.to_datetime |
import os
import shutil
import filecmp
from unittest import TestCase
import pandas as pd
from pylearn.varselect import count_xvars, rank_xvars, extract_xvar_combos, remove_high_corvar
class TestVariableSelect(TestCase):
def setUp(self):
self.output = './tests/output'
if not os.path.exists(self.output):
os.makedirs(self.output)
def assert_file_same(self, filename):
expected = os.path.join('./tests/data/expected', filename)
actual = os.path.join('./tests/output', filename)
return filecmp.cmp(expected, actual)
def test_count_xvars(self):
vsel_xy_config = pd.read_csv('./tests/data/vsel_xy_config.csv')
count = count_xvars(vsel_xy_config)
self.assertEqual(count, 708)
def test_rank_xvars(self):
varselect = pd.read_csv('./tests/data/rlearn/VARSELECT.csv')
ranks = rank_xvars(varselect)
columns = ranks.columns.values.tolist()
self.assertListEqual(columns, ['VARNAME', 'IMPORTANCE', 'P', 'RANK'])
def test_extract_xvar_combos(self):
varselect = | pd.read_csv('./tests/data/rlearn/VARSELECT.csv') | pandas.read_csv |
# --------------
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# Load the dataset and create column `year` which stores the year in which match was played
data_ipl= | pd.read_csv(path) | pandas.read_csv |
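# Hedged sketch: the `year` column described above could be derived like this, assuming the
# dataset has a parseable 'date' column (the actual column name is not shown in this snippet).
# data_ipl['year'] = pd.to_datetime(data_ipl['date']).dt.year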
import matplotlib
import matplotlib.pylab as plt
import os
from matplotlib.pyplot import legend, title
from numpy.core.defchararray import array
from numpy.lib.shape_base import column_stack
import seaborn as sns
import pandas as pd
import itertools
import numpy as np
def plot_graph(data, plot_name, figsize, legend):
"""
    Plot the input data to LaTeX-compatible .pgf format.
"""
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
# sns.set()
sns.set_context("paper")
# sns.set(rc={'figure.figsize':figsize})
palette = 'summer' #['copper_r', 'BuPu'afmhot_r cool_r] https://medium.com/@morganjonesartist/color-guide-to-seaborn-palettes-da849406d44f
sns.set_theme(style="whitegrid")
g = sns.catplot(data=data, kind="bar", x='Model', y='Score', hue="Loss", ci='sd', palette=palette, legend=legend, legend_out=True, height=figsize[1], aspect=figsize[0]/figsize[1])
g.despine(left=True)
g.set(ylim=(0, .5))
g.map(plt.axhline, y=0.3633, color='purple', linestyle='dotted')
# plt.legend(loc='upper right', title='Metric')
g.set(xlabel='', ylabel='Score')
# plt.title(t_name.replace('_', ' ').title())
folder = os.path.dirname(os.path.abspath(__file__)) + '/plots/'
if not os.path.isdir(folder):
os.makedirs(folder)
# plt.savefig(folder + '{}.pgf'.format(plot_name))
g.savefig(folder + '{}{}.png'.format(plot_name, '' if legend else '_wol'))
# plt.savefig(folder + '{}{}.png'.format(plot_name, '' if legend else '_wol'))
if __name__ == "__main__":
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
| pd.set_option('display.max_colwidth', None) | pandas.set_option |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 10 17:52:18 2018
@author: sudhir
"""
# =============================================================================
# Import packages
# =============================================================================
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
# =============================================================================
# Read Geomertic function
# =============================================================================
def xyz(df1,df2,path,seed):
def get_xyz_data(filename):
pos_data = []
lat_data = []
with open(filename) as f:
for line in f.readlines():
x = line.split()
if x[0] == 'atom':
pos_data.append([np.array(x[1:4], dtype=np.float),x[4]])
elif x[0] == 'lattice_vector':
lat_data.append(np.array(x[1:4], dtype=np.float))
return pos_data, np.array(lat_data)
ga_cols =[]
al_cols =[]
o_cols = []
in_cols = []
for i in range(6):
ga_cols.append('Ga'+str(i))
al_cols.append('Al'+str(i))
o_cols.append('O'+str(i))
in_cols.append('In'+str(i))
ga_df = pd.DataFrame(columns= ga_cols)
al_df = pd.DataFrame(columns= al_cols)
o_df = pd.DataFrame(columns= o_cols)
in_df = pd.DataFrame(columns= in_cols)
train = df1
for i in train.id.values:
fn = path+'train/{}/geometry.xyz'.format(i)
train_xyz, train_lat = get_xyz_data(fn)
ga_list = []
al_list = []
o_list = []
in_list = []
for li in train_xyz:
try:
if li[1] == "Ga":
ga_list.append(li[0])
except:
pass
try:
if li[1] == "Al":
al_list.append(li[0])
except:
pass
try:
if li[1] == "In":
in_list.append(li[0])
except:
pass
try:
if li[1] == "O":
o_list.append(li[0])
except:
pass
# ga_list = [item for sublist in ga_list for item in sublist]
# al_list = [item for sublist in al_list for item in sublist]
# o_list = [item for sublist in o_list for item in sublist]
try:
model = PCA(n_components=2,random_state=seed)
ga_list = np.array(ga_list)
temp_ga = model.fit_transform(ga_list.transpose())
temp_ga = [item for sublist in temp_ga for item in sublist]
except:
temp_ga = [0,0,0,0,0,0]
# print i
try:
model = PCA(n_components=2 ,random_state=seed)
al_list = np.array(al_list)
temp_al = model.fit_transform(al_list.transpose())
temp_al = [item for sublist in temp_al for item in sublist]
# print i
except:
temp_al = [0,0,0,0,0,0]
# print i
try:
model = PCA(n_components=2 ,random_state=seed)
o_list = np.array(o_list)
temp_o = model.fit_transform(o_list.transpose())
temp_o = [item for sublist in temp_o for item in sublist]
# print i
except:
temp_o = [0,0,0,0,0,0]
# print i
try:
model = PCA(n_components=2 ,random_state=seed)
in_list = np.array(in_list)
temp_in = model.fit_transform(in_list.transpose())
temp_in = [item for sublist in temp_in for item in sublist]
# print i
except:
temp_in = [0,0,0,0,0,0]
# print i
temp_ga = pd.DataFrame(temp_ga).transpose()
temp_ga.columns = ga_cols
temp_ga.index = np.array([i])
temp_al = pd.DataFrame(temp_al).transpose()
temp_al.columns = al_cols
temp_al.index = np.array([i])
temp_o = pd.DataFrame(temp_o).transpose()
temp_o.columns = o_cols
temp_o.index = np.array([i])
temp_in = pd.DataFrame(temp_in).transpose()
temp_in.columns = in_cols
temp_in.index = np.array([i])
ga_df = pd.concat([ga_df,temp_ga])
al_df = pd.concat([al_df,temp_al])
o_df = pd.concat([o_df,temp_o])
in_df = pd.concat([in_df,temp_in])
ga_df["id"] = ga_df.index
al_df["id"] = al_df.index
o_df["id"] = o_df.index
in_df["id"] = in_df.index
train = pd.merge(train,ga_df,on = ["id"],how = "left")
train = pd.merge(train,al_df,on = ["id"],how = "left")
train = pd.merge(train,o_df,on = ["id"],how = "left")
train = pd.merge(train,in_df,on = ["id"],how = "left")
# =============================================================================
# Test data
# =============================================================================
ga_df = pd.DataFrame(columns= ga_cols)
al_df = pd.DataFrame(columns= al_cols)
o_df = pd.DataFrame(columns= o_cols)
in_df = pd.DataFrame(columns= in_cols)
test = df2
for i in test.id.values:
fn = path+'test/{}/geometry.xyz'.format(i)
train_xyz, train_lat = get_xyz_data(fn)
ga_list = []
al_list = []
o_list = []
in_list = []
for li in train_xyz:
try:
if li[1] == "Ga":
ga_list.append(li[0])
except:
pass
try:
if li[1] == "Al":
al_list.append(li[0])
except:
pass
try:
if li[1] == "In":
in_list.append(li[0])
except:
pass
try:
if li[1] == "O":
o_list.append(li[0])
except:
pass
# ga_list = [item for sublist in ga_list for item in sublist]
# al_list = [item for sublist in al_list for item in sublist]
# o_list = [item for sublist in o_list for item in sublist]
try:
model = PCA(n_components=2 ,random_state=seed)
ga_list = np.array(ga_list)
temp_ga = model.fit_transform(ga_list.transpose())
temp_ga = [item for sublist in temp_ga for item in sublist]
except:
temp_ga = [0,0,0,0,0,0]
# print i
try:
model = PCA(n_components=2 ,random_state=seed)
al_list = np.array(al_list)
temp_al = model.fit_transform(al_list.transpose())
temp_al = [item for sublist in temp_al for item in sublist]
# print i
except:
temp_al = [0,0,0,0,0,0]
# print i
try:
model = PCA(n_components=2 ,random_state=seed)
o_list = np.array(o_list)
temp_o = model.fit_transform(o_list.transpose())
temp_o = [item for sublist in temp_o for item in sublist]
# print i
except:
temp_o = [0,0,0,0,0,0]
# print i
try:
model = PCA(n_components=2 ,random_state=seed)
in_list = np.array(in_list)
temp_in = model.fit_transform(in_list.transpose())
temp_in = [item for sublist in temp_in for item in sublist]
# print i
except:
temp_in = [0,0,0,0,0,0]
# print i
temp_ga = | pd.DataFrame(temp_ga) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2021/10/24 16:12
describe: Tushare data cache. Data is cached to disk with pickle as a temporary cache: fetch once, reuse many times, with no incremental updates.
"""
import os.path
import shutil
import pandas as pd
from .ts import *
from ..utils import io
class TsDataCache:
"""Tushare 数据缓存"""
def __init__(self, data_path, sdt, edt, verbose=False):
"""
        :param data_path: path where cached data is stored
        :param sdt: cache start date
        :param edt: cache end date
        :param verbose: whether to print verbose information
"""
self.date_fmt = "%Y%m%d"
self.verbose = verbose
self.sdt = pd.to_datetime(sdt).strftime(self.date_fmt)
self.edt = | pd.to_datetime(edt) | pandas.to_datetime |
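        # Hedged usage sketch (not from the original file):
        #   dc = TsDataCache(data_path="./data", sdt="20200101", edt="20211031", verbose=True)
        # The fetch methods (not shown here) are expected to pickle each Tushare response
        # under data_path, so repeated calls within [sdt, edt] hit the local cache instead
        # of the API; there is no incremental update, per the module docstring.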
# Evolutionary optimizer for hyperparameters and architecture. Project at https://github.com/pgfeldman/optevolver
import concurrent.futures
import copy
import datetime
import getpass
import os
import random
import re
import threading
from enum import Enum
from typing import Dict, List, Tuple, Callable
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as st
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d, Axes3D # <-- DON'T DELETE, and note the capitalization!
from sklearn.utils import resample
import optevolver.hyperparameter.ValueAxis as VA
import optevolver.util.ExcelUtils as eu
class EvolverTags(Enum):
"""A class containing enumerations elements for use in the argument dictionaries"""
FITNESS = "fitness"
ID = "id"
FUNCTION = "func"
FLOAT = "float"
GENERATION = "generation"
GENOME = "genome"
THREAD_NAME = "thread_str"
FILENAME = "filename"
class Genome:
"""
Class that handles the evolution of a set of ValueAxis (i.e. the chromosome)
...
Attributes
----------
chromosome_dict: Dict
fitness: float
ea_list: List
population: List
meta_info: Dict
data_list: List
generation:int
Methods
-------
reset(self):
Resets all the variables. Needed to eliminate class cross-contamination of class-global variables
equals(self, g: "Genome") -> bool:
        Does a deep compare of two Genomes. Returns True if they have the same structure and value(s).
get_chromosome_value(self, key: str) -> Dict:
mutate(self, chance: float = 0.1):
create_args_from_chromo(self, chromo: dict = None) -> Dict:
create_dict_from_chromo(self, chromo: dict = None) -> Dict:
calc_fitness(self, func, id_str: str) -> float:
calc_fitness2(self, args: Dict):
calc_fitness_stats(self, resample_size: int = 100) -> float:
get_data_list(self) -> List:
get_name(self) -> str:
to_dict(self):
to_string(self, meta: bool = True, chromo: bool = True) -> str:
"""
chromosome_dict: Dict
fitness: float
ea_list: List
population: List
meta_info: Dict
data_list: List
generation = 0
def __init__(self, evolve_axis_list: List, p1: 'Genome' = None, p2: 'Genome' = None, crossover: float = 0.5,
generation=0):
"""
Parameters
----------
evolve_axis_list : List
The list of all EvolveAxis used to create this genome
p1 :
Optional parent for this Genome. Two are required to breed.
p2
Optional parent for this Genome. Two are required to breed.
crossover: float
probability that a chromosome will be selected randomly from p1
generation: int
The generation (as determined by the calling EvolutionaryOpimizer) that this genome belongs to
"""
self.reset()
self.generation = generation
self.ea_list = copy.deepcopy(evolve_axis_list)
ea: VA.EvolveAxis
if p1 == None and p2 == None:
for ea in self.ea_list:
self.chromosome_dict[ea.name] = ea.get_random_val()
else:
# for ea in self.ea_list:
for i in range(len(self.ea_list)):
ea = self.ea_list[i]
ea1 = p1.ea_list[i]
ea2 = p2.ea_list[i]
probability = random.random()
if probability < crossover:
ea.set_value(ea1)
else:
ea.set_value(ea2)
self.chromosome_dict[ea.name] = ea.get_result()
def reset(self):
"""Resets all the variables. Needed to eliminate class cross-contamination of class-global variables"""
self.ea_list = []
self.chromosome_dict = {}
self.meta_info = {}
self.fitness = 0
self.population = []
self.generation = 0
self.data_list = []
def equals(self, g: "Genome") -> bool:
"""Does a deep compare of two Genomes. returns a True if they have the same structure and value(s)
Parameters
----------
g : Genome
The genome we are testing against
"""
d1 = self.create_args_from_chromo()
d2 = g.create_args_from_chromo()
if len(d1) != len(d2):
return False
for key, val in d1.items():
if d1[key] != d2[key]:
return False
return True
def get_chromosome_value(self, key: str) -> Dict:
""" Get the current value of a specified EvolveAxis
Parameters
----------
key : str
The name of the EvolveAxis
"""
return self.chromosome_dict[key]
def mutate(self, chance: float = 0.1):
""" Randomly set new values in the chromosomes that make up this genome
Parameters
----------
chance : float = 0.1
The probability that any particular chromosome will mutate. Default is 10%
"""
ea: VA.EvolveAxis
for ea in self.ea_list:
if random.random() < chance: # mutate.
# calculate a new random val
self.chromosome_dict[ea.name] = ea.get_random_val()
def create_args_from_chromo(self, chromo: dict = None) -> Dict:
""" Creates a dictionary that provides values that can be evaluated using the callback function passed to the
EvolutionaryOptimizer. An example of this is the function near the bottom of this file:
def example_evaluation_function(arguments: Dict) -> Tuple[Dict, Dict]:
The arguments:Dict parameter is created and returned by this method
Parameters
----------
chromo : dict = None
An optional chromosome. Otherwise the arguments are created by using this Genome's self.chromosome_dict
"""
if chromo == None:
chromo = self.chromosome_dict
to_return = {}
ea: VA.EvolveAxis
for ea in self.ea_list:
to_return[ea.name] = ea.get_result()
return to_return
def create_dict_from_chromo(self, chromo: dict = None) -> Dict:
""" Creates a dictionary that provides a detailed list of the parameters used by this genome. This differs from
create_args_from_chromo() by including nested parameters of each EvolveAxis
Parameters
----------
chromo : dict = None
An optional chromosome. Otherwise the arguments are created by using this Genome's self.chromosome_dict
"""
if chromo == None:
chromo = self.chromosome_dict
to_return = {}
ea: VA.EvolveAxis
for ea in self.ea_list:
dict = ea.get_last_history()
for key, value in dict.items():
to_return["{}".format(key)] = value
return to_return
def calc_fitness(self, func: Callable, id_str: str) -> float:
""" Depricated - Conceptually the heart of the approach. A pointer to a function is passed in, which is used to
calculate the fitness of whatever is being evaluated and returns it.
Parameters
----------
func : Callable
The function that will produce some fitness value. It returns two Dicts (d1, d2), where d1 must contain a
"fitness" value and d2, which contains data that will be recorded to the spreadsheet for post-hoc
analysis
id_str: str
The name for this evaluation. Added to the argument Dict in case it is needed, for example, as a file name
"""
args = self.create_args_from_chromo(self.chromosome_dict)
args[EvolverTags.ID.value] = id_str
d1, d2 = func(args)
self.data_list.append(d2)
self.fitness = d1[EvolverTags.FITNESS.value]
self.population.append(self.fitness)
return self.fitness
def calc_fitness2(self, args: Dict):
""" Conceptually the heart of the approach. A pointer to a function is passed in, which is used to
calculate the fitness of whatever is being evaluated.
Parameters
----------
args : Dict
Contains the arguments that will be passed to the evaluate function, and a reference to the function as
            well. The function is deleted from the arguments, and the remaining Dict is passed to the function, which
is required to produce a fitness value. It returns two Dicts (d1, d2), where d1 must contain a
{EvolverTags.FITNESS.value : <some fitness value>} and d2, which contains data that will be recorded to the
spreadsheet for post-hoc analysis
"""
args.update(self.create_args_from_chromo())
func = args[EvolverTags.FUNCTION.value]
del args[EvolverTags.FUNCTION.value]
d1, d2 = func(args)
self.data_list.append(d2)
self.fitness = d1[EvolverTags.FITNESS.value]
self.population.append(self.fitness)
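    # Hedged usage sketch: the calling EvolutionaryOptimizer is expected to
    # build something like the Dict below (the ID string format is an
    # assumption, not taken from this file):
    #
    #   genome.calc_fitness2({EvolverTags.FUNCTION.value: example_evaluation_function,
    #                         EvolverTags.ID.value: "gen_0_genome_3"})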
def calc_fitness_stats(self, resample_size: int = 100) -> float:
""" Creates a bootstrap resampling of the fitness values that have accumulated for this genome. Since the
fitness value may be stochastic, it's best to return a reasonable mean value. It returns the mean
fitness value from this population, and saves the 5%, 95%, minimum, and maximum values for post-hoc analysis
Parameters
----------
resample_size: int = 100
The size of the bootstrap population to resample into
"""
# print("calc_fitness_stats(): population = {}".format(len(self.population)))
boot = resample(self.population, replace=True, n_samples=resample_size, random_state=1)
        s = pd.Series(boot)
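        # --- Editorial reconstruction: the original file is truncated at this
        # point. Based on the docstring above, the method goes on to record the
        # 5%/95% bounds plus min/max for post-hoc analysis and returns the mean.
        # The meta_info key names are assumptions, not taken from the source.
        self.meta_info = {'5%': s.quantile(0.05), '95%': s.quantile(0.95),
                          'min': s.min(), 'max': s.max()}
        return s.mean()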
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
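# A quick orientation example of the kind of coercion exercised below (plain
# pandas behaviour in the version these tests target; not part of the suite):
#
#   s = pd.Series([1, 2, 3, 4])   # dtype: int64
#   s[1] = 'x'                    # assigning an object value...
#   s.dtype                       # ...upcasts the whole Series to object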
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
        self._assert_setitem_series_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
            raise NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, True, exp, np.complex128)
values = pd.Series([True, False, True, True])
exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
def test_where_index_complex128(self):
pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = pd.Series([1, 1, 1, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1, 6, 1, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# bool + float -> float
exp = pd.Series([1.0, 1.1, 1.0, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1.0, 6.6, 1.0, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# bool + complex -> complex
exp = pd.Series([1, 1 + 1j, 1, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1, 6 + 6j, 1, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# bool + bool -> bool
exp = pd.Series([True, True, True, True])
self._assert_where_conversion(obj, cond, True, exp, np.bool)
values = pd.Series([True, False, True, True])
exp = pd.Series([True, False, True, True])
self._assert_where_conversion(obj, cond, values, exp, np.bool)
def test_where_index_bool(self):
pass
def test_where_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Series([True, False, True, False])
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-01')])
self._assert_where_conversion(obj, cond, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
values = pd.Series([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not coerce to UTC, must be object
values = pd.Series([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02 05:00'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04 05:00')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_index_datetime64(self):
obj = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Index([True, False, True, False])
# datetime64 + datetime64 -> datetime64
# must support scalar
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaises(TypeError):
obj.where(cond, pd.Timestamp('2012-01-01'))
values = pd.Index([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = ("Index\\(\\.\\.\\.\\) must be called with a collection "
"of some kind")
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not ignore timezone, must be object
values = pd.Index([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_series_datetime64tz(self):
pass
def test_where_series_timedelta64(self):
pass
def test_where_series_period(self):
pass
def test_where_index_datetime64tz(self):
pass
def test_where_index_timedelta64(self):
pass
def test_where_index_period(self):
pass
class TestFillnaSeriesCoercion(CoercionBase, tm.TestCase):
    # not indexing, but place here for consistency
method = 'fillna'
def _assert_fillna_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by fillna """
target = original.copy()
res = target.fillna(value)
self._assert(res, expected, expected_dtype)
def _fillna_object_common(self, klass):
obj = klass(['a', np.nan, 'c', 'd'])
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = klass(['a', 1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 'd'])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = klass(['a', True, 'c', 'd'])
self._assert_fillna_conversion(obj, True, exp, np.object)
def test_fillna_series_object(self):
self._fillna_object_common(pd.Series)
def test_fillna_index_object(self):
self._fillna_object_common(pd.Index)
def test_fillna_series_int64(self):
# int can't hold NaN
pass
def test_fillna_index_int64(self):
pass
def _fillna_float64_common(self, klass):
obj = klass([1.1, np.nan, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1.1, exp, np.float64)
if klass is pd.Series:
# float + complex -> complex
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
elif klass is pd.Index:
# float + complex -> object
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
else:
            raise NotImplementedError
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, True, exp, np.float64)
def test_fillna_series_float64(self):
self._fillna_float64_common(pd.Series)
def test_fillna_index_float64(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_complex128(self):
obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, True, exp, np.complex128)
def test_fillna_index_complex128(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_bool(self):
# bool can't hold NaN
pass
def test_fillna_index_bool(self):
pass
def test_fillna_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + datetime64tz => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
value = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64 + int => object
# ToDo: must be coerced to object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 1, exp, 'datetime64[ns]')
# datetime64 + object => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
'x',
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.NaT,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
        value = pd.Timestamp('2012-01-01', tz=tz)
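        # (Editorial reconstruction: the original file is truncated here; the
        # assertion below completes the pattern used throughout this class.)
        self._assert_fillna_conversion(obj, value, exp,
                                       'datetime64[ns, US/Eastern]')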
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from collections import defaultdict
from logging import Logger
from typing import Any, Dict, List, Optional
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from ax.core.experiment import Experiment
from ax.core.metric import Metric
from ax.core.multi_type_experiment import MultiTypeExperiment
from ax.core.objective import MultiObjective, ScalarizedObjective
from ax.core.search_space import SearchSpace
from ax.core.trial import BaseTrial, Trial
from ax.modelbridge import ModelBridge
from ax.modelbridge.generation_strategy import GenerationStrategy
from ax.plot.contour import interact_contour_plotly
from ax.plot.slice import plot_slice_plotly
from ax.plot.trace import optimization_trace_single_method_plotly
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast, not_none
logger: Logger = get_logger(__name__)
def _get_objective_trace_plot(
experiment: Experiment,
metric_name: str,
model_transitions: List[int],
optimization_direction: Optional[str] = None,
# pyre-ignore[11]: Annotation `go.Figure` is not defined as a type.
) -> Optional[go.Figure]:
best_objectives = np.array([experiment.fetch_data().df["mean"]])
return optimization_trace_single_method_plotly(
y=best_objectives,
title="Best objective found vs. # of iterations",
ylabel=metric_name,
model_transitions=model_transitions,
optimization_direction=optimization_direction,
plot_trial_points=True,
)
def _get_objective_v_param_plot(
search_space: SearchSpace,
model: ModelBridge,
metric_name: str,
trials: Dict[int, BaseTrial],
) -> Optional[go.Figure]:
range_params = list(search_space.range_parameters.keys())
if len(range_params) == 1:
# individual parameter slice plot
output_slice_plot = plot_slice_plotly(
model=not_none(model),
param_name=range_params[0],
metric_name=metric_name,
generator_runs_dict={
str(t.index): not_none(checked_cast(Trial, t).generator_run)
for t in trials.values()
},
)
return output_slice_plot
if len(range_params) > 1:
# contour plot
output_contour_plot = interact_contour_plotly(
model=not_none(model),
metric_name=metric_name,
)
return output_contour_plot
# if search space contains no range params
logger.warning(
"_get_objective_v_param_plot requires a search space with at least one "
"RangeParameter. Returning None."
)
return None
def _get_suffix(input_str: str, delim: str = ".", n_chunks: int = 1) -> str:
return delim.join(input_str.split(delim)[-n_chunks:])
def _get_shortest_unique_suffix_dict(
input_str_list: List[str], delim: str = "."
) -> Dict[str, str]:
"""Maps a list of strings to their shortest unique suffixes
Maps all original strings to the smallest number of chunks, as specified by
delim, that are not a suffix of any other original string. If the original
string was a suffix of another string, map it to its unaltered self.
Args:
input_str_list: a list of strings to create the suffix mapping for
delim: the delimiter used to split up the strings into meaningful chunks
Returns:
dict: A dict with the original strings as keys and their abbreviations as
values
"""
# all input strings must be unique
assert len(input_str_list) == len(set(input_str_list))
if delim == "":
raise ValueError("delim must be a non-empty string.")
suffix_dict = defaultdict(list)
# initialize suffix_dict with last chunk
for istr in input_str_list:
suffix_dict[_get_suffix(istr, delim=delim, n_chunks=1)].append(istr)
max_chunks = max(len(istr.split(delim)) for istr in input_str_list)
if max_chunks == 1:
return {istr: istr for istr in input_str_list}
# the upper range of this loop is `max_chunks + 2` because:
# - `i` needs to take the value of `max_chunks`, hence one +1
# - the contents of the loop are run one more time to check if `all_unique`,
# hence the other +1
for i in range(2, max_chunks + 2):
new_dict = defaultdict(list)
all_unique = True
for suffix, suffix_str_list in suffix_dict.items():
if len(suffix_str_list) > 1:
all_unique = False
for istr in suffix_str_list:
new_dict[_get_suffix(istr, delim=delim, n_chunks=i)].append(istr)
else:
new_dict[suffix] = suffix_str_list
if all_unique:
if len(set(input_str_list)) != len(suffix_dict.keys()):
break
return {
suffix_str_list[0]: suffix
for suffix, suffix_str_list in suffix_dict.items()
}
suffix_dict = new_dict
# If this function has not yet exited, some input strings still share a suffix.
# This is not expected, but in this case, the function will return the identity
# mapping, i.e., a dict with the original strings as both keys and values.
logger.warning(
"Something went wrong. Returning dictionary with original strings as keys and "
"values."
)
return {istr: istr for istr in input_str_list}
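# Illustrative behaviour (hypothetical inputs, not taken from the Ax test suite):
#
#   _get_shortest_unique_suffix_dict(["a.b.c", "x.y.c", "d"])
#   # -> {"a.b.c": "b.c", "x.y.c": "y.c", "d": "d"}
#
# The shared final chunk "c" forces one extra chunk to disambiguate, while "d"
# is already unique after a single chunk.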
def get_standard_plots(
experiment: Experiment, generation_strategy: GenerationStrategy
) -> List[go.Figure]:
"""Extract standard plots for single-objective optimization.
Extracts a list of plots from an Experiment and GenerationStrategy of general
interest to an Ax user. Currently not supported are
- TODO: multi-objective optimization
- TODO: ChoiceParameter plots
Args:
- experiment: the Experiment from which to obtain standard plots.
- generation_strategy: the GenerationStrategy used to suggest trial parameters
in experiment
Returns:
- a plot of objective value vs. trial index, to show experiment progression
- a plot of objective value vs. range parameter values, only included if the
model associated with generation_strategy can create predictions. This
consists of:
- a plot_slice plot if the search space contains one range parameter
- an interact_contour plot if the search space contains multiple
range parameters
"""
objective = not_none(experiment.optimization_config).objective
if isinstance(objective, MultiObjective):
logger.warning(
"get_standard_plots does not currently support MultiObjective "
"optimization experiments. Returning an empty list."
)
        return []
if isinstance(objective, ScalarizedObjective):
logger.warning(
"get_standard_plots does not currently support ScalarizedObjective "
"optimization experiments. Returning an empty list."
)
        return []
output_plot_list = []
output_plot_list.append(
_get_objective_trace_plot(
experiment=experiment,
metric_name=not_none(experiment.optimization_config).objective.metric.name,
model_transitions=generation_strategy.model_transitions,
optimization_direction=(
"minimize"
if not_none(experiment.optimization_config).objective.minimize
else "maximize"
),
)
)
try:
output_plot_list.append(
_get_objective_v_param_plot(
search_space=experiment.search_space,
model=not_none(generation_strategy.model),
metric_name=not_none(
experiment.optimization_config
).objective.metric.name,
trials=experiment.trials,
)
)
except NotImplementedError:
# Model does not implement `predict` method.
pass
return [plot for plot in output_plot_list if plot is not None]
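# Hypothetical usage sketch (the AxClient attribute names are assumptions about
# the calling code, not defined in this module):
#
#   figures = get_standard_plots(ax_client.experiment, ax_client.generation_strategy)
#   for fig in figures:
#       fig.show()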
def exp_to_df(
exp: Experiment,
metrics: Optional[List[Metric]] = None,
key_components: Optional[List[str]] = None,
run_metadata_fields: Optional[List[str]] = None,
**kwargs: Any,
) -> pd.DataFrame:
"""Transforms an experiment to a DataFrame. Only supports Experiment and
SimpleExperiment.
Transforms an Experiment into a dataframe with rows keyed by trial_index
and arm_name, metrics pivoted into one row.
Args:
exp: An Experiment that may have pending trials.
metrics: Override list of metrics to return. Return all metrics if None.
key_components: fields that combine to make a unique key corresponding
to rows, similar to the list of fields passed to a GROUP BY.
Defaults to ['arm_name', 'trial_index'].
run_metadata_fields: fields to extract from trial.run_metadata for trial
in experiment.trials. If there are multiple arms per trial, these
fields will be replicated across the arms of a trial.
**kwargs: Custom named arguments, useful for passing complex
objects from call-site to the `fetch_data` callback.
Returns:
DataFrame: A dataframe of inputs and metrics by trial and arm.
"""
def prep_return(
df: pd.DataFrame, drop_col: str, sort_by: List[str]
) -> pd.DataFrame:
return not_none(not_none(df.drop(drop_col, axis=1)).sort_values(sort_by))
key_components = key_components or ["trial_index", "arm_name"]
# Accept Experiment and SimpleExperiment
if isinstance(exp, MultiTypeExperiment):
raise ValueError("Cannot transform MultiTypeExperiments to DataFrames.")
results = exp.fetch_data(metrics, **kwargs).df
if len(results.index) == 0: # Handle empty case
return results
# create key column from key_components
key_col = "-".join(key_components)
key_vals = results[key_components[0]].astype("str")
for key in key_components[1:]:
key_vals = key_vals + results[key].astype("str")
results[key_col] = key_vals
# pivot dataframe from long to wide
metric_vals = results.pivot(
index=key_col, columns="metric_name", values="mean"
).reset_index()
# dedupe results by key_components
metadata = results[key_components + [key_col]].drop_duplicates()
    metric_and_metadata = pd.merge(metric_vals, metadata, on=key_col)
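    # --- Editorial reconstruction: the original file is truncated here. Based
    # on the docstring and the prep_return helper defined above, the function
    # would go on to attach per-arm parameters and any requested
    # run_metadata_fields, then drop the synthetic key column and sort:
    #
    #   results = metric_and_metadata  # plus parameter / metadata columns
    #   return prep_return(results, drop_col=key_col, sort_by=key_components)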