prompt (string, lengths 19 to 1.03M) | completion (string, lengths 4 to 2.12k) | api (string, lengths 8 to 90)
---|---|---|
"""
(c) 2013 <NAME>
This source code is released under the Apache license.
<EMAIL>
Created on April 1, 2013
"""
import datetime as dt
import pandas as pd
import numpy as np
import random
import csv
from .order import Order
from .fincommon import FinCommon
import finpy.utils.fpdateutil as du
from finpy.utils import utils as ut
from finpy.financial.equity import get_tickdata
class Portfolio():
"""
Portfolio has three items.
equities is a panda Panel of equity data.
Reference by ticker. self.equities['AAPL']
cash is a pandas series with daily cash balance.
total is the daily balance.
order_list is a list of Order
"""
def __init__(self, equities, cash, dates, order_list=None):
self.equities = pd.concat(equities, names=["tick", "date"])
self.equities.sort_index(inplace=True)
# self.equities = self.equities.reorder_levels(order=["date", "tick"])
"""
:var equities: is a Panel of equities.
"""
        if order_list is None:
self.order = pd.DataFrame(columns=['tick', 'date', 'action', 'shares', 'price'])
self.order = self.order.set_index(["tick","date"])
else:
ol = order_list
ol.sort(key=lambda x: x.date)
self.order = pd.DataFrame.from_records([s.to_dict() for s in ol])
self.order = self.order.set_index(["tick","date"])
xi = self.order[self.order["price"].isnull()].index
self.order.loc[xi, "price"] = self.equities.loc[xi, "close"]
self.cash = pd.Series(index=dates)
self.cash[0] = cash
self.total = pd.Series(index=dates)
self.total[0] = self.dailysum(dates[0])
self.dates = dates
def dailysum(self, date):
" Calculate the total balance of the date."
equities_total = np.nansum(self.equities.xs(key=date, level=1)['shares'] * self.equities.xs(key=date, level=1)['close'])
total = equities_total + self.cash[date]
return total
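    # Illustrative sketch of the expected layout (hypothetical values):
    #   tick  date        close   shares
    #   AAPL  2013-01-02  78.43   100.0
    #   SPY   2013-01-02  146.06   50.0
    # dailysum(date) takes a cross-section at one date and returns
    # nansum(shares * close) across tickers plus that day's cash balance.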
def buy(self, shares, tick, price, date, update_ol=False):
"""
        Portfolio buy.
        Calculate total, shares, and cash up to the date.
        Before we buy, we need to update the share numbers.
"""
self.cal_total(date)
last_valid = self.equities.loc[(tick,slice(None)),'shares'].last_valid_index()[1]
self.equities.loc[(tick, slice(last_valid, date)), 'shares'] = self.equities.loc[(tick, last_valid), 'shares']
self.equities.loc[(tick, date), 'shares'] += shares
self.cash[date] -= price*shares
self.total[date] = self.dailysum(date)
        if update_ol:
            new_order = pd.DataFrame({"action": "buy", "shares": shares, "price": self.equities.loc[(tick, date), 'close']}, index=[(tick, date)])
            self.order = pd.concat([self.order, new_order])
def sell(self, shares, tick, price, date, update_ol=False):
"""
        Portfolio sell.
        Calculate shares and cash up to the date.
"""
self.cal_total(date)
last_valid = self.equities.loc[(tick,slice(None)),'shares'].last_valid_index()[1]
self.equities.loc[(tick, slice(last_valid, date)), 'shares'] = self.equities.loc[(tick, last_valid), 'shares']
self.equities.loc[(tick, date), 'shares'] -= shares
self.cash[date] += price*shares
self.total[date] = self.dailysum(date)
        if update_ol:
            new_order = pd.DataFrame({"action": "sell", "shares": shares, "price": self.equities.loc[(tick, date), 'close']}, index=[(tick, date)])
            self.order = pd.concat([self.order, new_order])
def fillna_cash(self, date):
" fillna on cash up to date "
update_start = self.cash.last_valid_index()
update_end = date
self.cash[update_start:update_end] = self.cash[update_start]
return update_start, update_end
def fillna(self, date):
"""
fillna cash and all equities.
return update_start and update_end.
"""
update_start, update_end = self.fillna_cash(date)
for tick in self.equities.index.unique(0).tolist():
self.equities.loc[(tick, slice(update_start, update_end)),'shares'] = self.equities.loc[(tick, update_start), 'shares']
return update_start, update_end
def cal_total(self, date=None):
"""
Calculate total up to "date".
"""
        if date is None:
equities_sum = pd.Series(index=self.ldt_timestamps())
each_total = self.equities.loc[(slice(None),slice(None)),'close'] * self.equities.loc[(slice(None),slice(None)),'shares']
equities_sum = each_total.groupby(level=1).sum()
self.total = self.cash + equities_sum
else:
start, end = self.fillna(date)
equities_total_df = self.equities.loc[(slice(None),slice(start,end)),'shares'] * self.equities.loc[(slice(None),slice(start,end)),'close']
equities_total = equities_total_df.groupby(level=1).sum()
self.total[start:end ] = equities_total + self.cash[start:end]
    def put_orders(self):
        """
        Apply the orders in the order DataFrame.
        Update the shares and cash columns of each equity.
        """
        for (tick, date), o in self.order.iterrows():
            if o["action"].lower() == "buy":
                self.buy(date=date, shares=float(o["shares"]), price=float(o["price"]), tick=tick)
            elif o["action"].lower() == "sell":
                self.sell(shares=float(o["shares"]), tick=tick, price=float(o["price"]), date=date)
def sim(self, ldt_timestamps=None):
"""
Go through each day and calculate total and cash.
"""
self.put_orders()
        if ldt_timestamps is None:
ldt_timestamps = self.ldt_timestamps()
dt_end = ldt_timestamps[-1]
self.cal_total()
def csvwriter(self, equity_col=None, csv_file="pf.csv", total=True, cash=True, d=','):
"""
Write the content of the Portfolio to a csv file.
        If total is True, the total is written to the csv file.
        If cash is True, the cash is written to the csv file.
        equity_col specifies which columns to write for each equity.
"""
lines = []
l = []
l.append("Date")
if total:
l.append("Total")
if cash:
l.append("Cash")
        if equity_col is not None:
            for e in self.equities.index.unique(0):
for col in equity_col:
label = e + col
l.append(label)
lines.append(l)
for i in self.ldt_timestamps():
l = []
l.append(i.strftime("%Y-%m-%d"))
if total:
l.append(round(self.total[i], 2))
if cash:
l.append(round(self.cash[i], 2))
            if equity_col is not None:
for e in self.equities.index.droplevel(1).drop_duplicates():
for col in equity_col:
l.append(round(self.equities.loc[(e, i), col], 2))
lines.append(l)
with open(csv_file, 'w') as fp:
cw = csv.writer(fp, lineterminator='\n', delimiter=d)
for line in lines:
cw.writerow(line)
def write_order_csv(self, csv_file="pf_order.csv", d=','):
self.order.reorder_levels(["date", "tick"]).to_csv(path_or_buf = csv_file, sep = d, header = False, columns = ["action", "shares"])
def daily_return(self,tick=None):
"""
Return the return rate of each day, a list.
:param tick: The ticker of the equity.
:type string:
"""
        if tick is None:
total = self.total
else:
total = self.equities.loc[(tick,slice(None)),'close'].droplevel(0)
daily_rtn = total/total.shift(1)-1
daily_rtn[0] = 0
return np.array(daily_rtn)
def avg_daily_return(self, tick=None):
" Average of the daily_return list "
return np.average(self.daily_return(tick))
def std(self, tick=None):
" Standard Deviation of the daily_return "
return np.std(self.daily_return(tick))
def normalized(self, tick=None):
start = self.ldt_timestamps()[0]
        if tick is None:
return self.total/self.total[0]
else:
return (self.equities.loc[(tick, slice(None)), 'close']/self.equities.loc[(tick, start), 'close']).droplevel(0)
def normalized_price(self, tick):
self.equities.loc[(tick, slice(None)),'open'] = self.equities.loc[(tick, slice(None)),'open'] * self.equities.loc[(tick, slice(None)),'close']/self.equities.loc[(tick, slice(None)),'actual_close']
self.equities.loc[(tick, slice(None)),'high'] = self.equities.loc[(tick, slice(None)),'high'] * self.equities.loc[(tick, slice(None)),'close']/self.equities.loc[(tick, slice(None)),'actual_close']
self.equities.loc[(tick, slice(None)),'low'] = self.equities.loc[(tick, slice(None)),'low'] * self.equities.loc[(tick, slice(None)),'close']/self.equities.loc[(tick, slice(None)),'actual_close']
def sortino(self, k=252, tick=None):
"""
Return Sortino Ratio.
        You can overwrite the coefficient with k.
The default is 252.
"""
daily_rtn = self.daily_return(tick)
negative_daily_rtn = daily_rtn[daily_rtn < 0]
sortino_dev = np.std( negative_daily_rtn)
sortino = (self.avg_daily_return(tick) / sortino_dev) * np.sqrt(k)
return sortino
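    # Worked sketch of the ratio above (hypothetical numbers): with an average
    # daily return of 0.001, a downside deviation of 0.01 and k = 252,
    # sortino = (0.001 / 0.01) * sqrt(252) ~= 1.59.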
def return_ratio(self, tick=None):
" Return the return ratio of the period "
        if tick is None:
return self.total[-1]/self.total[0]
else:
return self.equities.loc[(tick, self.ldt_timestamps()[-1]), 'close']/self.equities.loc[(tick, self.ldt_timestamps()[0]), 'close']
def moving_average(self, window=20, tick=None):
"""
        Return an array of the moving average. The window specifies how many
        days are in a window.
"""
        if tick is None:
            ma = self.total.rolling(window=window).mean()
        else:
            ma = self.equities.loc[(tick, slice(None)), 'close'].droplevel(0).rolling(window=window).mean()
        ma.iloc[0:window] = ma.iloc[window]
return ma
def drawdown(self, window=10):
"""
Find the peak within the retrospective window.
Drawdown is the difference between the peak and the current value.
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
        merged_data = self.total[pre_timestamps[0]:ldt_timestamps[-1]]
total_timestamps = merged_data.index
dd = pd.Series(index=ldt_timestamps)
j = 0
for i in range(len(pre_timestamps), len(total_timestamps)):
win_start = total_timestamps[i - window]
win_end = total_timestamps[i]
ts_value = merged_data[win_start:win_end]
current = merged_data[win_end]
peak = np.amax(ts_value)
dd[j] = (peak-current)/peak
j += 1
return dd
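    # Worked sketch of the drawdown formula (hypothetical numbers): if the peak
    # of the trailing window is 110 and the current total is 99, the drawdown
    # for that day is (110 - 99) / 110 = 0.10, i.e. 10%.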
    def random_choose_tick(self, exclude=None):
        """
        Randomly return a ticker in the portfolio.
        The tickers in the exclude list are not in the selection pool.
        """
        ex_set = set(exclude) if exclude is not None else set()
        pf_set = set(self.equities.index.unique(0))
        sel_ls = list(pf_set - ex_set)
        return random.choice(sel_ls)
def equities_long(self, date):
"""
Return the list of long equities on the date.
"Long equities" means the number of shares of the equity is greater than 0.
"""
        shares_on_date = self.equities.xs(key=date, level=1)['shares']
        return list(shares_on_date[shares_on_date > 0].index)
def ldt_timestamps(self):
"""
Return an array of datetime objects.
"""
ldt_index = self.total.index
dt_start = ldt_index[0]
dt_end = ldt_index[-1]
dt_timeofday = dt.timedelta(hours=16)
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
return ldt_timestamps
def excess_return(self, rf_tick="$TNX", tick=None):
"""
An excess return is the difference between an asset's return and the riskless rate.
"""
return self.daily_return(tick=tick) - ut.riskfree_return(self.ldt_timestamps(), rf_tick=rf_tick)
def mean_excess_return(self, rf_tick="$TNX", tick=None):
return np.mean(self.excess_return(rf_tick=rf_tick, tick=tick))
def residual_return(self, benchmark, rf_tick="$TNX", tick=None):
"""
A residual return is the excess return minus beta times the benchmark excess return.
"""
beta = self.beta(benchmark, tick)
return self.excess_return(rf_tick=rf_tick, tick=tick) - beta * self.excess_return(rf_tick=rf_tick, tick=benchmark)
def mean_residual_return(self, benchmark, rf_tick="$TNX", tick=None):
return np.mean(self.residual_return(benchmark=benchmark, rf_tick=rf_tick, tick=tick))
def residual_risk(self, benchmark, rf_tick="$TNX", tick=None):
"""
Residual Risk is the standard deviation of the residual return.
"""
return np.std(self.residual_return(benchmark=benchmark, rf_tick=rf_tick, tick=tick))
def active_return(self, benchmark, tick=None):
"""
        An active return is the difference between the actual return and the benchmark return.
"""
return self.daily_return(tick=tick) - self.daily_return(tick=benchmark)
def mean_active_return(self, benchmark, tick=None):
return np.mean(self.active_return(benchmark, tick))
def beta_alpha(self, benchmark):
"""
benchmark is an Equity representing the market.
It can be S&P 500, Russel 2000, or your choice of market indicator.
This function uses polyfit in numpy to find the closest linear equation.
"""
beta, alpha = np.polyfit(self.daily_return(tick=benchmark), self.daily_return(), 1)
return beta, alpha
def beta(self, benchmark, tick=None):
"""
benchmark is an Equity representing the market.
This function uses cov in numpy to calculate beta.
"""
benchmark_return = self.daily_return(tick=benchmark)
C = np.cov(benchmark_return, self.daily_return(tick=tick))/np.var(benchmark_return)
beta = C[0][1]/C[0][0]
return beta
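    # The calculation above is equivalent to the usual definition
    # beta = Cov(r_portfolio, r_benchmark) / Var(r_benchmark): np.cov returns
    # the 2x2 covariance matrix, so C[0][1] holds the covariance and C[0][0]
    # the benchmark variance, and the extra division by np.var(benchmark_return)
    # cancels out in the ratio.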
def excess_risk(self, rf_tick="$TNX", tick=None):
"""
$FVX is another option. Five-Year treasury rate.
An excess risk is the standard deviation of the excess return.
"""
return np.std(self.excess_return(rf_tick=rf_tick, tick=tick))
def active_risk(self, benchmark, tick=None):
"""
An active risk is the standard deviation of the active return.
"""
return np.std(self.active_return(benchmark, tick))
def info_ratio(self, benchmark, rf_tick="$TNX", tick=None):
"""
Information Ratio
https://en.wikipedia.org/wiki/Information_ratio
Information Ratio is defined as active return divided by active risk,
where active return is the difference between the return of the security
and the return of a selected benchmark index, and active risk is the
standard deviation of the active return.
"""
return self.mean_active_return(benchmark=benchmark, tick=tick)/self.active_risk(benchmark=benchmark, tick=tick)
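    # Worked sketch (hypothetical numbers): with a mean daily active return of
    # 0.0004 and an active risk (std of the active return) of 0.01, the daily
    # information ratio computed here is 0.0004 / 0.01 = 0.04.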
def appraisal_ratio(self, benchmark, rf_tick="$TNX", tick=None):
"""
Appraisal Ratio
https://en.wikipedia.org/wiki/Appraisal_ratio
Appraisal Ratio is defined as residual return divided by residual risk,
where residual return is the difference between the return of the security
and the return of a selected benchmark index, and residual risk is the
standard deviation of the residual return.
"""
return self.mean_residual_return(benchmark, rf_tick, tick)/self.residual_risk(benchmark, rf_tick, tick)
def sharpe_ratio(self, rf_tick="$TNX", tick=None):
"""
Return the Original Sharpe Ratio.
https://en.wikipedia.org/wiki/Sharpe_ratio
        rf_tick is the ten-year Treasury rate ticker at Yahoo.
"""
return self.mean_excess_return(rf_tick=rf_tick, tick=tick)/self.excess_risk(rf_tick=rf_tick, tick=tick)
def up_ratio(self, date, tick, days=10):
"""
Return the ratio of the past up days.
This function only applies to equities.
"""
ldt_index = self.ldt_timestamps()
last = date
first = date-days
up = 0.0
dn = 0.0
for i in range(first, last+1):
if self.equities.loc[(tick, ldt_index[i]), 'close'] < self.equities.loc[(tick, ldt_index[i-1]), 'close']:
dn += 1
else:
up += 1
ratio = up / (dn + up)
return ratio
def dn_ratio(self, date,tick , days=10):
"""
Return the ratio of the past down days.
This function only applies to equities.
"""
ratio = 1.0 - self.up_ratio(date=date, tick=tick, days=days)
return ratio
def rolling_normalized_stdev(self, tick, window=50):
"""
Return the rolling standard deviation of normalized price.
This function only applies to equities.
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
ldf_data = get_tickdata([tick], pre_timestamps)
pre_data = pd.concat(ldf_data, names=["tick", "date"])
merged_data = pd.concat([pre_data.loc[(tick, slice(None)), 'close'], self.equities.loc[(tick,slice(None)),'close']])
all_timestamps = pre_timestamps.append(ldt_timestamps)
merged_daily_rtn = (self.equities.loc[(tick,slice(None)),'close']/self.equities.loc[(tick,slice(None)),'close'].shift(1)-1)
merged_daily_rtn[0] = 0
sigma = merged_daily_rtn.rolling(window).std()
return sigma.droplevel(0)[self.ldt_timestamps()]
def max_rise(self, tick, date, window=20):
"""
Find the maximum change percentage between the current date and the bottom of the retrospective window.
:param tick: ticker
:type tick: string
:param date: date to calculate max_rise
:type date: datetime
:param window: The days of window to calculate max_rise.
:type window: int
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
first = pre_timestamps[0]
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
        try:
            self.equities.loc[(tick, first), 'close']
            merged_data = self.equities.loc[(tick, slice(None)), 'close']
        except KeyError:
            ldf_data = get_tickdata([tick], pre_timestamps)
            pre_data = pd.concat(ldf_data, names=["tick", "date"])
            merged_data = pd.concat([pre_data.loc[(tick, slice(None)), 'close'], self.equities.loc[(tick, slice(None)), 'close']])
        if isinstance(date, int):
int_date = ldt_timestamps[date]
else:
int_date = date
merged_data = merged_data.droplevel(0)
c = merged_data.index.get_loc(int_date)
m = merged_data[c-window:c].min()
r = (merged_data[c]-m)/merged_data[c]
return r
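    # Worked sketch (hypothetical prices): if the minimum close over the
    # trailing window is 90 and the current close is 100, max_rise returns
    # (100 - 90) / 100 = 0.10.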
def max_fall(self, tick, date, window=20):
"""
Find the change percentage between the top and the bottom of the retrospective window.
:param tick: ticker
:type tick: string
        :param date: date to calculate max_fall
        :type date: datetime
        :param window: The days of window to calculate max_fall.
:type window: int
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
first = pre_timestamps[0]
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
        try:
            self.equities.loc[(tick, first), 'close']
            merged_data = self.equities.loc[(tick, slice(None)), 'close']
        except KeyError:
            ldf_data = get_tickdata([tick], pre_timestamps)
            pre_data = pd.concat(ldf_data, names=["tick", "date"])
            merged_data = pd.concat([pre_data.loc[(tick, slice(None)), 'close'], self.equities.loc[(tick, slice(None)), 'close']])
        if isinstance(date, int):
int_date = ldt_timestamps[date]
else:
int_date = date
merged_data = merged_data.droplevel(0)
c = merged_data.index.get_loc(int_date)
mx = merged_data[c-window:c].max()
mn = merged_data[c-window:c].min()
r = (mx-mn)/merged_data[c]
return r
def moving_average(self, tick, window=20):
"""
        Return an array of the moving average. The window specifies how many
        days are in a window.
:param tick: ticker
:type tick: string
:param window: The days of window to calculate moving average.
:type window: int
"""
mi = self.bollinger_band(tick=tick, window=window, mi_only=True)
return mi
def bollinger_band(self, tick, window=20, k=2, mi_only=False):
"""
        Return four series for the Bollinger Band. The upper band is k times an
        N-period standard deviation above the moving average; the lower band is
        k times an N-period standard deviation below the moving average.
:param tick: ticker
:type tick: string
:param window: The days of window to calculate Bollinger Band.
:type window: int
        :param k: the band width, in standard deviations.
        :type k: float
        :return bo: bo['mi'] is the moving average. bo['lo'] is the lower band.
        bo['hi'] is the upper band. bo['ba'] is a series of the position of the current
        price relative to the Bollinger Band.
        :type bo: A dictionary of series.
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
ldf_data = get_tickdata([tick], pre_timestamps)
pre_data = pd.concat(ldf_data, names=["tick", "date"])
merged_data = pd.concat([pre_data.loc[(tick, slice(None)), 'close'], self.equities.loc[(tick,slice(None)),'close']]).droplevel(0)
bo = dict()
bo['mi'] = merged_data.rolling(window).mean()[ldt_timestamps]
if mi_only:
return bo['mi']
else:
sigma = merged_data.rolling(window).std()
bo['hi'] = bo['mi'] + k * sigma[ldt_timestamps]
bo['lo'] = bo['mi'] - k * sigma[ldt_timestamps]
bo['ba'] = (merged_data[ldt_timestamps] - bo['mi']) / (k * sigma[ldt_timestamps])
return bo
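    # Illustrative usage sketch, assuming a Portfolio instance pf that holds 'AAPL':
    #   bo = pf.bollinger_band(tick='AAPL', window=20, k=2)
    # bo['ba'] stays roughly between -1 and 1 while the close remains inside
    # the band and moves outside that range when the price breaks the band.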
def RSI(self, tick):
"""
Relative Strength Index
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:relative_strength_index_rsi
This function uses roughly 250 prior points to calculate RS.
:param tick: The ticker to calculate RSI
:type tick: string
:return rsi[ldt_timestamps]: RSI series
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, 250)
ldf_data = get_tickdata([tick], pre_timestamps)
        merged_data = pd.concat([ldf_data[tick]['close'], self.equities.loc[(tick, slice(None)), 'close'].droplevel(0)])
delta = merged_data.diff()
gain = pd.Series(delta[delta > 0], index=delta.index).fillna(0)
loss = pd.Series(delta[delta < 0], index=delta.index).fillna(0).abs()
avg_gain =
| completion: pd.Series(index=delta.index) | api: pandas.Series |
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from nowcasting_utils.visualization.visualization import plot_example
from nowcasting_utils.visualization.line import plot_batch_results
from nowcasting_dataset.data_sources.nwp.nwp_data_source import NWP_VARIABLE_NAMES
from nowcasting_utils.models.loss import WeightedLosses
from nowcasting_utils.models.metrics import mae_each_forecast_horizon, mse_each_forecast_horizon
from nowcasting_dataloader.batch import BatchML
from nowcasting_utils.metrics.validation import make_validation_results, save_validation_results_to_logger
import pandas as pd
import numpy as np
import logging
logger = logging.getLogger(__name__)
activities = [torch.profiler.ProfilerActivity.CPU]
if torch.cuda.is_available():
activities.append(torch.profiler.ProfilerActivity.CUDA)
default_output_variable = "pv_yield"
class BaseModel(pl.LightningModule):
# default batch_size
batch_size = 32
# results file name
results_file_name = "results_epoch"
# list of results dataframes. This is used to save validation results
results_dfs = []
def __init__(self):
super().__init__()
self.history_len_5 = (
self.history_minutes // 5
        )  # the number of historic timesteps for 5-minute data
self.forecast_len_5 = (
self.forecast_minutes // 5
        )  # the number of forecast timesteps for 5-minute data
self.history_len_30 = (
self.history_minutes // 30
        )  # the number of historic timesteps for 30-minute data
self.forecast_len_30 = (
self.forecast_minutes // 30
) # the number of forecast timestemps for 5 minutes data
# the number of historic timesteps for 60 minutes data
        # Note that ceil is taken so that, e.g., 30 minutes of history data still uses one history value
self.history_len_60 = int(np.ceil(self.history_minutes / 60))
self.forecast_len_60 = (
self.forecast_minutes // 60
        )  # the number of forecast timesteps for 60-minute data
        if not hasattr(self, "output_variable"):
            logger.debug("output_variable not set; using default %s", default_output_variable)
            self.output_variable = default_output_variable
if self.output_variable == "pv_yield":
self.forecast_len = self.forecast_len_5
self.history_len = self.history_len_5
self.number_of_samples_per_batch = 128
else:
self.forecast_len = self.forecast_len_30
self.history_len = self.history_len_30
self.number_of_samples_per_batch = 32
self.number_of_pv_samples_per_batch = 128
self.weighted_losses = WeightedLosses(forecast_length=self.forecast_len)
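        # Worked example of the timestep arithmetic above (hypothetical config):
        # history_minutes=60 and forecast_minutes=120 give history_len_5=12,
        # forecast_len_5=24, history_len_30=2, forecast_len_30=4,
        # history_len_60=1 and forecast_len_60=2.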
def _training_or_validation_step(self, batch, tag: str, return_model_outputs: bool = False):
"""
batch: The batch data
tag: either 'Train', 'Validation' , 'Test'
"""
        if isinstance(batch, dict):
batch = BatchML(**batch)
# put the batch data through the model
y_hat = self(batch)
# get the true result out. Select the first data point, as this is the pv system in the center of the image
if self.output_variable == "gsp_yield":
y = batch.gsp.gsp_yield
else:
y = batch.pv.pv_yield
y = y[0 : self.batch_size, -self.forecast_len :, 0]
# calculate mse, mae
mse_loss = F.mse_loss(y_hat, y)
nmae_loss = (y_hat - y).abs().mean()
# calculate mse, mae with exp weighted loss
mse_exp = self.weighted_losses.get_mse_exp(output=y_hat, target=y)
mae_exp = self.weighted_losses.get_mae_exp(output=y_hat, target=y)
# TODO: Compute correlation coef using np.corrcoef(tensor with
# shape (2, num_timesteps))[0, 1] on each example, and taking
# the mean across the batch?
self.log_dict(
{
f"MSE/{tag}": mse_loss,
f"NMAE/{tag}": nmae_loss,
f"MSE_EXP/{tag}": mse_exp,
f"MAE_EXP/{tag}": mae_exp,
},
on_step=True,
on_epoch=True,
            sync_dist=True  # Required for distributed training
            # (even multi-GPU on a single machine).
)
if tag != "Train":
# add metrics for each forecast horizon
mse_each_forecast_horizon_metric = mse_each_forecast_horizon(output=y_hat, target=y)
mae_each_forecast_horizon_metric = mae_each_forecast_horizon(output=y_hat, target=y)
metrics_mse = {
f"MSE_forecast_horizon_{i}/{tag}": mse_each_forecast_horizon_metric[i]
for i in range(self.forecast_len_30)
}
metrics_mae = {
f"MSE_forecast_horizon_{i}/{tag}": mae_each_forecast_horizon_metric[i]
for i in range(self.forecast_len_30)
}
self.log_dict(
{**metrics_mse, **metrics_mae},
on_step=True,
on_epoch=True,
                sync_dist=True  # Required for distributed training
                # (even multi-GPU on a single machine).
)
if return_model_outputs:
return nmae_loss, y_hat
else:
return nmae_loss
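    # Shape sketch for the step above (assuming y_hat matches the sliced target):
    # y is sliced to (batch_size, forecast_len) and compared element-wise with
    # y_hat, so nmae_loss is a single scalar per step.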
    def training_step(self, batch, batch_idx):
        return self._training_or_validation_step(batch, tag="Train")
def validation_step(self, batch: BatchML, batch_idx):
        if isinstance(batch, dict):
batch = BatchML(**batch)
# get model outputs
nmae_loss, model_output = self._training_or_validation_step(
batch, tag="Validation", return_model_outputs=True
)
INTERESTING_EXAMPLES = (1, 5, 6, 7, 9, 11, 17, 19)
name = f"validation/plot/epoch_{self.current_epoch}_{batch_idx}"
if batch_idx in [0, 1, 2, 3, 4]:
            # make sure the interesting examples don't go above the batch size
INTERESTING_EXAMPLES = (i for i in INTERESTING_EXAMPLES if i < self.batch_size)
for example_i in INTERESTING_EXAMPLES:
# 1. Plot example
if 0:
fig = plot_example(
batch,
model_output,
history_minutes=self.history_len_5 * 5,
forecast_minutes=self.forecast_len_5 * 5,
nwp_channels=NWP_VARIABLE_NAMES,
example_i=example_i,
epoch=self.current_epoch,
output_variable=self.output_variable,
)
# save fig to log
self.logger.experiment[-1].log_image(name, fig)
try:
fig.close()
except Exception as _:
# could not close figure
pass
# 2. plot summary batch of predictions and results
# make x,y data
if self.output_variable == "gsp_yield":
y = batch.gsp.gsp_yield[0 : self.batch_size, :, 0].cpu().numpy()
else:
y = batch.pv.pv_yield[0 : self.batch_size, :, 0].cpu().numpy()
y_hat = model_output[0 : self.batch_size].cpu().numpy()
time = [
pd.to_datetime(x, unit="ns")
for x in batch.gsp.gsp_datetime_index[0 : self.batch_size].cpu().numpy()
]
time_hat = [
pd.to_datetime(x, unit="ns")
for x in batch.gsp.gsp_datetime_index[
0 : self.batch_size, self.history_len_30 + 1 :
]
.cpu()
.numpy()
]
# plot and save to logger
fig = plot_batch_results(model_name=self.name, y=y, y_hat=y_hat, x=time, x_hat=time_hat)
fig.write_html(f"temp_{batch_idx}.html")
            try:
                self.logger.experiment[-1][name].upload(f"temp_{batch_idx}.html")
            except Exception:
                # could not upload the figure; ignore
                pass
# save validation results
capacity = batch.gsp.gsp_capacity[:,-self.forecast_len_30:,0].cpu().numpy()
predictions = model_output.cpu().numpy()
truths = batch.gsp.gsp_yield[:, -self.forecast_len_30:, 0].cpu().numpy()
predictions = predictions * capacity
truths = truths * capacity
results = make_validation_results(truths_mw=truths,
predictions_mw=predictions,
capacity_mwp=capacity,
gsp_ids=batch.gsp.gsp_id[:, 0].cpu(),
batch_idx=batch_idx,
t0_datetimes_utc=
| completion: pd.to_datetime(batch.metadata.t0_datetime_utc) | api: pandas.to_datetime |
"""
pygemfxns_preprocessing.py contains the model functions used to preprocess the data into the proper format.
"""
# Built-in libraries
import os
import argparse
# External libraries
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.ticker import MultipleLocator
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import pygemfxns_modelsetup as modelsetup
#import pygem_input as input
print('\ndhdt analysis performed separately using shean_mb_parallel.py\n')
# ===== INPUT DATA =====
hyps_fn = ('/Users/davidrounce/Documents/Dave_Rounce/HiMAT/IceThickness_Farinotti/output/' +
'area_km2_01_Farinotti2019_10m.csv')
icethickness_fn = ('/Users/davidrounce/Documents/Dave_Rounce/HiMAT/IceThickness_Farinotti/output/' +
'thickness_m_01_Farinotti2019_10m.csv')
#dataset_name = 'berthier'
dataset_name = 'braun'
if dataset_name == 'berthier':
dems_output_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/DEMs/Berthier/output/'
mb_summary_fn_list = ['AK_Pen_mb_20190912_2256.csv', 'AR_C_mb_20190913_0735.csv', 'AR_E_mb_20190913_0735.csv',
'AR_W_mb_20190913_0835.csv', 'Chugach_mb_20190913_0744.csv', 'Coast_mb_20190912_2308.csv',
'Kenai_mb_20190912_2301.csv', 'StElias_mb_20190913_0836.csv']
mb_summary_fn = 'AK_all_20190913.csv'
mb_mwea_all_fn = 'AK_all_20190913_wextrapolations.csv'
reg_t1_dict = {2: 1953., 3: 1950., 4: 1952., 5: 1968., 6: 1966, 9999: 1957.8}
reg_t2_dict = {2: 2004.75, 3: 2007.75, 4: 2007.75, 5: 2006.75, 6: 2007.75, 9999: 2006.75}
obs_type = 'mb_geo'
elif dataset_name == 'braun':
dems_output_fp = '/Users/davidrounce/Documents/Dave_Rounce/HiMAT/DEMs/Braun/output/'
mb_summary_fn_list = ['Braun_mb_20190924_all.csv']
mb_summary_fn = 'braun_AK_all_20190924.csv'
mb_mwea_all_fn = 'braun_AK_all_20190924_wextrapolations.csv'
reg_t1_dict = {1: 2000.128, 2: 2000.128, 3: 2000.128, 4: 2000.128, 5: 2000.128, 6: 2000.128, 9999: 2000.128}
reg_t2_dict = {1: 2012., 2: 2012., 3: 2012., 4: 2012., 5: 2012., 6: 2012., 9999: 2012.}
obs_type = 'mb_geo'
binned_fp = dems_output_fp + 'csv/'
fig_fp = dems_output_fp + 'figures/all/'
if not os.path.exists(fig_fp):
os.makedirs(fig_fp)
valid_perc_threshold = 90
min_area_km2 = 3
mb_cn = 'mb_bin_med_mwea'
mb_max = 2.5
mb_min = -5
option_normelev = 'huss' # Terminus = 1, Top = 0
#option_normelev = 'larsen' # Terminus = 0, Top = 1
#Binned CSV column name conversion dictionary
# change column names so they are easier to work with (remove spaces, etc.)
sheancoldict = {'# bin_center_elev_m': 'bin_center_elev_m',
' z1_bin_count_valid': 'z1_bin_count_valid',
' z1_bin_area_valid_km2': 'z1_bin_area_valid_km2',
' z1_bin_area_perc': 'z1_bin_area_perc',
' z2_bin_count_valid': 'z2_bin_count_valid',
' z2_bin_area_valid_km2': 'z2_bin_area_valid_km2',
' z2_bin_area_perc': 'z2_bin_area_perc',
' dhdt_bin_count' : 'dhdt_bin_count',
' dhdt_bin_area_valid_km2' : 'dhdt_bin_area_valid_km2',
' dhdt_bin_area_perc' : 'dhdt_bin_area_perc',
' dhdt_bin_med_ma': 'dhdt_bin_med_ma',
' dhdt_bin_mad_ma': 'dhdt_bin_mad_ma',
' dhdt_bin_mean_ma': 'dhdt_bin_mean_ma',
' dhdt_bin_std_ma': 'dhdt_bin_std_ma',
' mb_bin_med_mwea': 'mb_bin_med_mwea',
' mb_bin_mad_mwea': 'mb_bin_mad_mwea',
' mb_bin_mean_mwea': 'mb_bin_mean_mwea',
' mb_bin_std_mwea': 'mb_bin_std_mwea',
' debris_thick_med_m': 'debris_thick_med_m',
' debris_thick_mad_m': 'debris_thick_mad_m',
' perc_debris': 'perc_debris',
' perc_pond': 'perc_pond',
' perc_clean': 'perc_clean',
' dhdt_debris_med' : 'dhdt_debris_med',
' dhdt_pond_med' : 'dhdt_pond_med',
' dhdt_clean_med' : 'dhdt_clean_med',
' vm_med' : 'vm_med',
' vm_mad' : 'vm_mad',
' H_mean' : 'H_mean',
' H_std' : 'H_std'}
def norm_stats(norm_list, option_normelev=option_normelev, option_norm_limits=False):
"""
Statistics associated with normalized elevation data
Parameters
----------
norm_list : list of np.array
each item is a np.array (col 1: normalized elevation, col 2: mb, dhdt, normalized mb, or normalized dhdt)
option_norm_limits : boolean
option to place limits on the normalized dh/dt of 0 and 1
Returns
-------
norm_all_stats : pd.DataFrame
statistics associated with the normalized values
"""
# Merge norm_list to make array of all glaciers with same elevation normalization space
# max_length = len(max(norm_list,key=len)) #len of glac w most norm values
# norm_all = np.zeros((max_length, len(norm_list)+1)) #array: each col a glac, each row a norm dhdt val to be interpolated
# # First column is normalized elevation, pulled from the glac with most norm vals
# norm_all[:,0] = max(norm_list,key=len)[:,0]
# Interpolate to common normalized elevation for all glaciers
norm_elev = np.arange(0,1.01,0.01)
norm_all = np.zeros((len(norm_elev), len(norm_list)+1)) #array: each col a glac, each row norm dhdt val interpolated
norm_all[:,0] = norm_elev
# Loop through each glacier's normalized array (where col1 is elev_norm and col2 is mb or dhdt)
for n, norm_single in enumerate(norm_list):
if option_normelev == 'huss':
norm_single = norm_single[::-1]
# Fill in nan values for elev_norm of 0 and 1 with nearest neighbor
nonan_idx = np.where(~np.isnan(norm_single[:,1]))[0]
norm_single[0,1] = norm_single[nonan_idx[0], 1]
norm_single[-1,1] = norm_single[nonan_idx[-1], 1]
# Remove nan values.
norm_single = norm_single[nonan_idx]
elev_single = norm_single[:,0] #set name for first col of a given glac
dhdt_single = norm_single[:,1] #set name for the second col of a given glac
#loop through each dhdt value of the glacier, and add it and interpolate to add to the norm_all array.
for r in range(0, norm_all.shape[0]):
if r == 0:
# put first value dhdt value into the norm_all. n+1 because the first col is taken by the elevnorms
norm_all[r,n+1] = dhdt_single[0]
elif r == (norm_all.shape[0] - 1):
#put last value into the the last row for the glacier's 'stretched out'(interpolated) normalized curve
norm_all[r,n+1] = dhdt_single[-1]
else:
# Find value need to interpolate to
norm_elev_value = norm_all[r,0] #go through each row in the elev (col1)
# Find index of value above it from dhdt_norm, which is a different size
upper_idx = np.where(elev_single == elev_single[elev_single >= norm_elev_value].min())[0][0]
# Find index of value below it
lower_idx = np.where(elev_single == elev_single[elev_single < norm_elev_value].max())[0][0]
#get the two values, based on the indices.
upper_elev = elev_single[upper_idx]
upper_value = dhdt_single[upper_idx]
lower_elev = elev_single[lower_idx]
lower_value = dhdt_single[lower_idx]
#Linearly Interpolate between two values, and plug in interpolated value into norm_all
norm_all[r,n+1] = (lower_value + (norm_elev_value - lower_elev) / (upper_elev - lower_elev) *
(upper_value - lower_value))
# Compute mean and standard deviation
norm_all_stats = pd.DataFrame()
norm_all_stats['norm_elev'] = norm_all[:,0]
norm_all_stats['norm_dhdt_med'] = np.nanmedian(norm_all[:,1:], axis=1)
    norm_all_stats['norm_dhdt_nmad'] = (1.483 *
            np.median(np.absolute((norm_all[:,1:] - norm_all_stats['norm_dhdt_med'].values[:,np.newaxis])), axis=1))
norm_all_stats['norm_dhdt_mean'] = np.nanmean(norm_all[:,1:], axis=1)
norm_all_stats['norm_dhdt_std'] = np.nanstd(norm_all[:,1:], axis=1)
norm_all_stats['norm_dhdt_68high'] = norm_all_stats['norm_dhdt_mean'] + norm_all_stats['norm_dhdt_std']
norm_all_stats['norm_dhdt_68low'] = norm_all_stats['norm_dhdt_mean'] - norm_all_stats['norm_dhdt_std']
if option_norm_limits:
norm_all_stats.loc[norm_all_stats['norm_dhdt_68high'] > 1, 'norm_dhdt_68high'] = 1
norm_all_stats.loc[norm_all_stats['norm_dhdt_68low'] < 0, 'norm_dhdt_68low'] = 0
return norm_all_stats
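# Illustrative sketch of the norm_stats input and output (hypothetical values):
#   norm_list = [np.array([[0.0, -2.1], [0.5, -1.0], [1.0, 0.1]]), ...]
# i.e. each item has normalized elevation in column 0 and mb/dhdt in column 1.
# The returned DataFrame has one row per 0.01 step of normalized elevation with
# median, NMAD, mean, std and +/- 1 std columns computed across the glaciers.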
# ===== START PROCESSING =====
# Load mass balance summary data
if os.path.exists(dems_output_fp + mb_summary_fn):
mb_summary = pd.read_csv(dems_output_fp + mb_summary_fn)
else:
# Merge files
for n_fn, fn in enumerate(mb_summary_fn_list):
mb_summary_subset = pd.read_csv(dems_output_fp + fn)
mb_summary_subset['region'] = fn.split('_mb')[0]
if n_fn == 0:
mb_summary = mb_summary_subset
else:
            mb_summary = pd.concat([mb_summary, mb_summary_subset])
# Sort and add glacier number
mb_summary = mb_summary.sort_values('RGIId')
mb_summary.reset_index(inplace=True, drop=True)
mb_summary['glacno'] = [str(int(x)).zfill(2) + '.' + str(int(np.round(x%1*10**5))).zfill(5)
for x in mb_summary['RGIId']]
# Export dataset
mb_summary.to_csv(dems_output_fp + mb_summary_fn, index=False)
# ===== PROCESS DATA =====
print('Glaciers total:', mb_summary.shape[0])
if not isinstance(mb_summary.loc[0, 'glacno'], str):
mb_summary['glacno'] = [str(int(x)).zfill(2) + '.' + str(int(np.round(x%1*10**5))).zfill(5)
for x in mb_summary['RGIId']]
mb_summary = mb_summary.loc[mb_summary['valid_area_perc'] >= valid_perc_threshold]
mb_summary.reset_index(inplace=True, drop=True)
mb_summary = mb_summary.loc[mb_summary['area_m2'] / 1e6 >= min_area_km2]
mb_summary.reset_index(inplace=True, drop=True)
print('Glaciers total after % threshold:', mb_summary.shape[0])
glacno_list = list(mb_summary.glacno.values)
main_glac_rgi = modelsetup.selectglaciersrgitable(glac_no=glacno_list)
# ===== BINNED DATA =====
binned_list = []
for glacno in glacno_list:
csv_str = str(int(glacno.split('.')[0])) + '.' + glacno.split('.')[1]
for i in os.listdir(binned_fp):
if i.startswith(csv_str) and i.endswith('_mb_bins.csv'):
binned_ds = pd.read_csv(binned_fp + i, na_values=' nan')
# Rename columns so they are easier to read
binned_ds = binned_ds.rename(columns=sheancoldict)
# Remove bad values of dhdt
binned_ds.loc[binned_ds[mb_cn] > mb_max, mb_cn] = np.nan
binned_ds.loc[binned_ds[mb_cn] < mb_min, mb_cn] = np.nan
# If dhdt is nan, remove row
null_bins = binned_ds.loc[pd.isnull(binned_ds[mb_cn])].index.values
binned_ds = binned_ds.drop(null_bins)
# ===== BINNED DATA NORMALIZATIONS =====
elev_cn = binned_ds.columns[0]
glac_elev = binned_ds[elev_cn].values
glac_mb = binned_ds[mb_cn].values.astype(float)
# Larsen normalization (terminus = 0, top = 1)
if option_normelev == 'larsen':
binned_ds['elev_norm'] = (glac_elev - glac_elev[0]) / (glac_elev[-1] - glac_elev[0])
# Huss normalization (terminus = 1, top = 0)
elif option_normelev == 'huss':
binned_ds['elev_norm'] = (glac_elev[-1] - glac_elev) / (glac_elev[-1] - glac_elev[0])
# Normalized ice thickness change [ma]
# dhdt / dhdt_max
# Shifted normalized ice thickness change such that everything is negative
binned_ds['mb_norm_shifted'] = (glac_mb - np.nanmax(glac_mb)) / np.nanmin(glac_mb - np.nanmax(glac_mb))
binned_ds.loc[binned_ds['mb_norm_shifted'] == -0, 'mb_norm_shifted'] = 0
# Replace positive values to zero
glac_mb[glac_mb >= 0] = 0
binned_ds['mb_norm_huss'] = glac_mb / np.nanmin(glac_mb)
binned_ds.loc[binned_ds['mb_norm_huss'] == -0, 'mb_norm_huss'] = 0
# Append to list
binned_list.append(binned_ds)
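# Worked sketch of the two normalization conventions above (hypothetical bin
# elevations): for bin centers [300, 800, 1300] m, the Larsen convention maps
# them to [0.0, 0.5, 1.0] (terminus = 0, top = 1), while the Huss convention
# maps them to [1.0, 0.5, 0.0] (terminus = 1, top = 0).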
#%% ===== ELEVATION VS MASS BALANCE PLOTS======
# List of np.array where first column is elev_norm and second column is mass balance
elev_mb_list = [np.array([i[elev_cn].values, i[mb_cn].values]).transpose() for i in binned_list]
normelev_mb_list = [np.array([i['elev_norm'].values, i[mb_cn].values]).transpose() for i in binned_list]
normelev_mb_stats = norm_stats(normelev_mb_list)
# Estimate a curve
def curve_func(x, a, b, c, d):
return (x + a)**d + b * (x + a) + c
p0 = [1,1,1,1]
coeffs, matcov = curve_fit(curve_func, normelev_mb_stats['norm_elev'].values,
normelev_mb_stats['norm_dhdt_med'].values, p0, maxfev=10000)
curve_x = np.arange(0,1.01,0.01)
curve_y = curve_func(curve_x, coeffs[0], coeffs[1], coeffs[2], coeffs[3])
# Plot
figwidth, figheight = 6.5, 8
fig, ax = plt.subplots(2, 1, squeeze=False, sharex=False, sharey=False,
figsize=(figwidth,figheight), gridspec_kw = {'wspace':0.4, 'hspace':0.25})
max_elev = 0
for n, i in enumerate(elev_mb_list):
glac_elev = i[:,0]
glac_mb = i[:,1]
glac_elev_norm = normelev_mb_list[n][:,0]
if glac_elev.max() > max_elev:
max_elev = glac_elev.max()
max_elev = np.ceil(max_elev/500)*500
# Elevation vs MB
ax[0,0].plot(glac_elev, glac_mb, linewidth=0.5, alpha=0.5)
# Norm Elevation vs MB
# note: zorder overrides alpha, only alpha if same zorder
ax[1,0].plot(glac_elev_norm, glac_mb, linewidth=0.5, alpha=0.2, zorder=1)
ax[1,0].plot(normelev_mb_stats['norm_elev'], normelev_mb_stats['norm_dhdt_med'],
color='k', linewidth=1, alpha=1, zorder=2)
ax[1,0].fill_between(normelev_mb_stats['norm_elev'], normelev_mb_stats['norm_dhdt_med'],
normelev_mb_stats['norm_dhdt_med'] + normelev_mb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=1)
ax[1,0].fill_between(normelev_mb_stats['norm_elev'], normelev_mb_stats['norm_dhdt_med'],
normelev_mb_stats['norm_dhdt_med'] - normelev_mb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=1)
ax[1,0].plot(curve_x, curve_y,
color='k', linewidth=1, alpha=1, linestyle='--', zorder=2)
# niceties - Elevation vs MB
ax[0,0].set_xlabel('Elevation (m a.s.l.)', fontsize=12)
ax[0,0].xaxis.set_major_locator(MultipleLocator(500))
ax[0,0].xaxis.set_minor_locator(MultipleLocator(100))
ax[0,0].set_xlim(0, max_elev)
ax[0,0].set_ylabel('Mass Balance (m w.e. $\mathregular{yr^{-1}}$)', fontsize=12, labelpad=10)
ax[0,0].axhline(y=0, color='k', linestyle='-', linewidth=0.5)
ax[0,0].yaxis.set_major_locator(MultipleLocator(0.5))
ax[0,0].yaxis.set_minor_locator(MultipleLocator(0.1))
# niceties - Norm Elevation vs MB
ax[1,0].set_xlabel('Normalized Elevation (-)', fontsize=12)
ax[1,0].xaxis.set_major_locator(MultipleLocator(0.25))
ax[1,0].xaxis.set_minor_locator(MultipleLocator(0.05))
ax[1,0].set_xlim(0,1)
ax[1,0].set_ylabel('Mass Balance (m w.e. $\mathregular{yr^{-1}}$)', fontsize=12, labelpad=10)
#ax[1,0].axhline(y=0, color='k', linestyle='--', linewidth=0.5)
ax[1,0].yaxis.set_major_locator(MultipleLocator(0.5))
ax[1,0].yaxis.set_minor_locator(MultipleLocator(0.1))
ax[1,0].set_ylim(-3,1.5)
# Save figure
fig.set_size_inches(figwidth,figheight)
figure_fn = 'elev_mb_all_gt' + str(valid_perc_threshold) + 'pct_gt' + str(min_area_km2) + 'km2.png'
fig.savefig(fig_fp + figure_fn, bbox_inches='tight', dpi=300)
#%% ===== REGIONAL ELEVATION VS MASS BALANCE PLOTS======
subregions = 'O2Regions'
if subregions == 'Berthier':
regions = sorted(set(mb_summary.region.values))
subregion_dict = {}
for region in regions:
subregion_dict[region] = region
elif subregions == 'O2Regions':
regions = sorted(set(main_glac_rgi.O2Region.values))
    subregion_dict = {2:'Alaska Range', 3:'Alaska Peninsula', 4:'W Chugach Mtns', 5:'St Elias Mtns', 6:'N Coast Ranges',
                      9999:'All Alaska'}
reg_normelev_mb_dict = {}
regions.append(9999)
ncols = 2
nrows = int(np.ceil(len(regions)/ncols))
figwidth, figheight = 6.5, 8
fig, ax = plt.subplots(nrows, ncols, squeeze=False, sharex=False, sharey=False,
figsize=(figwidth,figheight), gridspec_kw = {'wspace':0.25, 'hspace':0.4})
ncol = 0
nrow = 0
for region in regions:
if subregions == 'Berthier':
reg_idx = list(np.where(mb_summary['region'] == region)[0])
elif subregions =='O2Regions':
reg_idx = list(np.where(main_glac_rgi['O2Region'] == region)[0])
if region == 9999:
reg_idx = main_glac_rgi.index.values
print(subregion_dict[region], 'glacier area mean/median:', np.round(main_glac_rgi.loc[reg_idx, 'Area'].mean(),1),
np.round(main_glac_rgi.loc[reg_idx, 'Area'].median(),1), np.round(main_glac_rgi.loc[reg_idx, 'Area'].std(),1))
reg_binned_list = [binned_list[i] for i in reg_idx]
reg_elev_mb_list = [np.array([i[elev_cn].values, i[mb_cn].values]).transpose() for i in reg_binned_list]
reg_normelev_mb_list = [np.array([i['elev_norm'].values, i[mb_cn].values]).transpose() for i in reg_binned_list]
reg_normelev_mb_stats = norm_stats(reg_normelev_mb_list)
reg_normelev_mb_dict[region] = dict(zip((reg_normelev_mb_stats['norm_elev'].values * 100).astype(int),
reg_normelev_mb_stats['norm_dhdt_med'].values))
if region == 9999:
normelev_all = reg_normelev_mb_stats['norm_elev']
dhdt_all = reg_normelev_mb_stats['norm_dhdt_med']
for n, i in enumerate(reg_elev_mb_list):
glac_elev = i[:,0]
glac_mb = i[:,1]
glac_elev_norm = reg_normelev_mb_list[n][:,0]
# Norm Elevation vs MB
# note: zorder overrides alpha, only alpha if same zorder
ax[nrow,ncol].plot(glac_elev_norm, glac_mb, linewidth=0.5, alpha=0.2, zorder=1)
# Regional curve
ax[nrow,ncol].plot(reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
color='k', linewidth=1, alpha=1, zorder=2)
ax[nrow,ncol].fill_between(reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
reg_normelev_mb_stats['norm_dhdt_med'] + reg_normelev_mb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=1)
ax[nrow,ncol].fill_between(reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
reg_normelev_mb_stats['norm_dhdt_med'] - reg_normelev_mb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=1)
# niceties - Norm Elevation vs MB
ax[nrow,ncol].xaxis.set_major_locator(MultipleLocator(0.25))
ax[nrow,ncol].xaxis.set_minor_locator(MultipleLocator(0.05))
ax[nrow,ncol].set_xlim(0,1)
ax[nrow,ncol].yaxis.set_major_locator(MultipleLocator(1))
ax[nrow,ncol].yaxis.set_minor_locator(MultipleLocator(0.25))
ax[nrow,ncol].set_ylim(-3,1.5)
# Title
region_nglac = len(reg_normelev_mb_list)
ax[nrow,ncol].text(0.5, 1.01, subregion_dict[region] + ' (' + str(region_nglac) + ' glaciers)', size=10,
horizontalalignment='center', verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
# Adjust row and column
ncol += 1
if ncol == ncols:
nrow += 1
ncol = 0
# Add All Alaska empirical curve to each glacier
ncol = 0
nrow = 0
for region in regions:
if region != 9999:
ax[nrow,ncol].plot(normelev_all, dhdt_all, color='y', linewidth=1, alpha=1, linestyle='--', zorder=4)
# Adjust row and column
ncol += 1
if ncol == ncols:
nrow += 1
ncol = 0
# Y-label
fig.text(0.04, 0.5, 'Mass Balance (m w.e. $\mathregular{yr^{-1}}$)', va='center', ha='center',
rotation='vertical', size=12)
fig.text(0.5, 0.08, 'Normalized Elevation', va='center', ha='center', size=12)
# Save figure
fig.set_size_inches(figwidth,figheight)
figure_fn = 'elev_mb_regional_gt' + str(valid_perc_threshold) + 'pct_gt' + str(min_area_km2) + 'km2.png'
fig.savefig(fig_fp + figure_fn, bbox_inches='tight', dpi=300)
#%% ==== SIZE: ELEVATION VS MASS BALANCE PLOTS======
if min_area_km2 < 5:
group_names = ['Area < 5 km$^{2}$', '5 km$^{2}$ < Area <= 20 km$^{2}$', 'Area > 20 km$^{2}$', 'All Alaska']
group_thresholds = [(0,5), (5, 20), (20, np.inf)]
else:
group_names = ['5 km$^{2}$ < Area <= 20 km$^{2}$', 'Area > 20 km$^{2}$', 'All Alaska']
group_thresholds = [(5, 20), (20, np.inf)]
group_idx = []
for group_threshold in group_thresholds:
group_idx.append(list(main_glac_rgi[(main_glac_rgi.Area > group_threshold[0]) &
(main_glac_rgi.Area <= group_threshold[1])].index.values))
group_idx.append(list(main_glac_rgi.index.values))
group_ncols = 2
group_nrows = int(np.ceil(len(group_names)/group_ncols))
figwidth, figheight = 6.5, 8
fig, ax = plt.subplots(group_nrows, group_ncols, squeeze=False, sharex=False, sharey=False,
figsize=(figwidth,figheight), gridspec_kw = {'wspace':0.25, 'hspace':0.4})
ncol = 0
nrow = 0
for ngroup, group_name in enumerate(group_names):
reg_idx = group_idx[ngroup]
reg_binned_list = [binned_list[i] for i in reg_idx]
reg_elev_mb_list = [np.array([i[elev_cn].values, i[mb_cn].values]).transpose() for i in reg_binned_list]
reg_normelev_mb_list = [np.array([i['elev_norm'].values, i[mb_cn].values]).transpose() for i in reg_binned_list]
reg_normelev_mb_stats = norm_stats(reg_normelev_mb_list)
    reg_normelev_mb_dict[group_name] = dict(zip((reg_normelev_mb_stats['norm_elev'].values * 100).astype(int),
reg_normelev_mb_stats['norm_dhdt_med'].values))
if group_name in ['All Alaska']:
normelev_all = reg_normelev_mb_stats['norm_elev']
dhdt_all = reg_normelev_mb_stats['norm_dhdt_med']
for n, i in enumerate(reg_elev_mb_list):
glac_elev = i[:,0]
glac_mb = i[:,1]
glac_elev_norm = reg_normelev_mb_list[n][:,0]
# Norm Elevation vs MB
# note: zorder overrides alpha, only alpha if same zorder
ax[nrow,ncol].plot(glac_elev_norm, glac_mb, linewidth=0.5, alpha=0.2, zorder=1)
# Regional curve
ax[nrow,ncol].plot(reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
color='k', linewidth=1, alpha=1, zorder=2)
ax[nrow,ncol].fill_between(reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
reg_normelev_mb_stats['norm_dhdt_med'] + reg_normelev_mb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=1)
ax[nrow,ncol].fill_between(reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
reg_normelev_mb_stats['norm_dhdt_med'] - reg_normelev_mb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=1)
# niceties - Norm Elevation vs MB
ax[nrow,ncol].xaxis.set_major_locator(MultipleLocator(0.25))
ax[nrow,ncol].xaxis.set_minor_locator(MultipleLocator(0.05))
ax[nrow,ncol].set_xlim(0,1)
ax[nrow,ncol].yaxis.set_major_locator(MultipleLocator(1))
ax[nrow,ncol].yaxis.set_minor_locator(MultipleLocator(0.25))
ax[nrow,ncol].set_ylim(-5,1.5)
# Title
region_nglac = len(reg_normelev_mb_list)
ax[nrow,ncol].text(0.5, 1.01, group_name + ' (' + str(region_nglac) + ' glaciers)', size=10,
horizontalalignment='center', verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
# Adjust row and column
ncol += 1
if ncol == group_ncols:
nrow += 1
ncol = 0
# Add All Alaska curve to each glacier
ncol = 0
nrow = 0
for group_name in group_names:
if group_name not in ['All Alaska']:
# Fitted curve
ax[nrow,ncol].plot(normelev_all, dhdt_all, color='y', linewidth=1, alpha=1, linestyle='--', zorder=4)
# Adjust row and column
ncol += 1
if ncol == group_ncols:
nrow += 1
ncol = 0
# Y-label
fig.text(0.04, 0.5, 'Mass Balance (m w.e. $\mathregular{yr^{-1}}$)', va='center', ha='center',
rotation='vertical', size=12)
fig.text(0.5, 0.08, 'Normalized Elevation', va='center', ha='center', size=12)
# Save figure
fig.set_size_inches(figwidth,figheight)
figure_fn = 'elev_mb_SIZE_gt' + str(valid_perc_threshold) + 'pct_gt' + str(min_area_km2) + 'km2.png'
fig.savefig(fig_fp + figure_fn, bbox_inches='tight', dpi=300)
#%% ==== TERMINUS TYPE: ELEVATION VS MASS BALANCE PLOTS======
group_names = ['Land', 'Tidewater', 'Lake', 'All Alaska']
group_idx = []
for group_value in [0,1,2]:
group_idx.append(list(main_glac_rgi[main_glac_rgi.TermType == group_value].index.values))
group_idx.append(list(main_glac_rgi.index.values))
group_ncols = 2
group_nrows = int(np.ceil(len(group_names)/group_ncols))
figwidth, figheight = 6.5, 8
fig, ax = plt.subplots(group_nrows, group_ncols, squeeze=False, sharex=False, sharey=False,
figsize=(figwidth,figheight), gridspec_kw = {'wspace':0.25, 'hspace':0.4})
ncol = 0
nrow = 0
for ngroup, group_name in enumerate(group_names):
reg_idx = group_idx[ngroup]
reg_binned_list = [binned_list[i] for i in reg_idx]
reg_elev_mb_list = [np.array([i[elev_cn].values, i[mb_cn].values]).transpose() for i in reg_binned_list]
reg_normelev_mb_list = [np.array([i['elev_norm'].values, i[mb_cn].values]).transpose() for i in reg_binned_list]
reg_normelev_mb_stats = norm_stats(reg_normelev_mb_list)
    reg_normelev_mb_dict[group_name] = dict(zip((reg_normelev_mb_stats['norm_elev'].values * 100).astype(int),
reg_normelev_mb_stats['norm_dhdt_med'].values))
if group_name in ['All Alaska']:
normelev_all = reg_normelev_mb_stats['norm_elev']
dhdt_all = reg_normelev_mb_stats['norm_dhdt_med']
for n, i in enumerate(reg_elev_mb_list):
glac_elev = i[:,0]
glac_mb = i[:,1]
glac_elev_norm = reg_normelev_mb_list[n][:,0]
# Norm Elevation vs MB
# note: zorder overrides alpha, only alpha if same zorder
ax[nrow,ncol].plot(glac_elev_norm, glac_mb, linewidth=0.5, alpha=0.2, zorder=1)
# Regional curve
ax[nrow,ncol].plot(reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
color='k', linewidth=1, alpha=1, zorder=2)
ax[nrow,ncol].fill_between(reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
reg_normelev_mb_stats['norm_dhdt_med'] + reg_normelev_mb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=1)
ax[nrow,ncol].fill_between(reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
reg_normelev_mb_stats['norm_dhdt_med'] - reg_normelev_mb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=1)
# niceties - Norm Elevation vs MB
ax[nrow,ncol].xaxis.set_major_locator(MultipleLocator(0.25))
ax[nrow,ncol].xaxis.set_minor_locator(MultipleLocator(0.05))
ax[nrow,ncol].set_xlim(0,1)
ax[nrow,ncol].yaxis.set_major_locator(MultipleLocator(1))
ax[nrow,ncol].yaxis.set_minor_locator(MultipleLocator(0.25))
ax[nrow,ncol].set_ylim(-5,1.5)
# Title
region_nglac = len(reg_normelev_mb_list)
ax[nrow,ncol].text(0.5, 1.01, group_name + ' (' + str(region_nglac) + ' glaciers)', size=10,
horizontalalignment='center', verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
# Adjust row and column
ncol += 1
if ncol == group_ncols:
nrow += 1
ncol = 0
# Add All Alaska empirical curve to each glacier
ncol = 0
nrow = 0
for ngroup, group_name in enumerate(group_names):
if group_name not in ['All Alaska']:
ax[nrow,ncol].plot(normelev_all, dhdt_all, color='y', linewidth=1, alpha=1, linestyle='--', zorder=4)
# Adjust row and column
ncol += 1
if ncol == group_ncols:
nrow += 1
ncol = 0
# Y-label
fig.text(0.04, 0.5, 'Mass Balance (m w.e. $\mathregular{yr^{-1}}$)', va='center', ha='center',
rotation='vertical', size=12)
fig.text(0.5, 0.08, 'Normalized Elevation', va='center', ha='center', size=12)
# Save figure
fig.set_size_inches(figwidth,figheight)
figure_fn = 'elev_mb_TERMTYPE_gt' + str(valid_perc_threshold) + 'pct_gt' + str(min_area_km2) + 'km2.png'
fig.savefig(fig_fp + figure_fn, bbox_inches='tight', dpi=300)
#%% ==== TERMINUS TYPE & SIZE: ELEVATION VS MASS BALANCE PLOTS======
group_names = ['Tidewater', 'Lake', 'Land (A < 5 km$^{2}$)', 'Land (5 < A < 20 km$^{2}$)',
'Land (A > 20 km$^{2}$)', 'All Alaska']
group_idx = []
for group_name in group_names:
if group_name == 'Tidewater':
group_idx.append(list(main_glac_rgi[main_glac_rgi.TermType == 1].index.values))
elif group_name == 'Lake':
group_idx.append(list(main_glac_rgi[main_glac_rgi.TermType == 2].index.values))
elif group_name == 'Land (A < 5 km$^{2}$)':
group_idx.append(list(main_glac_rgi[(main_glac_rgi.TermType == 0) & (main_glac_rgi.Area <= 5)].index.values))
elif group_name == 'Land (5 < A < 20 km$^{2}$)':
group_idx.append(list(main_glac_rgi[(main_glac_rgi.TermType == 0) & (main_glac_rgi.Area > 5) &
(main_glac_rgi.Area <= 20)].index.values))
elif group_name == 'Land (A > 20 km$^{2}$)':
group_idx.append(list(main_glac_rgi[(main_glac_rgi.TermType == 0) & (main_glac_rgi.Area > 20)].index.values))
elif group_name == 'All Alaska':
group_idx.append(list(main_glac_rgi.index.values))
group_ncols = 2
group_nrows = int(np.ceil(len(group_names)/group_ncols))
figwidth, figheight = 6.5, 8
fig, ax = plt.subplots(group_nrows, group_ncols, squeeze=False, sharex=False, sharey=False,
figsize=(figwidth,figheight), gridspec_kw = {'wspace':0.25, 'hspace':0.4})
ncol = 0
nrow = 0
for ngroup, group_name in enumerate(group_names):
reg_idx = group_idx[ngroup]
reg_binned_list = [binned_list[i] for i in reg_idx]
reg_elev_mb_list = [np.array([i[elev_cn].values, i[mb_cn].values]).transpose() for i in reg_binned_list]
reg_normelev_mb_list = [np.array([i['elev_norm'].values, i[mb_cn].values]).transpose() for i in reg_binned_list]
reg_normelev_mb_stats = norm_stats(reg_normelev_mb_list)
    reg_normelev_mb_dict[group_name] = dict(zip((reg_normelev_mb_stats['norm_elev'].values * 100).astype(int),
reg_normelev_mb_stats['norm_dhdt_med'].values))
if group_name in ['All Alaska']:
normelev_all = reg_normelev_mb_stats['norm_elev']
dhdt_all = reg_normelev_mb_stats['norm_dhdt_med']
for n, i in enumerate(reg_elev_mb_list):
glac_elev = i[:,0]
glac_mb = i[:,1]
glac_elev_norm = reg_normelev_mb_list[n][:,0]
# Norm Elevation vs MB
# note: zorder overrides alpha, only alpha if same zorder
ax[nrow,ncol].plot(glac_elev_norm, glac_mb, linewidth=0.5, alpha=0.2, zorder=1)
# Regional curve
ax[nrow,ncol].plot(reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
color='k', linewidth=1, alpha=1, zorder=2)
ax[nrow,ncol].fill_between(reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
reg_normelev_mb_stats['norm_dhdt_med'] + reg_normelev_mb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=1)
ax[nrow,ncol].fill_between(reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
reg_normelev_mb_stats['norm_dhdt_med'] - reg_normelev_mb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=1)
# niceties - Norm Elevation vs MB
ax[nrow,ncol].xaxis.set_major_locator(MultipleLocator(0.25))
ax[nrow,ncol].xaxis.set_minor_locator(MultipleLocator(0.05))
ax[nrow,ncol].set_xlim(0,1)
ax[nrow,ncol].yaxis.set_major_locator(MultipleLocator(1))
ax[nrow,ncol].yaxis.set_minor_locator(MultipleLocator(0.25))
ax[nrow,ncol].set_ylim(-5,1.5)
# Title
region_nglac = len(reg_normelev_mb_list)
ax[nrow,ncol].text(0.5, 1.01, group_name + ' (' + str(region_nglac) + ' glaciers)', size=10,
horizontalalignment='center', verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
# Adjust row and column
ncol += 1
if ncol == group_ncols:
nrow += 1
ncol = 0
# Add All Alaska curve to each glacier
ncol = 0
nrow = 0
for group_name in group_names:
if group_name not in ['All Alaska']:
# Fitted curve
ax[nrow,ncol].plot(normelev_all, dhdt_all, color='y', linewidth=1, alpha=1, linestyle='--', zorder=4)
# Adjust row and column
ncol += 1
if ncol == group_ncols:
nrow += 1
ncol = 0
# Y-label
fig.text(0.04, 0.5, 'Mass Balance (m w.e. $\mathregular{yr^{-1}}$)', va='center', ha='center',
rotation='vertical', size=12)
fig.text(0.5, 0.08, 'Normalized Elevation', va='center', ha='center', size=12)
# Save figure
fig.set_size_inches(figwidth,figheight)
figure_fn = 'elev_mb_TERMTYPE-SIZE_gt' + str(valid_perc_threshold) + 'pct_gt' + str(min_area_km2) + 'km2.png'
fig.savefig(fig_fp + figure_fn, bbox_inches='tight', dpi=300)
#%% ==== TERMINUS TYPE - LARSEN: ELEVATION VS MASS BALANCE PLOTS======
group_names = ['Land', 'Lake', 'Tidewater']
group_idx = []
for group_value in [0,2,1]:
group_idx.append(list(main_glac_rgi[main_glac_rgi.TermType == group_value].index.values))
figwidth, figheight = 3, 6.5
fig, ax = plt.subplots(3, 1, squeeze=False, sharex=False, sharey=False,
figsize=(figwidth,figheight), gridspec_kw = {'wspace':0.25, 'hspace':0.2})
ncol = 0
nrow = 0
for ngroup, group_name in enumerate(group_names):
reg_idx = group_idx[ngroup]
reg_binned_list = [binned_list[i] for i in reg_idx]
reg_elev_mb_list = [np.array([i[elev_cn].values, i[mb_cn].values]).transpose() for i in reg_binned_list]
reg_normelev_mb_list = [np.array([i['elev_norm'].values, i[mb_cn].values]).transpose() for i in reg_binned_list]
reg_normelev_mb_stats = norm_stats(reg_normelev_mb_list)
reg_normelev_mb_dict[group_name] = dict(zip((reg_normelev_mb_stats['norm_elev'].values * 100).astype(int),
reg_normelev_mb_stats['norm_dhdt_med'].values))
for n, i in enumerate(reg_elev_mb_list):
glac_elev = i[:,0]
glac_mb = i[:,1]
glac_elev_norm = reg_normelev_mb_list[n][:,0]
# Norm Elevation vs MB
# note: zorder overrides alpha, only alpha if same zorder
ax[nrow,ncol].plot(1 - glac_elev_norm, glac_mb, linewidth=0.5, alpha=0.2, zorder=1)
# Regional curve
ax[nrow,ncol].plot(1 - reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
color='k', linewidth=1, alpha=1, zorder=2)
ax[nrow,ncol].fill_between(1 - reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
reg_normelev_mb_stats['norm_dhdt_med'] + reg_normelev_mb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=1)
ax[nrow,ncol].fill_between(1 - reg_normelev_mb_stats['norm_elev'], reg_normelev_mb_stats['norm_dhdt_med'],
reg_normelev_mb_stats['norm_dhdt_med'] - reg_normelev_mb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=1)
# niceties - Norm Elevation vs MB
ax[nrow,ncol].xaxis.set_major_locator(MultipleLocator(0.2))
ax[nrow,ncol].xaxis.set_minor_locator(MultipleLocator(0.1))
ax[nrow,ncol].set_xlim(0,1)
ax[nrow,ncol].tick_params(labelsize=10)
if group_name in ['Land', 'Lake']:
ax[nrow,ncol].set_ylim(-6,1)
ax[nrow,ncol].yaxis.set_major_locator(MultipleLocator(1))
ax[nrow,ncol].yaxis.set_minor_locator(MultipleLocator(0.5))
elif group_name == 'Tidewater':
ax[nrow,ncol].set_ylim(-10,12)
ax[nrow,ncol].yaxis.set_major_locator(MultipleLocator(5))
ax[nrow,ncol].yaxis.set_minor_locator(MultipleLocator(1))
# Title
region_nglac = len(reg_normelev_mb_list)
ax[nrow,ncol].text(0.5, 0.05, group_name + ' (' + str(region_nglac) + ' glaciers)', size=12,
horizontalalignment='center', verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
# Adjust row
nrow += 1
# Y-label
fig.text(-0.02, 0.5, 'Mass Balance (m w.e. $\mathregular{yr^{-1}}$)', va='center', ha='center',
rotation='vertical', size=12)
fig.text(0.5, 0.07, 'Normalized Elevation', va='center', ha='center', size=12)
# Save figure
fig.set_size_inches(figwidth,figheight)
figure_fn = 'elev_mb_TERMTYPE-Larsen_gt' + str(valid_perc_threshold) + 'pct_gt' + str(min_area_km2) + 'km2.png'
fig.savefig(fig_fp + figure_fn, bbox_inches='tight', dpi=300)
#%% ===== REGIONAL: NORMALIZED ELEVATION VS NORMALIZED MASS BALANCE PLOT======
subregions = 'O2Regions'
if subregions == 'Berthier':
regions = sorted(set(mb_summary.region.values))
subregion_dict = {}
for region in regions:
subregion_dict[region] = region
elif subregions == 'O2Regions':
regions = sorted(set(main_glac_rgi.O2Region.values))
subregion_dict = {2:'Alaska Range', 3:'Alaska Peninsula', 4:'W Chugach Mtns', 5:'St Elias Mtns', 6:'N Coast Ranges',
9999:'All Alaska'}
reg_normelev_mb_dict = {}
regions.append(9999)
ncols = 2
nrows = int(np.ceil(len(regions)/ncols))
figwidth, figheight = 6.5, 8
fig, ax = plt.subplots(nrows, ncols, squeeze=False, sharex=False, sharey=False,
figsize=(figwidth,figheight), gridspec_kw = {'wspace':0.25, 'hspace':0.4})
ncol = 0
nrow = 0
for region in regions:
if subregions == 'Berthier':
reg_idx = list(np.where(mb_summary['region'] == region)[0])
elif subregions =='O2Regions':
reg_idx = list(np.where(main_glac_rgi['O2Region'] == region)[0])
if region == 9999:
reg_idx = main_glac_rgi.index.values
reg_binned_list = [binned_list[i] for i in reg_idx]
mb_norm_cn = 'mb_norm_huss'
# If mb_norm_huss, then remove glaciers with all positive values
if mb_norm_cn == 'mb_norm_huss':
reg_idx_allnan = []
for n, reg_binned_data in enumerate(reg_binned_list):
if np.isnan(reg_binned_data[mb_norm_cn]).any() == True:
reg_idx_allnan.append(n)
for n in sorted(reg_idx_allnan, reverse=True):
del reg_binned_list[n]
reg_normelev_normmb_list = [np.array([i['elev_norm'].values, i[mb_norm_cn].values]).transpose()
for i in reg_binned_list]
reg_normelev_normmb_stats = norm_stats(reg_normelev_normmb_list)
# Estimate a curve
# Two steps: (1) estimate d to nearest integer and avoid nan issues, (2) force d to be an integer and optimize
# bounds ensure the exponent d stays non-negative
x = reg_normelev_normmb_stats['norm_elev'].values
y = reg_normelev_normmb_stats['norm_dhdt_med'].values
def curve_func_raw(x, a, b, c, d):
y = (x + a)**d + b * (x + a) + c
# avoid errors with np.arrays where negative number to power returns NaN - replace with 0
y = np.nan_to_num(y)
return y
def curve_func(x, a, b, c, d):
# force d to be an integer
d = int(np.round(d,0))
y = (x + a)**d + b * (x + a) + c
return y
p0 = [-0.02,0.12,0,3]
bnd_low = [-np.inf, -np.inf, -np.inf, 0]
bnd_high = [np.inf, np.inf, np.inf, np.inf]
coeffs, matcov = curve_fit(curve_func_raw, x, y, p0, bounds=(bnd_low, bnd_high), maxfev=10000)
# specify integer for d
p0[3] = int(np.round(coeffs[3],0))
coeffs, matcov = curve_fit(curve_func, x, y, p0, bounds=(bnd_low, bnd_high), maxfev=10000)
# Round coefficients
coeffs[0] = np.round(coeffs[0],2)
coeffs[1] = np.round(coeffs[1],2)
coeffs[2] = np.round(coeffs[2],2)
glac_elev_norm = np.arange(0,1.01,0.01)
curve_y = curve_func(glac_elev_norm, coeffs[0], coeffs[1], coeffs[2], coeffs[3])
if region == 9999:
curve_y_all = curve_y.copy()
# Plot regional curves
ax[nrow,ncol].plot(reg_normelev_normmb_stats['norm_elev'], reg_normelev_normmb_stats['norm_dhdt_med'],
color='k', linewidth=2, alpha=0.5, zorder=4)
ax[nrow,ncol].fill_between(reg_normelev_normmb_stats['norm_elev'], reg_normelev_normmb_stats['norm_dhdt_med'],
reg_normelev_normmb_stats['norm_dhdt_med'] + reg_normelev_normmb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=2)
ax[nrow,ncol].fill_between(reg_normelev_normmb_stats['norm_elev'], reg_normelev_normmb_stats['norm_dhdt_med'],
reg_normelev_normmb_stats['norm_dhdt_med'] - reg_normelev_normmb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=2)
# Fitted curve
if region != 9999:
color_curve = 'k'
else:
color_curve = 'y'
ax[nrow,ncol].plot(glac_elev_norm, curve_y, color=color_curve, linewidth=1, alpha=1, linestyle='--', zorder=5)
# Huss curve
huss_y_lrg = (glac_elev_norm - 0.02)**6 + 0.12 * (glac_elev_norm - 0.02)
huss_y_med = (glac_elev_norm - 0.05)**4 + 0.19 * (glac_elev_norm - 0.05) + 0.01
huss_y_sml = (glac_elev_norm - 0.30)**2 + 0.60 * (glac_elev_norm - 0.30) + 0.09
ax[nrow,ncol].plot(glac_elev_norm, huss_y_lrg, linewidth=1, color='red', linestyle='-.', alpha=1, zorder=3)
ax[nrow,ncol].plot(glac_elev_norm, huss_y_med, linewidth=1, color='green', linestyle='-.', alpha=1, zorder=3)
ax[nrow,ncol].plot(glac_elev_norm, huss_y_sml, linewidth=1, color='blue', linestyle='-.', alpha=1, zorder=3)
# Individual curves
for n, i in enumerate(reg_normelev_normmb_list):
glac_elev_norm_single = reg_normelev_normmb_list[n][:,0]
glac_mb_norm_single = reg_normelev_normmb_list[n][:,1]
# note: zorder overrides alpha, only alpha if same zorder
ax[nrow,ncol].plot(glac_elev_norm_single, glac_mb_norm_single, linewidth=0.25, alpha=0.2, zorder=1)
# Niceties
ax[nrow,ncol].xaxis.set_major_locator(MultipleLocator(0.25))
ax[nrow,ncol].xaxis.set_minor_locator(MultipleLocator(0.05))
ax[nrow,ncol].set_xlim(0,1)
ax[nrow,ncol].yaxis.set_major_locator(MultipleLocator(1))
ax[nrow,ncol].yaxis.set_minor_locator(MultipleLocator(0.25))
ax[nrow,ncol].set_ylim(1.05,-0.05)
# Title
region_nglac = len(reg_normelev_normmb_list)
ax[nrow,ncol].text(0.5, 1.01, subregion_dict[region] + ' (' + str(region_nglac) + ' glaciers)', size=10,
horizontalalignment='center', verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
# Equation
signs = ['+', '+', '+']
for nsign, coeff in enumerate(coeffs[0:3]):
if coeff < 0:
signs[nsign] = '-'
eqn_txt = 'dh=(x+a)$^{d}$+b(x+a)+c'
ax[nrow,ncol].text(0.05, 0.45, 'a=' + '{:.2f}'.format(coeffs[0]), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.35, 'b=' + '{:.2f}'.format(coeffs[1]), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.25, 'c=' + '{:.2f}'.format(coeffs[2]), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.15, 'd=' + str(int(coeffs[3])), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.05, eqn_txt, size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
# Adjust row and column
ncol += 1
if ncol == ncols:
nrow += 1
ncol = 0
# Add All Alaska curve to each glacier
ncol = 0
nrow = 0
for region in regions:
if region != 9999:
# Fitted curve
ax[nrow,ncol].plot(glac_elev_norm, curve_y_all, color='y', linewidth=1, alpha=1, linestyle='--', zorder=4)
# Adjust row and column
ncol += 1
if ncol == ncols:
nrow += 1
ncol = 0
# Legend
leg_labels = ['Median', 'Curve', 'Curve-all', 'H-small', 'H-medium', 'H-large']
#leg_labels = ['Median', 'Curve_fit', 'Huss-small', 'Huss-medium', 'Huss-large']
#leg_linestyles = ['-', '--', '--', '--', '--']
leg_linestyles = ['-', '--', '--', '-.', '-.', '-.']
leg_colors = ['dimgray', 'k', 'y', 'b', 'g', 'r']
leg_lines = []
for nline, label in enumerate(leg_labels):
# line = Line2D([0,1],[0,1], color='white')
# leg_lines.append(line)
# leg_labels.append('')
line = Line2D([0,0],[0,0], color=leg_colors[nline], linestyle=leg_linestyles[nline], linewidth=1.5)
leg_lines.append(line)
fig.legend(leg_lines, leg_labels, loc='lower center',
bbox_to_anchor=(0.5,0.01), handlelength=1.5, handletextpad=0.25, borderpad=0.2, frameon=True,
ncol=len(leg_labels), columnspacing=0.75)
# Y-label
fig.text(0.04, 0.5, 'Normalized Mass Balance', va='center', ha='center',
rotation='vertical', size=12)
fig.text(0.5, 0.08, 'Normalized Elevation', va='center', ha='center', size=12)
# Save figure
fig.set_size_inches(figwidth,figheight)
figure_fn = 'normelev_normmb_regional_gt' + str(valid_perc_threshold) + 'pct_gt' + str(min_area_km2) + 'km2.png'
fig.savefig(fig_fp + figure_fn, bbox_inches='tight', dpi=300)
#%% ===== GLACIER SIZE: NORMALIZED ELEVATION VS NORMALIZED MASS BALANCE PLOT======
if min_area_km2 < 5:
group_names = ['Area < 5 km$^{2}$', '5 km$^{2}$ < Area <= 20 km$^{2}$', 'Area > 20 km$^{2}$', 'All Alaska']
group_thresholds = [(0,5), (5, 20), (20, np.inf)]
else:
group_names = ['5 km$^{2}$ < Area <= 20 km$^{2}$', 'Area > 20 km$^{2}$', 'All Alaska']
group_thresholds = [(5, 20), (20, np.inf)]
group_idx = []
for group_threshold in group_thresholds:
group_idx.append(list(main_glac_rgi[(main_glac_rgi.Area > group_threshold[0]) &
(main_glac_rgi.Area <= group_threshold[1])].index.values))
group_idx.append(list(main_glac_rgi.index.values))
group_ncols = 2
group_nrows = int(np.ceil(len(group_names)/group_ncols))
figwidth, figheight = 6.5, 8
fig, ax = plt.subplots(group_nrows, group_ncols, squeeze=False, sharex=False, sharey=False,
figsize=(figwidth,figheight), gridspec_kw = {'wspace':0.25, 'hspace':0.4})
ncol = 0
nrow = 0
for ngroup, group_name in enumerate(group_names):
print(group_name)
reg_idx = group_idx[ngroup]
reg_binned_list = [binned_list[i] for i in reg_idx]
mb_norm_cn = 'mb_norm_huss'
# If mb_norm_huss, then remove glaciers with all positive values
if mb_norm_cn == 'mb_norm_huss':
reg_idx_allnan = []
for n, reg_binned_data in enumerate(reg_binned_list):
if np.isnan(reg_binned_data[mb_norm_cn]).any() == True:
reg_idx_allnan.append(n)
for n in sorted(reg_idx_allnan, reverse=True):
del reg_binned_list[n]
reg_normelev_normmb_list = [np.array([i['elev_norm'].values, i[mb_norm_cn].values]).transpose()
for i in reg_binned_list]
reg_normelev_normmb_stats = norm_stats(reg_normelev_normmb_list)
# Estimate a curve
# Two steps: (1) estimate d to nearest integer and avoid nan issues, (2) force d to be an integer and optimize
# bounds ensure the exponent d stays non-negative
x = reg_normelev_normmb_stats['norm_elev'].values
y = reg_normelev_normmb_stats['norm_dhdt_med'].values
def curve_func_raw(x, a, b, c, d):
y = (x + a)**d + b * (x + a) + c
# avoid errors with np.arrays where negative number to power returns NaN - replace with 0
y = np.nan_to_num(y)
return y
def curve_func(x, a, b, c, d):
# force d to be an integer
d = int(np.round(d,0))
y = (x + a)**d + b * (x + a) + c
return y
p0 = [-0.02,0.12,0,3]
bnd_low = [-np.inf, -np.inf, -np.inf, 0]
bnd_high = [np.inf, np.inf, np.inf, np.inf]
coeffs, matcov = curve_fit(curve_func_raw, x, y, p0, bounds=(bnd_low, bnd_high), maxfev=10000)
# specify integer for d
p0[3] = int(np.round(coeffs[3],0))
coeffs, matcov = curve_fit(curve_func, x, y, p0, bounds=(bnd_low, bnd_high), maxfev=10000)
# Round coefficients
coeffs[0] = np.round(coeffs[0],2)
coeffs[1] = np.round(coeffs[1],2)
coeffs[2] = np.round(coeffs[2],2)
glac_elev_norm = np.arange(0,1.01,0.01)
curve_y = curve_func(glac_elev_norm, coeffs[0], coeffs[1], coeffs[2], coeffs[3])
if group_name in ['All Alaska']:
curve_y_all = curve_y.copy()
# Plot regional curves
ax[nrow,ncol].plot(reg_normelev_normmb_stats['norm_elev'], reg_normelev_normmb_stats['norm_dhdt_med'],
color='k', linewidth=2, alpha=0.5, zorder=4)
ax[nrow,ncol].fill_between(reg_normelev_normmb_stats['norm_elev'], reg_normelev_normmb_stats['norm_dhdt_med'],
reg_normelev_normmb_stats['norm_dhdt_med'] + reg_normelev_normmb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=2)
ax[nrow,ncol].fill_between(reg_normelev_normmb_stats['norm_elev'], reg_normelev_normmb_stats['norm_dhdt_med'],
reg_normelev_normmb_stats['norm_dhdt_med'] - reg_normelev_normmb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=2)
# Fitted curve
if group_name not in ['All Alaska']:
color_curve = 'k'
else:
color_curve = 'y'
ax[nrow,ncol].plot(glac_elev_norm, curve_y, color=color_curve, linewidth=1, alpha=1, linestyle='--', zorder=5)
# Huss curve
huss_y_lrg = (glac_elev_norm - 0.02)**6 + 0.12 * (glac_elev_norm - 0.02)
huss_y_med = (glac_elev_norm - 0.05)**4 + 0.19 * (glac_elev_norm - 0.05) + 0.01
huss_y_sml = (glac_elev_norm - 0.30)**2 + 0.60 * (glac_elev_norm - 0.30) + 0.09
ax[nrow,ncol].plot(glac_elev_norm, huss_y_lrg, linewidth=1, color='red', linestyle='-.', alpha=1, zorder=3)
ax[nrow,ncol].plot(glac_elev_norm, huss_y_med, linewidth=1, color='green', linestyle='-.', alpha=1, zorder=3)
ax[nrow,ncol].plot(glac_elev_norm, huss_y_sml, linewidth=1, color='blue', linestyle='-.', alpha=1, zorder=3)
# Individual curves
for n, i in enumerate(reg_normelev_normmb_list):
glac_elev_norm_single = reg_normelev_normmb_list[n][:,0]
glac_mb_norm_single = reg_normelev_normmb_list[n][:,1]
# note: zorder overrides alpha, only alpha if same zorder
ax[nrow,ncol].plot(glac_elev_norm_single, glac_mb_norm_single, linewidth=0.25, alpha=0.2, zorder=1)
# Niceties
ax[nrow,ncol].xaxis.set_major_locator(MultipleLocator(0.25))
ax[nrow,ncol].xaxis.set_minor_locator(MultipleLocator(0.05))
ax[nrow,ncol].set_xlim(0,1)
ax[nrow,ncol].yaxis.set_major_locator(MultipleLocator(1))
ax[nrow,ncol].yaxis.set_minor_locator(MultipleLocator(0.25))
ax[nrow,ncol].set_ylim(1.05,-0.05)
# Title
region_nglac = len(reg_normelev_normmb_list)
ax[nrow,ncol].text(0.5, 1.01, group_name + ' (' + str(region_nglac) + ' glaciers)', size=10,
horizontalalignment='center', verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
# Equation
signs = ['+', '+', '+']
for nsign, coeff in enumerate(coeffs[0:3]):
if coeff < 0:
signs[nsign] = '-'
eqn_txt = 'dh=(x+a)$^{d}$+b(x+a)+c'
ax[nrow,ncol].text(0.05, 0.45, 'a=' + '{:.2f}'.format(coeffs[0]), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.35, 'b=' + '{:.2f}'.format(coeffs[1]), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.25, 'c=' + '{:.2f}'.format(coeffs[2]), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.15, 'd=' + str(int(coeffs[3])), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.05, eqn_txt, size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
# Adjust row and column
ncol += 1
if ncol == group_ncols:
nrow += 1
ncol = 0
# Add All Alaska curve to each glacier
ncol = 0
nrow = 0
for group_name in group_names:
if group_name not in ['All Alaska']:
# Fitted curve
ax[nrow,ncol].plot(glac_elev_norm, curve_y_all, color='y', linewidth=1, alpha=1, linestyle='--', zorder=4)
# Adjust row and column
ncol += 1
if ncol == group_ncols:
nrow += 1
ncol = 0
# Legend
leg_labels = ['Median', 'Curve', 'Curve-all', 'H-small', 'H-medium', 'H-large']
#leg_labels = ['Median', 'Curve_fit', 'Huss-small', 'Huss-medium', 'Huss-large']
#leg_linestyles = ['-', '--', '--', '--', '--']
leg_linestyles = ['-', '--', '--', '-.', '-.', '-.']
leg_colors = ['dimgray', 'k', 'y', 'b', 'g', 'r']
leg_lines = []
for nline, label in enumerate(leg_labels):
# line = Line2D([0,1],[0,1], color='white')
# leg_lines.append(line)
# leg_labels.append('')
line = Line2D([0,0],[0,0], color=leg_colors[nline], linestyle=leg_linestyles[nline], linewidth=1.5)
leg_lines.append(line)
fig.legend(leg_lines, leg_labels, loc='lower center',
bbox_to_anchor=(0.5,0.01), handlelength=1.5, handletextpad=0.25, borderpad=0.2, frameon=True,
ncol=len(leg_labels), columnspacing=0.75)
# Y-label
fig.text(0.04, 0.5, 'Normalized Mass Balance', va='center', ha='center',
rotation='vertical', size=12)
fig.text(0.5, 0.08, 'Normalized Elevation', va='center', ha='center', size=12)
# Save figure
fig.set_size_inches(figwidth,figheight)
figure_fn = 'normelev_normmb_SIZE_gt' + str(valid_perc_threshold) + 'pct_gt' + str(min_area_km2) + 'km2.png'
fig.savefig(fig_fp + figure_fn, bbox_inches='tight', dpi=300)
#%% ===== TERMINUS TYPE: NORMALIZED ELEVATION VS NORMALIZED MASS BALANCE PLOT======
group_names = ['Land', 'Lake', 'Tidewater', 'All Alaska']
group_idx = []
for group_value in [0,2,1]:
group_idx.append(list(main_glac_rgi[main_glac_rgi.TermType == group_value].index.values))
group_idx.append(main_glac_rgi.index.values)
group_ncols = 2
group_nrows = int(np.ceil(len(group_names)/group_ncols))
figwidth, figheight = 6.5, 8
fig, ax = plt.subplots(group_nrows, group_ncols, squeeze=False, sharex=False, sharey=False,
figsize=(figwidth,figheight), gridspec_kw = {'wspace':0.25, 'hspace':0.4})
ncol = 0
nrow = 0
for ngroup, group_name in enumerate(group_names):
reg_idx = group_idx[ngroup]
reg_binned_list = [binned_list[i] for i in reg_idx]
mb_norm_cn = 'mb_norm_huss'
# If mb_norm_huss, then remove glaciers with all positive values
if mb_norm_cn == 'mb_norm_huss':
reg_idx_allnan = []
for n, reg_binned_data in enumerate(reg_binned_list):
if np.isnan(reg_binned_data[mb_norm_cn]).any() == True:
reg_idx_allnan.append(n)
for n in sorted(reg_idx_allnan, reverse=True):
del reg_binned_list[n]
reg_normelev_normmb_list = [np.array([i['elev_norm'].values, i[mb_norm_cn].values]).transpose()
for i in reg_binned_list]
reg_normelev_normmb_stats = norm_stats(reg_normelev_normmb_list)
# Estimate a curve
# Two steps: (1) estimate d to nearest integer and avoid nan issues, (2) force d to be an integer and optimize
# bounds ensure the exponent d stays non-negative
x = reg_normelev_normmb_stats['norm_elev'].values
y = reg_normelev_normmb_stats['norm_dhdt_med'].values
def curve_func_raw(x, a, b, c, d):
y = (x + a)**d + b * (x + a) + c
# avoid errors with np.arrays where negative number to power returns NaN - replace with 0
y = np.nan_to_num(y)
return y
def curve_func(x, a, b, c, d):
# force d to be an integer
d = int(np.round(d,0))
y = (x + a)**d + b * (x + a) + c
return y
p0 = [-0.02,0.12,0,3]
bnd_low = [-np.inf, -np.inf, -np.inf, 0]
bnd_high = [np.inf, np.inf, np.inf, np.inf]
coeffs, matcov = curve_fit(curve_func_raw, x, y, p0, bounds=(bnd_low, bnd_high), maxfev=10000)
# specify integer for d
p0[3] = int(np.round(coeffs[3],0))
coeffs, matcov = curve_fit(curve_func, x, y, p0, bounds=(bnd_low, bnd_high), maxfev=10000)
# Round coefficients
coeffs[0] = np.round(coeffs[0],2)
coeffs[1] = np.round(coeffs[1],2)
coeffs[2] = np.round(coeffs[2],2)
glac_elev_norm = np.arange(0,1.01,0.01)
curve_y = curve_func(glac_elev_norm, coeffs[0], coeffs[1], coeffs[2], coeffs[3])
if group_name in ['All Alaska']:
curve_y_all = curve_y.copy()
# Plot regional curves
ax[nrow,ncol].plot(reg_normelev_normmb_stats['norm_elev'], reg_normelev_normmb_stats['norm_dhdt_med'],
color='k', linewidth=2, alpha=0.5, zorder=4)
ax[nrow,ncol].fill_between(reg_normelev_normmb_stats['norm_elev'], reg_normelev_normmb_stats['norm_dhdt_med'],
reg_normelev_normmb_stats['norm_dhdt_med'] + reg_normelev_normmb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=2)
ax[nrow,ncol].fill_between(reg_normelev_normmb_stats['norm_elev'], reg_normelev_normmb_stats['norm_dhdt_med'],
reg_normelev_normmb_stats['norm_dhdt_med'] - reg_normelev_normmb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=2)
# Fitted curve
if group_name not in ['All Alaska']:
color_curve = 'k'
else:
color_curve = 'y'
ax[nrow,ncol].plot(glac_elev_norm, curve_y, color=color_curve, linewidth=1, alpha=1, linestyle='--', zorder=5)
# Huss curve
huss_y_lrg = (glac_elev_norm - 0.02)**6 + 0.12 * (glac_elev_norm - 0.02)
huss_y_med = (glac_elev_norm - 0.05)**4 + 0.19 * (glac_elev_norm - 0.05) + 0.01
huss_y_sml = (glac_elev_norm - 0.30)**2 + 0.60 * (glac_elev_norm - 0.30) + 0.09
ax[nrow,ncol].plot(glac_elev_norm, huss_y_lrg, linewidth=1, color='red', linestyle='-.', alpha=1, zorder=3)
ax[nrow,ncol].plot(glac_elev_norm, huss_y_med, linewidth=1, color='green', linestyle='-.', alpha=1, zorder=3)
ax[nrow,ncol].plot(glac_elev_norm, huss_y_sml, linewidth=1, color='blue', linestyle='-.', alpha=1, zorder=3)
# Individual curves
for n, i in enumerate(reg_normelev_normmb_list):
glac_elev_norm_single = reg_normelev_normmb_list[n][:,0]
glac_mb_norm_single = reg_normelev_normmb_list[n][:,1]
# note: zorder overrides alpha, only alpha if same zorder
ax[nrow,ncol].plot(glac_elev_norm_single, glac_mb_norm_single, linewidth=0.25, alpha=0.2, zorder=1)
# Niceties
ax[nrow,ncol].xaxis.set_major_locator(MultipleLocator(0.25))
ax[nrow,ncol].xaxis.set_minor_locator(MultipleLocator(0.05))
ax[nrow,ncol].set_xlim(0,1)
ax[nrow,ncol].yaxis.set_major_locator(MultipleLocator(1))
ax[nrow,ncol].yaxis.set_minor_locator(MultipleLocator(0.25))
ax[nrow,ncol].set_ylim(1.05,-0.05)
# Title
region_nglac = len(reg_normelev_normmb_list)
ax[nrow,ncol].text(0.5, 1.01, group_name + ' (' + str(region_nglac) + ' glaciers)', size=10,
horizontalalignment='center', verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
# Equation
signs = ['+', '+', '+']
for nsign, coeff in enumerate(coeffs[0:3]):
if coeff < 0:
signs[nsign] = '-'
eqn_txt = 'dh=(x+a)$^{d}$+b(x+a)+c'
ax[nrow,ncol].text(0.05, 0.45, 'a=' + '{:.2f}'.format(coeffs[0]), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.35, 'b=' + '{:.2f}'.format(coeffs[1]), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.25, 'c=' + '{:.2f}'.format(coeffs[2]), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.15, 'd=' + str(int(coeffs[3])), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.05, eqn_txt, size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
# Adjust row and column
ncol += 1
if ncol == group_ncols:
nrow += 1
ncol = 0
# Add All Alaska curve to each glacier
ncol = 0
nrow = 0
for group_name in group_names:
if group_name not in ['All Alaska']:
# Fitted curve
ax[nrow,ncol].plot(glac_elev_norm, curve_y_all, color='y', linewidth=1, alpha=1, linestyle='--', zorder=4)
# Adjust row and column
ncol += 1
if ncol == group_ncols:
nrow += 1
ncol = 0
# Legend
leg_labels = ['Median', 'Curve', 'Curve-all', 'H-small', 'H-medium', 'H-large']
#leg_labels = ['Median', 'Curve_fit', 'Huss-small', 'Huss-medium', 'Huss-large']
#leg_linestyles = ['-', '--', '--', '--', '--']
leg_linestyles = ['-', '--', '--', '-.', '-.', '-.']
leg_colors = ['dimgray', 'k', 'y', 'b', 'g', 'r']
leg_lines = []
for nline, label in enumerate(leg_labels):
# line = Line2D([0,1],[0,1], color='white')
# leg_lines.append(line)
# leg_labels.append('')
line = Line2D([0,0],[0,0], color=leg_colors[nline], linestyle=leg_linestyles[nline], linewidth=1.5)
leg_lines.append(line)
fig.legend(leg_lines, leg_labels, loc='lower center',
bbox_to_anchor=(0.5,0.01), handlelength=1.5, handletextpad=0.25, borderpad=0.2, frameon=True,
ncol=len(leg_labels), columnspacing=0.75)
# Y-label
fig.text(0.04, 0.5, 'Normalized Mass Balance', va='center', ha='center',
rotation='vertical', size=12)
fig.text(0.5, 0.08, 'Normalized Elevation', va='center', ha='center', size=12)
# Save figure
fig.set_size_inches(figwidth,figheight)
figure_fn = 'normelev_normmb_TERMTYPE_gt' + str(valid_perc_threshold) + 'pct_gt' + str(min_area_km2) + 'km2.png'
fig.savefig(fig_fp + figure_fn, bbox_inches='tight', dpi=300)
#%% ===== TERMTYPE AND SIZE: NORMALIZED ELEVATION VS NORMALIZED MASS BALANCE PLOT======
group_names = ['Tidewater', 'Lake', 'Land (A < 5 km$^{2}$)', 'Land (5 < A < 20 km$^{2}$)',
'Land (A > 20 km$^{2}$)', 'All Alaska']
group_idx = []
for group_name in group_names:
if group_name == 'Tidewater':
group_idx.append(list(main_glac_rgi[main_glac_rgi.TermType == 1].index.values))
elif group_name == 'Lake':
group_idx.append(list(main_glac_rgi[main_glac_rgi.TermType == 2].index.values))
elif group_name == 'Land (A < 5 km$^{2}$)':
group_idx.append(list(main_glac_rgi[(main_glac_rgi.TermType == 0) & (main_glac_rgi.Area <= 5)].index.values))
elif group_name == 'Land (5 < A < 20 km$^{2}$)':
group_idx.append(list(main_glac_rgi[(main_glac_rgi.TermType == 0) & (main_glac_rgi.Area > 5) &
(main_glac_rgi.Area <= 20)].index.values))
elif group_name == 'Land (A > 20 km$^{2}$)':
group_idx.append(list(main_glac_rgi[(main_glac_rgi.TermType == 0) & (main_glac_rgi.Area > 20)].index.values))
elif group_name == 'All Alaska':
group_idx.append(list(main_glac_rgi.index.values))
group_ncols = 2
group_nrows = int(np.ceil(len(group_names)/group_ncols))
figwidth, figheight = 6.5, 8
fig, ax = plt.subplots(group_nrows, group_ncols, squeeze=False, sharex=False, sharey=False,
figsize=(figwidth,figheight), gridspec_kw = {'wspace':0.25, 'hspace':0.4})
ncol = 0
nrow = 0
for ngroup, group_name in enumerate(group_names):
reg_idx = group_idx[ngroup]
reg_binned_list = [binned_list[i] for i in reg_idx]
mb_norm_cn = 'mb_norm_huss'
# If mb_norm_huss, then remove glaciers with all positive values
if mb_norm_cn == 'mb_norm_huss':
reg_idx_allnan = []
for n, reg_binned_data in enumerate(reg_binned_list):
if np.isnan(reg_binned_data[mb_norm_cn]).any() == True:
reg_idx_allnan.append(n)
for n in sorted(reg_idx_allnan, reverse=True):
del reg_binned_list[n]
reg_normelev_normmb_list = [np.array([i['elev_norm'].values, i[mb_norm_cn].values]).transpose()
for i in reg_binned_list]
reg_normelev_normmb_stats = norm_stats(reg_normelev_normmb_list)
# Estimate a curve
# Two steps: (1) estimate d to nearest integer and avoid nan issues, (2) force d to be an integer and optimize
# bounds ensure the exponent d stays non-negative
x = reg_normelev_normmb_stats['norm_elev'].values
y = reg_normelev_normmb_stats['norm_dhdt_med'].values
def curve_func_raw(x, a, b, c, d):
y = (x + a)**d + b * (x + a) + c
# avoid errors with np.arrays where negative number to power returns NaN - replace with 0
y = np.nan_to_num(y)
return y
def curve_func(x, a, b, c, d):
# force d to be an integer
d = int(np.round(d,0))
y = (x + a)**d + b * (x + a) + c
return y
p0 = [-0.02,0.12,0,3]
bnd_low = [-np.inf, -np.inf, -np.inf, 0]
bnd_high = [np.inf, np.inf, np.inf, np.inf]
coeffs, matcov = curve_fit(curve_func_raw, x, y, p0, bounds=(bnd_low, bnd_high), maxfev=10000)
# specify integer for d
p0[3] = int(np.round(coeffs[3],0))
coeffs, matcov = curve_fit(curve_func, x, y, p0, bounds=(bnd_low, bnd_high), maxfev=10000)
# Round coefficients
coeffs[0] = np.round(coeffs[0],2)
coeffs[1] = np.round(coeffs[1],2)
coeffs[2] = np.round(coeffs[2],2)
glac_elev_norm = np.arange(0,1.01,0.01)
curve_y = curve_func(glac_elev_norm, coeffs[0], coeffs[1], coeffs[2], coeffs[3])
if group_name in ['All Alaska']:
curve_y_all = curve_y.copy()
# Plot regional curves
ax[nrow,ncol].plot(reg_normelev_normmb_stats['norm_elev'], reg_normelev_normmb_stats['norm_dhdt_med'],
color='k', linewidth=2, alpha=0.5, zorder=4)
ax[nrow,ncol].fill_between(reg_normelev_normmb_stats['norm_elev'], reg_normelev_normmb_stats['norm_dhdt_med'],
reg_normelev_normmb_stats['norm_dhdt_med'] + reg_normelev_normmb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=2)
ax[nrow,ncol].fill_between(reg_normelev_normmb_stats['norm_elev'], reg_normelev_normmb_stats['norm_dhdt_med'],
reg_normelev_normmb_stats['norm_dhdt_med'] - reg_normelev_normmb_stats['norm_dhdt_nmad'],
color='dimgray', alpha=0.5, zorder=2)
# Fitted curve
if group_name not in ['All Alaska']:
color_curve = 'k'
else:
color_curve = 'y'
ax[nrow,ncol].plot(glac_elev_norm, curve_y, color=color_curve, linewidth=1, alpha=1, linestyle='--', zorder=5)
# Huss curve
huss_y_lrg = (glac_elev_norm - 0.02)**6 + 0.12 * (glac_elev_norm - 0.02)
huss_y_med = (glac_elev_norm - 0.05)**4 + 0.19 * (glac_elev_norm - 0.05) + 0.01
huss_y_sml = (glac_elev_norm - 0.30)**2 + 0.60 * (glac_elev_norm - 0.30) + 0.09
ax[nrow,ncol].plot(glac_elev_norm, huss_y_lrg, linewidth=1, color='red', linestyle='-.', alpha=1, zorder=3)
ax[nrow,ncol].plot(glac_elev_norm, huss_y_med, linewidth=1, color='green', linestyle='-.', alpha=1, zorder=3)
ax[nrow,ncol].plot(glac_elev_norm, huss_y_sml, linewidth=1, color='blue', linestyle='-.', alpha=1, zorder=3)
# Individual curves
for n, i in enumerate(reg_normelev_normmb_list):
glac_elev_norm_single = reg_normelev_normmb_list[n][:,0]
glac_mb_norm_single = reg_normelev_normmb_list[n][:,1]
# note: zorder overrides alpha, only alpha if same zorder
ax[nrow,ncol].plot(glac_elev_norm_single, glac_mb_norm_single, linewidth=0.25, alpha=0.2, zorder=1)
# Niceties
ax[nrow,ncol].xaxis.set_major_locator(MultipleLocator(0.25))
ax[nrow,ncol].xaxis.set_minor_locator(MultipleLocator(0.05))
ax[nrow,ncol].set_xlim(0,1)
ax[nrow,ncol].yaxis.set_major_locator(MultipleLocator(1))
ax[nrow,ncol].yaxis.set_minor_locator(MultipleLocator(0.25))
ax[nrow,ncol].set_ylim(1.05,-0.05)
# Title
region_nglac = len(reg_normelev_normmb_list)
ax[nrow,ncol].text(0.5, 1.01, group_name + ' (' + str(region_nglac) + ' glaciers)', size=10,
horizontalalignment='center', verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
# Equation
signs = ['+', '+', '+']
for nsign, coeff in enumerate(coeffs[0:3]):
if coeff < 0:
signs[nsign] = '-'
eqn_txt = 'dh=(x+a)$^{d}$+b(x+a)+c'
ax[nrow,ncol].text(0.05, 0.45, 'a=' + '{:.2f}'.format(coeffs[0]), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.35, 'b=' + '{:.2f}'.format(coeffs[1]), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.25, 'c=' + '{:.2f}'.format(coeffs[2]), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.15, 'd=' + str(int(coeffs[3])), size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
ax[nrow,ncol].text(0.05, 0.05, eqn_txt, size=10, horizontalalignment='left',
verticalalignment='bottom', transform=ax[nrow,ncol].transAxes)
# Adjust row and column
ncol += 1
if ncol == group_ncols:
nrow += 1
ncol = 0
# Add All Alaska curve to each glacier
ncol = 0
nrow = 0
for group_name in group_names:
if group_name not in ['All Alaska']:
# Fitted curve
ax[nrow,ncol].plot(glac_elev_norm, curve_y_all, color='y', linewidth=1, alpha=1, linestyle='--', zorder=4)
# Adjust row and column
ncol += 1
if ncol == group_ncols:
nrow += 1
ncol = 0
# Legend
leg_labels = ['Median', 'Curve', 'Curve-all', 'H-small', 'H-medium', 'H-large']
leg_linestyles = ['-', '--', '--', '-.', '-.', '-.']
leg_colors = ['dimgray', 'k', 'y', 'b', 'g', 'r']
leg_lines = []
for nline, label in enumerate(leg_labels):
# line = Line2D([0,1],[0,1], color='white')
# leg_lines.append(line)
# leg_labels.append('')
line = Line2D([0,0],[0,0], color=leg_colors[nline], linestyle=leg_linestyles[nline], linewidth=1.5)
leg_lines.append(line)
fig.legend(leg_lines, leg_labels, loc='lower center',
bbox_to_anchor=(0.5,0.01), handlelength=1.5, handletextpad=0.25, borderpad=0.2, frameon=True,
ncol=len(leg_labels), columnspacing=0.75)
# Y-label
fig.text(0.04, 0.5, 'Normalized Mass Balance', va='center', ha='center',
rotation='vertical', size=12)
fig.text(0.5, 0.08, 'Normalized Elevation', va='center', ha='center', size=12)
# Save figure
fig.set_size_inches(figwidth,figheight)
figure_fn = 'normelev_normmb_TERMTYPE-SIZE_gt' + str(valid_perc_threshold) + 'pct_gt' + str(min_area_km2) + 'km2.png'
fig.savefig(fig_fp + figure_fn, bbox_inches='tight', dpi=300)
#%% ===== EXTRAPOLATE MASS BALANCE CURVES TO EVERY GLACIER =====
main_glac_rgi_all = modelsetup.selectglaciersrgitable(rgi_regionsO1=[1], rgi_regionsO2='all', rgi_glac_number='all')
# Load hypsometry data
glac_hyps_all_df = pd.read_csv(hyps_fn)
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#import stationary_block_bootstrap as sbb
import pandas as pd
import numpy as np
import scipy.stats
import numpy
import time
import random
#import state_variables
import os
import scipy.stats
import sklearn.feature_selection
import matplotlib.gridspec as gridspec
import copy
from argotools.config import *
from argotools.forecastlib.handlers import *
from argotools.forecastlib.functions import *
import argotools.forecastlib.stationary_block_bootstrap as sbb
from argotools.dataFormatter import *
import seaborn as sns
import matplotlib.ticker as mticker
import math
from matplotlib.ticker import MaxNLocator,IndexFormatter, FormatStrFormatter
class OutputVis:
# Variables : top_n = 3, ranking_metric = 'rmse', ranking_season ='ALL_PERIOD', preds (vector/PD containing all predictions), metrics (matrix/PD containing all metrics),
# Load predictions and csvs from file,
# get name of models, number of models, name of metrics, table variable names (season1, season2... allPeriod).
# Get RANKING METRIC or all models in the file. Check if theres more than one first.
# FUNC STATISTICS BETWEEN THE MODELS : MEAN, VARIANCE, BEST MODEL, WORST MODEL
# figure 1 : Time-series, error and percent error
# figure 2: metric / plot
def __init__(self, folder_dir=None, ids=None, overview_folder='_overview'):
# Loading tables and files
if folder_dir is None:
print('WARNING! No main folder directory specified. Add it as an attribute or \
specify it on every function call that requires it.')
self.folder_main = folder_dir
self.ids = ids
self.overview_folder = overview_folder
print('Visualizer initialized')
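# Illustrative usage (folder path and region ids below are hypothetical placeholders):
# vis = OutputVis(folder_dir='experiments/ILI', ids=['US_MA', 'US_NY'])
# vis.plot_coefficients(id_='US_MA', model='ARGO_coefficients.csv', mode='show')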
# imported VARS
def plot_SEC(self, series_filepath=None, coeff_filepath=None, target_name='ILI', models=None, color_dict=None, start_period=None, end_period=None, alpha_dict=None, output_filename=None, ext='png', mode='save', n_coeff=20, cmap_color='RdBu_r', error_type='Error', vmin=-1, vmax=1, font_path=None):
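"""
Plots a three-panel summary: the target series and each model's estimates (top), their
absolute errors (middle), and a seaborn heatmap of the n_coeff coefficients with the largest
mean value over time (bottom), read from series_filepath and coeff_filepath respectively.
"""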
if font_path:
from matplotlib import font_manager
prop = font_manager.FontProperties(fname=font_path)
if color_dict is None:
color_dict = dict(zip(models, [tuple(np.random.random(3)) for mod in models]))
if alpha_dict is None:
alpha_dict = dict(zip(models, [1 for mod in models]))
series_df = pd.read_csv(series_filepath, index_col=0)
coeff_df = pd.read_csv(coeff_filepath, index_col=0)
if start_period is None:
start_period = series_df.index[0]
if end_period is None:
end_period = series_df.index[-1]
series_df = series_df[start_period:end_period]
coeff_df = coeff_df[start_period:end_period]
target = series_df[target_name].values
series = {}
errors = {}
for mod in models:
series[mod] = series_df[mod].values
errors[mod] = np.abs(target - series[mod])
indices = list(series_df[target_name].index.values)
#plotting target
f, axarr = plt.subplots(3,2, gridspec_kw = {'height_ratios':[2,1,3], 'width_ratios':[16,1]})
axarr[0,0].fill_between(x=list(range(len(indices))),y1=target, facecolor='gray', alpha=0.5, label=target_name)
#plotting series
for mod in models:
axarr[0,0].plot(series[mod], label=mod, color=color_dict[mod], alpha=alpha_dict[mod])
axarr[1,0].plot(errors[mod], color=color_dict[mod], alpha=alpha_dict[mod])
if n_coeff is None:
n_coeff = coeff_df.shape[1]
means = coeff_df.mean(axis=0)
coeff_names = list(coeff_df)
ordered_names = [ name for v, name in sorted(zip(means, coeff_names), key=lambda x: x[0], reverse=True)]
coeff_df = coeff_df[ordered_names[:n_coeff]]
sns.heatmap(coeff_df.T, vmin=vmin, vmax=vmax, cmap=cmap_color, center=None, \
robust=False, annot=None, fmt='.2g', annot_kws=None, linewidths=0,\
linecolor='white', cbar=True, cbar_kws=None, cbar_ax=axarr[2,1], square=False,\
xticklabels='auto', yticklabels=True, mask=None, ax=axarr[2,0])
plt.gcf().set_size_inches([10, int(n_coeff/2)])
plt.sca(axarr[0,0])
plt.legend(frameon=False, ncol=len(models))
plt.xlim([0, len(indices)])
plt.ylim(bottom=0)
plt.xticks(range(len(indices)),indices, rotation=0)
plt.gca().xaxis.set_major_formatter(IndexFormatter(indices))
plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(6))
plt.gca().set_xticklabels([])
plt.grid(linestyle = 'dotted', linewidth = .6)
plt.sca(axarr[1,0])
plt.xlim([0, len(indices)])
plt.xticks(range(len(indices)),indices, rotation=0)
plt.gca().xaxis.set_major_formatter(IndexFormatter(indices))
plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(6))
plt.gca().set_xticklabels([])
plt.grid(linestyle = 'dotted', linewidth = .6)
plt.sca(axarr[0,1])
plt.axis('off')
plt.sca(axarr[1,1])
plt.axis('off')
plt.sca(axarr[2,0])
plt.xticks(range(len(indices)),indices, rotation=0)
plt.gca().xaxis.set_major_formatter(IndexFormatter(indices))
plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(6))
if font_path:
    plt.gca().set_yticklabels(ordered_names[:n_coeff], fontproperties=prop)
else:
    plt.gca().set_yticklabels(ordered_names[:n_coeff])
# STYLE
axarr[0,0].spines['right'].set_visible(False)
axarr[0,0].spines['top'].set_visible(False)
axarr[1,0].spines['right'].set_visible(False)
axarr[1,0].spines['top'].set_visible(False)
axarr[0,0].set_ylabel(target_name)
axarr[1,0].set_ylabel(error_type)
plt.subplots_adjust(left=.2, bottom=.1, right=.95, top=.9, wspace=.05, hspace=.20)
if mode == 'show':
plt.show()
elif mode == 'save':
if output_filename is None:
# default filename derived from the target series name
output_filename = '{0}_SEC'.format(target_name)
plt.savefig('{0}/{1}.{2}'.format(self.folder_main, output_filename, ext), format=ext)
else:
plt.savefig(output_filename+'.{0}'.format(ext), format=ext)
plt.close()
def plot_coefficients(self, id_=None, model=None, coefficients_filepath=None, cmap_color='RdBu_r',\
n_coeff=None, filename='_coefficients.csv', output_filename=None, ext='png', mode='show'):
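"""
Plots a seaborn heatmap of a model's coefficients over time, read either from
coefficients_filepath or from '{folder_main}/{id_}/{model}'. Coefficients are ordered by
their mean value and only the first n_coeff are shown.
"""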
if coefficients_filepath:
coefficients = pd.read_csv(coefficients_filepath, index_col=0)
else:
coefficients = pd.read_csv('{0}/{1}/{2}'.format(self.folder_main, id_, model), index_col=0)
coefficients = coefficients.fillna(0)
if n_coeff is None:
n_coeff = coefficients.shape[1]
means = coefficients.mean(axis=0)
coeff_names = list(coefficients)
ordered_names = [ name for v, name in sorted(zip(means, coeff_names), key=lambda x: x[0], reverse=True)]
coefficients = coefficients[ordered_names[:n_coeff]]
sns.heatmap(coefficients.T, vmin=None, vmax=None, cmap=cmap_color, center=None, \
robust=False, annot=None, fmt='.2g', annot_kws=None, linewidths=0,\
linecolor='white', cbar=True, cbar_kws=None, cbar_ax=None, square=False,\
xticklabels='auto', yticklabels=True, mask=None, ax=None)
plt.gcf().set_size_inches([10, int(n_coeff/3)])
if mode == 'show':
plt.show()
elif mode == 'save':
if output_filename is None:
output_filename = '{0}_coefficients'.format(model)
plt.savefig('{0}/{1}/{2}.{3}'.format(self.folder_main, id_, output_filename, ext), format=ext)
else:
plt.savefig(output_filename+'.{0}'.format(ext), format=ext)
plt.close()
def inter_group_lollipop_comparison(ids_dict, path_dict, metric, period, models, benchmark, color_dict=None, alpha_dict=None, metric_filename='metrics.csv', bar_separation_multiplier=1.5, mode='show', output_filename='LollipopTest', plot_domain=None, ext='png'):
"""
Plots the ratio of the metric score for each of the models against a benchmark in a lollipop plot to compare between experiments.
Parameters
__________
ids_dict: dict
Dictionary containing the list of ids for each experiment
path_dict: dict
Dictionary containing the path to the experiment folders (must coincide with the keys of ids_dict)
metric: str
String containing the name of the metric to look for in the predictions file
period: str
Column name containing the values to plot
models: List, optional (default None)
String list containing the names of the models to plot
benchmark: str
The name within "models" which will serve as the benchmark
color_dict : dict
Dictionary containing specific colors for the models to plot
metric_filename : str, optional (default metrics.csv)
mode : str, optional (default is 'show')
If 'save', then function saves the plot in the experiment's overview folder.
If 'show', then function displays the plot using plt.show.
alpha_dict : dict, optional (default is None)
Dictionary specifying the opacity of the markers in the plot (alpha argument in matplotlib).
If set to None, then all opacities are set to 1.
output_filename : str, optional (default is 'LollipopTest')
If set to None, output_filename is set to metricname_barplot.
ext : str, optional (default is png)
Extension format used to save the plot.
plot_domain : list, optional (default is None)
List of two numbers that sets the x-axis limits of the plot (plt.xlim).
bar_separation_multiplier : float, optional (default is 1.5)
Multiplier for the separation between bars in the plot.
If set to 1, bars are plotted at locations 1, 2, 3...; if set to 2, at 2, 4, 6, etc.
"""
fig, axarr = plt.subplots(len(ids_dict.keys()),1)
axes = axarr.ravel()
if color_dict is None:
color_dict = dict(zip(models, ['b']*len(models)))
if alpha_dict is None:
alpha_dict = dict(zip(models, [1]*len(models)))
for i, (experiment, folder_main) in enumerate(path_dict.items()):
plt.sca(axes[i])
ids = ids_dict[experiment]
values_dict = dict(zip(models, [[] for mod in models]))
min_val = float('inf')
max_val = float('-inf')
indices = []
overview_path = '{0}/{1}'.format(folder_main, '_overview')
for n_id, id_ in enumerate(ids):
indices.append(n_id*bar_separation_multiplier)
id_path = '{0}/{1}'.format(folder_main, id_)
df = pd.read_csv('{0}/{1}'.format(id_path, metric_filename))
df = df[df['METRIC']==metric]
for j, mod in enumerate(models):
ratio = copy.copy(df[df['MODEL']==mod][period].values[0]/df[df['MODEL']==benchmark][period].values[0])
if metric in ['ERROR', 'RMSE', 'NRMSE', 'MAPE']:
ratio=(1/ratio)
values_dict[mod].append(ratio)
if ratio < min_val:
min_val = ratio
if ratio > max_val:
max_val = ratio
bar_width = 1/len(models)
indices = np.array(indices)
for k, mod in enumerate(models):
heights = values_dict[mod]
bar_positions = indices + bar_width*k
(markers, stemlines, baseline) = plt.stem(bar_positions, heights, linefmt='--')
plt.setp(markers, marker='o', markersize=7, color=color_dict[mod], alpha=alpha_dict[mod], label=mod)
plt.setp(stemlines, color=color_dict[mod], linewidth=1)
plt.setp(baseline, visible=False)
# Black line
plt.plot([0,bar_positions[-1]], [1,1],'--',color='.6', alpha=.6)
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
if experiment == 'State':
ids = [id_[-3:] for id_ in ids]
plt.xticks(indices+bar_width*((len(models)-1)/2), ids)
plt.ylim([min_val*.95, max_val*1.05])
plt.xlim([-.3, bar_positions[-1]+.3])
if i == 0:
axes[i].legend(frameon=False, ncol=len(models))
plt.title('{0} barplot'.format(metric))
if mode == 'show':
plt.show()
elif mode == 'save':
if output_filename is None:
output_filename = '{0}_barplot'.format(metric)
plt.gcf().set_size_inches([6,15])
plt.savefig('{0}/{1}.{2}'.format(overview_path, output_filename, ext), format=ext)
plt.close()
def group_lollipop_ratio(self, ids, metric, period, models, benchmark, folder_main = None, color_dict=None, alpha_dict=None, metric_filename='metrics.csv', bar_separation_multiplier=1.5, mode='show', output_filename='LollipopTest', plot_domain=None, ext='png'):
"""
Plots the ratio of the metric score for each of the models against a benchmark in a lollipop plot.
Parameters
__________
ids: list
List of string identifiers for the regions to look for.
metric: str
String containing the name of the metric to look for in the predictions file
period: str
Column name containing the values to plot
models: List, optional (default None)
String list containing the names of the models to plot
benchmark: str
The name within "models" which will serve as the benchmark
color_dict : dict
Dictionary containing specific colors for the models to plot
metric_filename : str, optional (default metrics.csv)
folder_main : str, optional (default is None)
Path to the experiment folder; if set to None, self.folder_main is used.
mode : str, optional (default is 'show')
If 'save', then function saves the plot in the experiment's overview folder.
If 'show', then function displays the plot using plt.show.
alpha_dict : dict, optional (default is None)
Dictionary specifying the opacity of the markers in the plot (alpha argument in matplotlib).
If set to None, then all opacities are set to 1.
output_filename : str, optional (default is 'LollipopTest')
If set to None, output_filename is set to metricname_barplot.
ext : str, optional (default is png)
Extension format used to save the plot.
plot_domain : list, optional (default is None)
List of two numbers that sets the x-axis limits of the plot (plt.xlim).
bar_separation_multiplier : float, optional (default is 1.5)
Multiplier for the separation between bars in the plot.
If set to 1, bars are plotted at locations 1, 2, 3...; if set to 2, at 2, 4, 6, etc.
"""
if color_dict is None:
color_dict = dict(zip(models, ['b']*len(models)))
if alpha_dict is None:
alpha_dict = dict(zip(models, [1]*len(models)))
if folder_main is None:
folder_main = self.folder_main
values_dict = dict(zip(models, [[] for mod in models]))
min_val = float('inf')
max_val = float('-inf')
indices = []
overview_path = '{0}/{1}'.format(folder_main, '_overview')
for i, id_ in enumerate(ids):
indices.append(i*bar_separation_multiplier)
id_path = '{0}/{1}'.format(folder_main, id_)
df = pd.read_csv('{0}/{1}'.format(id_path, metric_filename))
df = df[df['METRIC']==metric]
for j, mod in enumerate(models):
ratio = copy.copy(df[df['MODEL']==mod][period].values[0]/df[df['MODEL']==benchmark][period].values[0])
if metric in ['ERROR', 'RMSE', 'NRMSE', 'MAPE']:
ratio=(1/ratio)
values_dict[mod].append(ratio)
if ratio < min_val:
min_val = ratio
if ratio > max_val:
max_val = ratio
bar_width = 1/len(models)
indices = np.array(indices)
for i, mod in enumerate(models):
heights = values_dict[mod]
bar_positions = indices + bar_width*i
(markers, stemlines, baseline) = plt.stem(bar_positions, heights, linefmt='--')
plt.setp(markers, marker='o', markersize=7, color=color_dict[mod], alpha=alpha_dict[mod], label=mod)
plt.setp(stemlines, color=color_dict[mod], linewidth=1)
plt.setp(baseline, visible=False)
# Black line
plt.plot([0,bar_positions[-1]], [1,1],'--',color='.6', alpha=.6)
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.title('{0} barplot'.format(metric))
plt.xticks(indices+bar_width*((len(models)-1)/2), ids)
plt.ylim([min_val*.95, max_val*1.05])
plt.xlim([-.3, bar_positions[-1]+.3])
plt.legend(frameon=False, ncol=len(models))
if plot_domain:
plt.xlim(plot_domain)
if mode == 'show':
plt.show()
elif mode == 'save':
if output_filename is None:
output_filename = '{0}_barplot'.format(metric)
plt.gcf().set_size_inches([6,15])
plt.savefig('{0}/{1}.{2}'.format(overview_path, output_filename, ext), format=ext)
plt.close()
def inter_season_analysis(self,ids, main_folders, periods, series_names, metric = 'RMSE', filename='metrics_condensed.csv', output_filename='season_analysis', color_dict=None, alpha_dict=None, mode='save', ext='png'):
'''
Performs a seasonal analysis of the data based on periods chosen by the user.
The top part of the plot shows violin plots (https://seaborn.pydata.org/generated/seaborn.violinplot.html)
displaying the distribution of each model's metric scores across all periods.
The bottom part shows the distribution of rankings across all periods. E.g. if each timeseries
case contains 4 periods and there are 4 cases, the total number of periods is 4*4 = 16. Each period has a
metric for each model. inter_season_analysis compares this metric within each period and ranks the models
from first to nth place; each place adds a +1 count for the model in the corresponding rank.
__________
ids : dict
Dict of lists containing the identifiers for the regions (keys are experiment names).
main_folders : dict
Paths to the experiments. Dictionary keys have to be consistent with the ids keys.
periods : list
List containing the periods (should be available within the metrics table).
series_names : list
Names of the models to analyze.
metric : str, optional (default is 'RMSE')
Metric to look for in the metrics table.
filename : str, optional (default is 'metrics_condensed.csv')
Name of the file to read the metrics from (using pandas).
output_filename : str, optional (default is 'season_analysis')
Name of the graphics file containing the plots.
color_dict : dict, optional (default is None)
Dictionary containing specific colors for the models to plot.
alpha_dict : dict, optional (default is None)
Dictionary specifying the opacity of the bars in the plot (alpha argument in matplotlib).
If set to None, then all opacities are set to 1.
mode : str, optional (default is 'save')
If 'save', then function saves the plot in the overview folder.
If 'show', then function displays the plot using plt.show.
ext : str, optional (default is png)
Extension format used to save the graphics file.
'''
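# Illustrative call (experiment names, paths, ids and model names are hypothetical placeholders):
# vis.inter_season_analysis(
#     ids={'State': ['US_MA', 'US_NY'], 'Region': ['Region1', 'Region2']},
#     main_folders={'State': 'experiments/state', 'Region': 'experiments/region'},
#     periods=['season1', 'season2'], series_names=['ARGO', 'AR12', 'GFT'], metric='RMSE')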
default_colors = ['royalblue', 'darkorange', 'forestgreen', 'firebrick']
if color_dict is None:
color_dict = dict(zip(series_names, default_colors[0:len(series_names)]))
score_periods = {}
ranks = {}
for title, ids_ in ids.items():
metrics_df = pd.read_csv(main_folders[title] + '/_overview/'+ filename)
score_periods[title] = []
ranks[title] = getRanks(metrics_df, metric, ids_, series_names, periods)
for mod in series_names:
score_periods[title].append(get_all_periods(metrics_df, mod, metric, periods))
score_periods[title] = pd.DataFrame(np.hstack(score_periods[title]), columns=series_names)
f, axarr = plt.subplots(2, len(ids.keys()))
axes = axarr.ravel()
places_dict = get_places(ranks, series_names)
places = ['4th', '3rd', '2nd', '1st']
for i, title in enumerate(ids.keys()):
places_list = places_dict[title]
sns.violinplot(data=score_periods[title], ax=axes[i], cut=0, inner='box')
'''
sns.heatmap(data=ranks[metric], ax=axes[i+len(ids.keys())], cmap='Reds', cbar=False, annot=True)
axes[i+len(ids.keys())].set_yticklabels(['1th', '2th', '3th', '4th', '5th'], rotation='horizontal')
axes[i+len(ids.keys())].set_xticklabels(series_names, rotation='horizontal')
'''
print(title, i)
for j, ord_list in enumerate(reversed(places_list)):
for (mod, height) in ord_list:
axes[i+len(ids.keys())].barh(j, height, color=color_dict[mod])
plt.sca(axes[i+len(ids.keys())])
plt.yticks(range(len(places_list)), places)
axes[i].set_title(title)
axes[i].set_xticklabels(series_names)
if i == 0:
axes[i+len(ids.keys())].set_xlabel('No. of States')
elif i == 1:
axes[i+len(ids.keys())].set_xlabel('No. of Regions')
elif i == 2:
axes[i+len(ids.keys())].set_xlabel('No. of Countries')
if i == 0:
axes[i].set_ylabel('{0}'.format(metric))
axes[i+len(ids.keys())].set_ylabel('Ranking Proportions')
if mode == 'show':
plt.show()
if mode == 'save':
plt.gcf().set_size_inches([9, 5])
plt.subplots_adjust(left=.1, bottom=.12, right=.97, top=.91, wspace=.25, hspace=.20)
plt.savefig('{0}/{1}/{2}.{3}'.format(self.folder_main, OVERVIEW_FOLDER, output_filename, ext), format=ext)
plt.close()
return
def group_seriesbars(self, ids=None, start_period=None, end_period=None, series_names=None, folder_dir=None, metric_filename='metrics.csv', preds_filename='preds.csv', output_filename='series', color_dict=None, alpha_dict=None, mode='show', ext='png', n_col=1, width_ratios=[6,1], metric=None, metric_period=None, target_name=None):
default_colors = ['g', 'b', 'r', 'indigo']
default_linewidths = [1.5,1.4,1.6,1]
'''
Gathers information from all regions and does a group plot using matplotlib, along with a side barplot showing a metric.
Regions are ordered based on the original ordering of the ids list, from left to right and top to bottom.
Parameters
__________
ids : list
The list containing the identifiers for the regions.
preds_filename : str
String containing the filename to read the series from (using pandas).
start_period : str
Starting index of the timeseries in the pandas dataframe.
end_period : str
Ending index of the timeseries in the pandas dataframe.
n_col : int, optional (default is 1)
Number of columns used to arrange the plotting array.
series_names : list, optional (default is None)
Names of the timeseries to plot. If set to None, then model plots all of them.
target_name : str, optional (default is None)
Name of the observed (target) series, plotted as a shaded area behind the estimates.
metric : str, optional (default is None)
Name of the metric (row of the 'METRIC' column in metric_filename) shown in the side barplot.
metric_period : str, optional (default is None)
Column of the metrics table containing the values shown in the side barplot.
output_filename : str, optional (default is 'series')
Name of the graphics file containing the plots.
color_dict : dict
Dictionary containing specific colors for the models to plot.
mode : str, optional (default is 'show')
If 'save', then function saves the plot in the overview folder.
If 'show', then function displays the plot using plt.show.
alpha_dict : dict, optional (default is None)
Dictionary specifying the opacity of the series in the plot (alpha argument in matplotlib).
If set to None, then all opacities are set to .8.
ext : str, optional (default is png)
Extension format used to save the graphics file.
'''
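# Illustrative call (ids, dates and model names are hypothetical placeholders):
# vis.group_seriesbars(ids=['US_MA', 'US_NY'], start_period='2012-01-01', end_period='2016-12-25',
#                      series_names=['ARGO', 'AR12'], target_name='ILI',
#                      metric='RMSE', metric_period='ALL_PERIOD', n_col=2, mode='show')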
if not ids:
ids = self.ids
if folder_dir is None:
folder_dir = self.folder_main
n_ids = len(ids)
n_rows = math.ceil(n_ids/n_col)
fig, axarr = plt.subplots(n_rows,n_col*2, gridspec_kw = {'width_ratios':width_ratios*n_col})
axes = axarr.ravel()
if color_dict is None:
color_dict = {}
for i, mod in enumerate(series_names):
color_dict[mod] = default_colors[i]
if alpha_dict is None:
alpha_dict = {}
for i, mod in enumerate(series_names):
alpha_dict[mod] = .8
for i, id_ in enumerate(ids):
df = pd.read_csv('{0}/{1}/{2}'.format(folder_dir, id_, preds_filename), index_col=[0])
metric_df = pd.read_csv('{0}/{1}/{2}'.format(folder_dir, id_, metric_filename))
series = []
indices = copy.copy(df[start_period:end_period].index.values)
for kk in range(np.size(indices)):
v = indices[kk][2:7]
indices[kk] = v
col_names = list(df)
if target_name:
zeros=np.zeros(np.size(df[start_period:end_period][target_name].values))
curve_max = np.amax(np.size(df[start_period:end_period][target_name].values))
#axes[i*2].plot(df[start_period:end_period][target_name].values, label=target_name, linewidth=.1)
axes[i*2].fill_between(x=list(range(len(indices))),y1=df[start_period:end_period][target_name].values, facecolor='gray', alpha=0.5, label=target_name)
for k, col in enumerate(series_names):
if col in col_names:
# create top panel
axes[i*2].plot(df[start_period:end_period][col].values, label=col, linewidth=default_linewidths[k])
else:
print('WARNING! {0} not in {1} timeseries list'.format(col, id_))
if color_dict:
for j, l in enumerate(axes[i*2].get_lines()):
l.set_color(color_dict[series_names[j]])
if alpha_dict:
for j, l in enumerate(axes[i*2].get_lines()):
l.set_alpha(alpha_dict[series_names[j]])
######
metric_df = metric_df[metric_df['METRIC']==metric][['MODEL', metric_period]]
bar_width = .5
hs = []
for k, mod in enumerate(series_names):
heights = metric_df[metric_df['MODEL'] == mod][metric_period].values
bar_positions = k
rects = axes[i*2+1].bar(bar_positions, heights, bar_width, label=mod, color=color_dict[mod], alpha=alpha_dict[mod])
hs.append(copy.copy(heights))
max_height = np.amax(hs)
min_height = np.amin(hs)
axes[i*2+1].set_ylim([min_height*.90, max_height*1.1])
axes[i*2+1].set_yticks([min_height, max_height])
axes[i*2+1].yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
#####
if i == 0:
if target_name:
n_cols = len(series_names)+1
else:
n_cols = len(series_names)
axes[i*2].legend(ncol=n_cols, frameon=False, loc='upper left', \
bbox_to_anchor=(.0,1.20))
axes[i*2].text(.10,.9, id_, weight = 'bold', horizontalalignment='left', transform=axes[i*2].transAxes)
#axes[i*2+1].yaxis.set_major_locator(mticker.MaxNLocator(2))
axes[i*2].yaxis.set_major_locator(mticker.MaxNLocator(2))
axes[i*2+1].set_xticks([])
# SPINES
axes[i*2].spines['top'].set_visible(False)
axes[i*2].spines['right'].set_visible(False)
#axes[i*2].spines['left'].set_visible(False)
yticks=axes[i*2].get_yticks()
ylim = axes[i*2].get_ylim()
axes[i*2].spines['left'].set_bounds(0,yticks[2])
axes[i*2+1].spines['left'].set_bounds(min_height,max_height)
axes[i*2].set_ylim(0,ylim[1])
axes[i*2+1].spines['top'].set_visible(False)
axes[i*2+1].spines['right'].set_visible(False)
#axes[i*2+1].spines['left'].set_visible(False)
if i == 0:
plt.ylabel('Estimates')
if i > n_col*(n_rows - 1)-1:
axes[i*2].set_xlabel('Date')
plt.sca(axes[i*2])
plt.xticks(range(len(indices)),indices, rotation=0)
plt.gca().xaxis.set_major_formatter(IndexFormatter(indices))
plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(4))
xticks = axes[i*2].get_xticks()
axes[i*2].spines['bottom'].set_bounds(xticks[1], xticks[-2])
else:
plt.sca(axes[i*2])
plt.xticks(range(len(indices)),indices, rotation=0)
plt.gca().xaxis.set_major_formatter(IndexFormatter(indices))
plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(4))
xticks = axes[i*2].get_xticks()
axes[i*2].spines['bottom'].set_bounds(xticks[1], xticks[-2])
#axes[i*2].set_xticklabels([])
if i < np.size(axes)/2-1:
for j in range(i+1,int(np.size(axes)/2)):
axes[j*2+1].spines['top'].set_visible(False)
axes[j*2+1].spines['right'].set_visible(False)
axes[j*2+1].spines['left'].set_visible(False)
axes[j*2+1].spines['bottom'].set_visible(False)
axes[j*2].spines['top'].set_visible(False)
axes[j*2].spines['right'].set_visible(False)
axes[j*2].spines['left'].set_visible(False)
axes[j*2].spines['bottom'].set_visible(False)
axes[j*2].set_yticks([])
axes[j*2].set_xticks([])
axes[j*2+1].set_yticks([])
axes[j*2+1].set_xticks([])
axes[j*2].set_title('')
axes[j*2+1].set_title('')
plt.subplots_adjust(left=.03, bottom=.05, right=.99, top=.95, wspace=.25, hspace=.15)
if mode == 'show':
plt.show()
plt.close()
if mode == 'save':
fig.set_size_inches([7*n_col,2.5*n_rows])
plt.savefig('{0}/{1}/{2}.{3}'.format(folder_dir, OVERVIEW_FOLDER, output_filename,ext),format=ext)
plt.close()
def rank_ids_by_metric(self, ids, models, period, metric='RMSE', reverse=False, metric_filename='metrics.csv'):
'''
rank_ids_by_metric compares the performance of the two models in the models list using the
selected metric. For each id the function computes the ratio of model[0] over model[1]
(inverting it for error metrics such as RMSE, NRMSE, ERROR and MAPE) and orders the ids by
that ratio, in increasing order by default.
Parameters
__________
ids : list
List of strings containing the region identifiers to rank.
models : list
A list of the two models to compare.
period : str
Specifies the period (column) of the metric to use.
metric : str, optional (default is RMSE)
The metric to use as comparison.
reverse : Boolean, optional (default is False)
If False, orders in increasing order. If set to True, orders in decreasing order.
metric_filename : str, optional (default is 'metrics.csv')
Returns
_______
ids : list
An ordered list of IDs based on the results of the comparison.
'''
metric_values = []
for id_ in ids:
metric_df = pd.read_csv('{0}/{1}/{2}'.format(self.folder_main, id_, metric_filename))
mod0_val = metric_df[ (metric_df['METRIC'] == metric) & (metric_df['MODEL'] == models[0])][period].values
mod1_val = metric_df[(metric_df['METRIC'] == metric) & (metric_df['MODEL'] == models[1])][period].values
ratio = mod0_val/mod1_val
if metric in ['RMSE', 'NRMSE', 'ERROR', 'MAPE']:
ratio = 1/ratio
metric_values.append(copy.copy(ratio))
ord_values = []
ord_ids = []
for id_, val in sorted(zip(ids, metric_values), key = lambda x : x[1], reverse=reverse):
ord_values.append(val)
ord_ids.append(id_)
return ord_ids
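# A minimal, self-contained sketch of the ranking logic above, kept as a comment so it does
# not interfere with the class body. It uses in-memory metric values instead of the per-region
# CSV files; the region and model names are illustrative assumptions, not taken from the source.
# example_rmse = {'region_A': {'AR12': 1.10, 'ARGO': 0.80},
#                 'region_B': {'AR12': 0.90, 'ARGO': 0.85},
#                 'region_C': {'AR12': 1.00, 'ARGO': 1.20}}
# ratios = {}
# for region, vals in example_rmse.items():
#     # ratio of models[0] over models[1], inverted for error metrics as in the method above
#     ratios[region] = 1.0 / (vals['AR12'] / vals['ARGO'])
# ranked_ids = [region for region, _ in sorted(ratios.items(), key=lambda kv: kv[1])]
# print(ranked_ids)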
def group_weekly_winner(self, ids=None, cmap='BuPu', models=None, start_period=None, end_period=None, output_filename='weekly_winners', folder_main=None, filename='preds.csv', mode='show', ext='png'):
"""
For each ID, chooses the weekly winner out of the models list in a prediction file and plots
all of them together in a heatmap.
Parameters
__________
ids : list
The list containing the identifiers for the regions.
models : list
Names of the models compared when choosing the weekly winner.
filename : str
String containing the filename to read the series from (using pandas).
start_period : str
Starting index of the timeseries Pandas dataframe. If None, the first index is used.
end_period : str
Ending index of the timeseries Pandas dataframe. If None, the last index is used.
output_filename : str, optional (default is weekly_winners)
Name of the graphics file containing the plots.
mode : str, optional (default is 'show')
If 'save', the function saves the plot in the overview folder.
If 'show', the function uses plt.show to pop up the plot in real time.
ext : str, optional (default is png)
Extension format used to save the graphics file.
cmap : str, optional (default is 'BuPu')
Colormap style to display in the plot. The list of colormaps is provided by Matplotlib.
folder_main : str, optional (default is None)
Path to folder with data. If None, uses the default class attribute.
"""
if folder_main is None:
folder_main = self.folder_main
#Getting winners in each id
winners_dict = {}
ind = list(range(len(models)))
map_dict =dict(zip(models, ind))
for i, id_ in enumerate(ids):
df = pd.read_csv('{0}/{1}/{2}'.format(folder_main, id_, filename), index_col=[0])
if i == 0:
if start_period is None:
start_period = df.index[0]
if end_period is None:
end_period = df.index[-1]
df = df[start_period:end_period]
winners = get_winners_from_df(df, models=models)
winners=winners.replace({"winners" : map_dict })
winners_dict[id_] = winners['winners'].values
index = df[start_period:end_period].index.values
winners_df = pd.DataFrame(winners_dict, index=index)
ax= sns.heatmap(data=winners_df.transpose(), linewidths=.6, yticklabels=True, cbar_kws={"ticks":ind})
ax.collections[0].colorbar.ax.set_yticklabels(models)
#plt.matshow(winners_df.transpose(), origin='lower', aspect='auto', cmap='BuPu')
#cb = plt.colorbar(orientation='vertical', ticks=ind, shrink=.5)
#cb.ax.set_yticklabels(models)
#plt.xticks(range(len(index)),index, rotation=45)
#plt.gca().xaxis.set_major_formatter(IndexFormatter(index))
#plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(6))
if mode == 'show':
plt.show()
plt.close()
if mode == 'save':
plt.gcf().set_size_inches([10,6])
plt.subplots_adjust(left=.10, bottom = .15, right = 1, top=.95, wspace=.20, hspace=.20)
plt.savefig('{0}/{1}/{2}.{3}'.format(self.folder_main, self.overview_folder, output_filename, ext),format=ext)
plt.close()
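# Hedged sketch (as a comment) of what a helper like get_winners_from_df is assumed to do:
# pick, for every row (week), the model whose prediction is closest to the target column.
# The column names 'target', 'AR12' and 'ARGO' are illustrative assumptions, not the helper's API.
# import pandas as pd
# def winners_from_df_sketch(df, models, target='target'):
#     errors = df[models].sub(df[target], axis=0).abs()
#     return pd.DataFrame({'winners': errors.idxmin(axis=1)}, index=df.index)
# example = pd.DataFrame({'target': [1.0, 2.0, 3.0],
#                         'AR12':   [1.1, 2.5, 2.9],
#                         'ARGO':   [0.7, 2.1, 3.4]})
# print(winners_from_df_sketch(example, ['AR12', 'ARGO']))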
def plot_series(self,folder_dir=None, id_=None, filename=None, output_filename='series', series_names=None, color_dict=None, alpha_dict=None, start_period=None, end_period=None, mode='save', ext='png', add_weekly_winner=False, winner_models=None):
if folder_dir is None:
folder_dir = self.folder_main
if filename is None:
filename = ID_PREDS
df = pd.read_csv('{0}/{1}/{2}'.format(self.folder_main, id_, filename), index_col=[0])
if start_period is None:
start_period = df.index[0]
if end_period is None:
end_period = df.index[-2]
series = []
index = df.index.values
if add_weekly_winner:
n_rows = 2
gridspec_kw = {'height_ratios':[6,1]}
else:
n_rows = 1
gridspec_kw = None
fig, axes = plt.subplots(n_rows, 1, gridspec_kw = gridspec_kw, squeeze=False)
axes = axes.ravel()   # ensures axes[0] is valid even when add_weekly_winner is False
col_names = list(df)
if series_names is None:
series_names = col_names
for col in series_names:
# create top panel
axes[0].plot(df[start_period:end_period][col].values, label=col)
#a = ax.plot_date(x=dates, y=ILI) # fmt="-",color='.20', linewidth=3.2, label='ILI', alpha = 1)
if color_dict:
for i, l in enumerate(axes[0].get_lines()):
l.set_color(color_dict[series_names[i]])
if alpha_dict:
for i, l in enumerate(axes[0].get_lines()):
l.set_alpha(alpha_dict[series_names[i]])
if add_weekly_winner:
winners = get_winners_from_df(df, models=winner_models)
ind = list(range(len(winner_models)))
map_dict =dict(zip(winner_models, ind))
winners=winners.replace({"winners" : map_dict })
im = axes[1].matshow(winners['winners'].values.reshape([1,-1]), origin='lower', aspect='auto', cmap='BuPu')
cb = plt.colorbar(im, ax=axes[1], orientation='horizontal', ticks=ind)
cb.ax.set_xticklabels(winner_models)
axes[0].legend(ncol=len(series_names), frameon=False)
axes[0].set_title('{0}'.format(id_))
axes[0].set_ylabel('Estimates')
axes[0].set_xlabel('Index')
axes[0].spines['top'].set_visible(False)
axes[0].spines['right'].set_visible(False)
plt.xticks(range(len(index)),index, rotation=45)
axes[0].xaxis.set_major_formatter(IndexFormatter(index))
axes[0].xaxis.set_major_locator(mticker.MaxNLocator(6))
if add_weekly_winner:
axes[1].set_xticks([])
axes[1].set_yticks([])
axes[0].autoscale(enable=True, axis='x', tight=True)
#plt.locator_params(nbins=8)
if mode == 'show':
plt.show()
plt.close()
if mode == 'save':
fig.set_size_inches([10,5])
plt.savefig('{0}/{1}/{2}.{3}'.format(self.folder_main, id_, output_filename,ext),format=ext)
plt.close()
def season_analysis(self, ids, periods, series_names, folder_main=None, metrics = ['PEARSON', 'NRMSE'], filename='metrics_condensed.csv', output_filename='season_analysis', color_dict=None, alpha_dict=None, mode='save', ext='png'):
'''
Gathers metric information from all regions and produces a group plot using matplotlib and
seaborn: violin plots of the per-period scores and heatmaps of the model rankings.
Parameters
__________
ids : list
The list containing the identifiers for the regions.
periods : list
List containing the periods (should be available within the metrics table).
series_names : list
Names of the models to include.
metrics : list, optional (default is ['PEARSON', 'NRMSE'])
Metrics to summarize.
filename : str, optional (default is 'metrics_condensed.csv')
Name of the condensed metrics file to read (using pandas).
output_filename : str, optional (default is season_analysis)
Name of the graphics file containing the plots.
color_dict : dict
Dictionary containing specific colors for the models to plot.
alpha_dict : dict, optional (default is None)
Dictionary specifying the opacity of the plotted elements (alpha argument in matplotlib).
If set to None, all opacities are set to 1.
mode : str, optional (default is 'save')
If 'save', the function saves the plot in the overview folder.
If 'show', the function uses plt.show to pop up the plot in real time.
ext : str, optional (default is png)
Extension format used to save the graphics file.
folder_main : str, optional (default is None)
Path to folder with data. If None, uses the default class attribute.
'''
if not folder_main:
folder_main = self.folder_main
metrics_df = pd.read_csv(folder_main + '/_overview/'+ filename)
score_periods = {}
ranks = {}
for metric in metrics:
score_periods[metric] = []
ranks[metric] = getRanks(metrics_df, metric, ids, series_names, periods)
for mod in series_names:
score_periods[metric].append(get_all_periods(metrics_df, mod, metric, periods))
score_periods[metric] = pd.DataFrame(np.hstack(score_periods[metric]), columns=series_names)
f, axarr = plt.subplots(2, len(metrics))
axes = axarr.ravel()
for i, metric in enumerate(metrics):
sns.violinplot(data=score_periods[metric], ax=axes[i], cut=0)
sns.heatmap(data=ranks[metric], ax=axes[i+2], cmap='Reds', cbar=False, annot=True)
axes[i].set_title(metric)
axes[i+2].set_yticklabels(['1st', '2nd', '3rd', '4th', '5th'], rotation='horizontal')
axes[i+2].set_xticklabels(series_names, rotation='horizontal')
if mode == 'show':
plt.show()
if mode == 'save':
plt.gcf().set_size_inches([7, 4])
plt.subplots_adjust(left=.08, bottom=.09, right=.97, top=.91, wspace=.25, hspace=.20)
plt.savefig('{0}/{1}/{2}.{3}'.format(self.folder_main, OVERVIEW_FOLDER, output_filename,ext),format=ext)
plt.close()
def group_plot_series(self, ids=None, start_period=None, end_period=None, series_names=None, folder_dir=None, filename='preds.csv', output_filename='series', color_dict=None, alpha_dict=None, mode='save', ext='png', n_col=1):
'''
Gathers information from all regions and does a group plot using matplotlib.
Regions are ordered following the original ordering of the ids list, left to right, top to bottom.
Parameters
__________
ids : list
The list containing the identifiers for the regions.
filename : str
String containing the filename to read the series from (using pandas).
start_period : str
Starting index of the timeseries Pandas dataframe.
end_period : str
Ending index of the timeseries Pandas dataframe.
n_col : int, optional (default is one)
Number of columns in the plotting array. The function organizes the plots in n_col columns.
series_names : list, optional (default is None)
Names of the timeseries to plot. If set to None, all of them are plotted.
output_filename : str, optional (default is series)
Name of the graphics file containing the plots.
color_dict : dict
Dictionary containing specific colors for the models to plot.
mode : str, optional (default is 'save')
If 'save', the function saves the plot in the overview folder.
If 'show', the function uses plt.show to pop up the plot in real time.
alpha_dict : dict, optional (default is None)
Dictionary specifying the opacity of the lines in the plot (alpha argument in matplotlib).
If set to None, all opacities are set to 1.
ext : str, optional (default is png)
Extension format used to save the graphics file.
'''
if folder_dir is None:
folder_dir = self.folder_main
n_ids = len(ids)
n_rows = math.ceil(n_ids/n_col)
fig, axarr = plt.subplots(n_rows,n_col)
axes = axarr.ravel()
for i, id_ in enumerate(ids):
df = pd.read_csv('{0}/{1}/{2}'.format(self.folder_main, id_, filename), index_col=[0])
series = []
index = df[start_period:end_period].index.values
col_names = list(df)
for col in series_names:
if col in col_names:
# create top panel
axes[i].plot(df[start_period:end_period][col].values, label=col)
else:
print('WARNING! {0} not in {1} timeseries list'.format(col, id_))
if color_dict:
for j, l in enumerate(axes[i].get_lines()):
l.set_color(color_dict[series_names[j]])
if alpha_dict:
for j, l in enumerate(axes[i].get_lines()):
l.set_alpha(alpha_dict[series_names[j]])
if i == 0:
axes[i].legend(ncol=len(series_names), frameon=False, loc='upper left', \
bbox_to_anchor=(.0,1.20))
axes[i].text(.80,.9, id_, weight = 'bold', horizontalalignment='left', transform=axes[i].transAxes)
axes[i].spines['top'].set_visible(False)
axes[i].spines['right'].set_visible(False)
if i%n_col == 0:
plt.ylabel('Estimates')
if i > n_col*(n_rows - 1)-1:
time.sleep(3)
axes[i].set_xlabel('Index')
plt.sca(axes[i])
plt.xticks(range(len(index)),index, rotation=45)
plt.gca().xaxis.set_major_formatter(IndexFormatter(index))
plt.gca().xaxis.set_major_locator(mticker.MaxNLocator(6))
#plt.locator_params(nbins=8)
else:
axes[i].set_xticks([])
if mode == 'show':
plt.show()
plt.close()
if mode == 'save':
fig.set_size_inches([5*n_col,3*n_rows])
plt.savefig('{0}/{1}/{2}.{3}'.format(self.folder_main, OVERVIEW_FOLDER, output_filename,ext),format=ext)
plt.close()
def merge_models(filename, filename2, output_filename, models=None, start_period=None, end_period=None, erase_duplicates=True):
"""
Merges two prediction dataframes over a specified period of time, keeps only the selected
models, and stores the result in output_filename.
PARAMETERS:
___________
filename : str
Path to the first dataframe (CSV).
filename2 : str
Path to the second dataframe (CSV).
output_filename : str
New absolute location of the merged dataframe.
models : list, optional (default is None)
Names of the models to keep in the new dataframe. If set to None, all models are kept.
start_period : str, optional (default is None)
First index to merge from. If set to None, the first index of the concatenated dataframe is used.
end_period : str, optional (default is None)
Last index to merge to. If set to None, the last index of the concatenated dataframe is used.
erase_duplicates : boolean, optional (default is True)
If True, duplicated columns are dropped after the merge.
"""
df1 = pd.read_csv(filename, index_col = [0])
df2 = pd.read_csv(filename2, index_col = [0])
df3 = pd.concat([df1,df2], axis=1)
if start_period and (start_period in df3.index):
pass
elif start_period is None:
start_period = df3.index[0]
else:
print('Unable to identify start_period {0} as valid start reference.\
please review'.format(start_period))
return
if end_period and end_period in df3.index:
pass
elif end_period is None:
end_period = df3.index[-1]
else:
print('Unable to identify end_period {0} as valid end reference.\
please review'.format(end_period))
return
if models is None:
models = df3.columns
df3 = df3[start_period:end_period][models]
if erase_duplicates:
df3=df3.T.drop_duplicates().T
df3.to_csv(output_filename)
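# A minimal in-memory illustration of the merge logic above (concatenate on columns, slice by
# index labels, drop duplicated columns); the values and column names are illustrative.
# import pandas as pd
# df_a = pd.DataFrame({'target': [1.0, 2.0, 3.0], 'AR12': [1.1, 1.9, 3.2]},
#                     index=['2016-01-03', '2016-01-10', '2016-01-17'])
# df_b = pd.DataFrame({'target': [1.0, 2.0, 3.0], 'ARGO': [0.9, 2.2, 2.8]},
#                     index=['2016-01-03', '2016-01-10', '2016-01-17'])
# merged = pd.concat([df_a, df_b], axis=1)
# merged = merged.loc['2016-01-03':'2016-01-17']
# merged = merged.T.drop_duplicates().T   # drops the duplicated 'target' column
# print(merged)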
def group_barplot_metric(self, ids, metric, period, models, color_dict=None, alpha_dict=None, metric_filename='metrics.csv', bar_separation_multiplier=1.5, mode='save', output_filename=None, plot_domain=None, ext='png', show_values=False, ordering=None):
"""
Produces a bar plot of the desired metric and models for a group of ids.
If looking to make a single-id plot, please check barplot_metric().
Parameters
__________
ids : list
Identifiers for the regions to include.
metric : str
String containing the name of the metric to look for in the metrics file.
period : str
Column name containing the values to plot.
models : list
String list containing the names of the models to plot.
color_dict : dict
Dictionary containing specific colors for the models to plot.
metric_filename : str, optional (default is metrics.csv)
bar_separation_multiplier : float, optional (default is 1.5)
Multiplier for the separation between bar groups in the plot.
If set to 1, groups are placed at 1, 2, 3...; if set to 2, at 2, 4, 6, etc.
mode : str, optional (default is 'save')
If 'save', the function saves the plot in the overview folder.
If 'show', the function uses plt.show to pop up the plot in real time.
alpha_dict : dict, optional (default is None)
Dictionary specifying the opacity of the bars in the plot (alpha argument in matplotlib).
If set to None, all opacities are set to 1.
output_filename : str, optional (default is None)
If set to None, output_filename is set to metricname_barplot.
ext : str, optional (default is png)
Extension format used to save the barplot.
plot_domain : list, optional (default is None)
List of two numbers that sets the limits of the plot (plt.xlim).
show_values : boolean, optional (default is False)
Plots the values of the metric within the barplot.
"""
if color_dict is None:
color_dict = dict(zip(models, ['b']*len(models)))
if alpha_dict is None:
alpha_dict = dict(zip(models, [1]*len(models)))
values_dict = dict(zip(models, [[] for mod in models]))
indices = []
overview_path = '{0}/{1}'.format(self.folder_main, OVERVIEW_FOLDER)
for i, id_ in enumerate(ids):
indices.append(i*bar_separation_multiplier)
id_path = '{0}/{1}'.format(self.folder_main, id_)
df = pd.read_csv('{0}/{1}'.format(id_path, metric_filename))
df = df[df['METRIC']==metric]
for j, mod in enumerate(models):
try:
values_dict[mod].append(df[df['MODEL']==mod][period].values[0])
except Exception as t:
print(t)
print('\n Missing data in model:{0} for id:{1}'.format(mod, id_))
return
bar_width = 1/len(models)
indices = np.array(indices)
for i, mod in enumerate(models):
heights = values_dict[mod]
bar_positions = indices + bar_width*i
rects = plt.barh(bar_positions, heights, bar_width, label=mod, color=color_dict[mod], alpha=alpha_dict[mod])
if show_values:
for j, rect in enumerate(rects):
yloc = bar_positions[j]
clr = 'black'
p = heights[j]
xloc = heights[j]
plt.gca().text(xloc, yloc, p, horizontalalignment='center', verticalalignment='center', color=clr, weight='bold')
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.title('{0} barplot'.format(metric))
plt.yticks(indices+bar_width*(len(models)/2), ids)
if len(models) > 5:
plt.legend(frameon=False, ncol=1)
else:
plt.legend(frameon=False, ncol=len(models))
if plot_domain:
plt.xlim(plot_domain)
if mode == 'show':
plt.show()
elif mode == 'save':
if output_filename is None:
output_filename = '{0}_barplot'.format(metric)
plt.gcf().set_size_inches([6,15])
plt.savefig('{0}/{1}.{2}'.format(overview_path, output_filename, ext), format=ext)
plt.close()
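# Minimal sketch of the grouped horizontal bar layout used above: each region gets a base
# position spaced by bar_separation_multiplier, and each model is offset by one bar width
# within that group. The values and names are illustrative, not taken from the source.
# import numpy as np
# import matplotlib.pyplot as plt
# ids = ['region_A', 'region_B', 'region_C']
# values = {'AR12': [0.8, 1.1, 0.9], 'ARGO': [0.6, 1.0, 1.2]}   # one value per region
# models = list(values)
# indices = np.arange(len(ids)) * 1.5          # bar_separation_multiplier = 1.5
# bar_width = 1.0 / len(models)
# for k, mod in enumerate(models):
#     plt.barh(indices + bar_width * k, values[mod], bar_width, label=mod)
# plt.yticks(indices + bar_width * (len(models) / 2), ids)
# plt.legend(frameon=False)
# plt.show()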
def barplot_metric(self, id_, metric, period, models=None, color_dict=None, alpha_dict=None, metric_filename='metrics.csv', bar_width=1, bar_separation_multiplier=1, mode='save', output_filename=None, plot_domain=None, ext='png', show_values=True):
"""
Produces a bar plot of the desired metric and models for a specific id.
If looking to make a group plot over several ids, please check group_barplot_metric().
Parameters
__________
id_ : str
Identifier for the region to look for.
metric : str
String containing the name of the metric to look for in the metrics file.
period : str
Column name containing the values to plot.
models : list, optional (default is None)
String list containing the names of the models to plot.
color_dict : dict
Dictionary containing specific colors for the models to plot.
metric_filename : str, optional (default is metrics.csv)
bar_width : float, optional (default is 1)
Bar width in the plot (0 to 1; larger values make bars overlap).
bar_separation_multiplier : float, optional (default is 1)
Multiplier for the separation between bars in the plot.
If set to 1, bars are placed at 1, 2, 3...; if set to 2, at 2, 4, 6, etc.
mode : str, optional (default is 'save')
If 'save', the function saves the plot in the id_-specific folder.
If 'show', the function uses plt.show to pop up the plot in real time.
alpha_dict : dict, optional (default is None)
Dictionary specifying the opacity of the bars in the plot (alpha argument in matplotlib).
If set to None, all opacities are set to 1.
output_filename : str, optional (default is None)
If set to None, output_filename is set to metricname_barplot.
ext : str, optional (default is png)
Extension format used to save the barplot.
plot_domain : list, optional (default is None)
List of two numbers that sets the limits of the plot (plt.xlim).
show_values : boolean, optional (default is True)
Plots the values of the metric within the barplot.
"""
if id_ in self.ids:
id_path = '{0}/{1}'.format(self.folder_main, id_)
df = pd.read_csv('{0}/{1}'.format(id_path, metric_filename))
df = df[df['METRIC']==metric]
if models is None:
models = df['MODEL'].values
if color_dict is None:
color_dict = dict(zip(models, ['b']*len(models)))
if alpha_dict is None:
alpha_dict = dict(zip(models, [1]*len(models)))
indices = []
for i, mod in enumerate(models):
height = df[df['MODEL']==mod][period].values[0]
indices.append(i*bar_separation_multiplier)
rects = plt.barh(indices[i], height, bar_width, color=color_dict[mod], alpha=alpha_dict[mod])
if show_values:
for rect in rects:
yloc = indices[i]
clr = 'black'
p = height
xloc = height
plt.gca().text(xloc, yloc, p, horizontalalignment='center', verticalalignment='center', color=clr, weight='bold')
else:
print(' {1} ID is not in data. current ids : {0}'.format(self.ids, id_))
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.title('{0} barplot'.format(metric))
plt.yticks(indices, models)
plt.legend(frameon=False, ncol=len(models))
if plot_domain:
plt.xlim(plot_domain)
if mode == 'show':
plt.show()
elif mode == 'save':
if output_filename is None:
output_filename = '{0}_barplot'.format(metric)
plt.gcf().set_size_inches([10,7])
plt.savefig('{0}/{1}.{2}'.format(id_path, output_filename, ext), format=ext)
plt.close()
def extract_metrics(self, ids = None, filename = None, folder_main = None, \
metrics=['RMSE', 'PEARSON'], models= ['AR12GO'], seasons=None):
if ids is None:
ids = self.ids
if folder_main is None:
folder_main = self.folder_main+'/'+OVERVIEW_FOLDER
if filename is None:
filename = CSV_METRICS_CONDENSED
df = pd.read_csv(folder_main+'/'+filename, index_col=0)
if models is not None:
df = df[df['MODEL'].isin(models)]
if metrics is not None:
df = df[df['METRIC'].isin(metrics)]
df.to_csv(folder_main + '/metrics_extracted.csv')
def group_compute_metrics(self, intervals, interval_labels, which_ids = 'all_ids', \
which_metrics = ['PEARSON', 'RMSE'], remove_missing_values=[0.5, 0], input_file_name=None, output_file_name=None, \
verbose=False, target = None, write_to_overview=False):
'''
For each of the ids analyzed, computes a set of metrics (the metrics available in the metric_handler variable).
Input:
intervals = a list (or per-id dictionary) of (start, end) label tuples delimiting the periods to evaluate.
interval_labels = the labels naming each interval (list or per-id dictionary).
which_ids = list that specifies which of the ids to compute the metrics for; if 'all_ids', computes for all.
which_metrics = list specifying which metrics to compute for the ids.
remove_missing_values = list of float values to ignore in the metric computation.
'''
if input_file_name is None:
input_file_name = ID_PREDS
if output_file_name is None:
output_file_name = ID_METRICS
if target is None:
target = TARGET
if which_ids == 'all_ids':
which_ids = self.ids
else:
for id_ in list(which_ids):
if id_ not in self.ids:
which_ids.remove(id_)
print('{0} not found in experiment object ids. Removing it. Please check.'.format(id_))
if isinstance(intervals, list) and isinstance(intervals[0], tuple):
print('Non-specified id intervals received. Using the same intervals for all ids')
intervals = dict(zip(which_ids, [intervals]*len(which_ids)))
interval_labels = dict(zip(which_ids, [interval_labels]*len(which_ids)))
elif isinstance(intervals, dict) and isinstance(interval_labels, dict):
for i, id_ in enumerate(which_ids):
try:
if len(intervals[id_]) != len(interval_labels[id_]):
print(' WARNING! Mismatch between interval and interval_labels in id: {0}.'.format(id_))
interval_labels[id_] = []
for k in range(0, len(intervals[id_])):
interval_labels[id_].append('s{0}'.format(k))
except KeyError:
print('ID not found within interval data. Please review.')
else:
print('Mismatch between intervals and interval labels types (Non-dictionaries). Please check input parameters')
return
if write_to_overview: id_dfs = []
if verbose: print('Reading on {0}'.format(self.folder_main))
for id_folder in os.listdir(self.folder_main):
if id_folder in which_ids:
file_preds = self.folder_main + '/' + id_folder + '/' + input_file_name
if verbose: print('Checking for data in {0}'.format(file_preds))
if os.path.exists(file_preds):
preds_pd = pd.read_csv(file_preds, index_col = 0)
if verbose:
print('Successfully loaded preds \n \n {0}'.format(preds_pd))
time.sleep(10)
id_interval = intervals[id_folder]
id_interval_labels = interval_labels[id_folder]
model_list = list(preds_pd)
model_list.remove(target)
metric_dict = {}
if verbose:
print('id: {0} \nid_intervals: {1}\n id_interval_labels{2}\n Models_available{3}'.format(id_folder, id_interval, id_interval_labels, model_list))
time.sleep(10)
# generating multi index for pandas dataframe
levels = [which_metrics, model_list]
labels = [[], []]
names = ['METRIC', 'MODEL']
for i, (start_interval, end_interval) in enumerate(id_interval):
metric_dict[id_interval_labels[i]] = []
sub_pd = copy.deepcopy(preds_pd[start_interval:end_interval])
for j, metric in enumerate(which_metrics):
for k, model in enumerate(model_list):
model_timeseries = sub_pd[model].values
target_s = sub_pd[target].values
if remove_missing_values:
model_timeseries, target_s = timeseries_rmv_values(model_timeseries, target_s, remove_missing_values)
#print(preds_pd, model_timeseries, target_s)
#time.sleep(100)
val = metric_handler[metric](model_timeseries, target_s)
metric_dict[id_interval_labels[i]].append(val)
if i == 0:
labels[0].append(j)
labels[1].append(k)
ind = pd.MultiIndex(levels=levels, labels=labels, names=names)
metric_pd = pd.DataFrame(metric_dict, index = ind)
#print(metric_pd)
metric_pd.to_csv(self.folder_main + '/' + id_folder + '/' + output_file_name )
metric_pd['ID'] = np.array([id_folder]*len(labels[0]))
if write_to_overview : id_dfs.append( metric_pd.set_index('ID', append=True, inplace=False))
else:
print('Not able to find results file for {0}. Please check your folder'.format(id_folder))
print('Finished iterating over all ids. Writing out condensed file in {0} folder'.format(OVERVIEW_FOLDER))
if write_to_overview:
id_dfs = pd.concat(id_dfs)
id_dfs.to_csv(self.folder_main+ '/' + OVERVIEW_FOLDER + '/' + CSV_METRICS_CONDENSED)
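# Usage sketch with hypothetical ids and dates: intervals and interval_labels are per-id
# dictionaries; each interval is a (start, end) tuple of index labels in the predictions file,
# and each label names the resulting column in the metrics file. 'experiment' stands for an
# instance of this class and is not defined in the source.
# intervals = {'region_A': [('2015-01-04', '2015-12-27'), ('2016-01-03', '2016-12-25')]}
# interval_labels = {'region_A': ['season_2015', 'season_2016']}
# experiment.group_compute_metrics(intervals, interval_labels, which_ids=['region_A'],
#                                  which_metrics=['PEARSON', 'RMSE'], write_to_overview=True)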
def efficiency_test(self, id_folder, periods, period_labels,\
model_to_compare, ignore_models=['GFT'],\
confidence_interval=.90, samples = 10000, p=1./52, filename = None,
output_file_name=None, remove_values=None, write=True, op='MSE'):
'''
Performs a relative efficiency test based on the stationary block bootstrap of Politis and Romano (1994):
https://www.tandfonline.com/doi/abs/10.1080/01621459.1994.10476870
'''
# create path to csv file
if filename is None:
file_path = id_folder +'/'+ID_PREDS
else:
file_path = id_folder +'/'+ filename
if output_file_name is None:
output_file_name = ID_EFFTEST
# load columns
preds_pd = pd.read_csv(file_path, index_col=0)
if remove_values is not None:
rmv_values(preds_pd, ignore = remove_values, verbose = True)
bbts = {}
model_names = list(preds_pd)
model_names.remove(TARGET)
# Removing models listed in ignore_models
for i, model in enumerate(ignore_models):
if model in model_names:
del preds_pd[model]
model_names.remove(model)
n_models = len(model_names)
#multiindexing
levels = [['BBT', 'lower_bound', 'upper_bound'], model_names]
labels = [ [0]*n_models + [1]*n_models + [2]*n_models, list(range(n_models))*3 ]
names =['Efficiency_test', 'Model']
ind = pd.MultiIndex(levels = levels, labels = labels, names = names)
print('Computing the efficiency test on {0}'.format(id_folder))
#Main computation loop
for i, period in enumerate(periods):
print('Computing the efficiency test on {0}'.format(period))
bbts[period_labels[i]] = self.bbt(preds_pd,\
model_to_compare=model_to_compare,period=period, \
confidence_interval=confidence_interval, \
samples = samples, p = p, op=op)
eff_pd = pd.DataFrame(bbts, index =ind)
if write:
eff_pd.to_csv(id_folder + '/' + output_file_name )
return eff_pd
def bbt(self, preds_pd, model_to_compare='AR12',\
seed='random', period=('2012-01-01', '2016-12-25'), verbose = True,\
samples=10000, p=1./52, confidence_interval=.90, op='MSE'):
'''
Performs a timeseries (stationary block) bootstrap to calculate the ratio of MSEs between
model_to_compare and every other available model.
Inputs:
preds_pd = pandas dataframe containing the data to analyze
model_to_compare = str naming the reference model (must be a column of the csv file)
seed = specify a numpy and random seed; use 'random' to leave the random state untouched
period = a 2-tuple containing the first and last index comprising the period to analyze
'''
if isinstance(seed,int):
random.seed(seed)
np.random.seed(seed)
model_names = list(preds_pd)
if verbose == True:
print('Successfully loaded dataframe. \n \n Target name in Config : {0} \n \n model_names found: {1}'.format(TARGET, model_names))
model_preds = []
model_names.remove(TARGET)
# Always place target preds at start
for i, model in enumerate(model_names):
if i == 0:
model_preds.append(preds_pd[TARGET][period[0]:period[1]])
model_preds.append(preds_pd[model][period[0]:period[1]])
methods = np.column_stack(model_preds)
n_models = len(model_names)
eff_obs = np.zeros(n_models)
# calculate observed relative efficiency
for i in range(n_models):
eff_obs[i] = metric_handler[op](methods[:, 0], methods[:, i + 1])
eff_obs = eff_obs / eff_obs[model_names.index(model_to_compare)]
# perform bootstrap
scores = np.zeros((samples, n_models))
for iteration in range(samples):
# construct bootstrap resample
new, n1, n2 = sbb.resample(methods, p)
# calculate sample relative efficiencies
for model in range(n_models):
scores[iteration, model] = metric_handler[op](new[:, 0], new[:, model + 1])
scores[iteration] = scores[iteration] / scores[iteration,model_names.index(model_to_compare)]
if op == 'PEARSON':
eff_obs = 1/eff_obs
scores = 1/scores
# define the variable containing the deviations from the observed rel eff
scores_residual = scores - eff_obs
# construct output array
report_array = np.zeros(3*n_models)
for comp in range(n_models):
tmp = scores_residual[:, comp]
# 95% confidence interval by sorting the deviations and choosing the endpoints of the 95% region
tmp = np.sort(tmp)
ignored_tail_size = (1-confidence_interval)/2
report_array[comp] = eff_obs[comp]
report_array[n_models*1+comp] = eff_obs[comp] + tmp[int(round(samples * (0.0+ignored_tail_size)))]
report_array[n_models*2+comp] = eff_obs[comp] + tmp[int(round(samples * (1.0-ignored_tail_size)))]
return report_array
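# Hedged sketch of a stationary block bootstrap resample in the spirit of Politis and Romano
# (1994), approximating what sbb.resample(methods, p) is assumed to return (a resampled array
# with the same number of rows). This is an illustration, not the library routine used above.
# import numpy as np
# def stationary_bootstrap_sketch(data, p, rng=None):
#     rng = np.random.default_rng() if rng is None else rng
#     n = len(data)
#     idx = np.empty(n, dtype=int)
#     idx[0] = rng.integers(n)
#     for t in range(1, n):
#         # with probability p start a new block at a random position,
#         # otherwise continue the current block (wrapping around the end)
#         idx[t] = rng.integers(n) if rng.random() < p else (idx[t - 1] + 1) % n
#     return data[idx]
# example = np.column_stack([np.arange(52.0), 2 * np.arange(52.0)])
# print(stationary_bootstrap_sketch(example, p=1.0 / 52).shape)   # (52, 2)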
def mat_load(self, state_dir = None, filename = None, dates_bol = True, verbose = True):
if state_dir is not None:
self.state_dir = state_dir
if filename is not None:
self.filename = filename
self.preds_pd = pd.read_csv(self.state_dir + self.filename + '_preds.csv')
self.metrics_pd = pd.read_csv(self.state_dir + self.filename + '_table.csv')
if dates_bol == True:
self.dates = self.preds_pd['Week'].values
del self.preds_pd['Week']
self.model_names = list(self.preds_pd)
self.season_names = list(self.metrics_pd)
# Removing Gold standard from model list and 'metrics' from metrics list
self.target_name = self.model_names[0]
self.model_names.remove(self.target_name)
del self.season_names[0]
self.ranking = self.model_names
self.target_pred = self.preds_pd[self.target_name].values
print('Loaded data for : {0} \n Models found : {1} \n Seasons found : {2} \n \n '.format(self.state_name, self.model_names, self.season_names))
def mat_metricRank(self, metric = None, season = None, verbose = False):
if metric is not None:
self.ranking_metric = metric
if season is not None:
self.ranking_season = season
if verbose == True:
print('Ranking models based on {0} metric for {1} season. \n \n'.format(self.ranking_metric, self.ranking_season))
metrics_pd = self.metrics_pd
model_names = self.model_names
n_models = np.size(model_names)
if verbose == True:
print('Number of models found = {0}'.format(n_models))
season_names = self.season_names
# Check if metric is in
metrics_and_models = list(metrics_pd['Metric'].values)
season_vals = metrics_pd[self.ranking_season].values
if verbose == True:
print('Table metric and models list : \n', metrics_and_models)
print('Season Values : \n ', season_vals)
if self.ranking_metric in metrics_and_models:
i = metrics_and_models.index(self.ranking_metric)
metric_column = season_vals[i+1:i+n_models+1]
self.ranking_values = metric_column
#metric_column = mat_metricColumn(metrics_pd, self.ranking_metric, self.ranking_season, n_models, verbose)
# Sorted default ordering is minimum to maximum. For correlations we look for highest positive (??).
if self.ranking_metric == 'PEARSON':
metric_column *= -1
# To compare RMSEs values have to be normalized based on gold standard's MS value.
if self.ranking_metric == 'RMSE':
metric_column /= np.sqrt(np.mean(np.power(self.target_pred,2)))
ranking = [model for (metric, model) in sorted(zip(metric_column, model_names), key=lambda pair: pair[0])]
if verbose == True:
print('Old Ranking: {0} \n Values for metric: {2} \n New ranking: {1} \n \n'.format(self.ranking, ranking, self.ranking_values ))
self.ranking = ranking
else:
print('Ranking metric not available. Please use another metric')
def mat_predsAndErrors(self, which_rank = None, verbose = False, start_season = 3, n_top_models = None ,dates_bol = True, gft_bol = True):
'''
Makes time series, error, and % error plot for the specified number of models. Models are chosen based on the ranking.
-If there is no ranking, it plots the models in the order they were written on the prediction csv.
-If the specified number is bigger than the number of models available, it plots all. '''
# Get predictions for the top n
if n_top_models is None:
n_top_models = self.n_top_models
ranking = self.ranking
model_names = self.model_names
preds_pd = self.preds_pd
gold_std = self.target_pred
n_lags = self.n_lags
gstd_rmse = np.sqrt(np.mean(np.power(gold_std,2)))
season_indices = self.season_indices - n_lags
season_indices[season_indices<0] = 0
season_names = self.season_names
if season_indices[1] < 1:
print('Warning! Season indices values may conflict with code', season_indices)
ax2_limit = 2
season_names.remove('ALL_PERIOD')
if gft_bol == True:
season_names.remove('GFT_PERIOD')
if which_rank is None:
which_rank = range(0, n_top_models)
if np.amax(which_rank) > len(ranking)-1:
print('Not accessible rank detected {0}. Please modify value'.format(np.amax(which_rank)))
time.sleep(2)
return
if len(which_rank) > n_top_models:
n_top_models = len(which_rank)
if verbose == True:
print('Initializing predsAndErrors function with following values: \n Ranking = {0}, \n Season names = {1} \n Gold standard MS = {2} \n Start season ={3} \n which_rank = {4}'.format(ranking, season_names, gstd_rmse, start_season, which_rank))
print ('Gold standard shape {0} \n, season indices'.format(np.shape(gold_std), season_indices))
time.sleep(2)
# Adjusting plot ticks and plot length
stind = self.season_indices[(start_season-1)*2]-1
season_indices -= stind
plot_names = [ranking[i] for i in which_rank]
if verbose == True:
print('start index {0} \n season_indices {1} \n plot_names ={2} '.format(stind, season_indices, plot_names))
time.sleep(2)
# Create figure and axes
fig = plt.figure()
ax = plt.subplot(4, 1, (1, 2))
ax1 = plt.subplot(4, 1, 3)
ax2 = plt.subplot(4, 1, 4)
# Plot gold standard
ax.plot(gold_std[stind:],color='.20', linewidth=6.0, label=self.target_name, alpha = 1)
# Compute error and percent error, then plot each model
for i in range(0, n_top_models):
series = preds_pd[plot_names[i]].values
series_err = series - gold_std
series_errp = np.divide(series_err,gold_std)
series_rmse = np.sqrt(np.mean(np.power(series_err,2)))
norm_rmse = (series_rmse/gstd_rmse)
series_errp[np.isinf(series_errp) ] = float('NaN')
if ax2_limit < norm_rmse:
ax2_limit = norm_rmse
'''
ax.plot(series[stind:], linewidth=2, label=plot_names[i] , alpha = .6)
ax1.plot(series_err[stind:], linewidth=3, label=plot_names[i] , alpha = .6)
ax2.plot(series_errp[stind:], linewidth=2, label=plot_names[i] , alpha = .6)
'''
# TEMP FOR TOP 3
if i == 0:
ax.plot(series[stind:], color='b',label=plot_names[i] , alpha = .8,linewidth=2.3)
ax1.plot(series_err[stind:], color='b', label=plot_names[i] , alpha = .8,linewidth=2.3)
ax2.plot(series_errp[stind:], color='b', label=plot_names[i] , alpha = .8,linewidth=2.3)
if i == 1:
ax.plot(series[stind:], color = 'r',label=plot_names[i] , alpha = .6,linewidth=2.5)
ax1.plot(series_err[stind:], color = 'r', alpha = .6,linewidth=2.5)
ax2.plot(series_errp[stind:], color = 'r', alpha = .6,linewidth=2.5)
if i == 2:
ax.plot(series[stind:], color ='.75', label=plot_names[i] , alpha = .9,linewidth=1.5, linestyle = 'dashed')
ax1.plot(series_err[stind:], color ='.75', alpha = .9,linewidth=1.5, linestyle = 'dashed')
ax2.plot(series_errp[stind:], color ='.75', alpha = .9,linewidth=1.5, linestyle = 'dashed')
if gft_bol == True:
GFTSERIES = preds_pd['GFT'].values
GFTSERIES = GFTSERIES[:,np.newaxis]
ax.plot(GFTSERIES[stind:], color='g', label = 'GFT', linewidth=2.5)
# Add format to plots
ax.tick_params(labelsize=14)
ax1.tick_params(labelsize=14)
ax2.tick_params(labelsize=14)
ax.grid(linestyle = 'dotted', linewidth = .8)
ax1.grid(linestyle = 'dotted', linewidth = .8)
ax2.grid(linestyle = 'dotted', linewidth = .8)
ax.set_ylabel('ILI activity', fontsize = 16)
ax1.set_ylabel('Error', fontsize = 16)
ax2.set_ylabel(' Error (Normalized)', fontsize = 14)
x= gold_std
x[np.isnan(x)]=0
ax.set_ylim([-np.max(x)*.05,np.max(x)*1.05])
ax2.set_ylim([-ax2_limit,ax2_limit]) # Change limits based on mean of error mean above and below zero.
ax1.axhline(0, 0, 1, color='gray')
ax2.axhline(0, 0, 1, color='gray')
ax.set_title('{0}'.format(self.state_name), fontsize = 16, weight='bold')
ax.legend( fontsize = 14)
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax1.get_xticklabels(), visible=False)
# Not good, change
# Yearly
n_seasons = len(season_indices)//2
tick_indices = [season_indices[i*2][0] for i in range(start_season-1, n_seasons)]
# Seasonal
# tick_indices = [season_indices[i][0] for i in range((start_season-1)*2, len(tick_indices))]
if dates_bol == True:
tick_labels = self.dates[stind+tick_indices]
else:
tick_labels = sorted(season_names[start_season-1:]*2)
ax.set_xticks(tick_indices, minor=False)
ax1.set_xticks(tick_indices, minor=False)
ax2.set_xticks(tick_indices, minor=False)
ax2.set_xticklabels(tick_labels, minor=False, rotation=75)
# save figure
fig = plt.gcf()
fig.set_size_inches(12, 9)
fig.savefig(self.state_dir+'/pyplots/{0}/series_errs.png'.format(self.filename) , format='png', dpi=300)
# fig.savefig(self.state_folder+'/_pyplots/_series_errs_'+ self.state_name+'_'+self.filename+ '.png', format='png', dpi=300)
def mat_metric_bargraph(self, verbose = False, which_metric=['RMSE', 'PEARSON']):
# Get table dataframe
rfile = self.svars.mat_ST_RANKS
vfile = self.svars.mat_ST_RANKS_V
color_list = self.svars.colors
ranking_pd = pd.read_csv(self.state_dir + rfile, index_col =0)
values_pd = pd.read_csv(self.state_dir + vfile, index_col =0)
for i, metric in enumerate(which_metric):
r = ranking_pd[metric].values
v = values_pd[metric].values
if i == 0:
#color_dict = color_assign(r,color_list)
color_dict = self.svars.STATE_COLOR_DICT
orderedColors = []
for k, model in enumerate(r):
orderedColors.append(color_dict[model])
fig = plt.figure() # CHANGE SO IT PLOTS EVERYTHING IN ONE GRAPH, GIVE FORMAT
objects = r
y_pos = np.arange(len(objects))
performance = v
plt.bar(y_pos, performance, align='center', alpha=0.8, color= orderedColors)
plt.xticks(y_pos, objects, rotation=90)
if metric == 'RMSE':
ylab = 'RMSE / gold standard MS.'
elif metric == 'PEARSON':
ylab = 'Coefficient value.'
plt.ylim([np.min(v)*.7,1])
plt.ylabel(ylab)
plt.title('{0} for {1}'.format(metric, self.state_name), fontsize = 16)
plt.grid(linestyle = 'dotted', linewidth = .8)
fig.set_size_inches(7 , 9)
fig.savefig(self.state_dir+'/pyplots/{0}/bar_'.format(self.filename)+ metric+ '.png', format='png', dpi=300)
#fig.savefig(self.state_folder+'/_pyplots/_bars_'+ self.state_name+'_'+metric+'_'+self.filename+ '.png', format='png', dpi=300)
plt.close()
def mat_all_ranks2csv(self, verbose = False, which_metric = ['PEARSON', 'RMSE']):
# Get table dataframe
metrics_pd = self.metrics_pd
model_names = self.model_names
season = self.ranking_season
n_models = len(model_names)
gold_std = self.target_pred
ranking_dict = {}
values_dict = {}
if verbose == True:
print('Initializing function ranks2csv with following parameters: \n metrics_pd: {0} \n model_names: {1} \n season: {2} \n n_models: {3}'.format(metrics_pd, model_names, season, n_models))
for i,metric in enumerate(which_metric):
print('Looking for {0} values'.format(metric))
metric_column = mat_metricColumn(metrics_pd, metric, season, n_models, verbose)
ranking, values =mat_getRank(model_names, metric_column, metric, verbose = False, gold_std = gold_std)
if verbose == True:
print('Metric = {0} \n metric_column = {1} \n model_names {2} \n ranking: {3} \n \n \n'.format(metric, metric_column, model_names, ranking))
time.sleep(2)
ranking_dict[metric] = ranking
values_dict[metric] = values
ranking_pd = pd.DataFrame(ranking_dict)
values_pd = pd.DataFrame(values_dict)
if verbose == True:
print('Ranking table looks like this : {0}. \n'.format(ranking_pd))
print('Writing down data')
ranking_pd.to_csv(self.state_dir + '/mat_metric_rankings.csv')
values_pd.to_csv(self.state_dir + '/mat_metric_values.csv')
def mat_eff_matrix_plot(self, bbt_file_name ='BBT_mse_matrix.csv', verbose = False, save_name = None ):
# Plots values for efficiency matrix models specified in the horizontal direction of matrix
eff_matrix_pd = pd.read_csv(self.state_dir + bbt_file_name, index_col =0)
model_names_columns = list(eff_matrix_pd)
model_names_rows = list(eff_matrix_pd.transpose())
if verbose == True:
print(model_names_columns, model_names_rows)
time.sleep(2)
n_model_names_columns = len(model_names_columns)
n_model_names_rows = len(model_names_rows)
n_div = n_model_names_rows -1
average_eff = []
# Entering summing loop
for c, model_c in enumerate(model_names_columns):
sum_eff = 0
if verbose == True:
print(c, model_c)
time.sleep(1)
for r, model_r in enumerate(model_names_rows):
if verbose == True:
print(r,model_r)
time.sleep(1)
if model_r not in model_c:
sum_eff += eff_matrix_pd.get_value(model_r, model_c, takeable=False)
average_eff.append(sum_eff/n_div)
saving_dir = self.state_dir+'/pyplots/bbt_'
if save_name is not None:
saving_dir += save_name
saving_dir += self.filename
average_eff = np.vstack(average_eff)
average_eff[average_eff > 2] = 2  # cap average efficiencies at 2
bar_graph(model_names_columns , average_eff, '(No units)', 'Average efficiency', saving_dir )
def mat_eff_matrix(self, filename='', period='all'):
random.seed(2)
np.random.seed(2)
state_name = self.state_name
RESULTS_DIR = self.state_folder
gold_std_name = self.target_name
verbose = True
# load columns
preds_pd = self.preds_pd
model_names = list(preds_pd)
if verbose == True:
print('Successfully loaded dataframe. \n \n gold_standard_name {0} \n \n model_names {1}'.format(gold_std_name, model_names))
model_preds = []
for i, model in enumerate(model_names):
if period == 'gft':
model_preds.append(preds_pd[model].values[143:241])
elif period == 'all':
# check target for zeros
if i == 0:
mask = (preds_pd[model].values == 0)
model_preds.append(preds_pd[model].values[~mask])
model_names.remove(gold_std_name)
# comment or uncomment the following blocks for either WEEK 1 or WEEK 2 horizon.
horizon = 0
#methods = np.column_stack((target_1, ar_1, argo_1))
methods = np.column_stack(model_preds)
# implementation
p = 1./52
samples = 10000
n_models = methods.shape[1] - 1
efficiency_matrix = np.zeros([n_models, n_models])
# calculate observed relative efficiency
for ii in range(n_models):
eff_obs = np.zeros(n_models)
for i in range(n_models):
eff_obs[i] = mse(methods[:, 0], methods[:, i + 1])
eff_obs = eff_obs / eff_obs[ii]
# perform bootstrap
scores = np.zeros((samples, n_models))
for iteration in range(samples):
# construct bootstrap resample
new, n1, n2 = sbb.resample(methods, p)
# calculate sample relative efficiencies
for model in range(n_models):
scores[iteration, model] = mse(new[:, 0], new[:, model + 1])
scores[iteration] = scores[iteration] / scores[iteration, -1]
# define the variable containing the deviations from the observed rel eff
scores_residual = scores - eff_obs
# construct output array
report_array = np.zeros((3,n_models))
for comp in range(n_models):
tmp = scores_residual[:, comp]
# 95% confidence interval by sorting the deviations and choosing the endpoints of the 95% region
tmp = np.sort(tmp)
report_array[0, comp] = eff_obs[comp]
report_array[1, comp] = eff_obs[comp] + tmp[int(round(samples * 0.050))]
report_array[2, comp] = eff_obs[comp] + tmp[int(round(samples * 0.950))]
efficiency_matrix[:,ii] = report_array[0,:].reshape(n_models)
print(efficiency_matrix)
efficiency_matrix_pd = pd.DataFrame(data = efficiency_matrix, index=model_names, columns = model_names)
efficiency_matrix_pd.to_csv(RESULTS_DIR + state_name + '/' + 'BBT_mse_matrix.csv')
def insert_gft(self):
preds_pd = pd.read_csv(self.state_dir + self.filename + '_preds.csv')
gft_pd = pd.read_csv(self.state_dir+'GFT_scaled.csv')
preds_pd['GFT'] = gft_pd['GFT']
print(preds_pd)
preds_pd.to_csv(self.state_dir + self.filename + '_preds.csv')
def term_scatters(self, n_terms = 8, start_date = '2010-01-03', end_date = '2016-12-25',start_window = 26,\
plot_date = ['2012-01-01', '2016-12-25'], window = 'extending', verbose = False):
# Makes a scatter and correlation plot for the top n_terms based on average correlation during whole period specified by
# start_date and end_date. Correlation starts a number of values specified by start_window
# 1.- <NAME>
window_size =104
terms_pd = pd.read_csv(self.state_dir + self.state_name + self.svars.TERM_FILE)
del terms_pd['GFT']
''' Remove terms
terms_pd.fillna(0, inplace=True)
print len(term_names)
for i,name in enumerate(term_names):
series = terms_pd[name].values
nzeros = np.size(series[series == 0])
zvector = np.size(np.zeros_like(series))
print i
if nzeros > zvector*.5:
terms_pd =terms_pd.drop(name, axis =1)
term_names = list(terms_pd)
'''
# Adding auto regression terms
gstd = terms_pd['ILI'].values
for i in range(0,12):
ar_term = np.hstack([gstd[1+i:len(gstd)], np.zeros(1+i)])
terms_pd['AR{0}'.format(1+i)] = pd.Series(ar_term, index=terms_pd.index)
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 1 12:55:16 2017
@author: rdk10
"""
import os
import pandas as pd
import sitchensis.Functions as f
import tkinter as tk
from tkinter.filedialog import askopenfilename
import pdb
############# Functions below #############################################
def getFileName():
cwd = os.getcwd()
root = tk.Tk()
root.lift()
root.attributes('-topmost',True)
filename = askopenfilename(initialdir = cwd ,title = "Select a tree file and directory", filetypes = [("Excel","*.xlsx"),("Excel","*.xlsm")]) #Ask user to pick files
root.after_idle(root.attributes,'-topmost',False)
root.withdraw()
fileName = filename.rsplit('/')[-1] #Excludes path to file
filePath = os.path.dirname(filename)
return(filePath, fileName)
def importExcelTree(fullFileName):
"""This section assumed one excel file per tree with a tabe for each type of measurements (trunk, segment, or branch)"""
#Import data all at once
treeData = pd.read_excel(fullFileName, sheet_name = None) #,converters={'name':str,'ref':str, 'referenceType':str})
#### IMPORTANT: if the pandas version is < 0.21 then sheet_name is not recognized and needs to be sheetname; better to update pandas
#list of dictionary keys
keys = [key for key in treeData]
#This tests for types of data present and assigns keys
if any(['trunk' in t.lower() for t in treeData]):
trunkKey = keys[[i for i, key in enumerate(keys) if 'trunk' in key.lower()][0]]
if len(treeData[trunkKey])>0:
trunkBool = True
else:trunkBool = False
else:trunkBool = False
if any(['seg' in t.lower() for t in treeData]):
segKey = keys[[i for i, key in enumerate(keys) if 'seg' in key.lower()][0]]
if len(treeData[segKey])>0:
segBool = True
else:segBool = False
else:segBool = False
if any(['branch' in t.lower() for t in treeData]):
brKey = keys[[i for i, key in enumerate(keys) if 'branch' in key.lower()][0]]
if len(treeData[brKey])>0:
branchBool = True
else:branchBool = False
else:branchBool = False
#Assign declination to variable
if any(['declin' in t.lower() for t in treeData]):
if len([i for i, key in enumerate(keys) if 'declin' in key.lower()])>0:
declinKey = keys[[i for i, key in enumerate(keys) if 'declin' in key.lower()][0]]
declinRefs = pd.read_excel(fullFileName, sheet_name = declinKey ,converters={'name':str})
declinRefs.columns = [x.lower() for x in declinRefs.columns]
declination = declinRefs['declination'].iloc[0] #extract number
declination = declination.item() # convert to python float from numpy.float64
else:
declination = 0.00
#Assign cust refs to dataFrame
if len([i for i, key in enumerate(keys) if 'cust' in key.lower()])>0:
custKey = keys[[i for i, key in enumerate(keys) if 'cust' in key.lower()][0]]
custRefs = pd.read_excel(fullFileName, sheet_name = custKey ,converters={'name':str})
custRefs.columns = [x.lower() for x in custRefs.columns]
custRefs['azi'] = custRefs['azi'] + declination
custRefs = f.calcCustRefs(custRefs)
else:
custRefs = []
#Saves the data if it exists and makes changes to columns so they work in the program
if trunkBool:
trunkDat = pd.read_excel(fullFileName, sheet_name = trunkKey, converters={'name':str,'ref':str})#, 'ref type':str})
trunkDat.columns = [x.lower() for x in trunkDat.columns]
trunkDat['name'] = trunkDat['name'].str.upper()
trunkDat['name'] = trunkDat['name'].str.replace(" ","")
trunkDat['azi'] = trunkDat['azi'] + declination
if any(pd.isnull(trunkDat.index)):
trunkDat = trunkDat.reset_index(drop = True)
if segBool:
segs = pd.read_excel(fullFileName, parse_dates = False, sheet_name = segKey, converters={'name':str,'O/E':str,'base ref':str, 'top ref':str,'midsegment ref':str})
segs.columns = [x.lower() for x in segs.columns]
segs['name'] = segs['name'].str.replace(" ","")
if segs['base azi'].dtype == 'O':
print("Make sure there is no text in the 'base azi' column such as 'CALC' \n or there will be problems later")
else:
segs['base azi'] = segs['base azi'] + declination
segs['top azi'] = segs['top azi'] + declination
if any(pd.isnull(segs.index)):
segs = segs.reset_index(drop = True)
if 'base ht' in segs.columns and 'top ht' in segs.columns:
segs['base z'] = segs['base ht']
segs['top z'] = segs['top ht']
else:
print("Warning: you must have segment columns labeled 'base ht' and 'top ht'")
if any(pd.isnull(segs['name'])):
print('There is at least one missing name in the segments file, please rectify this.')
names = f.splitName(segs['name'])
segs['top name'] = names['topNames']
segs['base name'] = names['baseNames']
if branchBool:
branches = pd.read_excel(fullFileName, parse_dates = False , sheet_name = brKey, converters={'name':str,'O/E':str, 'L/D':str,'origin':str,'base ref':str, 'top ref':str,'midsegment ref':str})
branches.columns = [x.lower() for x in branches.columns]
branches['name'] = branches['name'].str.replace(" ","")
branches['origin'] = branches['origin'].str.replace(" ","")
branches['orig azi'] = branches['orig azi'] + declination
branches['cent azi'] = branches['cent azi'] + declination
if any(pd.isnull(branches.index)):
branches = branches.reset_index(drop = True)
if 'base ht' in branches.columns and 'top ht' in branches.columns:
branches['base z'] = branches['base ht']
branches['top z'] = branches['top ht']
else:
print("Warning: you must have a branch columns labeled 'base ht' and 'top ht'")
if trunkBool == True:
if segBool == False and branchBool == False:
mapType = 'trunk map'
treeData = {'trunk':trunkDat}
elif segBool == True and branchBool == False: #There are trunk and segment data
mapType = 'segement map'
treeData = {'trunk':trunkDat, 'segments':segs}
elif segBool == False and branchBool == True:
mapType = 'trunk and branch map'
treeData = {'trunk':trunkDat, 'branches':branches}
elif segBool == True and branchBool == True:
mapType = 'full map'
treeData = {'trunk':trunkDat, 'segments':segs, 'branches': branches}
else:
print('There were no trunk data specified; also check your version of pandas, you need version 0.21 or greater.')
return(treeData, custRefs, mapType)
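# Usage sketch: pick a workbook interactively, import its trunk/segment/branch sheets, and
# inspect which map type was detected. Both functions are defined above; the variable names
# are illustrative.
# filePath, fileName = getFileName()
# treeData, custRefs, mapType = importExcelTree(os.path.join(filePath, fileName))
# print(mapType, list(treeData.keys()))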
def renameNotesCol(treeData, treeName, newname = 'notes'):
#logFileName = '{0}_ErrorScan.txt'.format(treeName)
for data in treeData:
i = 0
for col in treeData[data].columns:
if 'notes' in col:
ind = i
i = i + 1
#pdb.set_trace()
currentName = treeData[data].columns.values[ind]
treeData[data] = treeData[data].rename(columns = {currentName:newname})
#textout = 'The notes column labeled {0} for {1} data was changed to "{2}"'.format(currentName, data, newname)
#f.print2Log(logFileName, textout)
noteColChanges = {'treePart':data , 'oldNoteName': currentName, 'renamedTo':newname}
return (treeData, noteColChanges)
def excelExport(treeData, outPath, treeName):
#Brings in raw data and fileName (with full path), and exports and excel file to that location and name
#Appends the current date in daymonthyear format.
date = pd.Timestamp("today").strftime("%d%b%Y").lstrip('0')
#segs.name = segs.name.apply(repr)
#branches.origin = branches.origin.apply(repr)
#branches.to_csv("brFromNode.csv")
#This tests for types of data present
trunkBool = any(['trunk' in t.lower() for t in treeData])
segBool = any(['segment' in t.lower() for t in treeData] )
branchBool = any(['branch' in t.lower() for t in treeData] )
# Create a Pandas Excel writer using XlsxWriter as the engine.
#Makes new directory for output files
writer = pd.ExcelWriter('{0}_{1}.xlsx'.format(outPath + '/' + treeName, date), engine='xlsxwriter')
#Need to make this prettier
# Write each dataframe to a different worksheet.
if trunkBool:
treeData['trunk'].to_excel(writer, sheet_name='main trunk', index = False)
if segBool:
treeData['segments'].to_excel(writer, sheet_name='segments', index = False)
if branchBool:
treeData['branches'].to_excel(writer, sheet_name='branches', index = False)
# Close the Pandas Excel writer and output the Excel file.
writer.save()
def importForAcadTree(fileName):
"""This section assumed one excel file per tree with a tabe for each type of measurements (trunk, segment, or branch)"""
#Import data all at once
treeData = pd.read_excel(fileName, sheet_name = None) #,converters={'name':str,'ref':str, 'referenceType':str})
#list of dictionary keys
keys = [key for key in treeData]
#This tests for types of data present and assigns keys
if any(['trunk' in t.lower() for t in treeData]):
trunkKey = keys[[i for i, key in enumerate(keys) if 'trunk' in key.lower()][0]]
if len(treeData[trunkKey])>0:
trunkBool = True
else:trunkBool = False
else:trunkBool = False
if any(['seg' in t.lower() for t in treeData]):
segKey = keys[[i for i, key in enumerate(keys) if 'seg' in key.lower()][0]]
if len(treeData[segKey])>0:
segBool = True
else:segBool = False
else:segBool = False
if any(['branch' in t.lower() for t in treeData]):
brKey = keys[[i for i, key in enumerate(keys) if 'branch' in key.lower()][0]]
if len(treeData[brKey])>0:
branchBool = True
else:branchBool = False
else:branchBool = False
#Saves the data if it exists and makes changes to columns so they work in the program
if trunkBool:
trunkDat = pd.read_excel("{0}.xlsx".format(fileName), sheet_name = trunkKey, converters={'name':str,'ref':str})#, 'ref type':str})
trunkDat.columns = [x.lower() for x in trunkDat.columns]
trunkDat['name'] = trunkDat['name'].str.upper()
trunkDat['name'] = trunkDat['name'].str.replace(" ","")
if any(pd.isnull(trunkDat.index)):
trunkDat = trunkDat.reset_index(drop = True)
if segBool:
segs = pd.read_excel("{0}.xlsx".format(fileName),parse_dates = False, sheet_name = segKey, converters={'name':str,'O/E':str,'base ref':str, 'top ref':str,'midsegment ref':str})
segs.columns = [x.lower() for x in segs.columns]
segs['name'] = segs['name'].str.replace(" ","")
if any(pd.isnull(segs.index)):
segs = segs.reset_index(drop = True)
if 'base ht' in segs.columns and 'top ht' in segs.columns:
segs['base z'] = segs['base ht']
segs['top z'] = segs['top ht']
else:
print("Warning: you must have segment columns labeled 'base ht' and 'top ht'")
names = f.splitName(segs['name'])
segs['top name'] = names['topNames']
segs['base name'] = names['baseNames']
if branchBool:
branches = pd.read_excel("{0}.xlsx".format(fileName),parse_dates = False , sheet_name = brKey, converters={'name':str,'O/E':str, 'L/D':str,'origin':str,'base ref':str, 'top ref':str,'midsegment ref':str})
branches.columns = [x.lower() for x in branches.columns]
branches['name'] = branches['name'].str.replace(" ","")
branches['origin'] = branches['origin'].str.replace(" ","")
if any(
|
pd.isnull(branches.index)
|
pandas.isnull
|
"""
Name : c9_44_equal_weighted_vs_value_weighted.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
import pandas as pd
import scipy as sp
x=pd.read_pickle("c:/temp/yanMonthly.pkl")
def ret_f(ticker):
a=x[x.index==ticker]
p=sp.array(a['VALUE'])
ddate=a['DATE'][1:]
ret=p[1:]/p[:-1]-1
out1=pd.DataFrame(p[1:],index=ddate)
out2=
|
pd.DataFrame(ret,index=ddate)
|
pandas.DataFrame
|
# Spectral_Analysis_Amp_and_Phase.py
import os
import numpy as np
import pandas as pd
import scipy.linalg as la
import matplotlib.pyplot as plt
# Import time from the data or define it
t = np.arange(0.015, 0.021, 10**-7)
dt = 10**-7
# Define trainsize and number of modes
trainsize = 20000 # Number of snapshots used as training data.
num_modes = 44 # Number of POD modes.
reg = 0 # Just an input in case we regularize DMDc.
# Locate the full data of snapshots FOM and ROMs (INPUT)
Folder_name_data = 'C:\\Users\\Admin\\Desktop\\combustion\\'
file_name_FOM = 'traces_gems_60k_final.npy'
file_name_ROM_DMDc = 'traces_rom_DMDc_rsvd.npy'
file_name_ROM_cubic_r25 = 'traces_rom_cubic_tripple_reg_r25.npy'
file_name_ROM_cubic_r44 = 'traces_rom_cubic_r44.npy'
file_name_ROM_Quad_r44 = 'traces_rom_60k_100_30000.npy'
# Define output file location and file names to identify phase and amplitudes (OUTPUT)
folder_name = "C:\\Users\\Admin\\Desktop\\combustion\\spectral\\Final_plots\\"
Amp_name = folder_name + "\\" + "Amp" # Amplitude plots
Phase_name = folder_name + "\\" + "Phase" # Phase plots
# Load the data
FOM_ = np.load(Folder_name_data + file_name_FOM)
ROM_DMDc = np.load(Folder_name_data + file_name_ROM_DMDc)
ROM_cubic_r25 = np.load(Folder_name_data + file_name_ROM_cubic_r25)
ROM_cubic_r44 = np.load(Folder_name_data + file_name_ROM_cubic_r44)
ROM_Quad_r44 = np.load(Folder_name_data + file_name_ROM_Quad_r44)
# Plotting adjustments
End_plot_at = 60000 # 59990 # 40000
freq_limit_to_plot = 15000
# =============================================================================
def lineplots_timeseries(FOM_,
ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44, ROM_DMDc,
datanumber, unit, savefile):
"""Plots for comparision of data in time. Check the saved data in
folder_name.
Parameters
----------
FOM_
Full order model data input
ROM_Quad_r44
Q-OPINF at r = 44
ROM_cubic_r25
C-OPINF at r = 25
ROM_cubic_r44
C-OPINF at r = 44
ROM_DMDc
DMDc results
datanumber
Defines the state parameter
* -12 = Pressure
* -8 = Vx
* -4 = Vy
* 0 = Temperature
* 8 = [CH4]
* 12 = [O2]
* 16 = [H2O]
* 20 = [CO2]
unit
Unit for each variable (Pa, Kelvin...)
savefile
Suffix to save the file name
"""
print("Time series plots")
plt.xlim([0.015, 0.021]) # set axis limits
plt.plot(t[0:End_plot_at],
pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at],
label='FOM', linestyle='solid', c='k')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:End_plot_at],
label='Q-OPINF', linestyle='dashed', c='#ff7f0e')
# plt.plot(t[0:End_plot_at],
# pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:End_plot_at],
# label='C-OPINF_r25', linestyle='dashed')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:End_plot_at],
label='C-OPINF', linestyle='dashed', c='b')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][0:End_plot_at],
label='DMDc', linestyle='dashdot', c='r')
plt.xlabel('time')
plt.ylabel(unit)
plt.axvline(x=t[0] + trainsize*dt, color='black')
plt.legend()
fname = f"{T_st}_ts_{trainsize}_r_{num_modes}_reg_{reg}{savefile}.pdf"
plt.savefig(os.path.join(folder_name, fname),
bbox_inches="tight", dpi=200)
plt.show()
def L2errorplots(FOM_, ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44, ROM_DMDc,
datanumber, unit):
"""Plot L2 norm error comparision between all the ROMs.
Parameters
----------
FOM_
Full order model data input
ROM_Quad_r44
Q-OPINF at r = 44
ROM_cubic_r25
C-OPINF at r = 25
ROM_cubic_r44
C-OPINF at r = 44
ROM_DMDc
DMDc results
datanumber
Defines the state parameter
* -12 = Pressure
* -8 = Vx
* -4 = Vy
* 0 = Temperature
* 8 = [CH4]
* 12 = [O2]
* 16 = [H2O]
* 20 = [CO2]
unit
Unit for each variable (Pa, Kelvin...)
"""
print("L2 norm error plot")
e_ROM_Quad_r44 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
e_ROM_cubic_r25 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
e_ROM_cubic_r44 = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
e_ROM_DMDc = (la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at] - pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][0:End_plot_at]))/la.norm(pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at])
plt.plot(t[0:End_plot_at],
pd.DataFrame(FOM_).loc[T_st + datanumber][0:End_plot_at],
label='FOM', linestyle='solid')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:End_plot_at],
label='Q-OPINF', linestyle='dashed')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:End_plot_at],
label='C-OPINF_r25', linestyle='dashed')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:End_plot_at],
label='C-OPINF_r44', linestyle='dashed')
plt.plot(t[0:End_plot_at],
pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][0:End_plot_at],
label='DMDc', linestyle='dashdot')
x_axis = ['ROM_Quad_r44', 'ROM_cubic_r25', 'ROM_cubic_r44', 'ROM_DMDc']
y_axis = [e_ROM_Quad_r44, e_ROM_cubic_r25, e_ROM_cubic_r44, e_ROM_DMDc]
plt.scatter(x_axis,y_axis, s=100)
plt.xlabel('time')
plt.ylabel(unit)
plt.title("L2 norm Error Plot")
plt.legend()
fnm = f"Error_plot_{T_st}_ts_{trainsize}_r_{num_modes}_reg_{reg}{unit}.pdf"
plt.savefig(os.path.join(folder_name, fnm), bbox_inches="tight", dpi=200)
plt.show()
def get_freq_and_amplitude(T_ROM):
"""
Parameters
----------
T_ROM = any input signal
Returns
-------
frequency and amplitude transformation of the signal
"""
fft1 = np.fft.fft(T_ROM)
fftfreq1 = np.fft.fftfreq(len(T_ROM), d=dt)
amplitude_DMD = abs(fft1)
return fftfreq1, amplitude_DMD, fft1
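# A quick, self-contained check of get_freq_and_amplitude on a synthetic tone;
# the 5 kHz frequency and the 2000-sample window are assumptions for illustration.
_f0 = 5000.0
_sig = np.sin(2 * np.pi * _f0 * t[:2000])
_freqs, _amps, _ = get_freq_and_amplitude(_sig)
# The dominant positive-frequency bin sits at ~_f0:
# abs(_freqs[1 + np.argmax(_amps[1:len(_sig)//2])]) -> 5000.0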
def amplitude_plots(fftfreq,
fftfreq_Quad_r44, fftfreq_cubic_r25,
fftfreq_cubic_r44, fftfreq_DMDc,
amplitude,
amplitude_Quad_r44, amplitude_cubic_r25,
amplitude_cubic_r44, amplitude_DMDc,
unit, savefile,
title_test_or_train="Training results plotted in the frequency domain",
save_id="_ts_"):
"""Amplitude plot comparision and save files in the Amp name folder
Eg. for the test data filename will be : Amp_test_12_ts_20000_r_44_reg_0CO2
For the training data filename will be : Amp12_ts_20000_r_44_reg_0CO2
Parameters
----------
fftfreq
frequency content of the FOM
fftfreq_Quad_r44
frequency content of the Q-OPINF at r = 44
fftfreq_cubic_r25
frequency content of the C-OPINF at r = 25
fftfreq_cubic_r44
frequency content of the C-OPINF at r = 44
fftfreq_DMDc
frequency content of the DMDc at r = 44
amplitude
Amplitude content of the FOM
amplitude_Quad_r44
Amplitude content of the Q-OPINF at r = 44
amplitude_cubic_r25
Amplitude content of the C-OPINF at r = 25
amplitude_cubic_r44
Amplitude content of the C-OPINF at r = 44
amplitude_DMDc
Amplitude content of the DMDc at r = 44
unit
unit for each variable (Pa, Kelvin...)
savefile
Filename to be saved
title_test_or_train
"Training results plotted in the frequency domain"
save_id
'_ts_' for traindata, '_test_' for testing data
"""
st = 1
end = 60
plt.xlim([0,freq_limit_to_plot])
plt.scatter(fftfreq[st:end], amplitude[st:end],
s=50, label='FOM', marker='o', alpha=0.5, c='k')
plt.scatter(fftfreq_Quad_r44[st:end], amplitude_Quad_r44[st:end],
s=50, label='Q-OPINF', marker='s', alpha=0.5, c='#ff7f0e')
# plt.scatter(fftfreq_cubic_r25[st:end], amplitude_cubic_r25[st:end],
# s=50, label='C-OPINF_r25', marker='p', alpha=0.5)
plt.scatter(fftfreq_cubic_r44[st:end], amplitude_cubic_r44[st:end],
s=50, label='C-OPINF', marker='*', alpha=0.5, c='b')
plt.scatter(fftfreq_DMDc[st:end], amplitude_DMDc[st:end],
s=50, label='DMDc', marker='+', alpha=0.5, c='r')
plt.plot(fftfreq[st:end], amplitude[st:end],
linestyle='solid', c='k')
plt.plot(fftfreq_Quad_r44[st:end], amplitude_Quad_r44[st:end],
linestyle='dashed', c='#ff7f0e')
# plt.plot(fftfreq_cubic_r25[st:end], amplitude_cubic_r25[st:end],
# linestyle='dashed')
plt.plot(fftfreq_cubic_r44[st:end], amplitude_cubic_r44[st:end],
linestyle='dashed', c='b')
plt.plot(fftfreq_DMDc[st:end], amplitude_DMDc[st:end],
linestyle='dashdot', c='r')
plt.xlabel('freq')
plt.ylabel('Amplitude')
plt.legend()
# plt.title(title_test_or_train)
if save_id == "_ts_":
fname = f"{Amp_name}{T_st}{save_id}{trainsize}_r_{num_modes}"
fname += f"_reg_{reg}{savefile}.pdf"
elif save_id == "_test_":
fname = f"{Amp_name}{save_id}{T_st}_ts_{trainsize}_r_{num_modes}"
fname += f"_reg_{reg}{savefile}.pdf"
else:
raise ValueError(f"invalid save_id '{save_id}'")
plt.savefig(fname, bbox_inches="tight", dpi=200)
plt.show()
def get_min(X):
"""
Parameters
----------
X
Phase angle array
Returns
-------
min(X, 360-X)
"""
b = abs(X)
a = abs(360-b)
return np.minimum(a,b)
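# Minimal illustration of the wrap-around handled by get_min (values assumed):
# a raw phase difference of 350 or -350 degrees is really a 10-degree mismatch.
_wrap_check = get_min(np.array([10.0, 350.0, -350.0]))
# _wrap_check -> array([10., 10., 10.])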
def phase_plots(fftfreq,
fftfreq_Quad_r44, fftfreq_cubic_r25,
fftfreq_cubic_r44, fftfreq_DMDc,
Phase_FOM,
Phase_Quad_r44, Phase_cubic_r25,
Phase_cubic_r44, Phase_DMDc,
unit, savefile,
title_test_or_train="Training results plotted in the frequency domain",
save_id="_ts_"):
"""Phase plot comparision and save files in the Amp name folder.
For the test data filename will be : Phase_test_12_ts_20000_r_44_reg_0CO2
For the training data filename will be : Phase12_ts_20000_r_44_reg_0CO2
Parameters
----------
fftfreq
frequency content of the FOM
fftfreq_Quad_r44
frequency content of the Q-OPINF at r = 44
fftfreq_cubic_r25
frequency content of the C-OPINF at r = 25
fftfreq_cubic_r44
frequency content of the C-OPINF at r = 44
fftfreq_DMDc
frequency content of the DMDc at r = 44
Phase_FOM
Phase content of the FOM
Phase_Quad_r44
Phase content of the Q-OPINF at r = 44
Phase_cubic_r25
Phase content of the C-OPINF at r = 25
Phase_cubic_r44
Phase content of the C-OPINF at r = 44
Phase_DMDc
Phase content of the DMDc at r = 44
unit
unit for each variable (Pa, Kelvin...)
savefile
Filename to be saved
title_test_or_train
"Training results plotted in the frequency domain"
save_id
'_ts_' for traindata, '_test_' for testing data
"""
st = 1
end = 60
plt.xlim([0, freq_limit_to_plot])
# plt.scatter(fftfreq[st:end], Phase_FOM[st:end],
# s=50, label='FOM', marker='o', alpha=0.5, c='k')
plt.scatter(fftfreq_Quad_r44[st:end],
get_min(Phase_FOM[st:end] - Phase_Quad_r44[st:end]),
s=50, label='Q-OPINF', marker='s', alpha=0.5, c='#ff7f0e')
# plt.scatter(fftfreq_cubic_r25[st:end], amplitude_cubic_r25[st:end],
# s=50, label='C-OPINF_r25', marker='p', alpha=0.5)
plt.scatter(fftfreq_cubic_r44[st:end],
get_min(Phase_FOM[st:end] - Phase_cubic_r44[st:end]),
s=50, label='C-OPINF', marker='*', alpha=0.5, c='b')
plt.scatter(fftfreq_DMDc[st:end],
get_min(Phase_FOM[st:end] - Phase_DMDc[st:end]),
s=50, label='DMDc', marker='+', alpha=0.5, c='r')
# plt.plot(fftfreq[st:end],Phase_FOM[st:end], linestyle='solid', c='k')
plt.plot(fftfreq_Quad_r44[st:end],
get_min(Phase_FOM[st:end] - Phase_Quad_r44[st:end]),
linestyle='dashed', c='#ff7f0e')
# plt.plot(fftfreq_cubic_r25[st:end], amplitude_cubic_r25[st:end],
# linestyle='dashed')
plt.plot(fftfreq_cubic_r44[st:end],
get_min(Phase_FOM[st:end] - Phase_cubic_r44[st:end]),
linestyle='dashed', c='b')
plt.plot(fftfreq_DMDc[st:end],
get_min(Phase_FOM[st:end] - Phase_DMDc[st:end]),
linestyle='dashdot', c='r')
plt.xlabel('freq')
plt.ylabel('Phase angle difference FOM-ROM (degree)')
plt.legend()
# plt.title(title_test_or_train)
if save_id == "_ts_":
fname = f"{Phase_name}{T_st}{save_id}{trainsize}_r_{num_modes}"
fname += f"_reg_{reg}{savefile}.pdf"
if save_id == "_test_":
fname = f"{Phase_name}{save_id}{T_st}_ts_{trainsize}_r_{num_modes}"
fname += f"_reg_{reg}{savefile}.pdf"
else:
raise ValueError(f"invalid save_id '{save_id}'")
plt.savefig(fname, bbox_inches="tight", dpi=200)
plt.show()
def fftoutput_train(T_st, t, trainsize, num_modes, reg,
unit='Temperature in Kelvin', datanumber=0,
savefile='filename'):
"""Amplitude and phase plots for training dataset.
Parameters
----------
T_st
monitor location code
* 12: Monitor location 1
* 13: Monitor location 2
* 14: Monitor location 3
* 15: Monitor location 4
t
as defined in input
trainsize
as defined in input
num_modes
as defined in input
reg
as defined in input
unit
unit for each variable (Pa, Kelvin...)
datanumber
defines the state parameter
* -12: Pressure
* -8: Vx
* -4: Vy
* 0: Temperature
* 8: [CH4]
* 12: [O2]
* 16: [H2O]
* 20: [CO2]
savefile
Suffix to save the file name
"""
# fmax = 1/dt
ROM_S = trainsize # 20000
FOM_S = trainsize # 20000
T = pd.DataFrame(FOM_).loc[13][0:FOM_S]
# T_ROM = pd.DataFrame(ROM_DMDc).loc[13][0:ROM_S]
# df = 1/dt/trainsize
# fdomain = np.arange(0,fmax,df)
T = pd.DataFrame(FOM_).loc[T_st + datanumber][0:FOM_S]
T_ROM_Quad_r44 = pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][0:ROM_S]
T_ROM_DMDc = pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][0:ROM_S]
T_ROM_cubic_r25 = pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][0:ROM_S]
T_ROM_cubic_r44 = pd.DataFrame(ROM_cubic_r44).loc[T_st + datanumber][0:ROM_S]
lineplots_timeseries(FOM_,
ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44, ROM_DMDc,
datanumber,unit,savefile)
# L2errorplots(FOM_, ROM_Quad_r44, ROM_cubic_r25, ROM_cubic_r44, ROM_DMDc,
# datanumber, unit)
# fftfreq1, amplitude_DMD, fft1 = get_freq_and_amplitude(T_ROM_DMD)
fftfreq_DMDc, amplitude_DMDc, fft_DMDc = get_freq_and_amplitude(T_ROM_DMDc)
fftfreq_Quad_r44, amplitude_Quad_r44, fft_Quad_r44 = get_freq_and_amplitude(T_ROM_Quad_r44)
fftfreq_cubic_r25, amplitude_cubic_r25, fft_cubic_r25 = get_freq_and_amplitude(T_ROM_cubic_r25)
fftfreq_cubic_r44, amplitude_cubic_r44, fft_cubic_r44 = get_freq_and_amplitude(T_ROM_cubic_r44)
fftfreq, amplitude, fft = get_freq_and_amplitude(T)
amplitude_plots(fftfreq,
fftfreq_Quad_r44, fftfreq_cubic_r25,
fftfreq_cubic_r44, fftfreq_DMDc,
amplitude,
amplitude_Quad_r44, amplitude_cubic_r25,
amplitude_cubic_r44, amplitude_DMDc,
unit, savefile,
title_test_or_train="Training results plotted in the frequency domain",
save_id="_ts_")
Phase_FOM = np.angle(fft, deg=True)
Phase_Quad_r44 = np.angle(fft_Quad_r44, deg=True)
Phase_cubic_r25 = np.angle(fft_cubic_r25, deg=True)
Phase_cubic_r44 = np.angle(fft_cubic_r44, deg=True)
Phase_DMDc = np.angle(fft_DMDc, deg=True)
phase_plots(fftfreq,
fftfreq_Quad_r44, fftfreq_cubic_r25,
fftfreq_cubic_r44, fftfreq_DMDc,
Phase_FOM,
Phase_Quad_r44, Phase_cubic_r25,
Phase_cubic_r44, Phase_DMDc,
unit, savefile,
title_test_or_train="Training results plotted in the frequency domain",
save_id="_ts_")
def fftoutput_test(T_st, t, trainsize, num_modes, reg,
unit='Temperature in Kelvin',
datanumber=0, savefile='filename'):
"""
T_st = monitor location code
code number for each location:
12 - Monitor location 1
13 - Monitor location 2
14 - Monitor location 3
15 - Monitor location 4
t = as defined in input
trainsize = as defined in input
num_modes = as defined in input
reg = as defined in input
unit = unit for each variable (Pa, Kelvin...)
datanumber = to define the state parameter
-12 = Pressure
-8 = Vx
-4 = Vy
0 = Temperature
8 = [CH4]
12 = [O2]
16 = [H2O]
20 = [CO2]
savefile = Suffix to save the file name
Returns
-------
The calculation of amplitude and phase plots for testing dataset
"""
# fmax = 1/dt
# ROM_S = len(t[0:End_plot_at]) - trainsize
FOM_S = len(t[0:End_plot_at]) - trainsize
T = pd.DataFrame(FOM_).loc[13][FOM_S::]
# T_ROM = pd.DataFrame(ROM_DMDc).loc[13][ROM_S::]
# df = 1/dt/(len(t[0:End_plot_at]) - trainsize)
# fdomain = np.arange(0,fmax,df)
T = pd.DataFrame(FOM_).loc[T_st + datanumber][trainsize:len(t[0:End_plot_at])]
# T_ROM_DMD = pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][trainsize:len(t[0:End_plot_at])]
T_ROM_DMDc = pd.DataFrame(ROM_DMDc).loc[T_st + datanumber][trainsize:len(t[0:End_plot_at])]
    T_ROM_Quad_r44 = pd.DataFrame(ROM_Quad_r44).loc[T_st + datanumber][trainsize:len(t[0:End_plot_at])]
T_ROM_cubic_r25 = pd.DataFrame(ROM_cubic_r25).loc[T_st + datanumber][trainsize:len(t[0:End_plot_at])]
T_ROM_cubic_r44 =
|
pd.DataFrame(ROM_cubic_r44)
|
pandas.DataFrame
|
"""Parses vcf output from VEP/LOFTEE to a table where
rows are rare variants and columns are the annotations
"""
import os
import sys
import argparse
import numpy as np
import pandas as pd
from pysam import VariantFile
def get_info_fields(vcf):
"""Get info fields in vcf
Parameters
----------
vcf : pysam.libcbcf.VariantFile
VCF file read in by pysam
Returns
-------
list
"""
info_fields = (
vcf.header.info["CSQ"]
.record["Description"]
.split("Format: ")[-1]
.strip('"')
.split("|")
)
return info_fields
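# A minimal sketch of the string handling above on a hypothetical CSQ
# Description value (the real string comes from the VCF header):
_desc = '"Consequence annotations from Ensembl VEP. Format: Allele|Consequence|LoF"'
_fields = _desc.split("Format: ")[-1].strip('"').split("|")
# _fields -> ['Allele', 'Consequence', 'LoF']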
def get_info_dictionary(rec, info_fields):
"""Parses info into a dictionary with info fields as the keys
Parameters
----------
rec : pysam.libcbcf.VariantRecord
        Obtained from iterating over pysam.VariantFile.fetch()
info_fields : list
List of info fields in the vcf
Returns
-------
dict
Info field parsed into a dictionary
"""
info_data = rec.info["CSQ"]
info_dict = [
dict(zip(info_fields, x.split("|")))
for x in info_data
if len(x.split("|")) == len(info_fields)
]
return info_dict
def vcf_to_dataframe(vcf_filepath):
"""Parse info from vcf into dataframes with genomic coordinates and
info fields as the columns. Makes a dataframe for each chromosome
and returns list of paths to the saved dataframe.
Parameters
----------
vcf_filepath : str
Path to the VEP/LOFTEE output vcf
Returns
-------
list
"""
# Read vcf
vcf = VariantFile(vcf_filepath)
info_fields = get_info_fields(vcf)
# Parse info from vcf into dataframe
df_list = []
cur_chrom = None
write_path_list = []
for i, rec in enumerate(vcf.fetch()):
# Keep track of current chrom
chrom = "chr" + rec.chrom
        if cur_chrom is None:  # Starting chromosome
            cur_chrom = chrom
        if cur_chrom != chrom:  # Save and print the chromosome dataframe
info_df = pd.concat(df_list, ignore_index=True)
write_path = (
vcf_filepath.split(".vcf.gz")[0] + "." + cur_chrom + ".info.tsv"
)
info_df.to_csv(write_path, sep="\t")
print(f"VCF info dataframe saved to\n{write_path}")
# Reset list of dataframes and update current chromosome
del info_df # Delete to save on memory
df_list = []
cur_chrom = chrom
# Parse info from vcf
info_dict = get_info_dictionary(rec, info_fields)
df = pd.DataFrame(info_dict)
df.insert(0, "Alt", rec.alts[0])
df.insert(0, "Pos", rec.pos)
df.insert(0, "Chrom", chrom)
df_list.append(df)
# Save and print the final chromosome
info_df = pd.concat(df_list, ignore_index=True)
write_path = vcf_filepath.split(".vcf.gz")[0] + "." + cur_chrom + ".info.tsv"
info_df.to_csv(write_path, sep="\t")
print(f"VCF info dataframe saved to\n{write_path}")
def get_annotations_from_df(info_df):
"""Extracts annotations from the `Consequence` and `LoF` columns
Parameters
----------
info_df : pandas.Dataframe
Dataframe created by vcf_to_dataframe
Returns
-------
pandas.Dataframe
"""
# Genomic coordinates
coord_df = info_df[["Chrom", "Pos", "Alt", "Allele"]]
# VEP annotations
vep_df = pd.get_dummies(info_df["Consequence"])
# Obtain splice_region_variant from combined consequences
splice_col_list = [col for col in vep_df.columns if "splice_region_variant" in col]
splice_region_variant = np.amax(vep_df[splice_col_list].values, axis=1)
vep_df["splice_region_variant"] = splice_region_variant
# LOFTEE annotations
lof_df = pd.get_dummies(info_df["LoF"])
return coord_df.join(vep_df).join(lof_df)
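# Illustrative sketch of the one-hot step above on toy consequences (values assumed):
_toy_cons = pd.Series(["missense_variant",
                       "splice_region_variant&intron_variant",
                       "intron_variant"], name="Consequence")
_toy_vep = pd.get_dummies(_toy_cons)
_splice_cols = [c for c in _toy_vep.columns if "splice_region_variant" in c]
_toy_vep["splice_region_variant"] = np.amax(_toy_vep[_splice_cols].values, axis=1)
# The second row gets flagged with splice_region_variant even though it only
# appears inside a combined consequence string, matching the logic above.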
def test_get_annotations_from_df(info_df, anno_df):
"""Check that output of `get_annoations_from_df` is correctly representing the
annotations from info_df (Excluding `splice_region_variant` since it is
combined with other consequences)
"""
    # Get list of annotations from info_df
vep_list = list(info_df["Consequence"].unique())
lof_list = [x for x in info_df["LoF"].unique() if isinstance(x, str)]
anno_list = vep_list + lof_list
test_result = {}
for anno in anno_list:
anno_df_num = len(anno_df[anno_df[anno] == 1])
if anno in lof_list:
info_df_num = len(info_df[info_df["LoF"] == anno])
else:
info_df_num = len(info_df[info_df["Consequence"] == anno])
test_result[anno] = anno_df_num == info_df_num
for anno in anno_list:
if not test_result[anno]:
print(test_result)
return test_result[anno]
return True
def dataframe_to_tidy(info_df):
"""Extracts annotations from the `Consqeuence` and `LoF` columns.
Returns a tidy dataframe with the columns
Chrom | Pos | Alt | Allele | annotations...
Parameters
----------
info_df : pandas.Dataframe
Dataframe created by vcf_to_dataframe
Returns
-------
pandas.Dataframe
"""
anno_list = [
"Chrom",
"Pos",
"Alt",
"Allele",
"3_prime_UTR_variant",
"5_prime_UTR_variant",
"TF_binding_site_variant",
"downstream_gene_variant",
"intergenic_variant",
"intron_variant",
"missense_variant",
"non_coding_transcript_exon_variant",
"regulatory_region_variant",
"splice_acceptor_variant",
"splice_donor_variant",
"splice_region_variant",
"stop_gained",
"synonymous_variant",
"upstream_gene_variant",
"LoF_HC",
"LoF_LC",
]
anno_df = get_annotations_from_df(info_df)
# check that annotations were properly extracted
if test_get_annotations_from_df(info_df, anno_df):
# Rename and select for annotation columns to be consistent with the
# Watershed paper table S3
anno_df.rename(columns={"HC": "LoF_HC", "LC": "LoF_LC"}, inplace=True)
anno_df = anno_df[anno_list]
# Collapse transcripts by taking maximum so we get SNV level
# annotations
anno_snv_df = anno_df.groupby(
["Chrom", "Pos", "Alt", "Allele"], as_index=False
).max()
# Set LoF_LC to 0 if LoF_HC is 1
lof_hc = anno_snv_df["LoF_HC"].values
lof_lc = anno_snv_df["LoF_LC"].values
anno_snv_df["LoF_LC"] = [0 if hc == 1 else lc for hc, lc in zip(lof_hc, lof_lc)]
return anno_snv_df
return None
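# Minimal sketch (toy values) of the transcript collapse and LoF_LC zeroing above:
_toy_anno = pd.DataFrame({"Chrom": ["chr1", "chr1"], "Pos": [100, 100],
                          "Alt": ["A", "A"], "Allele": ["A", "A"],
                          "LoF_HC": [1, 0], "LoF_LC": [0, 1]})
_toy_snv = _toy_anno.groupby(["Chrom", "Pos", "Alt", "Allele"], as_index=False).max()
_toy_snv["LoF_LC"] = [0 if hc == 1 else lc
                      for hc, lc in zip(_toy_snv["LoF_HC"], _toy_snv["LoF_LC"])]
# The two transcript rows collapse to one SNV row with LoF_HC == 1 and LoF_LC == 0.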
def main():
# filename = "/scratch/groups/abattle4/victor/WatershedAFR/data/annotation/gene-AFR-rv.vep.loftee.vcf.gz"
usage = "Parses VCF output from VEP/LOFTEE to a table where rows are rare variants and columns are the annotations"
parser = argparse.ArgumentParser(description=usage)
parser.add_argument(
"--anno",
required=True,
help="Annotations from VEP and LOFTEE as vcf.gz format. Needs to be tbi indexed.",
)
args = parser.parse_args()
filename = args.anno
if not filename.endswith(".vcf.gz"):
print("Annotation file needs to be bgzipped and tabix indexed")
sys.exit(1)
# Info field from VCF parsed to dataframe
df_path_list = [
filename.split(".vcf.gz")[0] + ".chr" + str(x) + ".info.tsv"
for x in range(1, 23)
]
if not os.path.isfile(df_path_list[0]):
vcf_to_dataframe(filename)
# Combine tidy dataframes for each chromosome into one file
anno_snv_all_df = None
for df_path in df_path_list:
# Open info field from vcf parsed to dataframe
info_df =
|
pd.read_csv(df_path, sep="\t", index_col=0)
|
pandas.read_csv
|
import pandas as pd
from utils import Insert_row, check_size_invalidity
from prettytable import PrettyTable
class FirstFit():
def __init__(self, partitions:list, processes:list):
process = {
"Processes": [f"p{i+1}" for i in range(len(processes))],
"Size": processes,
}
partition = {
"Partition": [f"m{i+1}" for i in range(len(partitions))],
"Size": partitions,
}
self.processes = pd.DataFrame(process)
self.partitions =
|
pd.DataFrame(partition)
|
pandas.DataFrame
|
import os
import glob
import lzma
import gzip
import numpy as np
import pandas as pd
import attrdict
def get_open_function(path):
"""Choose an open function based on the file's extension."""
if path.endswith('xz'):
return lzma.open
elif path.endswith('gz'):
return gzip.open
return open
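# Quick sanity sketch of the extension dispatch above; the file names are
# hypothetical and nothing is opened, we only check which opener is returned.
assert get_open_function("trace.xz") is lzma.open
assert get_open_function("trace.gz") is gzip.open
assert get_open_function("trace.log") is open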
def load_simpoint_weights(simpoints_dir, trace):
"""Load simpoint weights for a given trace."""
simpoints = pd.DataFrame(columns=['trace', 'weight'])
for f in glob.glob(os.path.join(simpoints_dir, '*.csv')):
df = pd.read_csv(f)
simpoints =
|
pd.concat((simpoints, df))
|
pandas.concat
|
import numpy as np
import pytest
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
NaT,
Series,
TimedeltaIndex,
date_range,
offsets,
)
import pandas._testing as tm
from pandas.tseries.offsets import BDay
class TestShift:
@pytest.mark.parametrize(
"ser",
[
Series([np.arange(5)]),
date_range("1/1/2011", periods=24, freq="H"),
Series(range(5), index=date_range("2017", periods=5)),
],
)
@pytest.mark.parametrize("shift_size", [0, 1, 2])
def test_shift_always_copy(self, ser, shift_size):
# GH22397
assert ser.shift(shift_size) is not ser
@pytest.mark.parametrize("move_by_freq", [pd.Timedelta("1D"), pd.Timedelta("1min")])
def test_datetime_shift_always_copy(self, move_by_freq):
# GH#22397
ser = Series(range(5), index=date_range("2017", periods=5))
assert ser.shift(freq=move_by_freq) is not ser
def test_shift(self, datetime_series):
shifted = datetime_series.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, datetime_series.index)
tm.assert_index_equal(unshifted.index, datetime_series.index)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_series.values[:-1]
)
offset = BDay()
shifted = datetime_series.shift(1, freq=offset)
unshifted = shifted.shift(-1, freq=offset)
tm.assert_series_equal(unshifted, datetime_series)
unshifted = datetime_series.shift(0, freq=offset)
tm.assert_series_equal(unshifted, datetime_series)
shifted = datetime_series.shift(1, freq="B")
unshifted = shifted.shift(-1, freq="B")
tm.assert_series_equal(unshifted, datetime_series)
# corner case
unshifted = datetime_series.shift(0)
tm.assert_series_equal(unshifted, datetime_series)
# Shifting with PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, BDay())
tm.assert_series_equal(shifted2, shifted3)
tm.assert_series_equal(ps, shifted2.shift(-1, "B"))
msg = "Given freq D does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# legacy support
shifted4 = ps.shift(1, freq="B")
tm.assert_series_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=BDay())
tm.assert_series_equal(shifted5, shifted4)
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
for dtype in ["int32", "int64"]:
s1 = Series(np.arange(5, dtype=dtype), index=index)
p = s1.iloc[1]
result = s1.shift(periods=p)
expected = Series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_series_equal(result, expected)
# GH#8260
# with tz
s = Series(
date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern"), name="foo"
)
result = s - s.shift()
exp = Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo")
tm.assert_series_equal(result, exp)
# incompat tz
s2 = Series(date_range("2000-01-01 09:00:00", periods=5, tz="CET"), name="foo")
msg = "DatetimeArray subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
s - s2
def test_shift2(self):
ts = Series(
np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(1, freq="5T")
exp_index = ts.index.shift(1, freq="5T")
tm.assert_index_equal(result.index, exp_index)
# GH#1063, multiple of same base
result = ts.shift(1, freq="4H")
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"])
msg = "Cannot shift with no freq"
with pytest.raises(NullFrequencyError, match=msg):
idx.shift(1)
def test_shift_fill_value(self):
# GH#24128
ts = Series(
[1.0, 2.0, 3.0, 4.0, 5.0], index=date_range("1/1/2000", periods=5, freq="H")
)
exp = Series(
[0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("1/1/2000", periods=5, freq="H")
)
# check that fill value works
result = ts.shift(1, fill_value=0.0)
tm.assert_series_equal(result, exp)
exp = Series(
[0.0, 0.0, 1.0, 2.0, 3.0], index=date_range("1/1/2000", periods=5, freq="H")
)
result = ts.shift(2, fill_value=0.0)
tm.assert_series_equal(result, exp)
ts = Series([1, 2, 3])
res = ts.shift(2, fill_value=0)
assert res.dtype == ts.dtype
def test_shift_categorical_fill_value(self):
ts = Series(["a", "b", "c", "d"], dtype="category")
res = ts.shift(1, fill_value="a")
expected = Series(
pd.Categorical(
["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
)
tm.assert_equal(res, expected)
# check for incorrect fill_value
msg = "'fill_value=f' is not present in this Categorical's categories"
with pytest.raises(TypeError, match=msg):
ts.shift(1, fill_value="f")
def test_shift_dst(self):
# GH#13926
dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
s = Series(dates)
res = s.shift(0)
tm.assert_series_equal(res, s)
assert res.dtype == "datetime64[ns, US/Eastern]"
res = s.shift(1)
exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == "datetime64[ns, US/Eastern]"
res = s.shift(-2)
exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == "datetime64[ns, US/Eastern]"
for ex in [10, -10, 20, -20]:
res = s.shift(ex)
exp = Series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
tm.assert_series_equal(res, exp)
assert res.dtype == "datetime64[ns, US/Eastern]"
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_series):
# TODO: remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_series_equal(unshifted, ps)
shifted2 = ps.tshift(freq="B")
tm.assert_series_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=BDay())
tm.assert_series_equal(shifted, shifted3)
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.tshift(freq="M")
# DatetimeIndex
shifted = datetime_series.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_series_equal(datetime_series, unshifted)
shifted2 = datetime_series.tshift(freq=datetime_series.index.freq)
tm.assert_series_equal(shifted, shifted2)
inferred_ts = Series(
datetime_series.values, Index(np.asarray(datetime_series.index)), name="ts"
)
shifted = inferred_ts.tshift(1)
expected = datetime_series.tshift(1)
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(shifted, expected)
unshifted = shifted.tshift(-1)
tm.assert_series_equal(unshifted, inferred_ts)
no_freq = datetime_series[[0, 5, 7]]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.tshift()
def test_tshift_deprecated(self, datetime_series):
# GH#11631
with tm.assert_produces_warning(FutureWarning):
datetime_series.tshift()
def test_period_index_series_shift_with_freq(self):
ps = tm.makePeriodSeries()
shifted = ps.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_series_equal(unshifted, ps)
shifted2 = ps.shift(freq="B")
tm.assert_series_equal(shifted, shifted2)
shifted3 = ps.shift(freq=BDay())
|
tm.assert_series_equal(shifted, shifted3)
|
pandas._testing.assert_series_equal
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
import numpy as np
import re
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import nltk
nltk.download("stopwords")
with open("C:\\Users\\Prathamesh\\Desktop\\Side projects\\Spam classifier\\SMSSpamCollection.txt") as f:
lines = f.readlines()
messages = pd.DataFrame([i.split("\t") for i in lines], columns=["label", "message"])
# Stopwords are words in the messages like [the if in a to] which we have to remove
from nltk.corpus import stopwords
# For stemming purpose to find the base root of the word
from nltk.stem.porter import PorterStemmer
ps = PorterStemmer()
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
corpus = []
for sms in tqdm(messages["message"]):
# Removing unnecessary punctuations, numbers and replacing them with space
review = re.sub("[^a-zA-Z]", " ", sms)
# Convert the message to lowercase
review = review.lower()
# Split each word and create a list
review = review.split()
# Removing all stopwords in english language from the message and getting to the root of each word
review = [ps.stem(w) for w in review if w not in stopwords.words("english")]
# Convert back to full sentence format after cleaning
review = " ".join(review)
# Append to corpus
corpus.append(review)
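# A small sketch of the same cleaning pipeline applied to one hypothetical
# message, to make the per-step comments above concrete:
_example_sms = "WINNER!! You have won 1000 dollars, call now!!!"
_example = re.sub("[^a-zA-Z]", " ", _example_sms).lower().split()
_example = [ps.stem(w) for w in _example if w not in stopwords.words("english")]
_example = " ".join(_example)
# _example -> roughly 'winner won dollar call' after stopword removal and stemming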
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from imblearn.over_sampling import SMOTE
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, classification_report
# Creating features using Tf-idf Vectorization
tfidf = TfidfVectorizer(max_features=3000)
X = tfidf.fit_transform(corpus).toarray()
y =
|
pd.get_dummies(messages["label"])
|
pandas.get_dummies
|
###########################################################
# Encode
###########################################################
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import preprocessing,model_selection, ensemble
from sklearn.preprocessing import LabelEncoder
import scipy.stats as ss
from sklearn.externals import joblib
from scipy.sparse import csr_matrix
def cat2MedianShiftEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
med_y = np.median(y)
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_medshftenc'] = datax['y_median']-med_y
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(0)
datatst = datatst.join(datax,on=[c], how='left').fillna(0)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_medshftenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
return train_df, test_df
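# Conceptual sketch of the median-shift statistic computed above on a toy column
# (values assumed); the real function adds bagging and out-of-fold assignment on top.
_toy = pd.DataFrame({'cat': ['a', 'a', 'b', 'b'], 'y': [1.0, 3.0, 10.0, 12.0]})
_toy_shift = _toy.groupby('cat')['y'].median() - np.median(_toy['y'])
# _toy_shift -> a: -4.5, b: 4.5 (category median minus the global median)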
def cat2MeanShiftEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
mean_y = np.mean(y)
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_meanshftenc'] = datax['y_mean'] - mean_y
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(0)
datatst = datatst.join(datax,on=[c], how='left').fillna(0)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold*nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_meanshftenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat.reset_index(drop = True),train_df.reset_index(drop = True)), axis=1)
test_df = pd.concat([enc_mat_test.reset_index(drop = True),test_df.reset_index(drop = True)],axis=1)
return train_df, test_df
def cat2MeanEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
rn = np.mean(y)
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_meanenc'] = datax['y_mean']
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(rn)
datatst = datatst.join(datax,on=[c], how='left').fillna(rn)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold*nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_meanenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat,train_df), axis=1)
test_df = pd.concat([enc_mat_test,test_df],axis=1)
return train_df, test_df
def cat2MedianEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
rn = np.mean(y)
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
                datax[c+'_medianenc'] = datax['y_median']
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(rn)
datatst = datatst.join(datax,on=[c], how='left').fillna(rn)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold*nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_medianenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat,train_df), axis=1)
test_df = pd.concat([enc_mat_test,test_df],axis=1)
return train_df, test_df
def countEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
rn = 999
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_countenc'] = datax['y_len']
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(rn)
datatst = datatst.join(datax,on=[c], how='left').fillna(rn)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= nbag
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_countenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat,train_df), axis=1)
test_df = pd.concat([enc_mat_test,test_df],axis=1)
return train_df, test_df
def rankCountEncode(train_char, test_char, y, nbag = 10, nfold = 20, minCount = 3):
train_df = train_char.copy()
test_df = test_char.copy()
rn = 999
for c in train_char.columns:
data = train_char[[c]].copy()
data['y'] = y
enc_mat = np.zeros((y.shape[0],1))
enc_mat_test = np.zeros((test_char.shape[0],1))
for bag in np.arange(nbag):
kf = model_selection.KFold(n_splits= nfold, shuffle=True, random_state=2017*bag)
for dev_index, val_index in kf.split(range(data['y'].shape[0])):
dev_X, val_X = data.iloc[dev_index,:], data.iloc[val_index,:]
datax = dev_X.groupby(c).agg([len,np.mean,np.std, np.median])
datax.columns = ['_'.join(col).strip() for col in datax.columns.values]
datax = datax.loc[datax.y_len > minCount]
datax[c+'_rankenc'] = datax['y_len']
datax[c+'_rankenc'] = ss.rankdata(datax[c+'_rankenc'].values)
datax.drop(['y_len','y_mean','y_std','y_median'],axis=1,inplace=True)
print(datax.columns)
datatst = test_char[[c]].copy()
val_X = val_X.join(datax,on=[c], how='left').fillna(rn)
datatst = datatst.join(datax,on=[c], how='left').fillna(rn)
enc_mat[val_index,...] += val_X[list(set(datax.columns)-set([c]))]
enc_mat_test += datatst[list(set(datax.columns)-set([c]))]
enc_mat_test /= (nfold * nbag)
enc_mat /= (nbag)
enc_mat = pd.DataFrame(enc_mat)
enc_mat.columns=[c+'_rankenc'+str(x) for x in enc_mat.columns]
enc_mat_test = pd.DataFrame(enc_mat_test)
enc_mat_test.columns=[enc_mat.columns]
train_df = pd.concat((enc_mat,train_df), axis=1)
test_df = pd.concat([enc_mat_test,test_df],axis=1)
return train_df, test_df
def catLabelEncode(train_char, test_char):
train_df = train_char.copy()
test_df = test_char.copy()
train_test = pd.concat((train_df,test_df))
for feat in train_df.columns:
train_test[feat] = pd.factorize(train_test[feat])[0]
train_df = train_test.iloc[:train_char.shape[0],:]
test_df = train_test.iloc[train_char.shape[0]:,:]
return train_df, test_df
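# Minimal sketch of the label encoding above: pd.factorize maps each level to an
# integer code consistently across the concatenated frame (toy values assumed).
_toy_codes = pd.factorize(pd.Series(['low', 'high', 'low', 'mid']))[0]
# _toy_codes -> array([0, 1, 0, 2])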
def OHCEncode(train_char, test_char):
train_df = train_char.copy()
test_df = test_char.copy()
train_test = pd.concat((train_df,test_df))
ohe = csr_matrix(pd.get_dummies(train_test, dummy_na=False, sparse=True))
train_df = ohe[:train_df.shape[0],:]
test_df = ohe[train_df.shape[0]:,:]
return train_df, test_df
###########################################################
# START
###########################################################
import pandas as pd
import numpy as np
import datetime as dt
import gc
from sklearn.preprocessing import MinMaxScaler
from sklearn.externals import joblib
#from low_memory import reduce_mem_usage
print('Loading train data ...')
train1 = pd.read_csv('../input/train_2016_v2.csv')
train2 = pd.read_csv('../input/train_2017.csv')
#prop1 = pd.read_csv('../input/properties_2016.csv')
prop2 = pd.read_csv('../input/properties_2017.csv')
print('Converting float64 columns to float32')
for c, dtype in zip(prop2.columns, prop2.dtypes):
if dtype == np.float64:
# prop1[c] = prop1[c].astype(np.float32)
prop2[c] = prop2[c].astype(np.float32)
train1 = train1.merge(prop2, how='left', on='parcelid') # change this to prop2
train2 = train2.merge(prop2, how='left', on='parcelid')
dftrain = pd.concat((train1, train2))
del(train1, train2); gc.collect()
trainx = dftrain[['parcelid', 'transactiondate']].groupby(['parcelid','transactiondate']).agg('size').groupby(level=[0]).cumsum()
trainx = trainx.reset_index()
trainx = trainx[trainx[0] > 1]
dftrain['resell'] = 0
keys = ['parcelid', 'transactiondate']
i1 = dftrain.set_index(keys).index
i2 = trainx.set_index(keys).index
dftrain.loc[i1.isin(i2), 'resell'] = 1
dftrain.reset_index(drop = True, inplace = True)
del(trainx, i2); gc.collect()
###########################################
sample = pd.read_csv('../input/sample_submission.csv')
test = sample[['ParcelId']]
test.columns = ['parcelid']
dftest = test.merge(prop2, how='left', on='parcelid')# change this to prop2
dftest['transactiondate'] = '2017-10-01'
dftest['logerror'] = 0
dftest['resell'] = 0
keys = ['parcelid']
i1 = dftrain.set_index(keys).index
i2 = dftest.set_index(keys).index
dftest.loc[i2.isin(i1), 'resell'] = 1
del(i1, i2, test, sample, prop2)#prop1,
gc.collect()
def featureGen(x):
x['countnullcol'] = x.isnull().sum(axis = 1)
x["transactiondate"] = pd.to_datetime(x["transactiondate"])
x["yr"] = pd.DatetimeIndex(x["transactiondate"]).year
x["month"] = pd.DatetimeIndex(x["transactiondate"]).month
x["qtr"] = pd.DatetimeIndex(x["transactiondate"]).quarter
x['latitudetrim'] = x['latitude'].fillna(999999999)/10000
x['latitudetrim'] = x['latitudetrim'].astype(int)
x['longitudetrim'] = x['longitude'].fillna(999999999)/10000
x['longitudetrim'] = x['longitudetrim'].astype(int)
x['yearbuilt'] = x['yearbuilt'].fillna(1700)
x['yearbuilt'] = 2017 - x.yearbuilt
    ## Binary columns for features -> 0 means the property has the feature and 1 means it is absent (the value was NaN)
x['hasaircond'] = np.isnan(x.airconditioningtypeid).astype(int)
x['hasdeck'] = np.isnan(x.decktypeid).astype(int)
x['has34bath'] = np.isnan(x.threequarterbathnbr).astype(int)
x['hasfullbath'] = np.isnan(x.fullbathcnt).astype(int)
x['hashottuborspa'] = x.hashottuborspa.fillna(False).astype(int)
x['hasheat'] = np.isnan(x.heatingorsystemtypeid).astype(int)
x['hasstories'] = np.isnan(x.numberofstories).astype(int)
x['haspatio'] = np.isnan(x.yardbuildingsqft17).astype(int)
x['taxdelinquencyyear'] = 2017 - x['taxdelinquencyyear']
return x
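# A small sketch of the date-part extraction used in featureGen (dates assumed):
_toy_dates = pd.to_datetime(pd.Series(['2016-10-01', '2017-03-15']))
# pd.DatetimeIndex(_toy_dates).year    -> [2016, 2017]
# pd.DatetimeIndex(_toy_dates).month   -> [10, 3]
# pd.DatetimeIndex(_toy_dates).quarter -> [4, 1]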
#['regionidzip_meanshftenc0', 'buildingclasstypeid_meanshftenc0', 'propertycountylandusecode_meanshftenc0', 'propertyzoningdesc_meanshftenc0', 'rawcensustractandblock_meanshftenc0', 'taxdelinquencyflag_meanshftenc0', 'countnullcol', 'regionidneighborhood_meanshftenc0']
print("Start ftr gen..")
dftest = featureGen(dftest)
#joblib.dump(dftest,"../input/testNewFtr1.pkl")
dftrain = featureGen(dftrain)
#joblib.dump(dftrain,"../input/trainNewFtr1.pkl")
catcols = ['bathroomcnt', 'bedroomcnt', 'buildingqualitytypeid', 'buildingclasstypeid', 'calculatedbathnbr', 'decktypeid', 'threequarterbathnbr', 'fips', 'fireplacecnt', 'fireplaceflag', 'fullbathcnt', 'garagecarcnt', 'hashottuborspa', 'heatingorsystemtypeid', 'numberofstories', 'poolcnt', 'pooltypeid10', 'pooltypeid2', 'pooltypeid7', 'propertycountylandusecode', 'propertylandusetypeid', 'propertyzoningdesc', 'rawcensustractandblock', 'censustractandblock', 'regionidcounty', 'regionidcity', 'regionidzip', 'regionidneighborhood','storytypeid', 'typeconstructiontypeid', 'unitcnt', 'yardbuildingsqft17', 'yardbuildingsqft26', 'assessmentyear','taxdelinquencyflag','taxdelinquencyyear', 'roomcnt', 'latitudetrim', 'longitudetrim','airconditioningtypeid','architecturalstyletypeid']
numcols = ['countnullcol', 'latitude', 'longitude', 'yearbuilt', 'hasaircond', 'hasdeck', 'has34bath', 'hasfullbath', 'hasheat', 'hasstories', 'haspatio', 'basementsqft', 'finishedfloor1squarefeet', 'calculatedfinishedsquarefeet', 'finishedsquarefeet6', 'finishedsquarefeet12', 'finishedsquarefeet13', 'finishedsquarefeet15', 'finishedsquarefeet50', 'lotsizesquarefeet', 'garagetotalsqft', 'poolsizesum','taxvaluedollarcnt', 'structuretaxvaluedollarcnt', 'landtaxvaluedollarcnt', 'taxamount', 'logerror','parcelid', 'yr','month','qtr', 'resell']# ,'transactiondate'
### numerical processing
print("Num processing..")
dftrainnum = dftrain[numcols].fillna(-9)
dftestnum = dftest[numcols].fillna(-9)
############################
dftraincat = dftrain[catcols].fillna(-9)
dftestcat = dftest[catcols].fillna(-9)
ntrain = dftrain.shape[0]
print("Cat Processing..")
#from encoding import cat2MeanShiftEncode, catLabelEncode, countEncode, cat2MedianEncode, cat2MeanEncode, cat2MedianShiftEncode
train, test = cat2MeanShiftEncode(dftraincat.copy(), dftestcat.copy(), dftrainnum.logerror.values, nbag = 10, nfold = 20, minCount = 10)
trainmn = train.iloc[:,:41].copy()
testmn = test.iloc[:,:41].copy()
joblib.dump(trainmn,"../input/train_catMeanshftenc_v2.pkl")
joblib.dump(testmn,"../input/testcat_catMeanshftenc_v2.pkl")
#train, test = cat2MedianEncode(dftraincat.copy(), dftestcat.copy(), y, nbag = 10, nfold = 20, minCount = 10)
#joblib.dump(train.iloc[:,:41],"../input/train_catMedianenc.pkl")
#joblib.dump(test.iloc[:,:41],"../input/testcat_catMedianenc.pkl")
#train, test = cat2MeanEncode(dftraincat.copy(), dftestcat.copy(), y, nbag = 10, nfold = 20, minCount = 10)
#joblib.dump(train.iloc[:,:41],"../input/train_catMeanenc.pkl")
#joblib.dump(test.iloc[:,:41],"../input/testcat_catMeanenc.pkl")
train, test = catLabelEncode(dftraincat.copy(), dftestcat.copy())
trainlbl = train.iloc[:,:41].copy()
testlbl = test.iloc[:,:41].copy()
joblib.dump(trainlbl,"../input/trainlblcat_v2.pkl")
joblib.dump(testlbl,"../input/testcatlblcat_v2.pkl")
train, test = countEncode(dftraincat.copy(), dftestcat.copy(), dftrainnum.logerror.values, nbag = 1, nfold = 20, minCount = 10)
traincnt = train.iloc[:,:41].copy()
testcnt = test.iloc[:,:41].copy()
joblib.dump(traincnt,"../input/train_countenc_v2.pkl")
joblib.dump(testcnt,"../input/testcat_countenc_v2.pkl")
del(train, test)
gc.collect()
#joblib.dump(dftrainnum,"../input/trainnum9Impute.pkl")
#joblib.dump(dftestnum,"../input/testnum9Impute.pkl")
#traincnt = traincnt.iloc[:,:41]
#trainlbl = trainlbl.iloc[:,:41]
#trainmn = trainmn.iloc[:,:41]
##########
# Big DS
train = pd.concat((dftrainnum, traincnt, trainmn, trainlbl), axis =1)
test = pd.concat((dftestnum, testcnt, testmn, testlbl), axis =1)
del(dftrainnum, traincnt, trainmn, trainlbl); gc.collect()
del(dftestnum, testcnt, testmn, testlbl); gc.collect()
joblib.dump(train,"../input/trainmod.pkl")
joblib.dump(test,"../input/testmod.pkl")
########################
# Get stat Ftrs
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import preprocessing,model_selection, ensemble
from sklearn.preprocessing import LabelEncoder
import scipy.stats as ss
from sklearn.externals import joblib
from scipy.sparse import csr_matrix
train = joblib.load("../input/trainmod.pkl")
test = joblib.load("../input/testmod.pkl")
print("Statistical Ftr Gen..")
train['yrmonth'] = train.apply(lambda x: str(int(x.yr)) + '-' + str(int(x.month)), axis = 1)
test['yrmonth'] = test.apply(lambda x: str(int(x.yr)) + '-' + str(int(x.month)), axis = 1)
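# getStatsFtr below leans on monthly pd.Period arithmetic; a quick sketch of the
# window start it derives (the skipmon/aggPer values here are assumptions):
_p_end = pd.Period('2016-09')      # monthly period
_p_start = _p_end - 0 - 6          # skipmon = 0, aggPer = 6
# _p_start -> Period('2016-03', 'M')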
def getStatsFtr(dftrain, dftest, skipmon =0, yrmonNum='2016-09', aggPer = 6, colLst=[], ind = '1'):
start = pd.Period(yrmonNum) - skipmon - aggPer
end =
|
pd.Period(yrmonNum)
|
pandas.Period
|
"""
Unit Test for the deactivated_participants module
Ensures that get_token function fetches the access token properly, get_deactivated_participants
fetches all deactivated participants information, and store_participant_data properly stores all
the fetched deactivated participant data
Original Issues: DC-797, DC-971 (sub-task), DC-972 (sub-task)
The intent of this module is to check that GCR access token is generated properly, the list of
deactivated participants returned contains `participantID`, `suspensionStatus`, and `suspensionTime`,
and that the fetched deactivated participants data is stored properly in a BigQuery dataset.
"""
# Python imports
import unittest
import mock
# Third Party imports
import pandas
import pandas.testing
# Project imports
import utils.participant_summary_requests as psr
class ParticipantSummaryRequests(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
def setUp(self):
# Input parameters expected by the class
self.project_id = 'foo_project'
self.dataset_id = 'bar_dataset'
self.tablename = 'baz_table'
self.destination_table = 'bar_dataset.foo_table'
self.fake_url = 'www.fake_site.com'
self.fake_headers = {
'content-type': 'application/json',
'Authorization': 'Bearer ya29.12345'
}
self.columns = ['participantId', 'suspensionStatus', 'suspensionTime']
self.deactivated_participants = [[
'P111', 'NO_CONTACT', '2018-12-07T08:21:14'
], ['P222', 'NO_CONTACT', '2018-12-07T08:21:14']]
self.updated_deactivated_participants = [[
111, 'NO_CONTACT', '2018-12-07T08:21:14'
], [222, 'NO_CONTACT', '2018-12-07T08:21:14']]
self.fake_dataframe = pandas.DataFrame(
self.updated_deactivated_participants, columns=self.columns)
self.participant_data = [{
'fullUrl':
'https//foo_project.appspot.com/rdr/v1/Participant/P111/Summary',
'resource': {
'participantId': 'P111',
'suspensionStatus': 'NO_CONTACT',
'suspensionTime': '2018-12-07T08:21:14'
}
}, {
'fullUrl':
'https//foo_project.appspot.com/rdr/v1/Participant/P222/Summary',
'resource': {
'participantId': 'P222',
'suspensionStatus': 'NO_CONTACT',
'suspensionTime': '2018-12-07T08:21:14'
}
}]
self.json_response_entry = {
'entry': [{
'fullUrl':
'https//foo_project.appspot.com/rdr/v1/Participant/P111/Summary',
'resource': {
'participantId': 'P111',
'suspensionStatus': 'NO_CONTACT',
'suspensionTime': '2018-12-07T08:21:14'
}
}, {
'fullUrl':
'https//foo_project.appspot.com/rdr/v1/Participant/P222/Summary',
'resource': {
'participantId': 'P222',
'suspensionStatus': 'NO_CONTACT',
'suspensionTime': '2018-12-07T08:21:14'
}
}]
}
@mock.patch('utils.participant_summary_requests.default')
@mock.patch('utils.participant_summary_requests.auth')
@mock.patch('utils.participant_summary_requests.req')
def test_get_access_token(self, mock_req, mock_auth, mock_default):
# pre conditions
scopes = [
'https://www.googleapis.com/auth/cloud-platform', 'email', 'profile'
]
creds = mock.MagicMock()
mock_default.return_value = (creds, None)
req = mock.MagicMock()
mock_req.Request.return_value = req
# test
actual_token = psr.get_access_token()
# post conditions
mock_default.assert_called_once_with()
mock_auth.delegated_credentials.assert_called_once_with(creds,
scopes=scopes)
mock_req.Request.assert_called_once_with()
# assert the credential refresh still happens
mock_auth.delegated_credentials().refresh.assert_called_once_with(req)
self.assertEqual(mock_auth.delegated_credentials().token, actual_token)
@mock.patch('utils.participant_summary_requests.requests.get')
def test_get_participant_data(self, mock_get):
mock_get.return_value.status_code = 200
mock_get.return_value.json.return_value = self.json_response_entry
expected_response = psr.get_participant_data(self.fake_url,
self.fake_headers)
self.assertEqual(expected_response, self.participant_data)
@mock.patch('utils.participant_summary_requests.store_participant_data')
@mock.patch(
'utils.participant_summary_requests.get_deactivated_participants')
def test_get_deactivated_participants(self,
mock_get_deactivated_participants,
mock_store_participant_data):
# pre conditions
mock_get_deactivated_participants.return_value = self.fake_dataframe
# tests
dataframe_response = psr.get_deactivated_participants(
self.project_id, self.dataset_id, self.tablename, self.columns)
dataset_response = psr.store_participant_data(dataframe_response,
self.project_id,
self.destination_table)
expected_response = mock_store_participant_data(dataframe_response,
self.project_id,
self.destination_table)
# post conditions
pandas.testing.assert_frame_equal(
dataframe_response,
pandas.DataFrame(self.updated_deactivated_participants,
columns=self.columns))
self.assertEqual(expected_response, dataset_response)
def test_participant_id_to_int(self):
# pre conditions
columns = ['suspensionStatus', 'participantId', 'suspensionTime']
deactivated_participants = [[
'NO_CONTACT', 'P111', '2018-12-07T08:21:14'
]]
updated_deactivated_participants = [[
'NO_CONTACT', 111, '2018-12-07T08:21:14'
]]
dataframe =
|
pandas.DataFrame(deactivated_participants, columns=columns)
|
pandas.DataFrame
|
'''
Authors: <NAME> (@anabab1999) and <NAME> (@felipezara2013)
'''
from calendars import DayCounts
import pandas as pd
from pandas.tseries.offsets import DateOffset
from bloomberg import BBG
import numpy as np
bbg = BBG()
# Fetching the tickers for the zero curve
tickers_zero_curve = ['S0023Z 1Y BLC2 Curncy',
'S0023Z 1D BLC2 Curncy',
'S0023Z 3M BLC2 Curncy',
'S0023Z 1W BLC2 Curncy',
'S0023Z 10Y BLC2 Curncy',
'S0023Z 1M BLC2 Curncy',
'S0023Z 2Y BLC2 Curncy',
'S0023Z 6M BLC2 Curncy',
'S0023Z 2M BLC2 Curncy',
'S0023Z 5Y BLC2 Curncy',
'S0023Z 4M BLC2 Curncy',
'S0023Z 2D BLC2 Curncy',
'S0023Z 9M BLC2 Curncy',
'S0023Z 3Y BLC2 Curncy',
'S0023Z 4Y BLC2 Curncy',
'S0023Z 50Y BLC2 Curncy',
'S0023Z 12Y BLC2 Curncy',
'S0023Z 18M BLC2 Curncy',
'S0023Z 7Y BLC2 Curncy',
'S0023Z 5M BLC2 Curncy',
'S0023Z 6Y BLC2 Curncy',
'S0023Z 2W BLC2 Curncy',
'S0023Z 11M BLC2 Curncy',
'S0023Z 15M BLC2 Curncy',
'S0023Z 21M BLC2 Curncy',
'S0023Z 15Y BLC2 Curncy',
'S0023Z 25Y BLC2 Curncy',
'S0023Z 8Y BLC2 Curncy',
'S0023Z 10M BLC2 Curncy',
'S0023Z 20Y BLC2 Curncy',
'S0023Z 33M BLC2 Curncy',
'S0023Z 7M BLC2 Curncy',
'S0023Z 8M BLC2 Curncy',
'S0023Z 11Y BLC2 Curncy',
'S0023Z 14Y BLC2 Curncy',
'S0023Z 18Y BLC2 Curncy',
'S0023Z 19Y BLC2 Curncy',
'S0023Z 23D BLC2 Curncy',
'S0023Z 9Y BLC2 Curncy',
'S0023Z 17M BLC2 Curncy',
'S0023Z 1I BLC2 Curncy',
'S0023Z 22Y BLC2 Curncy',
'S0023Z 28Y BLC2 Curncy',
'S0023Z 2I BLC2 Curncy',
'S0023Z 30Y BLC2 Curncy',
'S0023Z 31Y BLC2 Curncy',
'S0023Z 32Y BLC2 Curncy',
'S0023Z 38Y BLC2 Curncy',
'S0023Z 39Y BLC2 Curncy',
'S0023Z 40Y BLC2 Curncy',
'S0023Z 42D BLC2 Curncy',
'S0023Z 48Y BLC2 Curncy']
df_bbg = bbg.fetch_series(tickers_zero_curve, "PX_LAST",
startdate = pd.to_datetime('today'),
enddate = pd.to_datetime('today'))
df_bbg = df_bbg.transpose()
df_bbg_m = bbg.fetch_contract_parameter(tickers_zero_curve, "MATURITY")
'''
The zero curve is used in the interpolation below to find the rate for a specific term.
'''
# building the zero curve
zero_curve = pd.concat([df_bbg, df_bbg_m], axis=1, sort= True).set_index('MATURITY').sort_index()
zero_curve = zero_curve.astype(float)
zero_curve = zero_curve.interpolate(method='linear', axis=0, limit=None, inplace=False, limit_direction='backward', limit_area=None, downcast=None)
zero_curve.index = pd.to_datetime(zero_curve.index)
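# Hedged aside, not part of the original script: a minimal illustration of the
# concat-and-interpolate pattern used above, with made-up dates and rates, so the
# shape of the interpolated zero curve is easier to picture.
example_curve = pd.DataFrame({'rate': [2.40, 2.45, 2.55]},
index=pd.to_datetime(['2019-10-04', '2020-04-04', '2021-04-04']))
example_query = pd.DataFrame(index=pd.to_datetime(['2020-10-04']))
example_interp = pd.concat([example_curve, example_query], sort=True).sort_index()
example_interp = example_interp.interpolate(method='linear')
# example_interp.loc['2020-10-04', 'rate'] now holds the interpolated rate (2.50 here)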
# function that calculates the fixed leg of the swap contract
'''
The function below calculates the present value of the swap's fixed leg for a given maturity,
discounting each cash flow with a rate interpolated from the zero curve.
'''
def swap_fixed_leg_pv(today, rate, busdays, calendartype, maturity=10, periodcupons=6, notional=1000000):
global zero_curve
dc1 = DayCounts(busdays, calendar=calendartype)
today = pd.to_datetime(today)
date_range = pd.date_range(start=today, end=today + DateOffset(years=maturity), freq=DateOffset(months=periodcupons))
date_range = dc1.modified_following(date_range)
df = pd.DataFrame(data=date_range[:-1], columns=['Accrual Start'])
df['Accrual End'] = date_range[1:]
df['days'] = (df['Accrual End'] - df['Accrual Start']).dt.days
df['Notional'] = notional
df['Principal'] = 0
lastline = df.tail(1)
df.loc[lastline.index, 'Principal'] = notional
df['Payment'] = (df['days']/ 360) * rate * df['Notional']
df['Cash Flow'] = df['Payment'] + df['Principal']
df['Cumulative Days'] = df['days'].cumsum()
days = pd.DataFrame(index = df['Accrual End'])
zero_curve_discount = pd.concat([zero_curve, days], sort=True).sort_index()
zero_curve_discount = zero_curve_discount.interpolate(method='linear', axis=0, limit=None, inplace=False, limit_direction='forward',
limit_area=None, downcast=None)
zero_curve_discount = zero_curve_discount.drop(index = zero_curve.index)
zero_curve_discount = pd.DataFrame(data=zero_curve_discount.values)
df['zero_curve_discount'] = zero_curve_discount/100
df['Discount'] = 1/(1+(df['zero_curve_discount']*df['Cumulative Days']/360))
df['Present Value'] = (df['Cash Flow'] * df['Discount'])
fixed = np.sum(df['Present Value'])
return fixed
# storing the fixed-leg present value
swap_fixed = swap_fixed_leg_pv('2019-04-04', 0.02336, 'ACT/360', 'us_trading', 5, 6, 10000000)
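# Hedged worked example, not from the original authors: the arithmetic behind the
# Payment, Discount and Present Value columns for a single coupon. All numbers
# below are made up purely for illustration.
example_notional = 1000000
example_fixed_rate = 0.02336
example_accrual_days = 182
example_zero_rate = 0.024  # stand-in for the interpolated zero rate at the coupon date
example_payment = example_notional * example_fixed_rate * example_accrual_days / 360  # ~11,810
example_discount = 1 / (1 + example_zero_rate * example_accrual_days / 360)  # ~0.988
example_pv = example_payment * example_discount  # ~11,668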
# fetching tickers for the floating leg
tickers_floating_leg = ["USSWAP2 BGN Curncy",
"USSWAP3 BGN Curncy",
"USSWAP4 BGN Curncy",
"USSWAP5 BGN Curncy",
"USSW6 BGN Curncy",
"USSWAP7 BGN Curncy",
"USSW8 BGN Curncy",
"USSW9 BGN Curncy",
"USSWAP10 BGN Curncy",
"USSWAP11 BGN Curncy",
"USSWAP12 BGN Curncy",
"USSWAP15 BGN Curncy",
"USSWAP20 BGN Curncy",
"USSWAP25 BGN Curncy",
"USSWAP30 BGN Curncy",
"USSWAP40 BGN Curncy",
"USSWAP50 BGN Curncy"]
bbg_floating_leg = bbg.fetch_series(tickers_floating_leg, "PX_LAST",
startdate =
|
pd.to_datetime('today')
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
r"""
general helper functions
"""
# Import standard library
import os
import logging
import itertools
from pathlib import Path
from glob import glob
from operator import concat
from functools import reduce
from os.path import join, exists
from pprint import pprint
# Import from module
# from matplotlib.figure import Figure
# from matplotlib.image import AxesImage
# from loguru import logger
from uncertainties import unumpy
from tqdm import tqdm
import numpy as np
import pandas as pd
from scipy.stats import norm
from scipy.ndimage import zoom
import matplotlib.pyplot as pl
import lightkurve as lk
from astropy.visualization import hist
from astropy import units as u
from astropy import constants as c
from astropy.timeseries import LombScargle
from astropy.modeling import models, fitting
from astropy.io import ascii
from astropy.coordinates import (
SkyCoord,
Distance,
sky_coordinate,
Galactocentric,
match_coordinates_3d,
)
from skimage import measure
from astroquery.vizier import Vizier
from astroquery.mast import Catalogs, tesscut
from astroquery.gaia import Gaia
import deepdish as dd
# Import from package
from chronos import target
from chronos import cluster
from chronos import gls
from chronos.config import DATA_PATH
log = logging.getLogger(__name__)
__all__ = [
"get_nexsci_archive",
"get_tess_ccd_info",
"get_all_campaigns",
"get_all_sectors",
"get_sector_cam_ccd",
"get_tois",
"get_toi",
"get_ctois",
"get_ctoi",
"get_target_coord",
"get_epicid_from_k2name",
"get_target_coord_3d",
"get_transformed_coord",
"query_gaia_params_of_all_tois",
"get_mamajek_table",
"get_distance",
"get_excess_from_extiction",
"get_absolute_color_index",
"get_absolute_gmag",
"parse_aperture_mask",
"make_round_mask",
"make_square_mask",
"remove_bad_data",
"is_point_inside_mask",
"get_fluxes_within_mask",
"get_harps_bank",
"get_specs_table_from_tfop",
"get_rotation_period",
"get_transit_mask",
"get_mag_err_from_flux",
"get_err_quadrature",
"get_phase",
"bin_data",
"map_float",
"map_int",
"flatten_list",
"detrend",
"query_tpf",
"query_tpf_tesscut",
"is_gaiaid_in_cluster",
"get_pix_area_threshold",
"get_above_lower_limit",
"get_below_upper_limit",
"get_between_limits",
"get_RV_K",
"get_RM_K",
"get_tois_mass_RV_K",
"get_vizier_tables",
"get_mist_eep_table",
"get_tepcat",
]
# Ax/Av
extinction_ratios = {
"U": 1.531,
"B": 1.324,
"V": 1.0,
"R": 0.748,
"I": 0.482,
"J": 0.282,
"H": 0.175,
"K": 0.112,
"G": 0.85926,
"Bp": 1.06794,
"Rp": 0.65199,
}
def query_WDSC():
"""
Washington Double Star Catalog
"""
url = "http://www.astro.gsu.edu/wds/Webtextfiles/wdsnewframe.html"
df = pd.read_csv(url)
return df
def get_tepcat(catalog="all"):
"""
TEPCat
https://www.astro.keele.ac.uk/jkt/tepcat/
Choices:
all, homogenerous, planning, obliquity
"""
base_url = "https://www.astro.keele.ac.uk/jkt/tepcat/"
if catalog == "all":
full_url = base_url + "allplanets-csv.csv"
elif catalog == "homogeneous":
full_url = base_url + "homogeneous-par-csv.csv"
elif catalog == "planning":
full_url = base_url + "observables.csv"
elif catalog == "obliquity":
full_url = base_url + "obliquity.csv"
else:
raise ValueError("catalog=[all,homogeneous,planning,obliquity]")
df = pd.read_csv(full_url)
return df
def get_mist_eep_table():
"""
For eep phases, see
http://waps.cfa.harvard.edu/MIST/README_tables.pdf
"""
fp = Path(DATA_PATH, "mist_eep_table.csv")
return pd.read_csv(fp, comment="#")
def get_nexsci_archive(table="all"):
base_url = "https://exoplanetarchive.ipac.caltech.edu/"
settings = "cgi-bin/nstedAPI/nph-nstedAPI?table="
if table == "all":
url = base_url + settings + "exomultpars"
elif table == "confirmed":
url = base_url + settings + "exoplanets"
elif table == "composite":
url = base_url + settings + "compositepars"
else:
raise ValueError("table=[all, confirmed, composite]")
df = pd.read_csv(url)
return df
def get_vizier_tables(key, tab_index=None, row_limit=50, verbose=True):
"""
Parameters
----------
key : str
vizier catalog key
tab_index : int
table index to download and parse
Returns
-------
tables if tab_index is None else parsed df
"""
if row_limit == -1:
msg = f"Downloading all tables in "
else:
msg = f"Downloading the first {row_limit} rows of each table in "
msg += f"{key} from vizier."
if verbose:
print(msg)
# set row limit
Vizier.ROW_LIMIT = row_limit
tables = Vizier.get_catalogs(key)
errmsg = f"No data returned from Vizier."
assert tables is not None, errmsg
if tab_index is None:
if verbose:
print({k: tables[k]._meta["description"] for k in tables.keys()})
return tables
else:
df = tables[tab_index].to_pandas()
df = df.applymap(
lambda x: x.decode("ascii") if isinstance(x, bytes) else x
)
return df
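# Hedged usage sketch (not part of the module); "J/A+A/616/A10" below is just an
# arbitrary example of a Vizier catalog key, not something this package relies on.
# tables = get_vizier_tables("J/A+A/616/A10", row_limit=10)           # list all tables
# df = get_vizier_tables("J/A+A/616/A10", tab_index=0, row_limit=10)  # parse the first table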
def get_tois_mass_RV_K(clobber=False):
fp = Path(DATA_PATH, "TOIs2.csv")
if clobber:
try:
from mrexo import predict_from_measurement, generate_lookup_table
except Exception:
raise ModuleNotFoundError("pip install mrexo")
tois = get_tois()
masses = {}
for key, row in tqdm(tois.iterrows()):
toi = row["TOI"]
Rp = row["Planet Radius (R_Earth)"]
Rp_err = row["Planet Radius (R_Earth) err"]
Mp, (Mp_lo, Mp_hi), iron_planet = predict_from_measurement(
measurement=Rp,
measurement_sigma=Rp_err,
qtl=[0.16, 0.84],
dataset="kepler",
)
masses[toi] = (Mp, Mp_lo, Mp_hi)
df = pd.DataFrame(masses).T
df.columns = [
"Planet mass (Mp_Earth)",
"Planet mass (Mp_Earth) lo",
"Planet mass (Mp_Earth) hi",
]
df.index.name = "TOI"
df = df.reset_index()
df["RV_K_lo"] = get_RV_K(
tois["Period (days)"],
tois["Stellar Radius (R_Sun)"], # should be Mstar
df["Planet mass (Mp_Earth) lo"],
with_unit=True,
)
df["RV_K_hi"] = get_RV_K(
tois["Period (days)"],
tois["Stellar Radius (R_Sun)"], # should be Mstar
df["Planet mass (Mp_Earth) hi"],
with_unit=True,
)
joint = pd.merge(tois, df, on="TOI")
joint.to_csv(fp, index=False)
print(f"Saved: {fp}")
else:
joint = pd.read_csv(fp)
print(f"Loaded: {fp}")
return joint
def get_phase(time, period, epoch, offset=0.5):
"""phase offset -0.5,0.5
"""
phase = (((((time - epoch) / period) + offset) % 1) / offset) - 1
return phase
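# Hedged sanity check, not in the original module (default offset=0.5):
# get_phase(np.array([0., 1., 2., 3.]), period=4, epoch=0)
# -> array([ 0. ,  0.5, -1. , -0.5]); the epoch lands at phase 0.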
def bin_data(array, binsize, func=np.mean):
"""
"""
a_b = []
for i in range(0, array.shape[0], binsize):
a_b.append(func(array[i : i + binsize], axis=0))
return a_b
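# Hedged example, not in the original module: binning a 1-D series in pairs.
# bin_data(np.array([1., 2., 3., 4., 5., 6.]), binsize=2)
# -> [1.5, 3.5, 5.5] (per-bin means; a trailing partial bin is averaged as-is)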
def get_tess_ccd_info(target_coord):
"""use search_targetpixelfile like get_all_sectors?"""
ccd_info = tesscut.Tesscut.get_sectors(target_coord)
errmsg = f"Target not found in any TESS sectors"
assert len(ccd_info) > 0, errmsg
return ccd_info.to_pandas()
def get_all_sectors(target_coord):
""" """
ccd_info = get_tess_ccd_info(target_coord)
all_sectors = [int(i) for i in ccd_info["sector"].values]
return np.array(all_sectors)
def get_all_campaigns(epicid):
""" """
res = lk.search_targetpixelfile(
f"K2 {epicid}", campaign=None, mission="K2"
)
errmsg = "No data found"
assert len(res) > 0, errmsg
df = res.table.to_pandas()
campaigns = df["observation"].apply(lambda x: x.split()[-1]).values
return np.array([int(c) for c in campaigns])
def get_sector_cam_ccd(target_coord, sector=None):
"""get TESS sector, camera, and ccd numbers using Tesscut
"""
df = get_tess_ccd_info(target_coord)
all_sectors = [int(i) for i in df["sector"].values]
if sector is not None:
sector_idx = df["sector"][df["sector"].isin([sector])].index.tolist()
if len(sector_idx) == 0:
raise ValueError(f"Available sector(s): {all_sectors}")
cam = str(df.iloc[sector_idx]["camera"].values[0])
ccd = str(df.iloc[sector_idx]["ccd"].values[0])
else:
sector_idx = 0
sector = str(df.iloc[sector_idx]["sector"])
cam = str(df.iloc[sector_idx]["camera"])
ccd = str(df.iloc[sector_idx]["ccd"])
return sector, cam, ccd
def is_gaiaid_in_cluster(
gaiaid, cluster_name=None, catalog_name="Bouma2019", verbose=True
):
"""
See scripts/check_target_in_cluster
"""
# reduce the redundant names above
gaiaid = int(gaiaid)
if cluster_name is None:
cc = cluster.ClusterCatalog(catalog_name=catalog_name, verbose=False)
df_mem = cc.query_catalog(return_members=True)
else:
c = cluster.Cluster(
catalog_name=catalog_name, cluster_name=cluster_name, verbose=False
)
df_mem = c.query_cluster_members()
idx = df_mem.source_id.isin([gaiaid])
if idx.sum() > 0:
if verbose:
if cluster_name is None:
cluster_match = df_mem[idx].Cluster.values[0]
else:
# TODO: what if cluster_match != cluster_name?
cluster_match = cluster_name
print(
f"Gaia DR2 {gaiaid} is IN {cluster_match} cluster based on {catalog_name} catalog!"
)
return True
else:
if verbose:
print(f"Gaia DR2 {gaiaid} is NOT in {catalog_name} catalog!")
return False
def query_tpf(
query_str,
sector=None,
campaign=None,
quality_bitmask="default",
apply_data_quality_mask=False,
mission="TESS",
verbose=True,
):
"""
"""
if verbose:
print(f"Searching targetpixelfile for {query_str} using lightkurve")
tpf = lk.search_targetpixelfile(
query_str, mission=mission, sector=sector, campaign=campaign
).download()
if apply_data_quality_mask:
tpf = remove_bad_data(tpf, sector=sector, verbose=verbose)
return tpf
def query_tpf_tesscut(
query_str,
sector=None,
quality_bitmask="default",
cutout_size=(15, 15),
apply_data_quality_mask=False,
verbose=True,
):
"""
"""
if verbose:
if isinstance(query_str, sky_coordinate.SkyCoord):
query = f"ra,dec=({query_str.to_string()})"
else:
query = query_str
print(f"Searching targetpixelfile for {query} using Tesscut")
tpf = lk.search_tesscut(query_str, sector=sector).download(
quality_bitmask=quality_bitmask, cutout_size=cutout_size
)
assert tpf is not None, "No results from Tesscut search."
# remove zeros
zero_mask = (tpf.flux_err == 0).all(axis=(1, 2))
if zero_mask.sum() > 0:
tpf = tpf[~zero_mask]
if apply_data_quality_mask:
tpf = remove_bad_data(tpf, sector=sector, verbose=verbose)
return tpf
def detrend(self, polyorder=1, break_tolerance=10):
"""mainly to be added as method to lk.LightCurve
"""
lc = self.copy()
half = lc.time.shape[0] // 2
if half % 2 == 0:
# add 1 if even
half += 1
return lc.flatten(
window_length=half,
polyorder=polyorder,
break_tolerance=break_tolerance,
)
def get_rotation_period(
time,
flux,
flux_err=None,
min_per=0.5,
max_per=None,
method="ls",
npoints=20,
plot=True,
verbose=True,
):
"""
time, flux : array
time and flux
min_per, max_per : float
minimum & maximum period (default max_per is half the baseline, e.g. ~13 days)
method : str
ls = lomb-scargle; gls = generalized ls
npoints : int
datapoints around which to fit a Gaussian
Note:
1. Transits are assumed to be masked already
2. The period and uncertainty were determined from the mean and the
half-width at half-maximum of a Gaussian fit to the periodogram peak, respectively
See also:
https://arxiv.org/abs/1702.03885
"""
baseline = int(time[-1] - time[0])
max_per = max_per if max_per is not None else baseline / 2
if method == "ls":
if verbose:
print("Using Lomb-Scargle method")
ls = LombScargle(time, flux, dy=flux_err)
frequencies, powers = ls.autopower(
minimum_frequency=1.0 / max_per, maximum_frequency=1.0 / min_per
)
idx = np.argmax(powers)
while npoints > idx:
npoints -= 1
best_freq = frequencies[idx]
best_period = 1.0 / best_freq
# specify which points to fit a gaussian
x = (1 / frequencies)[idx - npoints : idx + npoints]
y = powers[idx - npoints : idx + npoints]
# Fit the data using a 1-D Gaussian
g_init = models.Gaussian1D(amplitude=0.5, mean=best_period, stddev=1)
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, x, y)
label = f"P={g.mean.value:.2f}+/-{g.stddev.value:.2f} d"
if plot:
# Plot the data with the best-fit model
pl.plot(x, y, "ko", label="_nolegend_")
pl.plot(x, g(x), label="_nolegend_")
pl.ylabel("Lomb-Scargle Power")
pl.xlabel("Period [days]")
pl.axvline(g.mean, 0, 1, ls="--", c="r", label=label)
pl.legend()
if verbose:
print(label)
return (g.mean.value, g.stddev.value)
elif method == "gls":
if verbose:
print("Using Generalized Lomb-Scargle method")
data = (time, flux, flux_err)
ls = gls.Gls(data, Pbeg=min_per, Pend=max_per, verbose=verbose)
prot, prot_err = ls.hpstat["P"], ls.hpstat["e_P"]
if plot:
_ = ls.plot(block=False, figsize=(10, 8))
return (prot, prot_err)
else:
raise ValueError("Use method=[ls | gls]")
def get_transit_mask(lc, period, epoch, duration_hours):
"""
lc : lk.LightCurve
lightcurve that contains time and flux properties
mask = []
t0 += np.ceil((time[0] - dur - t0) / period) * period
for t in np.arange(t0, time[-1] + dur, period):
mask.extend(np.where(np.abs(time - t) < dur / 2.)[0])
return np.array(mask)
"""
assert isinstance(lc, lk.LightCurve)
assert (
(period is not None)
& (epoch is not None)
& (duration_hours is not None)
)
temp_fold = lc.fold(period, t0=epoch)
fractional_duration = (duration_hours / 24.0) / period
phase_mask = np.abs(temp_fold.phase) < (fractional_duration * 1.5)
transit_mask = np.in1d(lc.time, temp_fold.time_original[phase_mask])
return transit_mask
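# Hedged usage sketch (lc, period, epoch and duration below are placeholders,
# not module data): mask in-transit cadences before e.g. a rotation-period fit.
# in_transit = get_transit_mask(lc, period=3.5, epoch=1234.5, duration_hours=2.0)
# lc_out_of_transit = lc[~in_transit]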
def get_harps_bank(
target_coord, separation=30, outdir=DATA_PATH, verbose=True
):
"""
Check if target has archival HARPS data from:
http://www.mpia.de/homes/trifonov/HARPS_RVBank.html
See also https://github.com/3fon3fonov/HARPS_RVBank
For column meanings:
https://www2.mpia-hd.mpg.de/homes/trifonov/HARPS_RVBank_header.txt
"""
homeurl = "http://www.mpia.de/homes/trifonov/HARPS_RVBank.html"
fp = os.path.join(outdir, "HARPS_RVBank_table.csv")
if os.path.exists(fp):
df = pd.read_csv(fp)
msg = f"Loaded: {fp}\n"
else:
if verbose:
print(
f"Downloading HARPS bank from {homeurl}. This may take a while."
)
# csvurl = "http://www.mpia.de/homes/trifonov/HARPS_RVBank_v1.csv"
# df = pd.read_csv(csvurl)
df = pd.read_html(homeurl, header=0)[0] # choose first table
df.to_csv(fp, index=False)
msg = f"Saved: {fp}\n"
if verbose:
print(msg)
# coordinates
coords = SkyCoord(
ra=df["RA"],
dec=df["DEC"],
distance=df["Dist [pc]"],
unit=(u.hourangle, u.deg, u.pc),
)
# check which falls within `separation`
idxs = target_coord.separation(coords) < separation * u.arcsec
if idxs.sum() > 0:
# result may be multiple objects
res = df[idxs]
if verbose:
targets = res["Target"].values
print(f"There are {len(res)} matches: {targets}")
print(f"{df.loc[idxs, df.columns[7:14]].T}\n\n")
return res
else:
# find the nearest HARPS object in the database to target
# idx, sep2d, dist3d = match_coordinates_3d(
# target_coord, coords, nthneighbor=1)
idx = target_coord.separation(coords).argmin()
sep2d = target_coord.separation(coords[idx])
nearest_obj = df.iloc[idx]["Target"]
ra, dec = df.iloc[idx][["RA", "DEC"]]
print(
f"Nearest HARPS object is\n{nearest_obj}: ra,dec=({ra},{dec}) @ d={sep2d.arcsec/60:.2f} arcmin\n"
)
return None
# def get_harps_bank(url, verbose=True):
# """
# Download archival HARPS data from url
# http://www.mpia.de/homes/trifonov/HARPS_RVBank.html
# """
# homeurl = ""
# fp = os.path.join(outdir, "HARPS_RVBank_table.csv")
# return
def get_mamajek_table(clobber=False, verbose=True, data_loc=DATA_PATH):
fp = join(data_loc, f"mamajek_table.csv")
if not exists(fp) or clobber:
url = "http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt"
# cols="SpT Teff logT BCv Mv logL B-V Bt-Vt G-V U-B V-Rc V-Ic V-Ks J-H H-Ks Ks-W1 W1-W2 W1-W3 W1-W4 Msun logAge b-y M_J M_Ks Mbol i-z z-Y R_Rsun".split(' ')
df = pd.read_csv(
url,
skiprows=21,
skipfooter=524,
delim_whitespace=True,
engine="python",
)
# tab = ascii.read(url, guess=None, data_start=0, data_end=124)
# df = tab.to_pandas()
# replace ... with NaN
df = df.replace(["...", "....", "....."], np.nan)
# replace header
# df.columns = cols
# drop last duplicate column
df = df.drop(df.columns[-1], axis=1)
# df['#SpT_num'] = range(df.shape[0])
# df['#SpT'] = df['#SpT'].astype('category')
# remove the : type in M_J column
df["M_J"] = df["M_J"].apply(lambda x: str(x).split(":")[0])
# convert columns to float
for col in df.columns:
if col == "#SpT":
df[col] = df[col].astype("category")
else:
df[col] = df[col].astype(float)
# if col=='SpT':
# df[col] = df[col].astype('categorical')
# else:
# df[col] = df[col].astype(float)
df.to_csv(fp, index=False)
print(f"Saved: {fp}")
else:
df =
|
pd.read_csv(fp)
|
pandas.read_csv
|
import pandas as pd
from datetime import datetime
def get_daily_data(name, start="2010-01-01", end=None, modify=1):
"""
Get daily open/high/low/close/volume data from Daum Finance (finance.daum.net)
Parameters
----------
name : string of ticker, e.g. '000660'
start : string, e.g. '2010-01-01'
end : string, e.g. '2018-01-31'
modify : int
0: close
1: adj close
Returns
-------
ret : DataFrame
"""
start = datetime.strptime(start, "%Y-%m-%d")
if end is None:
end = datetime.now()
frames = []
page = 1
while True:
url_form = "http://finance.daum.net/item/quote_yyyymmdd_sub.daum?page=%d&code=%s&modify=%d"
url = url_form % (page, name, modify)
page += 1
dfs = pd.read_html(url, header=0)
df = dfs[0]
if df.empty:
break
# delete N/A rows
df = df.dropna()
date =
|
pd.to_datetime(df['일자'], format="%y.%m.%d")
|
pandas.to_datetime
|
"""This module provides access to the Vicon and biplane fluoroscopy filesystem-based database."""
from pathlib import Path
import itertools
import functools
import numpy as np
import pandas as pd
import quaternion
from lazy import lazy
from typing import Union, Callable, Type, Tuple
from biokinepy.cs import ht_r, change_cs, ht_inv
from ..kinematics.joint_cs import torso_cs_isb, torso_cs_v3d
from ..kinematics.segments import StaticTorsoSegment
from .db_common import TrialDescription, ViconEndpts, SubjectDescription, ViconCSTransform, trial_descriptor_df, MARKERS
from biokinepy.trajectory import PoseTrajectory
from ..misc.python_utils import NestedDescriptor
BIPLANE_FILE_HEADERS = {'frame': np.int32, 'pos_x': np.float64, 'pos_y': np.float64, 'pos_z': np.float64,
'quat_w': np.float64, 'quat_x': np.float64, 'quat_y': np.float64, 'quat_z': np.float64}
TORSO_FILE_HEADERS = {'pos_x': np.float64, 'pos_y': np.float64, 'pos_z': np.float64,
'quat_w': np.float64, 'quat_x': np.float64, 'quat_y': np.float64, 'quat_z': np.float64}
LANDMARKS_FILE_HEADERS = {'Landmark': 'string', 'X': np.float64, 'Y': np.float64, 'Z': np.float64}
TORSO_TRACKING_MARKERS = ['STRN', 'C7', 'T5', 'T10', 'CLAV']
def csv_get_item_method(csv_data: pd.DataFrame, marker_name: str) -> np.ndarray:
"""Return the marker data, (n, 3) numpy array view, associated with marker_name."""
return csv_data.loc[:, marker_name:(marker_name + '.2')].to_numpy()
def landmark_get_item_method(csv_data: pd.DataFrame, landmark_name: str) -> np.ndarray:
"""Return the landmark data, (3,) numpy array view, associated with landmark_name."""
return csv_data.loc[landmark_name, 'X':'Z'].to_numpy()
def csv_get_item_method_squeeze(csv_data: pd.DataFrame, marker_name: str) -> np.ndarray:
"""Return the marker data, (n, 3) numpy array view, associated with marker_name."""
return np.squeeze(csv_get_item_method(csv_data, marker_name))
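# Hedged illustration, not part of the module: the accessors above rely on the
# Vicon CSV convention where a marker's X/Y/Z columns share the marker name and
# are deduplicated by pandas into e.g. 'CLAV', 'CLAV.1', 'CLAV.2'.
# example = pd.DataFrame(np.arange(6.0).reshape(2, 3), columns=['CLAV', 'CLAV.1', 'CLAV.2'])
# csv_get_item_method(example, 'CLAV').shape  # -> (2, 3)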
def insert_nans(func: Callable) -> Callable:
"""Return a new dataframe derived from the original dataframe with appended columns filled with NaNs for missing
markers."""
@functools.wraps(func)
def wrapper(self) -> pd.DataFrame:
orig_data = func(self)
if not self.nan_missing_markers:
return orig_data
new_columns = [marker for marker in MARKERS if marker not in orig_data.columns]
new_columns1 = [col + '.1' for col in new_columns]
new_columns2 = [col + '.2' for col in new_columns]
raw_data = orig_data.to_numpy()
data_with_nan = np.concatenate((raw_data, np.full((orig_data.shape[0], len(new_columns) * 3), np.nan)), 1)
all_columns = itertools.chain(orig_data.columns,
itertools.chain.from_iterable(zip(new_columns, new_columns1, new_columns2)))
return pd.DataFrame(data=data_with_nan, columns=all_columns, dtype=np.float64)
return wrapper
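# Hedged note, not part of the module: in effect, if a trial's CSV lacks a marker
# listed in MARKERS, the wrapper appends '<marker>', '<marker>.1', '<marker>.2'
# columns filled with NaN, so marker-indexed access never raises for that marker.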
class ViconCsvTrial(TrialDescription, ViconEndpts):
"""A Vicon trial that has been exported to CSV format.
Enables lazy (and cached) access to the labeled and filled Vicon Data.
Attributes
----------
trial_dir_path: pathlib.Path or str
Path to the directory where the Vicon CSV trial data resides.
vicon_csv_file_labeled: pathlib.Path
Path to the labeled marker data for the Vicon CSV trial.
vicon_csv_file_filled: pathlib.Path
Path to the filled marker data for the Vicon CSV trial.
nan_missing_markers: bool
Specifies whether to insert NaNs in the dataset for missing markers
"""
def __init__(self, trial_dir: Union[str, Path], nan_missing_markers: bool = False, **kwargs):
self.nan_missing_markers = nan_missing_markers
self.trial_dir_path = trial_dir if isinstance(trial_dir, Path) else Path(trial_dir)
super().__init__(trial_dir_path=self.trial_dir_path,
endpts_file=lambda: self.trial_dir_path / (self.trial_name + '_vicon_endpts.csv'), **kwargs)
# file paths
self.vicon_csv_file_labeled = self.trial_dir_path / (self.trial_name + '_vicon_labeled.csv')
self.vicon_csv_file_filled = self.trial_dir_path / (self.trial_name + '_vicon_filled.csv')
# make sure the files are actually there
assert (self.vicon_csv_file_labeled.is_file())
assert (self.vicon_csv_file_filled.is_file())
@lazy
@insert_nans
def vicon_csv_data_labeled(self) -> pd.DataFrame:
"""Pandas dataframe with the labeled Vicon CSV data."""
# TODO: this works fine for now, and by using the accessor method below we get a view (rather than a copy) of the
# data; however, it probably makes sense to use something like structured arrays or xarray. Note that
# multi-level column labels should not be used (i.e. header=[0, 1]) because a copy of the data, not a view, is
# returned
return pd.read_csv(self.vicon_csv_file_labeled, header=[0], skiprows=[1], dtype=np.float64)
@lazy
@insert_nans
def vicon_csv_data_filled(self) -> pd.DataFrame:
"""Pandas dataframe with the filled Vicon CSV data."""
return pd.read_csv(self.vicon_csv_file_filled, header=[0], skiprows=[1], dtype=np.float64)
@lazy
def labeled(self) -> NestedDescriptor:
"""Descriptor that allows marker indexed ([marker_name]) access to labeled CSV data. The indexed access returns
a (n, 3) numpy array view."""
return NestedDescriptor(self.vicon_csv_data_labeled, csv_get_item_method)
@lazy
def filled(self) -> NestedDescriptor:
"""Descriptor that allows marker indexed ([marker_name]) access to filled CSV data. The indexed access return
a (n, 3) numpy array view."""
return NestedDescriptor(self.vicon_csv_data_filled, csv_get_item_method)
class ViconCsvSubject(SubjectDescription):
"""A subject that contains multiple Vicon CSV trials.
Attributes
----------
subject_dir_path: pathlib.Path
Path to directory containing subject data.
trials: list of biplane_kine.database.biplane_vicon_db.ViconCsvTrial
List of trials for the subject.
"""
def __init__(self, subj_dir: Union[str, Path], **kwargs):
self.subject_dir_path = subj_dir if isinstance(subj_dir, Path) else Path(subj_dir)
super().__init__(subject_dir_path=self.subject_dir_path, **kwargs)
self.trials = [ViconCsvTrial(folder) for folder in self.subject_dir_path.iterdir() if (folder.is_dir() and
folder.stem != 'Static')]
@lazy
def subject_df(self) -> pd.DataFrame:
"""A Pandas dataframe summarizing the Vicon CSV trials belonging to the subject."""
df = trial_descriptor_df(self.subject_name, self.trials)
df['Trial'] = pd.Series(self.trials, dtype=object)
return df
class BiplaneViconTrial(ViconCsvTrial):
"""A trial that contains both biplane and Vicon data.
Attributes
----------
vicon_csv_file_smoothed: pathlib.Path
Path to the smoothed marker data for the Vicon CSV trial.
humerus_biplane_file: pathlib.Path
File path to the raw kinematic trajectory for the humerus as derived from biplane fluoroscopy
scapula_biplane_file: pathlib.Path
File path to the raw kinematic trajectory for the scapula as derived from biplane fluoroscopy
humerus_biplane_file_avg_smooth: pathlib.Path
File path to the smoothed kinematic trajectory for the humerus as derived from biplane fluoroscopy
scapula_biplane_file_avg_smooth: pathlib.Path
File path to the smoothed kinematic trajectory for the scapula as derived from biplane fluoroscopy
torso_vicon_file: pathlib.Path
File path to the kinematic trajectory for the torso (ISB definition) as derived from skin markers
torso_vicon_file_v3d: pathlib.Path
File path to the kinematic trajectory for the torso (V3D definition) as derived from skin markers
subject: biplane_kine.database.vicon_accuracy.BiplaneViconSubject
Pointer to the subject that contains this trial.
"""
def __init__(self, trial_dir: Union[str, Path], subject: 'BiplaneViconSubject', nan_missing_markers: bool = True,
**kwargs):
super().__init__(trial_dir, nan_missing_markers, **kwargs)
self.subject = subject
# file paths
self.vicon_csv_file_smoothed = self.trial_dir_path / (self.trial_name + '_vicon_smoothed.csv')
self.humerus_biplane_file = self.trial_dir_path / (self.trial_name + '_humerus_biplane.csv')
self.humerus_biplane_file_avg_smooth = self.trial_dir_path / (self.trial_name +
'_humerus_biplane_avgSmooth.csv')
self.scapula_biplane_file = self.trial_dir_path / (self.trial_name + '_scapula_biplane.csv')
self.scapula_biplane_file_avg_smooth = self.trial_dir_path / (self.trial_name +
'_scapula_biplane_avgSmooth.csv')
self.torso_vicon_file = self.trial_dir_path / (self.trial_name + '_torso.csv')
self.torso_vicon_file_v3d = self.trial_dir_path / (self.trial_name + '_torso_v3d.csv')
# make sure the files are actually there
assert (self.vicon_csv_file_smoothed.is_file())
assert (self.humerus_biplane_file.is_file())
assert (self.scapula_biplane_file.is_file())
assert (self.humerus_biplane_file_avg_smooth.is_file())
assert (self.scapula_biplane_file_avg_smooth.is_file())
assert (self.torso_vicon_file.is_file())
assert (self.torso_vicon_file_v3d.is_file())
@lazy
@insert_nans
def vicon_csv_data_smoothed(self) -> pd.DataFrame:
"""Pandas dataframe with the smoothed Vicon CSV data."""
return pd.read_csv(self.vicon_csv_file_smoothed, header=[0], skiprows=[1], dtype=np.float64)
@lazy
def smoothed(self) -> NestedDescriptor:
"""Descriptor that allows marker indexed ([marker_name]) access to smoothed CSV data. The indexed access returns
a (n, 3) numpy array view."""
return NestedDescriptor(self.vicon_csv_data_smoothed, csv_get_item_method)
@lazy
def humerus_biplane_data(self) -> pd.DataFrame:
"""Humerus raw biplane data."""
return
|
pd.read_csv(self.humerus_biplane_file, header=0, dtype=BIPLANE_FILE_HEADERS, index_col='frame')
|
pandas.read_csv
|
#!/usr/bin/python3
# Module with dataframe operations.
# -
# append to a dataframe: a.append(pd.DataFrame({'close': 99.99}, index=[datetime.datetime.now()]))
import pandas as pd
from scipy import signal
import numpy
from numpy import NaN
import matplotlib.pyplot as plt
import datetime
from scipy.stats import linregress
# Creates DataFrame line
def CreateHorizontalLine(indexes, startValue, endValue, allIndexes=False):
data = pd.DataFrame()
# Only start and begin
if (allIndexes == False):
data = data.append(pd.DataFrame(
{'value': startValue}, index=[indexes[0]]))
data = data.append(pd.DataFrame(
{'value': endValue}, index=[indexes[-1]]))
# All data
else:
N = len(indexes)
alpha = (endValue - startValue) / N
for i in range(len(indexes)):
data = data.append(pd.DataFrame(
{'value': alpha * i + startValue},
index=[indexes[i]]))
return data
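# Hedged usage example (not in the original module):
# idx = pd.date_range('2021-01-01', periods=5)
# CreateHorizontalLine(idx, 10.0, 20.0)        # endpoints only: two rows
# CreateHorizontalLine(idx, 10.0, 20.0, True)  # one row per index, linear ramp from startValue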
# Creates DataFrame line
def CreateVerticalLine(index, startValue, endValue):
data = pd.DataFrame()
data = data.append(pd.DataFrame({'value': startValue}, index=[index]))
data = data.append(pd.DataFrame({'value': endValue}, index=[index]))
return data
# Creates DataFrame rect
def CreateRect(index1, value1, index2, value2):
data = pd.DataFrame()
data = data.append(
|
pd.DataFrame({'value': value1}, index=[index1])
|
pandas.DataFrame
|
import pandas as pd
data = pd.read_csv("../data/features/brand_3_12_market_features.csv")
print(data.shape)
def grouped(data, col: str = "", shifter: int = 1):
aux = data.groupby(["month", "region"])[col].sum().shift(shifter).reset_index()
title = col + "shift" + str(shifter)
aux.columns = ["month", "region", title]
return pd.merge(data, aux, on=["month", "region"])
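# Hedged illustration, not part of the original script, of what grouped() adds:
# toy = pd.DataFrame({"month": [1, 2, 3], "region": ["A", "A", "A"],
#                     "sales_brand_3": [10, 20, 30]})
# grouped(toy, col="sales_brand_3", shifter=1)
# -> same frame plus a 'sales_brand_3shift1' column holding [NaN, 10.0, 20.0]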
for col in ["sales_brand_3", "sales_brand_3_market", "sales_brand_12_market"]:
for i in range(-12, 12):
data = grouped(data, col=col, shifter=i)
print(data.shape)
reg3 = data.groupby("region")["sales_brand_3_market"].sum().reset_index()
reg12 = data.groupby("region")["sales_brand_12_market"].sum().reset_index()
reg3.columns = ["region", "sales_brand_3_market_per_region"]
reg12.columns = ["region", "sales_brand_12_market_per_region"]
data = pd.merge(data, reg3, on="region")
data =
|
pd.merge(data, reg12, on="region")
|
pandas.merge
|
import numpy as np
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
from tqdm import tqdm
import pandas as pd
def init_driver(player_url=None):
options = webdriver.ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.headless = True # Avoid google chrome GUI
driver = webdriver.Chrome(options=options)
chrome_prefs = {} # Avoid loading images to speed up scraping
options.experimental_options["prefs"] = chrome_prefs
chrome_prefs["profile.default_content_settings"] = {"images": 2}
chrome_prefs["profile.managed_default_content_settings"] = {"images": 2}
if player_url: # In case we init just a player url
driver.get(player_url)
else: # In case we are scraping the urls
driver.get("https://www.trackingthepros.com/players/")
time.sleep(1)
return driver
def scrape_one_page(driver):
one_page_players_url = []
element = driver.find_elements(By.XPATH, "//tbody//tr")
for x in element:
td = x.find_elements(By.XPATH, "td")
a = td[0].text
player_name = a[6:]
player_link = f"https://www.trackingthepros.com/player/{player_name}/"
one_page_players_url.append(player_link)
# print("\n SCRAPED : ", len(one_page_players_url), "PRO PAGES")
return one_page_players_url
def scrape_all_pages():
print("SCRAPING ALL PLAYER URLS ON EACH PAGES ...")
all_pages_players_url = []
driver = init_driver() # Init our driver
pagination = driver.find_element(By.CLASS_NAME, "pagination") # Gets the first occurence of .pagination
last_page = int(pagination.text[-7:-4]) # Gets the last page (the one before "next")
# last_page = 2 # For debug purpose
all_pages_players_url.append(scrape_one_page(driver)) # Scrapes first page
for page_number in tqdm(range(2, last_page+1)):
# print("SCRAPING PAGE NUMBER :", page_number)
pagination = driver.find_element(By.CLASS_NAME, "pagination") # Refresh our pagination class
lis = pagination.find_elements(By.TAG_NAME, "li") # Refresh our list tags
for x in lis:
try: # Avoid debugging because the html of trackingthepros is kinda clunky
links = x.find_elements(By.TAG_NAME, "a")
for link in links:
case_found = int(link.get_attribute("data-dt-idx")) # Gets the index of the case
if case_found > 5: case_found = 5 # The index matches the page number up to page 5; beyond that, the next-page case index is always 5
if case_found == page_number or case_found == 5: # Check that we are going to click and scrape the right page
# time.sleep(1)
# print("CLICKED ON PAGE :", page_number)
link.click() # Click to next page
# time.sleep(1)
all_pages_players_url.append(scrape_one_page(driver)) # Scrape the page
except:
pass
driver.close() # Close the page since now we are going to go through all the player pages to get their information
return np.array(all_pages_players_url).flatten() # Flatten because we don't care about the pages number
def scrape_one_player_infos(player_driver):
player_infos = []
url_str = str(player_driver.current_url)
name = url_str.split("/")[-2]
if name == "player": # Sometimes there is a player in table but url doesn't work
return [] # So we return empty array
player_infos.append(name)
trs = player_driver.find_elements(By.XPATH, "//tbody//tr")
country = "MISSING"
age = "MISSING"
server = "MISSING"
role = "MISSING"
residency = "MISSING"
summoner_names = []
for tr in trs:
tds = tr.find_elements(By.XPATH, "td")
for i, td in enumerate(tds):
if td.text == "Birthplace":
country = tds[i+1].text
elif td.text == "Birthday":
age_string = tds[i+1].text
age = int(age_string[age_string.find('(')+1:age_string.find(')')])
elif "[" in td.text:
string = tds[i].text
server = string[string.find('[')+1:string.find(']')] # Server is between [ ]
summoner_name = string.replace(server, "").replace("]", "").replace("[", "").lstrip()
summoner_names.append(summoner_name)
elif td.text == "Role":
role = tds[i+1].text.lstrip()
elif td.text == "Residency":
residency = tds[i+1].text.lstrip()
player_infos.append(role)
player_infos.append(age)
player_infos.append(country)
player_infos.append(residency)
player_infos.append(server)
player_infos.append(summoner_names)
return player_infos
def scrape_players_infos(all_players_pages):
print("SCRAPING EACH PLAYER INFOS ON THEIR PERSONAL PAGES ...")
all_data = []
for player_url in tqdm(all_players_pages):
player_driver = init_driver(player_url)
all_data.append(scrape_one_player_infos(player_driver))
player_driver.close()
return all_data
def save_data(data):
print("SAVING DATA ...")
df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
# Write collated data to new file
import pandas as pd
class Write:
# Write the collated and formated data to a new file
def To_File(self, data, dir):
print(f'Writing Processed Data to File in directory "{dir}" ....')
# Create separate DataFrames for each sheet in the Migration Template
df_member = pd.DataFrame.from_dict(data['Member'])
df_membership = pd.DataFrame.from_dict(data['Membership'])
df_membership_category = pd.DataFrame.from_dict(data['Membership Category'])
df_teams = pd.DataFrame.from_dict(data['Teams'])
df_training_fee = pd.DataFrame.from_dict(data['Training fee'])
df_training_locations = pd.DataFrame.from_dict(data['Training locations'])
df_department = pd.DataFrame.from_dict(data['Department info'])
df_club = pd.DataFrame.from_dict(data['Club info'])
df_committees =
|
pd.DataFrame.from_dict(data['Committees'])
|
pandas.DataFrame.from_dict
|
# -*- coding: utf-8 -*-
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import timedelta
import operator
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import long
from pandas.core import ops
from pandas.errors import NullFrequencyError, PerformanceWarning
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
timedelta_range,
Timedelta, Timestamp, NaT, Series, TimedeltaIndex, DatetimeIndex)
# ------------------------------------------------------------------
# Fixtures
@pytest.fixture
def tdser():
"""
Return a Series with dtype='timedelta64[ns]', including a NaT.
"""
return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=lambda x: type(x).__name__)
def delta(request):
"""
Several ways of representing two hours
"""
return request.param
@pytest.fixture(params=[timedelta(minutes=5, seconds=4),
Timedelta('5m4s'),
Timedelta('5m4s').to_timedelta64()],
ids=lambda x: type(x).__name__)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(params=[pd.Index, Series, pd.DataFrame],
ids=lambda x: x.__name__)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(strict=True))],
ids=lambda x: x.__name__)
def box_df_fail(request):
"""
Fixture equivalent to `box` fixture but xfailing the DataFrame case.
"""
return request.param
# ------------------------------------------------------------------
# Numeric dtypes Arithmetic with Timedelta Scalar
class TestNumericArraylikeArithmeticWithTimedeltaScalar(object):
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="block.eval incorrect",
strict=True))
])
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_mul_tdscalar(self, scalar_td, index, box):
# GH#19333
if (box is Series and
type(scalar_td) is timedelta and index.dtype == 'f8'):
raise pytest.xfail(reason="Cannot multiply timedelta by float")
expected = timedelta_range('1 days', '10 days')
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = index * scalar_td
tm.assert_equal(result, expected)
commute = scalar_td * index
tm.assert_equal(commute, expected)
@pytest.mark.parametrize('index', [
pd.Int64Index(range(1, 3)),
pd.UInt64Index(range(1, 3)),
pd.Float64Index(range(1, 3)),
pd.RangeIndex(1, 3)],
ids=lambda x: type(x).__name__)
@pytest.mark.parametrize('scalar_td', [
Timedelta(days=1),
Timedelta(days=1).to_timedelta64(),
Timedelta(days=1).to_pytimedelta()],
ids=lambda x: type(x).__name__)
def test_numeric_arr_rdiv_tdscalar(self, scalar_td, index, box):
if box is Series and type(scalar_td) is timedelta:
raise pytest.xfail(reason="TODO: Figure out why this case fails")
if box is pd.DataFrame and isinstance(scalar_td, timedelta):
raise pytest.xfail(reason="TODO: Figure out why this case fails")
expected = TimedeltaIndex(['1 Day', '12 Hours'])
index = tm.box_expected(index, box)
expected = tm.box_expected(expected, box)
result = scalar_td / index
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
index / scalar_td
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedeltaArraylikeAddSubOps(object):
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize('op', [operator.add, ops.radd,
operator.sub, ops.rsub],
ids=lambda x: x.__name__)
def test_td64arr_add_sub_float(self, box, op, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdi = tm.box_expected(tdi, box)
if box is pd.DataFrame and op in [operator.add, operator.sub]:
pytest.xfail(reason="Tries to align incorrectly, "
"raises ValueError")
with pytest.raises(TypeError):
op(tdi, other)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to cast df to "
"Period",
strict=True,
raises=IncompatibleFrequency))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="broadcasts along "
"wrong axis",
raises=ValueError,
strict=True))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box)
msg = "cannot subtract a datelike from|Could not operate"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
result = idx + Timestamp('2011-01-01')
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype",
strict=True))
], ids=lambda x: x.__name__)
def test_td64_radd_timestamp(self, box):
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, box)
# TODO: parametrize over scalar datetime types?
result = Timestamp('2011-01-01') + idx
tm.assert_equal(result, expected)
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Returns object dtype "
"instead of "
"datetime64[ns]",
strict=True))
], ids=lambda x: x.__name__)
def test_td64arr_add_sub_timestamp(self, box):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdser = Series(timedelta_range('1 day', periods=3))
expected = Series(pd.date_range('2012-01-02', periods=3))
tdser = tm.box_expected(tdser, box)
expected = tm.box_expected(expected, box)
tm.assert_equal(ts + tdser, expected)
tm.assert_equal(tdser + ts, expected)
expected2 = Series(pd.date_range('2011-12-31',
periods=3, freq='-1D'))
expected2 = tm.box_expected(expected2, box)
tm.assert_equal(ts - tdser, expected2)
tm.assert_equal(ts + (-tdser), expected2)
with pytest.raises(TypeError):
tdser - ts
def test_tdi_sub_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# Operations with int-like others
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser + Series([2, 3, 4])
@pytest.mark.parametrize('box', [
pd.Index,
pytest.param(Series,
marks=pytest.mark.xfail(reason="GH#19123 integer "
"interpreted as "
"nanoseconds",
strict=True)),
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_radd_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
Series([2, 3, 4]) + tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_sub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
with pytest.raises(err):
tdser - Series([2, 3, 4])
@pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds',
strict=True)
def test_td64arr_rsub_int_series_invalid(self, box, tdser):
tdser = tm.box_expected(tdser, box)
with pytest.raises(TypeError):
Series([2, 3, 4]) - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Attempts to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
def test_td64arr_add_intlike(self, box):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box)
err = TypeError if box is not pd.Index else NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser):
if box is pd.DataFrame and isinstance(scalar, np.ndarray):
# raises ValueError
pytest.xfail(reason="DataFrame to broadcast incorrectly")
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('box', [
pd.Index,
Series,
pytest.param(pd.DataFrame,
marks=pytest.mark.xfail(reason="Tries to broadcast "
"incorrectly",
strict=True, raises=ValueError))
], ids=lambda x: x.__name__)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser):
if type(vec) is Series and not dtype.startswith('float'):
pytest.xfail(reason='GH#19123 integer interpreted as nanos')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
# TODO: parametrize over these four ops?
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
def test_td64arr_add_td64_array(self, box_df_fail):
box = box_df_fail # DataFrame tries to broadcast incorrectly
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
|
tm.assert_equal(result, expected)
|
pandas.util.testing.assert_equal
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import collections
from datetime import (
datetime,
timedelta,
)
import logging
import operator
import unittest
from nose_parameterized import parameterized
import nose.tools as nt
import pytz
import itertools
import pandas as pd
import numpy as np
from six.moves import range, zip
import zipline.utils.factory as factory
import zipline.finance.performance as perf
from zipline.finance.performance import position_tracker
from zipline.finance.slippage import Transaction, create_transaction
import zipline.utils.math_utils as zp_math
from zipline.gens.composites import date_sorted_sources
from zipline.finance.trading import SimulationParameters
from zipline.finance.blotter import Order
from zipline.finance.commission import PerShare, PerTrade, PerDollar
from zipline.finance.trading import TradingEnvironment
from zipline.utils.factory import create_simulation_parameters
from zipline.utils.serialization_utils import (
loads_with_persistent_ids, dumps_with_persistent_ids
)
import zipline.protocol as zp
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.sources.data_frame_source import DataPanelSource
logger = logging.getLogger('Test Perf Tracking')
onesec = timedelta(seconds=1)
oneday = timedelta(days=1)
tradingday = timedelta(hours=6, minutes=30)
# nose.tools changed name in python 3
if not hasattr(nt, 'assert_count_equal'):
nt.assert_count_equal = nt.assert_items_equal
def check_perf_period(pp,
gross_leverage,
net_leverage,
long_exposure,
longs_count,
short_exposure,
shorts_count):
perf_data = pp.to_dict()
np.testing.assert_allclose(
gross_leverage, perf_data['gross_leverage'], rtol=1e-3)
np.testing.assert_allclose(
net_leverage, perf_data['net_leverage'], rtol=1e-3)
np.testing.assert_allclose(
long_exposure, perf_data['long_exposure'], rtol=1e-3)
np.testing.assert_allclose(
longs_count, perf_data['longs_count'], rtol=1e-3)
np.testing.assert_allclose(
short_exposure, perf_data['short_exposure'], rtol=1e-3)
np.testing.assert_allclose(
shorts_count, perf_data['shorts_count'], rtol=1e-3)
def check_account(account,
settled_cash,
equity_with_loan,
total_positions_value,
regt_equity,
available_funds,
excess_liquidity,
cushion,
leverage,
net_leverage,
net_liquidation):
# this is a long only portfolio that is only partially invested
# so net and gross leverage are equal.
np.testing.assert_allclose(settled_cash,
account['settled_cash'], rtol=1e-3)
np.testing.assert_allclose(equity_with_loan,
account['equity_with_loan'], rtol=1e-3)
np.testing.assert_allclose(total_positions_value,
account['total_positions_value'], rtol=1e-3)
np.testing.assert_allclose(regt_equity,
account['regt_equity'], rtol=1e-3)
np.testing.assert_allclose(available_funds,
account['available_funds'], rtol=1e-3)
np.testing.assert_allclose(excess_liquidity,
account['excess_liquidity'], rtol=1e-3)
np.testing.assert_allclose(cushion,
account['cushion'], rtol=1e-3)
np.testing.assert_allclose(leverage, account['leverage'], rtol=1e-3)
np.testing.assert_allclose(net_leverage,
account['net_leverage'], rtol=1e-3)
np.testing.assert_allclose(net_liquidation,
account['net_liquidation'], rtol=1e-3)
def create_txn(trade_event, price, amount):
"""
Create a fake transaction to be filled and processed prior to the execution
of a given trade event.
"""
mock_order = Order(trade_event.dt, trade_event.sid, amount, id=None)
return create_transaction(trade_event, mock_order, price, amount)
def benchmark_events_in_range(sim_params, env):
return [
Event({'dt': dt,
'returns': ret,
'type': zp.DATASOURCE_TYPE.BENCHMARK,
# We explicitly rely on the behavior that benchmarks sort before
# any other events.
'source_id': '1Abenchmarks'})
for dt, ret in env.benchmark_returns.iteritems()
if dt.date() >= sim_params.period_start.date() and
dt.date() <= sim_params.period_end.date()
]
def calculate_results(sim_params,
env,
benchmark_events,
trade_events,
dividend_events=None,
splits=None,
txns=None):
"""
Run the given events through a stripped down version of the loop in
AlgorithmSimulator.transform.
IMPORTANT NOTE FOR TEST WRITERS/READERS:
This loop has some wonky logic for the order of event processing for
    datasource types. This exists mostly to accommodate existing tests that
    were making assumptions about how events would be
sorted.
In particular:
- Dividends passed for a given date are processed PRIOR to any events
for that date.
- Splits passed for a given date are process AFTER any events for that
date.
Tests that use this helper should not be considered useful guarantees of
the behavior of AlgorithmSimulator on a stream containing the same events
unless the subgroups have been explicitly re-sorted in this way.
"""
txns = txns or []
splits = splits or []
perf_tracker = perf.PerformanceTracker(sim_params, env)
if dividend_events is not None:
dividend_frame = pd.DataFrame(
[
event.to_series(index=zp.DIVIDEND_FIELDS)
for event in dividend_events
],
)
perf_tracker.update_dividends(dividend_frame)
# Raw trades
trade_events = sorted(trade_events, key=lambda ev: (ev.dt, ev.source_id))
# Add a benchmark event for each date.
trades_plus_bm = date_sorted_sources(trade_events, benchmark_events)
# Filter out benchmark events that are later than the last trade date.
filtered_trades_plus_bm = (filt_event for filt_event in trades_plus_bm
if filt_event.dt <= trade_events[-1].dt)
grouped_trades_plus_bm = itertools.groupby(filtered_trades_plus_bm,
lambda x: x.dt)
results = []
bm_updated = False
for date, group in grouped_trades_plus_bm:
for txn in filter(lambda txn: txn.dt == date, txns):
# Process txns for this date.
perf_tracker.process_transaction(txn)
for event in group:
if event.type == zp.DATASOURCE_TYPE.TRADE:
perf_tracker.process_trade(event)
elif event.type == zp.DATASOURCE_TYPE.DIVIDEND:
perf_tracker.process_dividend(event)
elif event.type == zp.DATASOURCE_TYPE.BENCHMARK:
perf_tracker.process_benchmark(event)
bm_updated = True
elif event.type == zp.DATASOURCE_TYPE.COMMISSION:
perf_tracker.process_commission(event)
for split in filter(lambda split: split.dt == date, splits):
# Process splits for this date.
perf_tracker.process_split(split)
if bm_updated:
msg = perf_tracker.handle_market_close_daily()
msg['account'] = perf_tracker.get_account(True)
results.append(msg)
bm_updated = False
return results
def check_perf_tracker_serialization(perf_tracker):
scalar_keys = [
'emission_rate',
'txn_count',
'market_open',
'last_close',
'_dividend_count',
'period_start',
'day_count',
'capital_base',
'market_close',
'saved_dt',
'period_end',
'total_days',
]
p_string = dumps_with_persistent_ids(perf_tracker)
test = loads_with_persistent_ids(p_string, env=perf_tracker.env)
for k in scalar_keys:
nt.assert_equal(getattr(test, k), getattr(perf_tracker, k), k)
for period in test.perf_periods:
nt.assert_true(hasattr(period, '_position_tracker'))
class TestSplitPerformance(unittest.TestCase):
def setUp(self):
self.env = TradingEnvironment()
self.env.write_data(equities_identifiers=[1])
self.sim_params = create_simulation_parameters(num_days=2)
# start with $10,000
self.sim_params.capital_base = 10e3
self.benchmark_events = benchmark_events_in_range(self.sim_params,
self.env)
def test_split_long_position(self):
events = factory.create_trade_history(
1,
[20, 20],
[100, 100],
oneday,
self.sim_params,
env=self.env
)
# set up a long position in sid 1
# 100 shares at $20 apiece = $2000 position
txns = [create_txn(events[0], 20, 100)]
# set up a split with ratio 3 occurring at the start of the second
# day.
splits = [
factory.create_split(
1,
3,
events[1].dt,
),
]
results = calculate_results(self.sim_params, self.env,
self.benchmark_events,
events, txns=txns, splits=splits)
# should have 33 shares (at $60 apiece) and $20 in cash
self.assertEqual(2, len(results))
latest_positions = results[1]['daily_perf']['positions']
self.assertEqual(1, len(latest_positions))
# check the last position to make sure it's been updated
position = latest_positions[0]
self.assertEqual(1, position['sid'])
self.assertEqual(33, position['amount'])
self.assertEqual(60, position['cost_basis'])
self.assertEqual(60, position['last_sale_price'])
        # since we started with $10000 and spent $2000 on the position,
        # we are left with $8000 plus roughly $20 of cash paid in lieu of
        # the fractional share created by the split (100 / 3 leaves a
        # 0.33-share remainder worth about 0.33 * $60 = $20), so we should
        # have close to $8020 in cash.
        # we won't get exactly 8020 because sometimes a split is
        # denoted as a ratio like 0.3333, and we lose some digits
        # of precision. thus, make sure we're pretty close.
daily_perf = results[1]['daily_perf']
self.assertTrue(
zp_math.tolerant_equals(8020,
daily_perf['ending_cash'], 1))
# Validate that the account attributes were updated.
account = results[1]['account']
self.assertEqual(float('inf'), account['day_trades_remaining'])
# this is a long only portfolio that is only partially invested
# so net and gross leverage are equal.
np.testing.assert_allclose(0.198, account['leverage'], rtol=1e-3)
np.testing.assert_allclose(0.198, account['net_leverage'], rtol=1e-3)
np.testing.assert_allclose(8020, account['regt_equity'], rtol=1e-3)
self.assertEqual(float('inf'), account['regt_margin'])
np.testing.assert_allclose(8020, account['available_funds'], rtol=1e-3)
self.assertEqual(0, account['maintenance_margin_requirement'])
np.testing.assert_allclose(10000,
account['equity_with_loan'], rtol=1e-3)
self.assertEqual(float('inf'), account['buying_power'])
self.assertEqual(0, account['initial_margin_requirement'])
np.testing.assert_allclose(8020, account['excess_liquidity'],
rtol=1e-3)
np.testing.assert_allclose(8020, account['settled_cash'], rtol=1e-3)
np.testing.assert_allclose(10000, account['net_liquidation'],
rtol=1e-3)
np.testing.assert_allclose(0.802, account['cushion'], rtol=1e-3)
np.testing.assert_allclose(1980, account['total_positions_value'],
rtol=1e-3)
self.assertEqual(0, account['accrued_interest'])
for i, result in enumerate(results):
for perf_kind in ('daily_perf', 'cumulative_perf'):
perf_result = result[perf_kind]
# prices aren't changing, so pnl and returns should be 0.0
self.assertEqual(0.0, perf_result['pnl'],
"day %s %s pnl %s instead of 0.0" %
(i, perf_kind, perf_result['pnl']))
self.assertEqual(0.0, perf_result['returns'],
"day %s %s returns %s instead of 0.0" %
(i, perf_kind, perf_result['returns']))
class TestCommissionEvents(unittest.TestCase):
def setUp(self):
self.env = TradingEnvironment()
self.env.write_data(
equities_identifiers=[0, 1, 133]
)
self.sim_params = create_simulation_parameters(num_days=5)
logger.info("sim_params: %s" % self.sim_params)
self.sim_params.capital_base = 10e3
self.benchmark_events = benchmark_events_in_range(self.sim_params,
self.env)
def test_commission_event(self):
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
# Test commission models and validate result
# Expected commission amounts:
# PerShare commission: 1.00, 1.00, 1.50 = $3.50
# PerTrade commission: 5.00, 5.00, 5.00 = $15.00
# PerDollar commission: 1.50, 3.00, 4.50 = $9.00
# Total commission = $3.50 + $15.00 + $9.00 = $27.50
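        # A quick sketch of where those totals come from, assuming the three
        # transactions created below (50, 100, 150 shares at $20 apiece):
        #   PerShare(cost=0.01, min_trade_cost=1.00): 0.50->1.00, 1.00, 1.50 = 3.50
        #   PerTrade(cost=5.00):                      5.00 + 5.00 + 5.00     = 15.00
        #   PerDollar(cost=0.0015):                   1.50 + 3.00 + 4.50     = 9.00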
# Create 3 transactions: 50, 100, 150 shares traded @ $20
transactions = [create_txn(events[0], 20, i)
for i in [50, 100, 150]]
# Create commission models and validate that produce expected
# commissions.
models = [PerShare(cost=0.01, min_trade_cost=1.00),
PerTrade(cost=5.00),
PerDollar(cost=0.0015)]
expected_results = [3.50, 15.0, 9.0]
for model, expected in zip(models, expected_results):
total_commission = 0
for trade in transactions:
total_commission += model.calculate(trade)[1]
self.assertEqual(total_commission, expected)
# Verify that commission events are handled correctly by
# PerformanceTracker.
cash_adj_dt = events[0].dt
cash_adjustment = factory.create_commission(1, 300.0, cash_adj_dt)
events.append(cash_adjustment)
# Insert a purchase order.
txns = [create_txn(events[0], 20, 1)]
results = calculate_results(self.sim_params,
self.env,
self.benchmark_events,
events,
txns=txns)
# Validate that we lost 320 dollars from our cash pool.
self.assertEqual(results[-1]['cumulative_perf']['ending_cash'],
9680)
# Validate that the cost basis of our position changed.
self.assertEqual(results[-1]['daily_perf']['positions']
[0]['cost_basis'], 320.0)
# Validate that the account attributes were updated.
account = results[1]['account']
self.assertEqual(float('inf'), account['day_trades_remaining'])
np.testing.assert_allclose(0.001, account['leverage'], rtol=1e-3,
atol=1e-4)
np.testing.assert_allclose(9680, account['regt_equity'], rtol=1e-3)
self.assertEqual(float('inf'), account['regt_margin'])
np.testing.assert_allclose(9680, account['available_funds'],
rtol=1e-3)
self.assertEqual(0, account['maintenance_margin_requirement'])
np.testing.assert_allclose(9690,
account['equity_with_loan'], rtol=1e-3)
self.assertEqual(float('inf'), account['buying_power'])
self.assertEqual(0, account['initial_margin_requirement'])
np.testing.assert_allclose(9680, account['excess_liquidity'],
rtol=1e-3)
np.testing.assert_allclose(9680, account['settled_cash'],
rtol=1e-3)
np.testing.assert_allclose(9690, account['net_liquidation'],
rtol=1e-3)
np.testing.assert_allclose(0.999, account['cushion'], rtol=1e-3)
np.testing.assert_allclose(10, account['total_positions_value'],
rtol=1e-3)
self.assertEqual(0, account['accrued_interest'])
def test_commission_zero_position(self):
"""
Ensure no div-by-zero errors.
"""
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
# Buy and sell the same sid so that we have a zero position by the
# time of events[3].
txns = [
create_txn(events[0], 20, 1),
create_txn(events[1], 20, -1),
]
# Add a cash adjustment at the time of event[3].
cash_adj_dt = events[3].dt
cash_adjustment = factory.create_commission(1, 300.0, cash_adj_dt)
events.append(cash_adjustment)
results = calculate_results(self.sim_params,
self.env,
self.benchmark_events,
events,
txns=txns)
# Validate that we lost 300 dollars from our cash pool.
self.assertEqual(results[-1]['cumulative_perf']['ending_cash'],
9700)
def test_commission_no_position(self):
"""
Ensure no position-not-found or sid-not-found errors.
"""
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
# Add a cash adjustment at the time of event[3].
cash_adj_dt = events[3].dt
cash_adjustment = factory.create_commission(1, 300.0, cash_adj_dt)
events.append(cash_adjustment)
results = calculate_results(self.sim_params,
self.env,
self.benchmark_events,
events)
# Validate that we lost 300 dollars from our cash pool.
self.assertEqual(results[-1]['cumulative_perf']['ending_cash'],
9700)
class TestDividendPerformance(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
cls.env.write_data(equities_identifiers=[1, 2])
@classmethod
def tearDownClass(cls):
del cls.env
def setUp(self):
self.sim_params = create_simulation_parameters(num_days=6)
self.sim_params.capital_base = 10e3
self.benchmark_events = benchmark_events_in_range(self.sim_params,
self.env)
def test_market_hours_calculations(self):
# DST in US/Eastern began on Sunday March 14, 2010
before = datetime(2010, 3, 12, 14, 31, tzinfo=pytz.utc)
after = factory.get_next_trading_dt(
before,
timedelta(days=1),
self.env,
)
self.assertEqual(after.hour, 13)
def test_long_position_receives_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
dividend = factory.create_dividend(
1,
10.00,
# declared date, when the algorithm finds out about
# the dividend
events[0].dt,
# ex_date, the date before which the algorithm must hold stock
# to receive the dividend
events[1].dt,
# pay date, when the algorithm receives the dividend.
events[2].dt
)
# Simulate a transaction being filled prior to the ex_date.
txns = [create_txn(events[0], 10.0, 100)]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
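        # Sketch of the expectations below: a $10.00/share dividend on the 100
        # shares held over the ex_date pays $1,000 on the pay date, a 10%
        # return on the $10,000 capital base, so cumulative returns step up to
        # 0.1 and cash climbs from $9,000 back to $10,000.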
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.1, 0.1, 0.1])
daily_returns = [event['daily_perf']['returns']
for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.10, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used']
for event in results]
self.assertEqual(cash_flows, [-1000, 0, 1000, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [-1000, -1000, 0, 0, 0])
cash_pos = \
[event['cumulative_perf']['ending_cash'] for event in results]
self.assertEqual(cash_pos, [9000, 9000, 10000, 10000, 10000])
def test_long_position_receives_stock_dividend(self):
# post some trades in the market
events = []
for sid in (1, 2):
events.extend(
factory.create_trade_history(
sid,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env)
)
dividend = factory.create_stock_dividend(
1,
payment_sid=2,
ratio=2,
# declared date, when the algorithm finds out about
# the dividend
declared_date=events[0].dt,
# ex_date, the date before which the algorithm must hold stock
# to receive the dividend
ex_date=events[1].dt,
# pay date, when the algorithm receives the dividend.
pay_date=events[2].dt
)
txns = [create_txn(events[0], 10.0, 100)]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.2, 0.2, 0.2])
daily_returns = [event['daily_perf']['returns']
for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.2, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used']
for event in results]
self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [-1000] * 5)
cash_pos = \
[event['cumulative_perf']['ending_cash'] for event in results]
self.assertEqual(cash_pos, [9000] * 5)
def test_long_position_purchased_on_ex_date_receives_no_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt, # Declared date
            events[1].dt,  # Ex-dividend date
events[2].dt # Pay date
)
# Simulate a transaction being filled on the ex_date.
txns = [create_txn(events[1], 10.0, 100)]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, -1000, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows,
[0, -1000, -1000, -1000, -1000])
def test_selling_before_dividend_payment_still_gets_paid(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt, # Declared date
            events[1].dt,  # Ex-dividend date
events[3].dt # Pay date
)
buy_txn = create_txn(events[0], 10.0, 100)
sell_txn = create_txn(events[2], 10.0, -100)
txns = [buy_txn, sell_txn]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0.1, 0.1])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0.1, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [-1000, 0, 1000, 1000, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [-1000, -1000, 0, 1000, 1000])
def test_buy_and_sell_before_ex(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10, 10],
[100, 100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
dividend = factory.create_dividend(
1,
10.00,
events[3].dt,
events[4].dt,
events[5].dt
)
buy_txn = create_txn(events[1], 10.0, 100)
sell_txn = create_txn(events[2], 10.0, -100)
txns = [buy_txn, sell_txn]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 6)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0, 0, 0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, -1000, 1000, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, -1000, 0, 0, 0, 0])
def test_ending_before_pay_date(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
pay_date = self.sim_params.first_open
# find pay date that is much later.
for i in range(30):
pay_date = factory.get_next_trading_dt(pay_date, oneday, self.env)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt,
events[0].dt,
pay_date
)
txns = [create_txn(events[1], 10.0, 100)]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0, 0, 0, 0.0, 0.0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0, 0, 0, 0, 0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, -1000, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(
cumulative_cash_flows,
[0, -1000, -1000, -1000, -1000]
)
def test_short_position_pays_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
dividend = factory.create_dividend(
1,
10.00,
# declare at open of test
events[0].dt,
# ex_date same as trade 2
events[2].dt,
events[3].dt
)
txns = [create_txn(events[1], 10.0, -100)]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, -0.1, -0.1])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.0, -0.1, 0.0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, 1000, 0, -1000, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, 1000, 1000, 0, 0])
def test_no_position_receives_no_dividend(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
dividend = factory.create_dividend(
1,
10.00,
events[0].dt,
events[1].dt,
events[2].dt
)
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [0, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows, [0, 0, 0, 0, 0])
def test_no_dividend_at_simulation_end(self):
# post some trades in the market
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
self.sim_params,
env=self.env
)
dividend = factory.create_dividend(
1,
10.00,
# declared date, when the algorithm finds out about
# the dividend
events[-3].dt,
# ex_date, the date before which the algorithm must hold stock
# to receive the dividend
events[-2].dt,
# pay date, when the algorithm receives the dividend.
# This pays out on the day after the last event
self.env.next_trading_day(events[-1].dt)
)
# Set the last day to be the last event
self.sim_params.period_end = events[-1].dt
self.sim_params.update_internal_from_env(self.env)
# Simulate a transaction being filled prior to the ex_date.
txns = [create_txn(events[0], 10.0, 100)]
results = calculate_results(
self.sim_params,
self.env,
self.benchmark_events,
events,
dividend_events=[dividend],
txns=txns,
)
self.assertEqual(len(results), 5)
cumulative_returns = \
[event['cumulative_perf']['returns'] for event in results]
self.assertEqual(cumulative_returns, [0.0, 0.0, 0.0, 0.0, 0.0])
daily_returns = [event['daily_perf']['returns'] for event in results]
self.assertEqual(daily_returns, [0.0, 0.0, 0.0, 0.0, 0.0])
cash_flows = [event['daily_perf']['capital_used'] for event in results]
self.assertEqual(cash_flows, [-1000, 0, 0, 0, 0])
cumulative_cash_flows = \
[event['cumulative_perf']['capital_used'] for event in results]
self.assertEqual(cumulative_cash_flows,
[-1000, -1000, -1000, -1000, -1000])
class TestDividendPerformanceHolidayStyle(TestDividendPerformance):
    # The holiday tests begin the simulation on the day
# before Thanksgiving, so that the next trading day is
# two days ahead. Any tests that hard code events
# to be start + oneday will fail, since those events will
# be skipped by the simulation.
def setUp(self):
self.dt = datetime(2003, 11, 30, tzinfo=pytz.utc)
self.end_dt = datetime(2004, 11, 25, tzinfo=pytz.utc)
self.sim_params = SimulationParameters(
self.dt,
self.end_dt,
env=self.env)
self.sim_params.capital_base = 10e3
self.benchmark_events = benchmark_events_in_range(self.sim_params,
self.env)
class TestPositionPerformance(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
cls.env.write_data(equities_identifiers=[1, 2])
@classmethod
def tearDownClass(cls):
del cls.env
def setUp(self):
self.sim_params = create_simulation_parameters(num_days=4)
self.finder = self.env.asset_finder
self.benchmark_events = benchmark_events_in_range(self.sim_params,
self.env)
def test_long_short_positions(self):
"""
start with $1000
buy 100 stock1 shares at $10
sell short 100 stock2 shares at $10
stock1 then goes down to $9
stock2 goes to $11
"""
trades_1 = factory.create_trade_history(
1,
[10, 10, 10, 9],
[100, 100, 100, 100],
onesec,
self.sim_params,
env=self.env
)
trades_2 = factory.create_trade_history(
2,
[10, 10, 10, 11],
[100, 100, 100, 100],
onesec,
self.sim_params,
env=self.env
)
txn1 = create_txn(trades_1[1], 10.0, 100)
txn2 = create_txn(trades_2[1], 10.0, -100)
pt = perf.PositionTracker(self.env.asset_finder)
pp = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp.position_tracker = pt
pt.execute_transaction(txn1)
pp.handle_execution(txn1)
pt.execute_transaction(txn2)
pp.handle_execution(txn2)
for trade in itertools.chain(trades_1[:-2], trades_2[:-2]):
pt.update_last_sale(trade)
pp.calculate_performance()
check_perf_period(
pp,
gross_leverage=2.0,
net_leverage=0.0,
long_exposure=1000.0,
longs_count=1,
short_exposure=-1000.0,
shorts_count=1)
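        # Sketch of the leverage arithmetic assumed above (exposure divided by
        # ending equity of $1,000): gross = (1000 + 1000) / 1000 = 2.0,
        # net = (1000 - 1000) / 1000 = 0.0.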
# Validate that the account attributes were updated.
account = pp.as_account()
check_account(account,
settled_cash=1000.0,
equity_with_loan=1000.0,
total_positions_value=0.0,
regt_equity=1000.0,
available_funds=1000.0,
excess_liquidity=1000.0,
cushion=1.0,
leverage=2.0,
net_leverage=0.0,
net_liquidation=1000.0)
# now simulate stock1 going to $9
pt.update_last_sale(trades_1[-1])
# and stock2 going to $11
pt.update_last_sale(trades_2[-1])
pp.calculate_performance()
# Validate that the account attributes were updated.
account = pp.as_account()
check_perf_period(
pp,
gross_leverage=2.5,
net_leverage=-0.25,
long_exposure=900.0,
longs_count=1,
short_exposure=-1100.0,
shorts_count=1)
check_account(account,
settled_cash=1000.0,
equity_with_loan=800.0,
total_positions_value=-200.0,
regt_equity=1000.0,
available_funds=1000.0,
excess_liquidity=1000.0,
cushion=1.25,
leverage=2.5,
net_leverage=-0.25,
net_liquidation=800.0)
def test_levered_long_position(self):
"""
start with $1,000, then buy 1000 shares at $10.
price goes to $11
"""
# post some trades in the market
trades = factory.create_trade_history(
1,
[10, 10, 10, 11],
[100, 100, 100, 100],
onesec,
self.sim_params,
env=self.env
)
txn = create_txn(trades[1], 10.0, 1000)
pt = perf.PositionTracker(self.env.asset_finder)
pp = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp.position_tracker = pt
pt.execute_transaction(txn)
pp.handle_execution(txn)
for trade in trades[:-2]:
pt.update_last_sale(trade)
pp.calculate_performance()
check_perf_period(
pp,
gross_leverage=10.0,
net_leverage=10.0,
long_exposure=10000.0,
longs_count=1,
short_exposure=0.0,
shorts_count=0)
# Validate that the account attributes were updated.
account = pp.as_account()
check_account(account,
settled_cash=-9000.0,
equity_with_loan=1000.0,
total_positions_value=10000.0,
regt_equity=-9000.0,
available_funds=-9000.0,
excess_liquidity=-9000.0,
cushion=-9.0,
leverage=10.0,
net_leverage=10.0,
net_liquidation=1000.0)
# now simulate a price jump to $11
pt.update_last_sale(trades[-1])
pp.calculate_performance()
check_perf_period(
pp,
gross_leverage=5.5,
net_leverage=5.5,
long_exposure=11000.0,
longs_count=1,
short_exposure=0.0,
shorts_count=0)
# Validate that the account attributes were updated.
account = pp.as_account()
check_account(account,
settled_cash=-9000.0,
equity_with_loan=2000.0,
total_positions_value=11000.0,
regt_equity=-9000.0,
available_funds=-9000.0,
excess_liquidity=-9000.0,
cushion=-4.5,
leverage=5.5,
net_leverage=5.5,
net_liquidation=2000.0)
def test_long_position(self):
"""
verify that the performance period calculates properly for a
single buy transaction
"""
# post some trades in the market
trades = factory.create_trade_history(
1,
[10, 10, 10, 11],
[100, 100, 100, 100],
onesec,
self.sim_params,
env=self.env
)
txn = create_txn(trades[1], 10.0, 100)
pt = perf.PositionTracker(self.env.asset_finder)
pp = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp.position_tracker = pt
pt.execute_transaction(txn)
pp.handle_execution(txn)
# This verifies that the last sale price is being correctly
# set in the positions. If this is not the case then returns can
# incorrectly show as sharply dipping if a transaction arrives
# before a trade. This is caused by returns being based on holding
# stocks with a last sale price of 0.
self.assertEqual(pp.positions[1].last_sale_price, 10.0)
for trade in trades:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.period_cash_flow,
-1 * txn.price * txn.amount,
"capital used should be equal to the opposite of the transaction \
cost of sole txn in test"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position")
self.assertEqual(
pp.positions[1].sid,
txn.sid,
"position should be in security with id 1")
self.assertEqual(
pp.positions[1].amount,
txn.amount,
"should have a position of {sharecount} shares".format(
sharecount=txn.amount
)
)
self.assertEqual(
pp.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades[-1]['price'],
"last sale should be same as last trade. \
expected {exp} actual {act}".format(
exp=trades[-1]['price'],
act=pp.positions[1].last_sale_price)
)
self.assertEqual(
pp.ending_value,
1100,
"ending value should be price of last trade times number of \
shares in position"
)
self.assertEqual(pp.pnl, 100, "gain of 1 on 100 shares should be 100")
check_perf_period(
pp,
gross_leverage=1.0,
net_leverage=1.0,
long_exposure=1100.0,
longs_count=1,
short_exposure=0.0,
shorts_count=0)
# Validate that the account attributes were updated.
account = pp.as_account()
check_account(account,
settled_cash=0.0,
equity_with_loan=1100.0,
total_positions_value=1100.0,
regt_equity=0.0,
available_funds=0.0,
excess_liquidity=0.0,
cushion=0.0,
leverage=1.0,
net_leverage=1.0,
net_liquidation=1100.0)
def test_short_position(self):
"""verify that the performance period calculates properly for a \
single short-sale transaction"""
trades = factory.create_trade_history(
1,
[10, 10, 10, 11, 10, 9],
[100, 100, 100, 100, 100, 100],
onesec,
self.sim_params,
env=self.env
)
trades_1 = trades[:-2]
txn = create_txn(trades[1], 10.0, -100)
pt = perf.PositionTracker(self.env.asset_finder)
pp = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp.position_tracker = pt
pt.execute_transaction(txn)
pp.handle_execution(txn)
for trade in trades_1:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.period_cash_flow,
-1 * txn.price * txn.amount,
"capital used should be equal to the opposite of the transaction\
cost of sole txn in test"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position")
self.assertEqual(
pp.positions[1].sid,
txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
pp.positions[1].amount,
-100,
"should have a position of -100 shares"
)
self.assertEqual(
pp.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades_1[-1]['price'],
"last sale should be price of last trade"
)
self.assertEqual(
pp.ending_value,
-1100,
"ending value should be price of last trade times number of \
shares in position"
)
        self.assertEqual(pp.pnl, -100, "rise of 1 on -100 shares should be -100")
# simulate additional trades, and ensure that the position value
# reflects the new price
trades_2 = trades[-2:]
# simulate a rollover to a new period
pp.rollover()
for trade in trades_2:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.period_cash_flow,
0,
"capital used should be zero, there were no transactions in \
performance period"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position"
)
self.assertEqual(
pp.positions[1].sid,
txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
pp.positions[1].amount,
-100,
"should have a position of -100 shares"
)
self.assertEqual(
pp.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades_2[-1].price,
"last sale should be price of last trade"
)
self.assertEqual(
pp.ending_value,
-900,
"ending value should be price of last trade times number of \
shares in position")
self.assertEqual(
pp.pnl,
200,
"drop of 2 on -100 shares should be 200"
)
# now run a performance period encompassing the entire trade sample.
ptTotal = perf.PositionTracker(self.env.asset_finder)
ppTotal = perf.PerformancePeriod(1000.0, self.env.asset_finder)
ppTotal.position_tracker = pt
for trade in trades_1:
ptTotal.update_last_sale(trade)
ptTotal.execute_transaction(txn)
ppTotal.handle_execution(txn)
for trade in trades_2:
ptTotal.update_last_sale(trade)
ppTotal.calculate_performance()
self.assertEqual(
ppTotal.period_cash_flow,
-1 * txn.price * txn.amount,
"capital used should be equal to the opposite of the transaction \
cost of sole txn in test"
)
self.assertEqual(
len(ppTotal.positions),
1,
"should be just one position"
)
self.assertEqual(
ppTotal.positions[1].sid,
txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
ppTotal.positions[1].amount,
-100,
"should have a position of -100 shares"
)
self.assertEqual(
ppTotal.positions[1].cost_basis,
txn.price,
"should have a cost basis of 10"
)
self.assertEqual(
ppTotal.positions[1].last_sale_price,
trades_2[-1].price,
"last sale should be price of last trade"
)
self.assertEqual(
ppTotal.ending_value,
-900,
"ending value should be price of last trade times number of \
shares in position")
self.assertEqual(
ppTotal.pnl,
100,
"drop of 1 on -100 shares should be 100"
)
check_perf_period(
pp,
gross_leverage=0.8181,
net_leverage=-0.8181,
long_exposure=0.0,
longs_count=0,
short_exposure=-900.0,
shorts_count=1)
# Validate that the account attributes.
account = ppTotal.as_account()
check_account(account,
settled_cash=2000.0,
equity_with_loan=1100.0,
total_positions_value=-900.0,
regt_equity=2000.0,
available_funds=2000.0,
excess_liquidity=2000.0,
cushion=1.8181,
leverage=0.8181,
net_leverage=-0.8181,
net_liquidation=1100.0)
def test_covering_short(self):
"""verify performance where short is bought and covered, and shares \
trade after cover"""
trades = factory.create_trade_history(
1,
[10, 10, 10, 11, 9, 8, 7, 8, 9, 10],
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
onesec,
self.sim_params,
env=self.env
)
short_txn = create_txn(
trades[1],
10.0,
-100,
)
cover_txn = create_txn(trades[6], 7.0, 100)
pt = perf.PositionTracker(self.env.asset_finder)
pp = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp.position_tracker = pt
pt.execute_transaction(short_txn)
pp.handle_execution(short_txn)
pt.execute_transaction(cover_txn)
pp.handle_execution(cover_txn)
for trade in trades:
pt.update_last_sale(trade)
pp.calculate_performance()
short_txn_cost = short_txn.price * short_txn.amount
cover_txn_cost = cover_txn.price * cover_txn.amount
self.assertEqual(
pp.period_cash_flow,
-1 * short_txn_cost - cover_txn_cost,
"capital used should be equal to the net transaction costs"
)
self.assertEqual(
len(pp.positions),
1,
"should be just one position"
)
self.assertEqual(
pp.positions[1].sid,
short_txn.sid,
"position should be in security from the transaction"
)
self.assertEqual(
pp.positions[1].amount,
0,
"should have a position of -100 shares"
)
self.assertEqual(
pp.positions[1].cost_basis,
0,
"a covered position should have a cost basis of 0"
)
self.assertEqual(
pp.positions[1].last_sale_price,
trades[-1].price,
"last sale should be price of last trade"
)
self.assertEqual(
pp.ending_value,
0,
"ending value should be price of last trade times number of \
shares in position"
)
self.assertEqual(
pp.pnl,
300,
"gain of 1 on 100 shares should be 300"
)
check_perf_period(
pp,
gross_leverage=0.0,
net_leverage=0.0,
long_exposure=0.0,
longs_count=0,
short_exposure=0.0,
shorts_count=0)
account = pp.as_account()
check_account(account,
settled_cash=1300.0,
equity_with_loan=1300.0,
total_positions_value=0.0,
regt_equity=1300.0,
available_funds=1300.0,
excess_liquidity=1300.0,
cushion=1.0,
leverage=0.0,
net_leverage=0.0,
net_liquidation=1300.0)
def test_cost_basis_calc(self):
history_args = (
1,
[10, 11, 11, 12],
[100, 100, 100, 100],
onesec,
self.sim_params,
self.env
)
trades = factory.create_trade_history(*history_args)
transactions = factory.create_txn_history(*history_args)
pt = perf.PositionTracker(self.env.asset_finder)
pp = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp.position_tracker = pt
average_cost = 0
for i, txn in enumerate(transactions):
pt.execute_transaction(txn)
pp.handle_execution(txn)
average_cost = (average_cost * i + txn.price) / (i + 1)
self.assertEqual(pp.positions[1].cost_basis, average_cost)
for trade in trades:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(
pp.positions[1].last_sale_price,
trades[-1].price,
"should have a last sale of 12, got {val}".format(
val=pp.positions[1].last_sale_price)
)
self.assertEqual(
pp.positions[1].cost_basis,
11,
"should have a cost basis of 11"
)
self.assertEqual(
pp.pnl,
400
)
down_tick = factory.create_trade(
1,
10.0,
100,
trades[-1].dt + onesec)
sale_txn = create_txn(
down_tick,
10.0,
-100)
pp.rollover()
pt.execute_transaction(sale_txn)
pp.handle_execution(sale_txn)
pt.update_last_sale(down_tick)
pp.calculate_performance()
self.assertEqual(
pp.positions[1].last_sale_price,
10,
"should have a last sale of 10, was {val}".format(
val=pp.positions[1].last_sale_price)
)
self.assertEqual(
pp.positions[1].cost_basis,
11,
"should have a cost basis of 11"
)
        self.assertEqual(pp.pnl, -800, "cumulative pnl goes from +400 to -400")
pt3 = perf.PositionTracker(self.env.asset_finder)
pp3 = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp3.position_tracker = pt3
average_cost = 0
for i, txn in enumerate(transactions):
pt3.execute_transaction(txn)
pp3.handle_execution(txn)
average_cost = (average_cost * i + txn.price) / (i + 1)
self.assertEqual(pp3.positions[1].cost_basis, average_cost)
pt3.execute_transaction(sale_txn)
pp3.handle_execution(sale_txn)
trades.append(down_tick)
for trade in trades:
pt3.update_last_sale(trade)
pp3.calculate_performance()
self.assertEqual(
pp3.positions[1].last_sale_price,
10,
"should have a last sale of 10"
)
self.assertEqual(
pp3.positions[1].cost_basis,
11,
"should have a cost basis of 11"
)
self.assertEqual(
pp3.pnl,
-400,
"should be -400 for all trades and transactions in period"
)
def test_cost_basis_calc_close_pos(self):
history_args = (
1,
[10, 9, 11, 8, 9, 12, 13, 14],
[200, -100, -100, 100, -300, 100, 500, 400],
onesec,
self.sim_params,
self.env
)
cost_bases = [10, 10, 0, 8, 9, 9, 13, 13.5]
trades = factory.create_trade_history(*history_args)
transactions = factory.create_txn_history(*history_args)
pt = perf.PositionTracker(self.env.asset_finder)
pp = perf.PerformancePeriod(1000.0, self.env.asset_finder)
pp.position_tracker = pt
for txn, cb in zip(transactions, cost_bases):
pt.execute_transaction(txn)
pp.handle_execution(txn)
self.assertEqual(pp.positions[1].cost_basis, cb)
for trade in trades:
pt.update_last_sale(trade)
pp.calculate_performance()
self.assertEqual(pp.positions[1].cost_basis, cost_bases[-1])
class TestPerformanceTracker(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
cls.env.write_data(equities_identifiers=[1, 2, 133, 134])
@classmethod
def tearDownClass(cls):
del cls.env
NumDaysToDelete = collections.namedtuple(
'NumDaysToDelete', ('start', 'middle', 'end'))
@parameterized.expand([
("Don't delete any events",
NumDaysToDelete(start=0, middle=0, end=0)),
("Delete first day of events",
NumDaysToDelete(start=1, middle=0, end=0)),
("Delete first two days of events",
NumDaysToDelete(start=2, middle=0, end=0)),
("Delete one day of events from the middle",
NumDaysToDelete(start=0, middle=1, end=0)),
("Delete two events from the middle",
NumDaysToDelete(start=0, middle=2, end=0)),
("Delete last day of events",
NumDaysToDelete(start=0, middle=0, end=1)),
("Delete last two days of events",
NumDaysToDelete(start=0, middle=0, end=2)),
("Delete all but one event.",
NumDaysToDelete(start=2, middle=1, end=2)),
])
def test_tracker(self, parameter_comment, days_to_delete):
"""
@days_to_delete - configures which days in the data set we should
remove, used for ensuring that we still return performance messages
even when there is no data.
"""
# This date range covers Columbus day,
# however Columbus day is not a market holiday
#
# October 2008
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start_dt = datetime(year=2008,
month=10,
day=9,
tzinfo=pytz.utc)
end_dt = datetime(year=2008,
month=10,
day=16,
tzinfo=pytz.utc)
trade_count = 6
sid = 133
price = 10.1
price_list = [price] * trade_count
volume = [100] * trade_count
trade_time_increment = timedelta(days=1)
sim_params = SimulationParameters(
period_start=start_dt,
period_end=end_dt,
env=self.env,
)
benchmark_events = benchmark_events_in_range(sim_params, self.env)
trade_history = factory.create_trade_history(
sid,
price_list,
volume,
trade_time_increment,
sim_params,
source_id="factory1",
env=self.env
)
sid2 = 134
price2 = 12.12
price2_list = [price2] * trade_count
trade_history2 = factory.create_trade_history(
sid2,
price2_list,
volume,
trade_time_increment,
sim_params,
source_id="factory2",
env=self.env
)
# 'middle' start of 3 depends on number of days == 7
middle = 3
# First delete from middle
if days_to_delete.middle:
del trade_history[middle:(middle + days_to_delete.middle)]
del trade_history2[middle:(middle + days_to_delete.middle)]
# Delete start
if days_to_delete.start:
del trade_history[:days_to_delete.start]
del trade_history2[:days_to_delete.start]
# Delete from end
if days_to_delete.end:
del trade_history[-days_to_delete.end:]
del trade_history2[-days_to_delete.end:]
sim_params.capital_base = 1000.0
sim_params.frame_index = [
'sid',
'volume',
'dt',
'price',
'changed']
perf_tracker = perf.PerformanceTracker(
sim_params, self.env
)
events = date_sorted_sources(trade_history, trade_history2)
events = [event for event in
self.trades_with_txns(events, trade_history[0].dt)]
# Extract events with transactions to use for verification.
txns = [event for event in
events if event.type == zp.DATASOURCE_TYPE.TRANSACTION]
orders = [event for event in
events if event.type == zp.DATASOURCE_TYPE.ORDER]
all_events = date_sorted_sources(events, benchmark_events)
filtered_events = [filt_event for filt_event
in all_events if filt_event.dt <= end_dt]
filtered_events.sort(key=lambda x: x.dt)
grouped_events = itertools.groupby(filtered_events, lambda x: x.dt)
perf_messages = []
for date, group in grouped_events:
for event in group:
if event.type == zp.DATASOURCE_TYPE.TRADE:
perf_tracker.process_trade(event)
elif event.type == zp.DATASOURCE_TYPE.ORDER:
perf_tracker.process_order(event)
elif event.type == zp.DATASOURCE_TYPE.BENCHMARK:
perf_tracker.process_benchmark(event)
elif event.type == zp.DATASOURCE_TYPE.TRANSACTION:
perf_tracker.process_transaction(event)
msg = perf_tracker.handle_market_close_daily()
perf_messages.append(msg)
self.assertEqual(perf_tracker.txn_count, len(txns))
self.assertEqual(perf_tracker.txn_count, len(orders))
positions = perf_tracker.cumulative_performance.positions
if len(txns) == 0:
self.assertNotIn(sid, positions)
else:
expected_size = len(txns) / 2 * -25
cumulative_pos = positions[sid]
self.assertEqual(cumulative_pos.amount, expected_size)
self.assertEqual(len(perf_messages),
sim_params.days_in_period)
check_perf_tracker_serialization(perf_tracker)
def trades_with_txns(self, events, no_txn_dt):
for event in events:
# create a transaction for all but
# first trade in each sid, to simulate None transaction
if event.dt != no_txn_dt:
order = Order(
sid=event.sid,
amount=-25,
dt=event.dt
)
order.source_id = 'MockOrderSource'
yield order
yield event
txn = Transaction(
sid=event.sid,
amount=-25,
dt=event.dt,
price=10.0,
commission=0.50,
order_id=order.id
)
txn.source_id = 'MockTransactionSource'
yield txn
else:
yield event
def test_minute_tracker(self):
""" Tests minute performance tracking."""
start_dt = self.env.exchange_dt_in_utc(datetime(2013, 3, 1, 9, 31))
end_dt = self.env.exchange_dt_in_utc(datetime(2013, 3, 1, 16, 0))
foosid = 1
barsid = 2
sim_params = SimulationParameters(
period_start=start_dt,
period_end=end_dt,
emission_rate='minute',
env=self.env,
)
tracker = perf.PerformanceTracker(sim_params, env=self.env)
foo_event_1 = factory.create_trade(foosid, 10.0, 20, start_dt)
order_event_1 = Order(sid=foo_event_1.sid,
amount=-25,
dt=foo_event_1.dt)
bar_event_1 = factory.create_trade(barsid, 100.0, 200, start_dt)
txn_event_1 = Transaction(sid=foo_event_1.sid,
amount=-25,
dt=foo_event_1.dt,
price=10.0,
commission=0.50,
order_id=order_event_1.id)
benchmark_event_1 = Event({
'dt': start_dt,
'returns': 0.01,
'type': zp.DATASOURCE_TYPE.BENCHMARK
})
foo_event_2 = factory.create_trade(
foosid, 11.0, 20, start_dt + timedelta(minutes=1))
bar_event_2 = factory.create_trade(
barsid, 11.0, 20, start_dt + timedelta(minutes=1))
benchmark_event_2 = Event({
'dt': start_dt + timedelta(minutes=1),
'returns': 0.02,
'type': zp.DATASOURCE_TYPE.BENCHMARK
})
events = [
foo_event_1,
order_event_1,
benchmark_event_1,
txn_event_1,
bar_event_1,
foo_event_2,
benchmark_event_2,
bar_event_2,
]
grouped_events = itertools.groupby(
events, operator.attrgetter('dt'))
messages = {}
for date, group in grouped_events:
tracker.set_date(date)
for event in group:
if event.type == zp.DATASOURCE_TYPE.TRADE:
tracker.process_trade(event)
elif event.type == zp.DATASOURCE_TYPE.BENCHMARK:
tracker.process_benchmark(event)
elif event.type == zp.DATASOURCE_TYPE.ORDER:
tracker.process_order(event)
elif event.type == zp.DATASOURCE_TYPE.TRANSACTION:
tracker.process_transaction(event)
msg, _ = tracker.handle_minute_close(date)
messages[date] = msg
self.assertEquals(2, len(messages))
msg_1 = messages[foo_event_1.dt]
msg_2 = messages[foo_event_2.dt]
self.assertEquals(1, len(msg_1['minute_perf']['transactions']),
"The first message should contain one "
"transaction.")
# Check that transactions aren't emitted for previous events.
self.assertEquals(0, len(msg_2['minute_perf']['transactions']),
"The second message should have no "
"transactions.")
self.assertEquals(1, len(msg_1['minute_perf']['orders']),
"The first message should contain one orders.")
# Check that orders aren't emitted for previous events.
self.assertEquals(0, len(msg_2['minute_perf']['orders']),
"The second message should have no orders.")
# Ensure that period_close moves through time.
# Also, ensure that the period_closes are the expected dts.
self.assertEquals(foo_event_1.dt,
msg_1['minute_perf']['period_close'])
self.assertEquals(foo_event_2.dt,
msg_2['minute_perf']['period_close'])
# In this test event1 transactions arrive on the first bar.
# This leads to no returns as the price is constant.
# Sharpe ratio cannot be computed and is None.
# In the second bar we can start establishing a sharpe ratio.
self.assertIsNone(msg_1['cumulative_risk_metrics']['sharpe'])
self.assertIsNotNone(msg_2['cumulative_risk_metrics']['sharpe'])
check_perf_tracker_serialization(tracker)
def test_close_position_event(self):
pt = perf.PositionTracker(asset_finder=self.env.asset_finder)
dt = pd.Timestamp("1984/03/06 3:00PM")
pos1 = perf.Position(1, amount=np.float64(120.0),
last_sale_date=dt, last_sale_price=3.4)
pos2 = perf.Position(2, amount=np.float64(-100.0),
last_sale_date=dt, last_sale_price=3.4)
pt.update_positions({1: pos1, 2: pos2})
event_type = DATASOURCE_TYPE.CLOSE_POSITION
index = [dt + timedelta(days=1)]
pan = pd.Panel({1: pd.DataFrame({'price': 1, 'volume': 0,
'type': event_type}, index=index),
2: pd.DataFrame({'price': 1, 'volume': 0,
'type': event_type}, index=index),
3: pd.DataFrame({'price': 1, 'volume': 0,
'type': event_type}, index=index)})
source = DataPanelSource(pan)
for i, event in enumerate(source):
txn = pt.maybe_create_close_position_transaction(event)
if event.sid == 1:
# Test owned long
self.assertEqual(-120, txn.amount)
elif event.sid == 2:
# Test owned short
self.assertEqual(100, txn.amount)
elif event.sid == 3:
# Test not-owned SID
self.assertIsNone(txn)
def test_handle_sid_removed_from_universe(self):
# post some trades in the market
sim_params = create_simulation_parameters(num_days=5)
events = factory.create_trade_history(
1,
[10, 10, 10, 10, 10],
[100, 100, 100, 100, 100],
oneday,
sim_params,
env=self.env
)
# Create a tracker and a dividend
perf_tracker = perf.PerformanceTracker(sim_params, env=self.env)
dividend = factory.create_dividend(
1,
10.00,
# declared date, when the algorithm finds out about
# the dividend
events[0].dt,
# ex_date, the date before which the algorithm must hold stock
# to receive the dividend
events[1].dt,
# pay date, when the algorithm receives the dividend.
events[2].dt
)
dividend_frame = pd.DataFrame(
[dividend.to_series(index=zp.DIVIDEND_FIELDS)],
)
perf_tracker.update_dividends(dividend_frame)
# Ensure that the dividend is in the tracker
self.assertIn(1, perf_tracker.dividend_frame['sid'].values)
# Inform the tracker that sid 1 has been removed from the universe
perf_tracker.handle_sid_removed_from_universe(1)
# Ensure that the dividend for sid 1 has been removed from dividend
# frame
self.assertNotIn(1, perf_tracker.dividend_frame['sid'].values)
def test_serialization(self):
start_dt = datetime(year=2008,
month=10,
day=9,
tzinfo=pytz.utc)
end_dt = datetime(year=2008,
month=10,
day=16,
tzinfo=pytz.utc)
sim_params = SimulationParameters(
period_start=start_dt,
period_end=end_dt,
env=self.env,
)
perf_tracker = perf.PerformanceTracker(
sim_params, env=self.env
)
check_perf_tracker_serialization(perf_tracker)
class TestPosition(unittest.TestCase):
def setUp(self):
pass
def test_serialization(self):
dt =
|
pd.Timestamp("1984/03/06 3:00PM")
|
pandas.Timestamp
|
import time
from collections import Counter
import warnings; warnings.filterwarnings('ignore')
import numpy as np
import random
import pandas as pd
import matplotlib.pyplot as plt
from algorithms import ShapeletTransformer
from extractors.extractor import MultiGeneticExtractor
from data.load_all_datasets import load_data_train_test
from sklearn.metrics import accuracy_score, log_loss
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from tslearn.shapelets import ShapeletModel
def grabocka_params_to_shapelet_size_dict(n_ts, ts_sz, n_shapelets, l, r):
base_size = int(l * ts_sz)
d = {}
for sz_idx in range(r):
shp_sz = base_size * (sz_idx + 1)
d[shp_sz] = n_shapelets
return d
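# A minimal usage sketch of the helper above (illustrative values only, not
# taken from the dataset loaded below): with l=0.075 and a series length of
# 84, base_size = int(0.075 * 84) = 6, so r=2 yields shapelet lengths 6 and 12.
_example_sizes = grabocka_params_to_shapelet_size_dict(
    n_ts=100, ts_sz=84, n_shapelets=4, l=0.075, r=2)
assert _example_sizes == {6: 4, 12: 4}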
np.random.seed(1337)
random.seed(1337)
TRAIN_PATH = 'data/partitioned/MoteStrain/MoteStrain_train.csv'
TEST_PATH = 'data/partitioned/MoteStrain/MoteStrain_test.csv'
# Load the training and testing dataset (features + label vector)
train_df = pd.read_csv(TRAIN_PATH)
test_df = pd.read_csv(TEST_PATH)
X_train = train_df.drop('target', axis=1).values
y_train = train_df['target']
X_test = test_df.drop('target', axis=1).values
y_test = test_df['target']
map_dict = {}
for j, c in enumerate(np.unique(y_train)):
map_dict[c] = j
y_train = y_train.map(map_dict)
y_test = y_test.map(map_dict)
print(set(y_train), set(y_test))
y_train = y_train.values
y_test = y_test.values
nr_shap, l, r, reg, max_it = 0.05, 0.075, 2, 0.01, 5000
measurements = []
for _nr_shap in np.arange(0.05, 0.55, 0.05):
shapelet_dict = grabocka_params_to_shapelet_size_dict(
X_train.shape[0], X_train.shape[1], int(_nr_shap*X_train.shape[1]), l, r
)
clf = ShapeletModel(n_shapelets_per_size=shapelet_dict,
max_iter=max_it, verbose_level=0, batch_size=8,
optimizer='sgd', weight_regularizer=reg)
start = time.time()
clf.fit(
np.reshape(
X_train,
(X_train.shape[0], X_train.shape[1], 1)
),
y_train
)
learning_time = time.time() - start
X_distances_train = clf.transform(X_train)
X_distances_test = clf.transform(X_test)
lr = GridSearchCV(LogisticRegression(), {'penalty': ['l1', 'l2'], 'C': [0.001, 0.01, 0.1, 1.0, 10.0]})
lr.fit(X_distances_train, y_train)
print('[K]', _nr_shap, accuracy_score(y_test, lr.predict(X_distances_test)))
measurements.append([_nr_shap, accuracy_score(y_test, lr.predict(X_distances_test))])
plt.figure()
plt.plot([x[0] for x in measurements], [x[1] for x in measurements])
plt.title('Sensitivity of LTS on hyper-parameter K')
plt.show()
measurements_df = pd.DataFrame(measurements, columns=['nr_shap', 'accuracy'])
measurements_df.to_csv('lts_param_K.csv')
measurements = []
for _l in np.arange(0.075, 0.4, 0.075):
shapelet_dict = grabocka_params_to_shapelet_size_dict(
X_train.shape[0], X_train.shape[1], int(nr_shap*X_train.shape[1]), _l, r
)
clf = ShapeletModel(n_shapelets_per_size=shapelet_dict,
max_iter=max_it, verbose_level=0, batch_size=8,
optimizer='sgd', weight_regularizer=reg)
start = time.time()
clf.fit(
np.reshape(
X_train,
(X_train.shape[0], X_train.shape[1], 1)
),
y_train
)
learning_time = time.time() - start
X_distances_train = clf.transform(X_train)
X_distances_test = clf.transform(X_test)
lr = GridSearchCV(LogisticRegression(), {'penalty': ['l1', 'l2'], 'C': [0.001, 0.01, 0.1, 1.0, 10.0]})
lr.fit(X_distances_train, y_train)
    print('[L]', _l, accuracy_score(y_test, lr.predict(X_distances_test)))
measurements.append([_l, accuracy_score(y_test, lr.predict(X_distances_test))])
plt.figure()
plt.plot([x[0] for x in measurements], [x[1] for x in measurements])
plt.title('Sensitivity of LTS on hyper-parameter L (MoteStrain)')
plt.show()
measurements_df =
|
pd.DataFrame(measurements, columns=['l', 'accuracy'])
|
pandas.DataFrame
|
import pandas as pd
from sodapy import Socrata
import datetime
import definitions
# global variables for main data:
hhs_data, test_data, nyt_data_us, nyt_data_state, max_hosp_date = [],[],[],[],[]
"""
get_data()
Fetches data from API, filters, cleans, and combines with provisional.
After running, global variables are filled for use in subsequent functions
"""
def get_data():
global nyt_data_us
global nyt_data_state
global test_data
global hhs_data
global max_hosp_date
nyt_data_us = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us.csv")
nyt_data_state = pd.read_csv("https://raw.githubusercontent.com/nytimes/covid-19-data/master/rolling-averages/us-states.csv")
client = Socrata("healthdata.gov", None)
results = client.get("g62h-syeh", limit=2000000)
test_results = client.get("j8mb-icvb", limit=2000000)
print("LOG: Fetched all raw data")
# Filter data to get columns of interest
hhs_data = pd.DataFrame.from_records(results)[['state', 'date', 'inpatient_beds_used_covid']]
hhs_data.inpatient_beds_used_covid = hhs_data.inpatient_beds_used_covid.fillna(0)
hhs_data = hhs_data.astype({'inpatient_beds_used_covid': 'int32'})
test_data = pd.DataFrame.from_records(test_results)[['state', 'date', 'overall_outcome', 'new_results_reported']]
test_data.new_results_reported = test_data.new_results_reported.fillna(0)
test_data = test_data.astype({'new_results_reported': 'int32'})
print("LOG: Filtered Data")
# For provisional data, gets days since most recent update of HHS time series
max_date = hhs_data.date.max()
max_hosp_date = max_date
provisional = client.get("4cnb-m4rz", limit=2000000, where=f"update_date > '{max_date}'")
hhs_provisional = pd.DataFrame.from_records(provisional)[['update_date', 'archive_link']]
hhs_provisional.update_date = hhs_provisional.update_date.apply(lambda x: x[:10])
hhs_provisional.update_date = pd.to_datetime(hhs_provisional.update_date)
# Gets last archive of every day
group = hhs_provisional.groupby(['update_date'])
hhs_provisional = group.last()
# Add provisional data to HHS data
frames = []
for a in hhs_provisional.iterrows():
date = a[0]
url = a[1].item()['url']
df = pd.read_csv(url)[['state', 'inpatient_beds_used_covid']]
df['date']=date
if date > pd.Timestamp(max_date): # Avoids double counting if provisional update came after real update
frames.append(df)
frames.append(hhs_data)
hhs_data = (pd.concat(frames))
print("LOG: Added HHS Provisional data")
# Make date columns in proper format
# hhs_data.date = hhs_data.date.apply(lambda x: x[:10])
hhs_data.date= pd.to_datetime(hhs_data.date)
# hhs_data.to_csv("../data/hospitalizations.csv")
    print("LOG: Prepared HHS data")
test_data.date = test_data.date.apply(lambda x: x[:10])
test_data.date = pd.to_datetime(test_data.date)
nyt_data_us.date = pd.to_datetime(nyt_data_us.date)
nyt_data_state.date = pd.to_datetime(nyt_data_state.date)
print("LOG: Done getting data")
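# Hypothetical usage sketch (not part of the original module): get_data() must run once
# to populate the module-level globals before any of the query helpers below are used.
# Wrapped in a function so importing this module stays side-effect free.
def _example_get_data():
    get_data()
    print("HHS hospitalizations through:", hhs_data.date.max())
    print("NYT US case data through:", nyt_data_us.date.max())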
"""
get_state_cases
Creates dataframe of time series date and cases for given state
inputs:
state_codes: List of 2-letter codes of states to query
start_date (pd.Timestamp): starting date, defaults to 1-1-2020
end_date (pd.Timestamp): ending date, defaults to today
returns:
    df with 'date' and 'cases' columns (cases per million when normalize=True)
"""
def get_state_cases(state_codes, start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today(), normalize=True):
curr_date = start_date
input_states = [definitions.states[s] for s in state_codes]
state_data = nyt_data_state[nyt_data_state.state.isin(input_states)][:]
max_date = state_data.date.max()
states_population = sum([definitions.populations[s] for s in input_states])
lst = []
while(curr_date <= end_date and curr_date <= max_date):
day_data = state_data[state_data.date == str(curr_date)]
if normalize:
case_sum = day_data.cases.sum() / states_population * 1000000
else:
case_sum = day_data.cases.sum()
newRow = {'date': curr_date, 'cases': case_sum}
lst.append(newRow)
curr_date += datetime.timedelta(1)
return pd.DataFrame(lst)
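# Hypothetical usage sketch: the state codes are illustrative and assumed to exist in
# definitions.states; get_data() must have been called first so nyt_data_state is populated.
def _example_get_state_cases():
    cases = get_state_cases(["NY", "NJ"], start_date=pd.Timestamp(2021, 1, 1))
    # one row per day; 'cases' is per million residents because normalize defaults to True
    print(cases.tail())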
def get_us_cases(start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today()):
us_data = nyt_data_us[(nyt_data_us.date >= start_date) & (nyt_data_us.date <= end_date)]
return us_data[['date', 'cases']]
"""
get_state_deaths
Same as above, deaths
"""
def get_state_deaths(state_codes, start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today(), normalize=True):
curr_date = start_date
input_states = [definitions.states[s] for s in state_codes]
state_data = nyt_data_state[nyt_data_state.state.isin(input_states)]
max_date = state_data.date.max()
states_population = sum([definitions.populations[s] for s in input_states])
lst = []
while(curr_date <= end_date and curr_date <= max_date):
day_data = state_data[state_data.date == str(curr_date)]
if normalize:
case_sum = day_data.deaths.sum() / states_population * 1000000
else:
case_sum = day_data.deaths.sum()
newRow = {'date': curr_date, 'deaths': case_sum}
lst.append(newRow)
curr_date += datetime.timedelta(1)
return pd.DataFrame(lst)
def get_us_deaths(start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today()):
us_data = nyt_data_us[(nyt_data_us.date >= start_date) & (nyt_data_us.date <= end_date)]
return us_data[['date', 'deaths']]
"""
get_state_hospitalizations
Same as above, hospitalizations
"""
def get_state_hospitalizations(state_codes, start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today(), normalize=True):
curr_date = start_date
state_data = hhs_data[hhs_data.state.isin(state_codes)]
input_states = [definitions.states[s] for s in state_codes]
max_date = state_data.date.max()
states_population = sum([definitions.populations[s] for s in input_states])
lst = []
while(curr_date <= end_date and curr_date <= max_date):
day_data = state_data[state_data.date == str(curr_date)]
if normalize:
hosp_sum = day_data.inpatient_beds_used_covid.sum() / states_population * 1000000
else:
hosp_sum = day_data.inpatient_beds_used_covid.sum()
newRow = {'date': curr_date, 'hospitalizations': hosp_sum}
lst.append(newRow)
curr_date += datetime.timedelta(1)
return pd.DataFrame(lst)
"""
get_us_hospitalizations
Same as above, hospitalizations
"""
def get_us_hospitalizations(start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today()):
curr_date = start_date
max_date = hhs_data.date.max()
lst = []
while(curr_date <= end_date and curr_date <= max_date):
day_data = hhs_data[hhs_data.date == str(curr_date)]
hosp_sum = day_data.inpatient_beds_used_covid.sum()
newRow = {'date': curr_date, 'inpatient_beds_used_covid': hosp_sum}
lst.append(newRow)
curr_date += datetime.timedelta(1)
return pd.DataFrame(lst)
"""
get_state_positivity
Creates dataframe of time series date and test positivity for given state
inputs:
    state_codes: list of 2-letter codes of states
start_date (pd.Timestamp): starting date, defaults to 1-1-2020
end_date (pd.Timestamp): ending date, defaults to today
returns:
df with 'date' and 'test_positivity'
"""
def get_state_positivity(state_codes, start_date = pd.Timestamp(2020,1,1), end_date = pd.Timestamp.today()):
test_data_state = test_data[test_data.state.isin(state_codes)] # Get only data from input State
max_date = test_data_state.date.max()
curr_date = start_date
lst = []
while(curr_date <= end_date and curr_date <= max_date): # Loop through all unique dates
day_data = test_data_state[test_data_state.date == str(curr_date)]
test_pos = day_data[day_data.overall_outcome == "Positive"].new_results_reported # Get num positive tests
test_pos = test_pos.sum() if test_pos.any() else 0 # Extract number if exists
test_neg = day_data[day_data.overall_outcome == "Negative"].new_results_reported # Get num negative tests
test_neg = test_neg.sum() if test_neg.any() else 0 # Extract number if exists
if(test_pos == 0 and test_neg == 0):
test_pct = 0 # Fixes divide by zero issue
else:
test_pct = test_pos/ (test_pos + test_neg) * 100
newRow = {"date": curr_date, "test_positivity": test_pct, "positive_tests" : test_pos, "negative_tests" : test_neg}
lst.append(newRow)
curr_date += datetime.timedelta(1)
df = pd.DataFrame(lst) # Create dataframe with all dates and test positivity
a = df.rolling(7).sum()
df['avg'] = a.apply(lambda x: (100* (x.positive_tests / (x.positive_tests + x.negative_tests))) if (x.positive_tests + x.negative_tests) > 0 else None, axis=1)
return df
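# Hypothetical usage sketch: assumes get_data() was called first. Besides the daily
# 'test_positivity', the frame carries a 7-day rolling positivity in the 'avg' column.
def _example_get_state_positivity():
    pos = get_state_positivity(["CA"], start_date=pd.Timestamp(2021, 6, 1))
    print(pos[["date", "test_positivity", "avg"]].tail())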
"""
get_us_positivity
Constructs a data table of the entire US test positivity
start_date (pd.Timestamp) : Starting date of table
end_date (pd.Timestamp) : Ending date of table
returns: dataframe with date, test positivity
"""
def get_us_positivity(start_date = pd.Timestamp(2020,1,1), end_date =
|
pd.Timestamp.today()
|
pandas.Timestamp.today
|
#%%
import pandas as pd
import glob
import numpy as np
import math
#%%
def parse_single(year):
PUS_start = pd.DataFrame()
useful_cols = [
"WAGP",
"SEX",
"AGEP",
"DECADE",
"RAC2P",
"RAC1P",
"SCHL",
"WKW",
"WKHP",
"OCCP",
"POWSP",
"ST",
"HISP",
]
path = "data/data_raw/%s" % year
PUS_start = pd.concat(
[pd.read_csv(f, usecols=useful_cols) for f in glob.glob(path + "/*.csv")],
ignore_index=True,
)
return PUS_start
def mapping_features(df):
    # Decade of entry into the US; NaN in DECADE means born in the US.
    # (RACE is derived further below, after RAC2P is recoded for Hispanic respondents.)
df["DECADE"] = df["DECADE"].replace(np.nan, 0)
df["DECADE"] = df["DECADE"].map(
lambda y: "Born in US"
if y == 0
else "Before 1950"
if y == 1
else "1950 - 1959"
if y == 2
else "1960 - 1969"
if y == 3
else "1970 - 1979"
if y == 4
else "1980 - 1989"
if y == 5
else "1990 - 1999"
if y == 6
else "2000 - 2009"
if y == 7
else "2010 or later"
if y == 8
else np.nan
)
# Race
df["RAC2P"] = np.where(df["HISP"] == 1, df["RAC2P"], 70)
df["RACE"] = df["RAC2P"].map(
lambda y: "White"
if y == 1
else "Black"
if y == 2
else "American Indian"
if y <= 29
else "Native Alaskan"
if y <= 37
else y
if y <= 58
else "Hispanic"
if y == 70
else np.nan
)
df["RAC2P"] = np.where(df["HISP"] == 1, df["RAC2P"], 70)
df["RACE2"] = df["RAC2P"].map(
lambda y: "White"
if y == 1
else "Black"
if y == 2
else "American Indian"
if y <= 29
else "Native Alaskan"
if y <= 37
else "Asian"
if y <= 58
else "Hispanic"
if y == 70
else np.nan
)
# Sex
df["SEX"] = df["SEX"].map(
lambda y: "Male" if y == 1 else "Female" if y == 2 else "na"
)
# AGE
df["AGE"] = df["AGEP"].map(
lambda y: "0-17"
if y <= 18
else "18-24"
if y <= 24
else "25-54"
if y <= 54
else "55-64"
if y <= 64
else "65+"
)
# Education
df["EDU"] = df["SCHL"].map(
lambda y: "No_Highschool"
if y <= 15
else "Highschool"
if y <= 17
else "Some_College"
if y <= 19
else "Some_College"
if y == 20
else "B.S._Degree"
if y == 21
else "M.S._Degree"
if y == 22
else "PhD_or_Prof"
if y <= 24
else np.nan
)
# Occupation
df["JOB"] = df["OCCP"].map(
lambda y: "Business"
if y <= 960
else "Science"
if y <= 1980
else "Art"
if y <= 2970
else "Healthcare"
if y <= 3550
else "Services"
if y <= 4655
else "Sales"
if y <= 5940
else "Maintenance"
if y <= 7640
else "Production"
if y <= 8990
else "Transport"
if y <= 9760
else "Military"
if y <= 9830
else np.nan
)
return df
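# Minimal sketch with toy rows (not real PUMS records) showing how mapping_features
# turns the raw numeric codes into readable labels.
def _demo_mapping_features():
    toy = pd.DataFrame(
        {
            "RAC2P": [1, 38, 2],       # White, an Asian detail code, Black
            "HISP": [1, 1, 2],         # HISP != 1 is recoded to Hispanic
            "DECADE": [np.nan, 5, 3],  # NaN means born in the US
            "SEX": [1, 2, 2],
            "AGEP": [40, 30, 17],
            "SCHL": [21, 16, 19],
            "OCCP": [800, 2000, 3000],
        }
    )
    print(mapping_features(toy)[["RACE2", "DECADE", "SEX", "AGE", "EDU", "JOB"]])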
# %%
df_raw = parse_single(2018)
#%%
df_raw = parse_single(2018)
df = mapping_features(df_raw)
groupby_col = ["RACE", "DECADE"]
agg_df = df.groupby(groupby_col).count().reset_index()
agg_df = agg_df.iloc[:, 0 : len(groupby_col) + 1]
coding_df = pd.read_csv("data/data_raw/race_coding.csv")
result = pd.merge(agg_df, coding_df, how="left", on="RACE")
result["% Total Pop"] = result["ST"] / result["ST"].sum()
asian_result = result.dropna()
asian_result["% Asian Pop"] = asian_result["ST"] / asian_result["ST"].sum()
recomb_df = pd.DataFrame()
for race in asian_result["Asian"].unique():
    subset_df = asian_result[asian_result["Asian"] == race]
    subset_df["% Race Pop"] = subset_df["ST"] / subset_df["ST"].sum()
recomb_df = pd.concat([recomb_df, subset_df])
def panda_strip(x):
r = []
for y in x:
if isinstance(y, str):
y = y.strip()
r.append(y)
return pd.Series(r)
recomb_df = recomb_df.apply(lambda x: panda_strip(x))
recomb_df[["Asian"]] = recomb_df[["Asian"]].apply(lambda x: x.str.split().str[0])
wide_format = recomb_df.pivot(
index="Asian", columns="DECADE", values="% Race Pop"
).reset_index()
wide_format = wide_format[
[
"Asian",
"Born in US",
"Before 1950",
"1950 - 1959",
"1960 - 1969",
"1970 - 1979",
"1980 - 1989",
"1990 - 1999",
"2000 - 2009",
"2010 or later",
]
]
wide_format["% Immigrated"] = 1 - wide_format["Born in US"]
wide_format = wide_format.rename(columns={"Asian": "RACE2"})
wide_format = wide_format.replace(np.nan, 0)
wide_format.to_csv("data/data_output/imm_output.csv")
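# Toy sketch of the long-to-wide reshape above: per-decade shares in long form become
# one row per group with a column per decade (numbers invented for illustration).
def _demo_pivot_wide():
    long_df = pd.DataFrame(
        {
            "RACE2": ["Asian", "Asian", "Hispanic", "Hispanic"],
            "DECADE": ["Born in US", "2010 or later", "Born in US", "2010 or later"],
            "% Race Pop": [0.6, 0.4, 0.7, 0.3],
        }
    )
    wide = long_df.pivot(index="RACE2", columns="DECADE", values="% Race Pop").reset_index()
    wide["% Immigrated"] = 1 - wide["Born in US"]
    print(wide)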
# %%
df_raw = parse_single(2018)
df = mapping_features(df_raw)
groupby_col = ["RACE2", "DECADE"]
agg_df = df.groupby(groupby_col).count().reset_index()
agg_df = agg_df.iloc[:, 0 : len(groupby_col) + 1]
recomb_df2 = pd.DataFrame()
for race in agg_df["RACE2"].unique():
    subset_df = agg_df[agg_df["RACE2"] == race]
    subset_df["% Race Pop"] = subset_df["ST"] / subset_df["ST"].sum()
recomb_df2 = pd.concat([recomb_df2, subset_df])
wide_format2 = recomb_df2.pivot(
index="RACE2", columns="DECADE", values="% Race Pop"
).reset_index()
wide_format2 = wide_format2[
[
"RACE2",
"Born in US",
"Before 1950",
"1950 - 1959",
"1960 - 1969",
"1970 - 1979",
"1980 - 1989",
"1990 - 1999",
"2000 - 2009",
"2010 or later",
]
]
wide_format2["% Immigrated"] = 1 - wide_format2["Born in US"]
wide_format2.to_csv("data/data_output/imm_all_output.csv")
wide_format_comb = pd.concat([wide_format, wide_format2])
for col in wide_format_comb:
if col != "RACE2":
wide_format_comb[col] = wide_format_comb[col].astype(float).map("{:.2%}".format)
wide_format_comb.to_csv("data/data_output/imm_comb_output.csv")
recomb_df = recomb_df.rename(columns={"Asian": "RACE2"})
recomb_df["% Race Pop"] = recomb_df["% Race Pop"].astype(float).map("{:.2%}".format)
recomb_df2["% Race Pop"] = recomb_df2["% Race Pop"].astype(float).map("{:.2%}".format)
recomb_full = pd.concat([recomb_df, recomb_df2])
recomb_full.to_csv("data/data_output/imm_long.csv")
recomb_df.to_csv("data/data_output/imm_long1.csv")
recomb_df2.to_csv("data/data_output/imm_long2.csv")
#%%
df_raw = parse_single(2018)
df = mapping_features(df_raw)
groupby_col = ["RACE", "EDU"]
agg_df = df[df["AGE"] != "0-17"]
agg_df = agg_df.groupby(groupby_col).count().reset_index()
agg_df = agg_df.iloc[:, 0 : len(groupby_col) + 1]
coding_df = pd.read_csv("data/data_raw/race_coding.csv")
result = pd.merge(agg_df, coding_df, how="left", on="RACE")
result["% Total Pop"] = result["ST"] / result["ST"].sum()
asian_result = result.dropna()
asian_result["% Asian Pop"] = asian_result["ST"] / asian_result["ST"].sum()
recomb_df = pd.DataFrame()
for race in asian_result["Asian"].unique():
    subset_df = asian_result[asian_result["Asian"] == race]
    subset_df["% Race Pop"] = subset_df["ST"] / subset_df["ST"].sum()
recomb_df =
|
pd.concat([recomb_df, subset_df])
|
pandas.concat
|
import numpy as np
from collections import defaultdict
import gc
import time
from pandas import DataFrame
from pandas.util.testing import rands
import random
N = 10000
indices = np.array([rands(10) for _ in range(N)], dtype='O')
indices2 = np.array([
|
rands(10)
|
pandas.util.testing.rands
|
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
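    # Hedged sketch, not part of the original suite: how the io.hdf.default_format
    # option reads in plain user code (file name is illustrative; requires PyTables).
    @staticmethod
    def _sketch_default_format_option(path="example_default_format.h5"):
        df = pd.DataFrame({"A": range(3)})
        with pd.option_context("io.hdf.default_format", "table"):
            df.to_hdf(path, "df")               # stored as an appendable table
            df.to_hdf(path, "df", append=True)  # so further appends succeed
        print(pd.read_hdf(path, "df"))
        os.remove(path)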
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
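    # Hedged sketch, not from the original suite: walking an HDF file's group tree
    # with HDFStore.walk in ordinary user code (file name is illustrative).
    @staticmethod
    def _sketch_walk_usage(path="example_walk.h5"):
        with HDFStore(path, mode="w") as store:
            store.put("/grp/df1", pd.DataFrame({"a": [1, 2]}))
            store.put("/grp/sub/df2", pd.DataFrame({"b": [3, 4]}))
            for group_path, subgroups, leaves in store.walk():
                print(group_path, sorted(subgroups), sorted(leaves))
        os.remove(path)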
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
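    # Hedged sketch (illustrative file name): choosing compression explicitly with
    # to_hdf; passing only complevel uses zlib, complib selects another library.
    @staticmethod
    def _sketch_compression_usage(path="example_compressed.h5"):
        df = pd.DataFrame({"x": range(1000)})
        df.to_hdf(path, "zlib9", complevel=9)                    # zlib at level 9
        df.to_hdf(path, "blosc5", complevel=5, complib="blosc")  # blosc at level 5
        print(pd.read_hdf(path, "blosc5").shape)
        os.remove(path)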
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
        # Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
        # Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
            # this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
                # this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
                # dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
            # see GH 7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
        not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like invalid
        # combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
            # store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
                assert (
                    getattr(store.get_storer(key).table.description, name).itemsize
                    == size
                )
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
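    # Hedged sketch (illustrative file name): reserving string width up front with
    # min_itemsize so longer strings can still be appended to the same table later.
    @staticmethod
    def _sketch_min_itemsize(path="example_min_itemsize.h5"):
        short = pd.DataFrame({"s": ["a", "bb"]})
        longer = pd.DataFrame({"s": ["a much longer string"]})
        short.to_hdf(path, "df", format="table", data_columns=["s"], min_itemsize={"s": 30})
        longer.to_hdf(path, "df", format="table", append=True)
        print(pd.read_hdf(path, "df"))
        os.remove(path)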
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
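    # Hedged sketch, not part of the original suite: the user-facing pattern the doc
    # examples above exercise -- declare data_columns on append, then filter with a
    # `where` string in select (file name is illustrative).
    @staticmethod
    def _sketch_data_columns_query(path="example_query.h5"):
        df = pd.DataFrame(
            {"A": np.arange(10.0), "string": ["foo"] * 5 + ["bar"] * 5},
            index=date_range("2000-01-01", periods=10),
        )
        with HDFStore(path, mode="w") as store:
            store.append("df", df, data_columns=["A", "string"])
            print(store.select("df", "A > 5 and string == 'bar'"))
        os.remove(path)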
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 =
|
DataFrame({"a": [1, 2, 3]}, dtype="i8")
|
pandas.DataFrame
|
from __future__ import annotations
from typing import Any, cast, Generator, Iterable, Optional, TYPE_CHECKING, Union
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from tanuki.data_store.data_type import DataType
from tanuki.data_store.index.index import Index
from tanuki.data_store.index.pandas_index import PandasIndex
from tanuki.database.data_token import DataToken
from .data_backend import DataBackend, ILocIndexer, LocIndexer
if TYPE_CHECKING:
from tanuki.data_store.index.index_alias import IndexAlias
from tanuki.data_store.query import Query
class PandasBackend(DataBackend):
_data: DataFrame
_index: PandasIndex
_loc: _LocIndexer
_iloc: _ILocIndexer
def __init__(
self,
data: Optional[Union[Series, DataFrame, dict[str, list]]] = None,
index: Optional[PandasIndex] = None,
) -> None:
if data is None:
self._data = DataFrame(dtype="object")
elif type(data) is Series:
self._data = cast(Series, data).to_frame().transpose()
elif type(data) is DataFrame:
self._data = DataFrame(data)
elif type(data) is dict:
sample_value = next(iter(data.values()))
if not isinstance(sample_value, Iterable) or isinstance(sample_value, str):
self._data = Series(data).to_frame().transpose()
else:
self._data =
|
DataFrame(data)
|
pandas.core.frame.DataFrame
|
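# --- Illustrative sketch (plain pandas, no tanuki dependency assumed) ---
# The PandasBackend constructor above treats a dict of scalars as a single row
# (via Series(...).to_frame().transpose()) and a dict of lists as a full frame.
# The variable names below are made up for the example.
import pandas as pd

scalar_dict = {"a": 1, "b": "x"}
list_dict = {"a": [1, 2], "b": ["x", "y"]}

single_row = pd.Series(scalar_dict).to_frame().transpose()  # shape (1, 2)
multi_row = pd.DataFrame(list_dict)                         # shape (2, 2)
print(single_row.shape, multi_row.shape)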
import numpy as np
import pandas as pd
from numpy import nan
from pvlib import modelchain, pvsystem
from pvlib.modelchain import ModelChain
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.location import Location
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pytest
from test_pvsystem import sam_data
from conftest import requires_scipy
@pytest.fixture
def system(sam_data):
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_'].copy()
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_snl_ac_system(sam_data):
modules = sam_data['cecmod']
module_parameters = modules['Canadian_Solar_CS5P_220M'].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_snl_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_pvwatts_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverter_parameters = {'eta_inv_nom': 0.95}
system = PVSystem(module_parameters=module_parameters,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture()
def location():
return Location(32.2, -111, altitude=700)
def test_ModelChain_creation(system, location):
mc = ModelChain(system, location)
def test_orientation_strategy(system, location):
strategies = {}
@pytest.mark.parametrize('strategy,expected', [
(None, (0, 180)), ('None', (0, 180)), ('flat', (0, 180)),
('south_at_latitude_tilt', (32.2, 180))
])
def test_orientation_strategy(strategy, expected, system, location):
mc = ModelChain(system, location, orientation_strategy=strategy)
# the `or` accounts for the coercion of 'None' to None
assert (mc.orientation_strategy == strategy or
mc.orientation_strategy is None)
assert system.surface_tilt == expected[0]
assert system.surface_azimuth == expected[1]
@requires_scipy
def test_run_model(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 1.82033564e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=2)
def test_run_model_with_irradiance(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, irradiance=irradiance).ac
expected = pd.Series(np.array([ 1.90054749e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_perez(system, location):
mc = ModelChain(system, location, transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, irradiance=irradiance).ac
expected = pd.Series(np.array([ 190.194545796, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_gueymard_perez(system, location):
mc = ModelChain(system, location, airmass_model='gueymard1993',
transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, irradiance=irradiance).ac
expected = pd.Series(np.array([ 190.194760203, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
@requires_scipy
def test_run_model_with_weather(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
weather = pd.DataFrame({'wind_speed':5, 'temp_air':10}, index=times)
ac = mc.run_model(times, weather=weather).ac
expected = pd.Series(np.array([ 1.99952400e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=2)
@requires_scipy
def test_run_model_tracker(system, location):
system = SingleAxisTracker(module_parameters=system.module_parameters,
inverter_parameters=system.inverter_parameters)
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 121.421719, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=2)
expected = pd.DataFrame(
np.array([[ 54.82513187, 90. , 11.0039221 , 11.0039221 ],
[ nan, 0. , 0. , nan]]),
columns=['aoi', 'surface_azimuth', 'surface_tilt', 'tracker_theta'],
index=times)
assert_frame_equal(mc.tracking, expected, check_less_precise=2)
def poadc(mc):
mc.dc = mc.total_irrad['poa_global'] * 0.2
mc.dc.name = None # assert_series_equal will fail without this
@requires_scipy
@pytest.mark.parametrize('dc_model,expected', [
('sapm', [180.13735116, -2.00000000e-02]),
('singlediode', [179.7178188, -2.00000000e-02]),
('pvwatts', [188.400994862, 0]),
(poadc, [187.361841505, 0]) # user supplied function
])
def test_dc_models(system, cec_dc_snl_ac_system, pvwatts_dc_pvwatts_ac_system,
location, dc_model, expected):
dc_systems = {'sapm': system, 'singlediode': cec_dc_snl_ac_system,
'pvwatts': pvwatts_dc_pvwatts_ac_system,
poadc: pvwatts_dc_pvwatts_ac_system}
system = dc_systems[dc_model]
mc = ModelChain(system, location, dc_model=dc_model,
aoi_model='no_loss', spectral_model='no_loss')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array(expected), index=times)
assert_series_equal(ac, expected, check_less_precise=2)
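# --- Illustrative sketch (not a test) ---
# As the `poadc` entry in the parametrization above shows, `dc_model` may be a
# user-supplied callable that receives the ModelChain and assigns `mc.dc` itself.
# The 20% POA-to-DC scaling below is an arbitrary assumption for illustration.
def fixed_efficiency_dc(mc):
    mc.dc = mc.total_irrad['poa_global'] * 0.2
    mc.dc.name = None  # keep assert_series_equal happy, as noted for poadc

# Usage, mirroring test_dc_models (fixtures supply `pvwatts_dc_pvwatts_ac_system`
# and `location`):
# mc = ModelChain(pvwatts_dc_pvwatts_ac_system, location,
#                 dc_model=fixed_efficiency_dc,
#                 aoi_model='no_loss', spectral_model='no_loss')
# ac = mc.run_model(pd.date_range('20160101 1200-0700', periods=2, freq='6H')).ac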
def acdc(mc):
mc.ac = mc.dc
@requires_scipy
@pytest.mark.parametrize('ac_model,expected', [
('snlinverter', [180.13735116, -2.00000000e-02]),
pytest.mark.xfail(raises=NotImplementedError)
(('adrinverter', [179.7178188, -2.00000000e-02])),
('pvwatts', [188.400994862, 0]),
(acdc, [198.11956073, 0]) # user supplied function
])
def test_ac_models(system, cec_dc_snl_ac_system, pvwatts_dc_pvwatts_ac_system,
location, ac_model, expected):
ac_systems = {'snlinverter': system, 'adrinverter': cec_dc_snl_ac_system,
'pvwatts': pvwatts_dc_pvwatts_ac_system,
acdc: pvwatts_dc_pvwatts_ac_system}
system = ac_systems[ac_model]
mc = ModelChain(system, location, ac_model=ac_model,
aoi_model='no_loss', spectral_model='no_loss')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array(expected), index=times)
|
assert_series_equal(ac, expected, check_less_precise=2)
|
pandas.util.testing.assert_series_equal
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Data Commons Python Client API unit tests.
Unit tests for core methods in the Data Commons Python Client API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pandas.util.testing import assert_series_equal, assert_frame_equal
from unittest import mock
import datacommons as dc
import datacommons.utils as utils
import pandas as pd
import json
import unittest
def post_request_mock(*args, **kwargs):
""" A mock POST requests sent in the requests package. """
# Create the mock response object.
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
# Get the request json
req = kwargs['json']
headers = kwargs['headers']
# If the API key does not match, then return 403 Forbidden
if 'x-api-key' not in headers or headers['x-api-key'] != 'TEST-API-KEY':
return MockResponse({}, 403)
# Mock responses for post requests to get_property_labels.
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_property_labels']:
if req['dcids'] == ['geoId/0649670']:
# Response for sending a single dcid to get_property_labels
out_arcs = ['containedInPlace', 'name', 'geoId', 'typeOf']
res_json = json.dumps({
'geoId/0649670': {
'inLabels': [],
'outLabels': out_arcs
}
})
return MockResponse({"payload": res_json}, 200)
elif req['dcids'] == ['State', 'County', 'City']:
# Response for sending multiple dcids to get_property_labels
in_arcs = ['typeOf']
out_arcs = ['name', 'provenance', 'subClassOf', 'typeOf', 'url']
res_json = json.dumps({
'City': {'inLabels': in_arcs, 'outLabels': out_arcs},
'County': {'inLabels': in_arcs, 'outLabels': out_arcs},
'State': {'inLabels': in_arcs, 'outLabels': out_arcs}
})
return MockResponse({'payload': res_json}, 200)
elif req['dcids'] == ['dc/MadDcid']:
# Response for sending a dcid that doesn't exist to get_property_labels
res_json = json.dumps({
'dc/MadDcid': {
'inLabels': [],
'outLabels': []
}
})
return MockResponse({'payload': res_json}, 200)
elif req['dcids'] == []:
# Response for sending no dcids to get_property_labels
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Mock responses for post requests to get_property_values
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_property_values']:
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'containedInPlace'\
and req['value_type'] == 'Town':
# Response for sending a request for getting Towns containedInPlace of
# Santa Clara County and Montgomery County.
res_json = json.dumps({
'geoId/06085': {
'in': [
{
'dcid': 'geoId/0644112',
'name': 'Los Gatos',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
{
'dcid': 'geoId/0643294',
'name': '<NAME>',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
}
],
'out': []
},
'geoId/24031': {
'in': [
{
'dcid': 'geoId/2462850',
'name': 'Poolesville',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'name':
# Response for sending a request for the name of multiple dcids.
res_json = json.dumps({
'geoId/06085': {
'in': [],
'out': [
{
'value': 'Santa Clara County',
'provenanceId': 'dc/sm3m2w3',
},
]
},
'geoId/24031': {
'in': [],
'out': [
{
'value': 'Montgomery County',
'provenanceId': 'dc/sm3m2w3',
},
]
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'madProperty':
# Response for sending a request with a property that does not exist.
res_json = json.dumps({
'geoId/06085': {
'in': [],
'out': []
},
'geoId/24031': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'dc/MadDcid']\
and req['property'] == 'containedInPlace':
# Response for sending a request with a single dcid that does not exist.
res_json = json.dumps({
'geoId/06085': {
'in': [
{
'dcid': 'geoId/0644112',
'name': '<NAME>',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
],
'out': []
},
'dc/MadDcid': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/MadDcid', 'dc/MadderDcid']:
# Response for sending a request where both dcids do not exist.
res_json = json.dumps({
'dc/MadDcid': {
'in': [],
'out': []
},
'dc/MadderDcid': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == [] and req['property'] == 'containedInPlace':
# Response for sending a request where no dcids are given.
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Mock responses for post requests to get_triples
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_triples']:
if req['dcids'] == ['geoId/06085', 'geoId/24031']:
# Response for sending a request with two valid dcids.
res_json = json.dumps({
'geoId/06085': [
{
"subjectId": "geoId/06085",
"predicate": "name",
"objectValue": "Santa Clara County"
},
{
"subjectId": "geoId/0649670",
"subjectName": "Mountain View",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/06085",
"objectName": "Santa Clara County"
},
{
"subjectId": "geoId/06085",
"predicate": "containedInPlace",
"objectId": "geoId/06",
"objectName": "California"
},
],
'geoId/24031': [
{
"subjectId": "geoId/24031",
"predicate": "name",
"objectValue": "Montgomery County"
},
{
"subjectId": "geoId/2467675",
"subjectName": "Rockville",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/24031",
"objectName": "Montgomery County"
},
{
"subjectId": "geoId/24031",
"predicate": "containedInPlace",
"objectId": "geoId/24",
"objectName": "Maryland"
},
]
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'dc/MadDcid']:
# Response for sending a request where one dcid does not exist.
res_json = json.dumps({
'geoId/06085': [
{
"subjectId": "geoId/06085",
"predicate": "name",
"objectValue": "Santa Clara County"
},
{
"subjectId": "geoId/0649670",
"subjectName": "Mountain View",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/06085",
"objectName": "Santa Clara County"
},
{
"subjectId": "geoId/06085",
"predicate": "containedInPlace",
"objectId": "geoId/06",
"objectName": "California"
},
],
'dc/MadDcid': []
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/MadDcid', 'dc/MadderDcid']:
# Response for sending a request where both dcids do not exist.
res_json = json.dumps({
'dc/MadDcid': [],
'dc/MadderDcid': []
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == []:
# Response for sending a request where no dcids are given.
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Otherwise, return an empty response and a 404.
return MockResponse({}, 404)
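# --- Illustrative sketch (not part of the original test suite) ---
# The MockResponse pattern above is consumed by patching `requests.post`, so any
# client call made inside the patched block receives the canned payloads instead
# of hitting the network. `dc` and `post_request_mock` come from this module;
# `_example_usage` is a made-up helper name.
from unittest import mock

def _example_usage():
    with mock.patch('requests.post', side_effect=post_request_mock):
        dc.set_api_key('TEST-API-KEY')
        return dc.get_property_labels(['geoId/0649670'])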
class TestGetPropertyLabels(unittest.TestCase):
""" Unit tests for get_property_labels. """
@mock.patch('requests.post', side_effect=post_request_mock)
def test_single_dcid(self, post_mock):
""" Calling get_property_labels with a single dcid returns a valid
result.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels(['geoId/0649670'])
self.assertDictEqual(out_props,
{'geoId/0649670': ["containedInPlace", "name", "geoId", "typeOf"]})
# Test with out=False
in_props = dc.get_property_labels(['geoId/0649670'], out=False)
self.assertDictEqual(in_props, {'geoId/0649670': []})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_multiple_dcids(self, post_mock):
""" Calling get_property_labels returns valid results with multiple
dcids.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
dcids = ['State', 'County', 'City']
expected_in = ["typeOf"]
expected_out = ["name", "provenance", "subClassOf", "typeOf", "url"]
# Test for outgoing property labels
out_props = dc.get_property_labels(dcids)
self.assertDictEqual(out_props, {
'State': expected_out,
'County': expected_out,
'City': expected_out,
})
# Test for incoming property labels
in_props = dc.get_property_labels(dcids, out=False)
self.assertDictEqual(in_props, {
'State': expected_in,
'County': expected_in,
'City': expected_in,
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_dcids(self, post_mock):
""" Calling get_property_labels with dcids that do not exist returns empty
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels(['dc/MadDcid'])
self.assertDictEqual(out_props, {'dc/MadDcid': []})
# Test for incoming property labels
in_props = dc.get_property_labels(['dc/MadDcid'], out=False)
self.assertDictEqual(in_props, {'dc/MadDcid': []})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_no_dcids(self, post_mock):
""" Calling get_property_labels with no dcids returns empty results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels([])
self.assertDictEqual(out_props, {})
# Test for incoming property labels
in_props = dc.get_property_labels([], out=False)
self.assertDictEqual(in_props, {})
class TestGetPropertyValues(unittest.TestCase):
""" Unit tests for get_property_values. """
# --------------------------- STANDARD UNIT TESTS ---------------------------
@mock.patch('requests.post', side_effect=post_request_mock)
def test_multiple_dcids(self, post_mock):
""" Calling get_property_values with multiple dcids returns valid
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
dcids = ['geoId/06085', 'geoId/24031']
# Get the containedInPlace Towns for Santa Clara and Montgomery County.
towns = dc.get_property_values(
dcids, 'containedInPlace', out=False, value_type='Town')
self.assertDictEqual(towns, {
'geoId/06085': ['geoId/0643294', 'geoId/0644112'],
'geoId/24031': ['geoId/2462850']
})
# Get the name of Santa Clara and Montgomery County.
names = dc.get_property_values(dcids, 'name')
self.assertDictEqual(names, {
'geoId/06085': ['Santa Clara County'],
'geoId/24031': ['Montgomery County']
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_dcids(self, post_mock):
""" Calling get_property_values with dcids that do not exist returns empty
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
bad_dcids_1 = ['geoId/06085', 'dc/MadDcid']
bad_dcids_2 = ['dc/MadDcid', 'dc/MadderDcid']
# Get entities containedInPlace of Santa Clara County and a dcid that does
# not exist.
contained_1 = dc.get_property_values(bad_dcids_1, 'containedInPlace', out=False)
self.assertDictEqual(contained_1, {
'geoId/06085': ['geoId/0644112'],
'dc/MadDcid': []
})
# Get entities containedInPlace for two dcids that do not exist.
contained_2 = dc.get_property_values(bad_dcids_2, 'containedInPlace')
self.assertDictEqual(contained_2, {
'dc/MadDcid': [],
'dc/MadderDcid': []
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_property(self, post_mock):
""" Calling get_property_values with a property that does not exist returns
empty results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Get property values for a property that does not exist.
prop_vals = dc.get_property_values(
['geoId/06085', 'geoId/24031'], 'madProperty')
self.assertDictEqual(prop_vals, {
'geoId/06085': [],
'geoId/24031': []
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_no_dcids(self, post_mock):
""" Calling get_property_values with no dcids returns empty results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Get property values with an empty list of dcids.
prop_vals = dc.get_property_values([], 'containedInPlace')
self.assertDictEqual(prop_vals, {})
# ---------------------------- PANDAS UNIT TESTS ----------------------------
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series(self, post_mock):
""" Calling get_property_values with a Pandas Series returns the correct
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# The given and expected series.
dcids = pd.Series(['geoId/06085', 'geoId/24031'])
expected = pd.Series([
['geoId/0643294', 'geoId/0644112'],
['geoId/2462850']
])
# Call get_property_values with the series as input
actual = dc.get_property_values(
dcids, 'containedInPlace', out=False, value_type='Town')
assert_series_equal(actual, expected)
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series_bad_dcids(self, post_mock):
""" Calling get_property_values with a Pandas Series and dcids that does not
exist resturns an empty result.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# The given and expected series
bad_dcids_1 = pd.Series(['geoId/06085', 'dc/MadDcid'])
bad_dcids_2 = pd.Series(['dc/MadDcid', 'dc/MadderDcid'])
expected_1 = pd.Series([['geoId/0644112'], []])
expected_2 = pd.Series([[], []])
# Call get_property_values with series as input
actual_1 = dc.get_property_values(bad_dcids_1, 'containedInPlace', out=False)
actual_2 = dc.get_property_values(bad_dcids_2, 'containedInPlace', out=False)
# Assert the results are correct
assert_series_equal(actual_1, expected_1)
assert_series_equal(actual_2, expected_2)
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series_bad_property(self, post_mock):
""" Calling get_property_values with a Pandas Series and a property that
does not exist returns an empty result.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# The input and expected series
dcids = pd.Series(['geoId/06085', 'geoId/24031'])
expected = pd.Series([[], []])
# Call get_property_values and assert the results are correct.
actual = dc.get_property_values(dcids, 'madProperty')
assert_series_equal(actual, expected)
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series_no_dcid(self, post_mock):
# The input and expected series
dcids = pd.Series([])
expected = pd.Series([])
# Call get_property_values and assert the results are correct.
actual = dc.get_property_values(dcids, 'containedInPlace')
assert_series_equal(actual, expected)
@mock.patch('requests.post', side_effect=post_request_mock)
def test_dataframe(self, post_mock):
""" Calling get_property_values with a Pandas DataFrame returns the correct
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# The given and expected series.
dcids = pd.DataFrame({'dcids': ['geoId/06085', 'geoId/24031']})
expected = pd.Series([
['geoId/0643294', 'geoId/0644112'],
['geoId/2462850']
])
# Call get_property_values with the series as input
actual = dc.get_property_values(
dcids, 'containedInPlace', out=False, value_type='Town')
|
assert_series_equal(actual, expected)
|
pandas.util.testing.assert_series_equal
|
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import tensorlayer as tl
from sklearn import preprocessing
from scipy.stats import spearmanr
import pandas as pd
'''
This code comes from Basset: Deep convolutional neural networks for DNA sequence analysis
https://github.com/davek44/Basset/blob/master/src/basset_motifs.py
I made some modifications to fit my own dataset.
'''
def get_motif_proteins(meme_db_files):
''' Hash motif_id's to protein names using the MEME DB file '''
motif_protein = {}
for meme_db_file in meme_db_files:
for line in open(meme_db_file):
a = line.split()
if len(a) > 0 and a[0] == 'MOTIF':
if len(a) == 2:
motif_protein[a[1]] = a[1]
continue
if a[2][0] == '(':
motif_protein[a[1]] = a[2][1:a[2].find(')')]
else:
motif_protein[a[1]] = a[2]
return motif_protein
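# --- Illustrative sketch (made-up motif IDs/protein names) ---
# Each 'MOTIF <id> <protein>' line maps an ID to a protein name; a parenthesised
# second field like '(CTCF)' is stripped of its parentheses, and an ID with no
# protein maps to itself. `_demo_get_motif_proteins` is a hypothetical helper.
import os
import tempfile

def _demo_get_motif_proteins():
    lines = [
        "MEME version 4\n",
        "MOTIF M0001 TP53\n",
        "MOTIF M0002 (CTCF) DNA-binding factor\n",
        "MOTIF M0003\n",
    ]
    with tempfile.NamedTemporaryFile('w', suffix='.meme', delete=False) as tmp:
        tmp.writelines(lines)
        path = tmp.name
    try:
        # -> {'M0001': 'TP53', 'M0002': 'CTCF', 'M0003': 'M0003'}
        return get_motif_proteins([path])
    finally:
        os.remove(path)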
def info_content(pwm, transpose=False, bg_gc=0.415):
''' Compute PWM information content.
In the original analysis, I used a bg_gc=0.5. For any
future analysis, I ought to switch to the true hg19
value of 0.415.
'''
pseudoc = 1e-9
if transpose:
pwm = np.transpose(pwm)
bg_pwm = [1-bg_gc, bg_gc, bg_gc, 1-bg_gc]
ic = 0
for i in range(pwm.shape[0]):
for j in range(4):
# ic += 0.5 + pwm[i][j]*np.log2(pseudoc+pwm[i][j])
ic += -bg_pwm[j]*np.log2(bg_pwm[j]) + pwm[i][j]*np.log2(pseudoc+pwm[i][j])
return ic
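# --- Illustrative sketch (hypothetical helper name) ---
# Per the loop above, IC sums -bg_j*log2(bg_j) + p_ij*log2(p_ij + eps) over
# positions i and bases j, so a one-hot (fully determined) column scores higher
# than a uniform, uninformative one.
import numpy as np

def _demo_info_content():
    one_hot = np.array([[1.0, 0.0, 0.0, 0.0]])      # column fixed to one base
    uniform = np.array([[0.25, 0.25, 0.25, 0.25]])  # maximally uncertain column
    return info_content(one_hot), info_content(uniform)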
def make_filter_pwm(filter_fasta):
''' Make a PWM for this filter from its top hits '''
nts = {'A':0, 'T':1, 'C':2, 'G':3}
pwm_counts = []
nsites = 4 # pseudocounts
for line in open(filter_fasta):
if line[0] != '>':
seq = line.rstrip()
nsites += 1
if len(pwm_counts) == 0:
# initialize with the length
for i in range(len(seq)):
pwm_counts.append(np.array([1.0]*4))
# count
for i in range(len(seq)):
try:
pwm_counts[i][nts[seq[i]]] += 1
except KeyError:
pwm_counts[i] += np.array([0.25]*4)
# normalize
pwm_freqs = []
for i in range(len(pwm_counts)):
pwm_freqs.append([pwm_counts[i][j]/float(nsites) for j in range(4)])
return np.array(pwm_freqs), nsites-4
def plot_filter_heat(param_matrix, out_pdf):
param_range = abs(param_matrix).max()
sns.set(font_scale=2)
plt.figure(figsize=(param_matrix.shape[1], 4))
sns.heatmap(param_matrix, cmap='PRGn', linewidths=0.2, vmin=-param_range, vmax=param_range)
ax = plt.gca()
ax.set_xticklabels(range(1,param_matrix.shape[1]+1))
ax.set_yticklabels('ATCG', rotation='horizontal') # , size=10)
plt.savefig(out_pdf)
plt.close()
def filter_possum(param_matrix, motif_id, possum_file, trim_filters=False, mult=200):
# possible trim
trim_start = 0
trim_end = param_matrix.shape[1]-1
trim_t = 0.3
if trim_filters:
# trim PWM of uninformative prefix
while trim_start < param_matrix.shape[1] and np.max(param_matrix[:,trim_start]) - np.min(param_matrix[:,trim_start]) < trim_t:
trim_start += 1
# trim PWM of uninformative suffix
while trim_end >= 0 and np.max(param_matrix[:,trim_end]) - np.min(param_matrix[:,trim_end]) < trim_t:
trim_end -= 1
if trim_start < trim_end:
fp = open(possum_file, 'w')
fp.write('BEGIN GROUP\n')
fp.write('BEGIN FLOAT\n')
fp.write('ID %s\n' % motif_id)
fp.write('AP DNA\n')
fp.write('LE %d\n' % (trim_end+1-trim_start))
for ci in range(trim_start,trim_end+1):
fp.write('MA %s\n' % ' '.join(['%.2f'%(mult*n) for n in param_matrix[:,ci]]))
fp.write('END\n')
fp.write('END\n')
fp.close()
import subprocess
weblogo_opts = '-X NO -Y NO --errorbars NO --fineprint "" --resolution 600'
weblogo_opts += ' -C "#CB2026" A A'
weblogo_opts += ' -C "#34459C" C C'
weblogo_opts += ' -C "#FBB116" G G'
weblogo_opts += ' -C "#0C8040" T T'
def plot_filter_logo(filter_outs, filter_size, seqs, out_prefix, raw_t=0, maxpct_t=None):
#tl.files.exists_or_mkdir(out_prefix)
#name = out_prefix + out_prefix.split('/')[-1]
if maxpct_t:
all_outs = np.ravel(filter_outs)
all_outs_mean = all_outs.mean()
all_outs_norm = all_outs - all_outs_mean
raw_t = maxpct_t * all_outs_norm.max() + all_outs_mean
# SAME padding
pad_side = (filter_size - 1) // 2
# print fasta file of positive outputs
fp = open('%s.fa' % out_prefix, 'w')
filter_count = 0
for i in range(filter_outs.shape[0]):
for j in range(pad_side, filter_outs.shape[1]-pad_side):
if filter_outs[i,j] > raw_t:
js = (j - pad_side)
kmer = seqs[i][js:js+filter_size]
if len(kmer.strip()) < filter_size:
continue
fp.write('>%d_%d\n' % (i,j))
fp.write(kmer+'\n')
filter_count += 1
fp.close()
'''
# make weblogo
if filter_count > 0:
meme_cmd = f'meme {out_prefix}.fa -dna -oc {out_prefix} -nostatus -time 18000 -mod zoops -nmotifs 2 -minw 6 -maxw 50 -objfun classic -revcomp -markov_order 0'
#meme_cmd = 'meme %s.fa -dna -mod zoops -pal -o %s -nmotifs 2'%(out_prefix, out_prefix)
#weblogo_cmd = 'weblogo %s < %s.fa > %s.eps&' % (weblogo_opts, out_prefix, out_prefix)
#print(weblogo_cmd)
subprocess.call(meme_cmd, shell=True)
'''
def meme_add(meme_out, f, filter_pwm, nsites, trim_filters=False):
''' Print a filter to the growing MEME file
Attrs:
meme_out : open file
f (int) : filter index #
filter_pwm (array) : filter PWM array
nsites (int) : number of filter sites
'''
if not trim_filters:
ic_start = 0
ic_end = filter_pwm.shape[0]-1
else:
ic_t = 0.2
# trim PWM of uninformative prefix
ic_start = 0
while ic_start < filter_pwm.shape[0] and info_content(filter_pwm[ic_start:ic_start+1]) < ic_t:
ic_start += 1
# trim PWM of uninformative suffix
ic_end = filter_pwm.shape[0]-1
while ic_end >= 0 and info_content(filter_pwm[ic_end:ic_end+1]) < ic_t:
ic_end -= 1
if ic_start < ic_end:
meme_out.write('MOTIF filter%d\n' % f)
meme_out.write('letter-probability matrix: alength= 4 w= %d nsites= %d\n' % (ic_end-ic_start+1, nsites))
for i in range(ic_start, ic_end+1):
meme_out.write('%.4f %.4f %.4f %.4f\n' % tuple(filter_pwm[i]))
meme_out.write('\n')
def meme_intro(meme_file, seqs):
''' Open MEME motif format file and print intro
Attrs:
meme_file (str) : filename
seqs [str] : list of strings for obtaining background freqs
Returns:
mem_out : open MEME file
'''
nts = {'A':0, 'T':1, 'C':2, 'G':3}
# count
nt_counts = [1]*4
for i in range(len(seqs)):
for nt in seqs[i]:
try:
nt_counts[nts[nt]] += 1
except KeyError:
pass
# normalize
nt_sum = float(sum(nt_counts))
nt_freqs = [nt_counts[i]/nt_sum for i in range(4)]
# open file for writing
meme_out = open(meme_file, 'w')
# print intro material
meme_out.write('MEME version 4\n')
meme_out.write('\n')
meme_out.write('ALPHABET= ACGT\n')
meme_out.write('\n')
meme_out.write('Background letter frequencies:\n')
meme_out.write('A %.4f C %.4f G %.4f T %.4f\n' % tuple(nt_freqs))
meme_out.write('\n')
return meme_out
def name_filters(num_filters, tomtom_file, meme_db_file):
''' Name the filters using Tomtom matches.
Attrs:
num_filters (int) : total number of filters
tomtom_file (str) : filename of Tomtom output table.
meme_db_file (str) : filename of MEME db
Returns:
filter_names [str] :
'''
# name by number
#filter_names = ['f%d'%fi for fi in range(num_filters)]
# name by protein
if tomtom_file is not None and meme_db_file is not None:
motif_protein = get_motif_proteins(meme_db_file)
# hash motifs and q-value's by filter
filter_motifs = []
try:
tt_in = open(tomtom_file)
except:
return ''
for line in tt_in:
a = line.split()
if len(a)==0 or a[0].startswith('Q') or a[0].startswith('#'):
continue
#fi = int(a[0][6:])
print(tomtom_file)
print(a)
motif_id = a[1]
pval = float(a[3])
evals = float(a[4])
qval = float(a[5])
filter_motifs.append((evals, pval, qval,motif_id))
tt_in.close()
# assign filter's best match
try:
tmp = sorted(filter_motifs)[0]
top_motif = tmp[-1]
pval = tmp[1]
evals = tmp[0]
qval = tmp[2]
filter_names = 'test_%s_%f_%f_%f' % (motif_protein[top_motif], pval, evals, qval)
except:
filter_names = ''#'test_test_test_test_test'
return filter_names
################################################################################
# plot_target_corr
#
# Plot a clustered heatmap of correlations between filter activations and
# targets.
#
# Input
# filter_outs:
# filter_names:
# target_names:
# out_pdf:
################################################################################
def plot_target_corr(filter_outs, seq_targets, filter_names, target_names, out_pdf, seq_op='mean'):
num_seqs = filter_outs.shape[0]
num_targets = len(target_names)
if seq_op == 'mean':
filter_outs_seq = filter_outs.mean(axis=2)
else:
filter_outs_seq = filter_outs.max(axis=2)
# std is sequence by filter.
filter_seqs_std = filter_outs_seq.std(axis=0)
filter_outs_seq = filter_outs_seq[:,filter_seqs_std > 0]
filter_names_live = filter_names[filter_seqs_std > 0]
filter_target_cors = np.zeros((len(filter_names_live),num_targets))
for fi in range(len(filter_names_live)):
for ti in range(num_targets):
cor, p = spearmanr(filter_outs_seq[:,fi], seq_targets[:num_seqs,ti])
filter_target_cors[fi,ti] = cor
cor_df =
|
pd.DataFrame(filter_target_cors, index=filter_names_live, columns=target_names)
|
pandas.DataFrame
|
"""Class managing the model inference
"""
import os
import os.path as osp
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
import scipy.stats
import torch
import torchvision.transforms as transforms
from torch.utils.data.sampler import WeightedRandomSampler
from torch_geometric.data import DataLoader
from n2j.trainval_data.graphs.cosmodc2_graph import CosmoDC2Graph
import n2j.models as models
import n2j.inference.infer_utils as iutils
import matplotlib.pyplot as plt
import corner
from n2j.trainval_data.utils.transform_utils import (ComposeXYLocal,
Standardizer,
Slicer,
MagErrorSimulatorTorch,
Rejector,
get_bands_in_x,
get_idx)
import n2j.inference.summary_stats_baseline as ssb
import n2j.inference.calibration as calib
class InferenceManager:
def __init__(self, device_type, checkpoint_dir, out_dir, seed=123):
"""Inference tool
Parameters
----------
device_type : str
checkpoint_dir : os.path or str
training checkpoint_dir (same as one used to instantiate `Trainer`)
out_dir : os.path or str
output directory for inference results
"""
self.device_type = device_type
self.device = torch.device(self.device_type)
self.seed = seed
self.seed_everything()
self.checkpoint_dir = checkpoint_dir
os.makedirs(self.checkpoint_dir, exist_ok=True)
self.out_dir = out_dir
os.makedirs(self.out_dir, exist_ok=True)
self._include_los = slice(None) # do not exclude los from inference
def seed_everything(self):
"""Seed the training and sampling for reproducibility
"""
np.random.seed(self.seed)
random.seed(self.seed)
torch.manual_seed(self.seed)
torch.cuda.manual_seed(self.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def load_dataset(self, data_kwargs, is_train, batch_size,
sub_features=None, sub_target=None, sub_target_local=None,
rebin=False, num_workers=2,
noise_kwargs={'mag': {'override_kwargs': None,
'depth': 5}},
detection_kwargs={}):
"""Load dataset and dataloader for training or validation
Note
----
Should be called for training data first, to set the normalizing
stats used for both training and validation!
"""
self.num_workers = num_workers
if is_train:
self.batch_size = batch_size
else:
self.val_batch_size = batch_size
# X metadata
features = data_kwargs['features']
self.sub_features = sub_features if sub_features else features
self.X_dim = len(self.sub_features)
# Global y metadata
target = ['final_kappa', 'final_gamma1', 'final_gamma2']
self.sub_target = sub_target if sub_target else target
self.Y_dim = len(self.sub_target)
# Lobal y metadata
target_local = ['halo_mass', 'stellar_mass', 'redshift']
self.sub_target_local = sub_target_local if sub_target_local else target_local
self.Y_local_dim = len(self.sub_target_local)
dataset = CosmoDC2Graph(num_workers=self.num_workers, **data_kwargs)
############
# Training #
############
if is_train:
self.train_dataset = dataset
if osp.exists(osp.join(self.checkpoint_dir, 'stats.pt')):
stats = torch.load(osp.join(self.checkpoint_dir, 'stats.pt'))
else:
stats = self.train_dataset.data_stats
torch.save(stats, osp.join(self.checkpoint_dir, 'stats.pt'))
# Transforming X
idx = get_idx(features, self.sub_features)
self.X_mean = stats['X_mean'][:, idx]
self.X_std = stats['X_std'][:, idx]
slicing = Slicer(idx)
mag_idx, which_bands = get_bands_in_x(self.sub_features)
print(f"Mag errors added to {which_bands}")
magerr = MagErrorSimulatorTorch(mag_idx=mag_idx,
which_bands=which_bands,
**noise_kwargs['mag'])
magcut = Rejector(self.sub_features, **detection_kwargs)
norming = Standardizer(self.X_mean, self.X_std)
editing_X_meta = Metadata(self.sub_features, ['ra_true', 'dec_true'])
norming_X_meta = Standardizer(stats['X_meta_mean'],
stats['X_meta_std'])
# Transforming local Y
idx_Y_local = get_idx(target_local, self.sub_target_local)
self.Y_local_mean = stats['Y_local_mean'][:, idx_Y_local]
self.Y_local_std = stats['Y_local_std'][:, idx_Y_local]
slicing_Y_local = Slicer(idx_Y_local)
norming_Y_local = Standardizer(self.Y_local_mean,
self.Y_local_std)
# TODO: normalization is based on pre-magcut population
self.transform_X_Y_local = ComposeXYLocal([slicing, magerr],
[slicing_Y_local],
[magcut],
[norming],
[norming_Y_local],
[editing_X_meta, norming_X_meta])
# Transforming global Y
idx_Y = get_idx(target, self.sub_target)
self.Y_mean = stats['Y_mean'][:, idx_Y]
self.Y_std = stats['Y_std'][:, idx_Y]
slicing_Y = Slicer(idx_Y)
norming_Y = Standardizer(self.Y_mean, self.Y_std)
self.transform_Y = transforms.Compose([slicing_Y, norming_Y])
self.train_dataset.transform_X_Y_local = self.transform_X_Y_local
self.train_dataset.transform_Y = self.transform_Y
# Loading option 1: Subsample from a distribution
if data_kwargs['subsample_pdf_func'] is not None:
self.class_weight = None
train_subset = torch.utils.data.Subset(self.train_dataset,
stats['subsample_idx'])
self.train_dataset = train_subset
self.train_loader = DataLoader(self.train_dataset,
batch_size=batch_size,
shuffle=False, # no need here
num_workers=self.num_workers,
drop_last=True)
else:
# Loading option 2: Over/undersample according to inverse frequency
if rebin:
self.class_weight = stats['class_weight']
sampler = WeightedRandomSampler(stats['y_weight'],
num_samples=len(self.train_dataset))
self.train_loader = DataLoader(self.train_dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=self.num_workers,
drop_last=True)
# Loading option 3: No special sampling, just shuffle
else:
self.class_weight = None
self.train_loader = DataLoader(self.train_dataset,
batch_size=batch_size,
shuffle=False, # no need here
num_workers=self.num_workers,
drop_last=True)
print(f"Train dataset size: {len(self.train_dataset)}")
###################
# Validation/Test #
###################
else:
self.test_dataset = dataset
# Compute or retrieve stats necessary for resampling
# before setting any kind of transforms
# Note: stats_test.pt is in inference out_dir, not checkpoint_dir
if data_kwargs['subsample_pdf_func'] is not None:
stats_test_path = osp.join(self.out_dir, 'stats_test.pt')
if osp.exists(stats_test_path):
stats_test = torch.load(stats_test_path)
else:
stats_test = self.test_dataset.data_stats_valtest
torch.save(stats_test, stats_test_path)
self.test_dataset.transform_X_Y_local = self.transform_X_Y_local
self.test_dataset.transform_Y = self.transform_Y
self.set_valtest_loading(stats_test['subsample_idx'])
print(f"Test dataset size: {len(self.test_dataset)}")
def set_valtest_loading(self, sub_idx):
"""Set the loading options for val/test set. Should be called
whenever there are changes to the test dataset, to update the
dataloader.
Parameters
----------
sub_idx : array-like
Indices of test sightlines to keep (used to build the torch Subset)
"""
self.class_weight = None
test_subset = torch.utils.data.Subset(self.test_dataset,
sub_idx)
self.test_dataset = test_subset
self.test_loader = DataLoader(self.test_dataset,
batch_size=self.val_batch_size,
shuffle=False,
num_workers=self.num_workers,
drop_last=False)
def configure_model(self, model_name, model_kwargs={}):
self.model_name = model_name
self.model_kwargs = model_kwargs
self.model = getattr(models, model_name)(**self.model_kwargs)
self.model.to(self.device)
if self.class_weight is not None:
self.model.class_weight = self.class_weight.to(self.device)
n_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
print(f"Number of params: {n_params}")
def load_state(self, state_path):
"""Load the state dict of the past training
Parameters
----------
state_path : str or os.path object
path of the state dict to load
"""
state = torch.load(state_path,
map_location=torch.device(self.device_type))
self.model.load_state_dict(state['model'])
self.model.to(self.device)
self.epoch = state['epoch']
train_loss = state['train_loss']
val_loss = state['val_loss']
print("Loaded weights at {:s}".format(state_path))
print("Epoch [{}]: TRAIN Loss: {:.4f}".format(self.epoch, train_loss))
print("Epoch [{}]: VALID Loss: {:.4f}".format(self.epoch, val_loss))
self.last_saved_val_loss = val_loss
@property
def include_los(self):
"""Indices to include in inference. Useful when there are faulty
examples in the test set you want to exclude.
"""
return self._include_los
@include_los.setter
def include_los(self, value):
if value is None:
# Do nothing
return
value = list(value)
self._include_los = value
self.set_valtest_loading(value)
max_guess = max(value)
excluded = np.arange(max_guess)[~np.isin(np.arange(max_guess),
value)]
print(f"Assuming there were {max_guess+1} sightlines in test set, "
f" now excluding indices: {excluded}")
@property
def n_test(self):
return len(self.test_dataset)
@property
def bnn_kappa_path(self):
return osp.join(self.out_dir, 'k_bnn.npy')
def get_bnn_kappa(self, n_samples=50, n_mc_dropout=20, flatten=True):
"""Get the samples from the BNN
Parameters
----------
n_samples : int
number of samples per MC iterate
n_mc_dropout : int
number of MC iterates
Returns
-------
np.array of shape `[n_test, self.Y_dim, n_samples*n_mc_dropout]`
"""
if osp.exists(self.bnn_kappa_path):
samples = np.load(self.bnn_kappa_path)
if flatten:
samples = samples.reshape([self.n_test, self.Y_dim, -1])
return samples
# Fetch precomputed Y_mean, Y_std to de-standardize samples
Y_mean = self.Y_mean.to(self.device)
Y_std = self.Y_std.to(self.device)
self.model.eval()
with torch.no_grad():
samples = np.empty([self.n_test, n_mc_dropout, n_samples, self.Y_dim])
for i, batch in enumerate(self.test_loader):
batch = batch.to(self.device)
for mc_iter in range(n_mc_dropout):
x, u = self.model(batch)
B = u.shape[0] # [this batch size]
# Get pred samples for this MC iterate
self.model.global_nll.set_trained_pred(u)
mc_samples = self.model.global_nll.sample(Y_mean,
Y_std,
n_samples)
samples[i*B: (i+1)*B, mc_iter, :, :] = mc_samples
# Transpose dims to get [n_test, Y_dim, n_mc_dropout, n_samples]
samples = samples.transpose(0, 3, 1, 2)
np.save(self.bnn_kappa_path, samples)
if flatten:
samples = samples.reshape([self.n_test, self.Y_dim, -1])
return samples
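# --- Illustrative sketch of the reshaping done above ---
# Samples are collected as [n_test, n_mc_dropout, n_samples, Y_dim], transposed to
# [n_test, Y_dim, n_mc_dropout, n_samples], then (optionally) flattened so the two
# sampling axes merge into one. Shapes below are arbitrary toy values.
import numpy as np

n_test_demo, n_mc_demo, n_samp_demo, y_dim_demo = 3, 2, 5, 1
raw = np.random.randn(n_test_demo, n_mc_demo, n_samp_demo, y_dim_demo)
reordered = raw.transpose(0, 3, 1, 2)                  # [n_test, Y_dim, n_mc, n_samples]
flat = reordered.reshape(n_test_demo, y_dim_demo, -1)  # [n_test, Y_dim, n_mc*n_samples]
assert flat.shape == (3, 1, 10)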
@property
def true_train_kappa_path(self):
return osp.join(self.out_dir, 'k_train.npy')
@property
def train_summary_stats_path(self):
return osp.join(self.out_dir, 'summary_stats_train.npy')
@property
def true_test_kappa_path(self):
return osp.join(self.out_dir, 'k_test.npy')
@property
def test_summary_stats_path(self):
return osp.join(self.out_dir, 'summary_stats_test.npy')
@property
def matching_dir(self):
return osp.join(self.out_dir, 'matching')
@property
def log_p_k_given_omega_int_path(self):
return osp.join(self.out_dir, 'log_p_k_given_omega_int.npy')
@property
def reweighted_grid_dir(self):
return osp.join(self.out_dir, 'reweighted_grid')
@property
def reweighted_per_sample_dir(self):
return osp.join(self.out_dir, 'reweighted_per_sample')
@property
def reweighted_bnn_kappa_grid_path(self):
return osp.join(self.reweighted_grid_dir,
'k_bnn_reweighted_grid.npy')
@property
def reweighted_bnn_kappa_per_sample_path(self):
return osp.join(self.reweighted_per_sample_dir,
'k_bnn_reweighted_per_sample.npy')
def delete_previous(self):
"""Delete previously stored files related to the test set and
inference results, while leaving any training-set related caches,
which take longer to generate.
"""
import shutil
files = [self.true_test_kappa_path, self.test_summary_stats_path]
files += [self.bnn_kappa_path, self.log_p_k_given_omega_int_path]
files += [self.reweighted_bnn_kappa_grid_path]
files += [self.reweighted_bnn_kappa_per_sample_path]
for f in files:
if osp.exists(f):
print(f"Deleting {f}...")
os.remove(f)
dirs = [self.matching_dir]
dirs += [self.reweighted_grid_dir, self.reweighted_per_sample_dir]
for d in dirs:
if osp.exists(d):
print(f"Deleting {d} and all its contents...")
shutil.rmtree(d)
def get_true_kappa(self, is_train,
compute_summary=True, save=True):
"""Fetch true kappa (for train/val/test)
Parameters
----------
is_train : bool
Whether to get true kappas for train (test otherwise)
compute_summary : bool, optional
Whether to compute summary stats in the loop
save : bool, optional
Whether to store the kappa to disk
Returns
-------
np.ndarray
true kappas of shape `[n_data, Y_dim]`
"""
# Decide which dataset we're collecting kappa labels for
if is_train:
loader = self.train_loader
path = self.true_train_kappa_path
n_data = len(self.train_dataset)
ss_path = self.train_summary_stats_path
else:
loader = self.test_loader
path = self.true_test_kappa_path
n_data = self.n_test
ss_path = self.test_summary_stats_path
if osp.exists(path):
if compute_summary and osp.exists(ss_path):
true_kappa = np.load(path)
return true_kappa
print(f"Saving {path}...")
# Fetch precomputed Y_mean, Y_std to de-standardize samples
Y_mean = self.Y_mean.to(self.device)
Y_std = self.Y_std.to(self.device)
if compute_summary:
pos_indices = get_idx(self.sub_features,
['ra_true', 'dec_true'])
ss_obj = ssb.SummaryStats(n_data, pos_indices)
# Init empty array
true_kappa = np.empty([n_data, self.Y_dim])
with torch.no_grad():
# Populate `true_kappa` by batches
for i, batch in enumerate(loader):
# Update summary stats using CPU batch
if compute_summary:
ss_obj.update(batch, i)
batch = batch.to(self.device)
B = batch.y.shape[0]  # [this batch size]
true_kappa[i*B: (i+1)*B, :] = (batch.y*Y_std + Y_mean).cpu().numpy()
if save:
np.save(path, true_kappa)
if compute_summary:
ss_obj.export_stats(ss_path)
return true_kappa
def get_summary_stats(self, thresholds, interim_pdf_func=None,
match=True, min_matches=1000):
"""Save accepted samples from summary statistics matching
Parameters
----------
thresholds : dict
Matching thresholds for summary stats
Keys should be one or both of 'N' and 'N_inv_dist'.
"""
train_k = self.get_true_kappa(is_train=True,
compute_summary=True)
test_k = self.get_true_kappa(is_train=False,
compute_summary=True)
pos_indices = get_idx(self.sub_features,
['ra_true', 'dec_true'])
train_ss_obj = ssb.SummaryStats(len(self.train_dataset),
pos_indices)
train_ss_obj.set_stats(self.train_summary_stats_path)
test_ss_obj = ssb.SummaryStats(len(self.test_dataset),
pos_indices)
test_ss_obj.set_stats(self.test_summary_stats_path)
self.matcher = ssb.Matcher(train_ss_obj, test_ss_obj,
train_k,
self.matching_dir,
test_k)
if match:
self.matcher.match_summary_stats(thresholds, interim_pdf_func,
min_matches=min_matches)
overview = self.matcher.get_overview_table()
return overview
def get_log_p_k_given_omega_int(self, n_samples, n_mc_dropout,
interim_pdf_func):
"""Compute log(p_k|Omega_int) for BNN samples p_k
Parameters
----------
n_samples : int
Number of BNN samples per MC iterate per sightline
n_mc_dropout : int
Number of MC dropout iterates per sightline
interim_pdf_func : callable
Function that evaluates the PDF of the interim prior
Returns
-------
np.ndarray
Probabilities log(p_k|Omega_int) of
shape `[n_test, n_mc_dropout*n_samples]`
"""
if osp.exists(self.log_p_k_given_omega_int_path):
return np.load(self.log_p_k_given_omega_int_path)
k_train = self.get_true_kappa(is_train=True).squeeze(1)
k_bnn = self.get_bnn_kappa(n_samples=n_samples,
n_mc_dropout=n_mc_dropout).squeeze(1)
log_p_k_given_omega_int = iutils.get_log_p_k_given_omega_int_analytic(k_train=k_train,
k_bnn=k_bnn,
interim_pdf_func=interim_pdf_func)
np.save(self.log_p_k_given_omega_int_path, log_p_k_given_omega_int)
return log_p_k_given_omega_int
def get_log_p_k_given_omega_int_loop(self, interim_pdf_func, bnn=False,
ss_name='N'):
"""Compute log(p_k|Omega_int) for BNN or summary stats samples p_k.
Useful when the number of samples differs across sightlines, so
the computation is not trivially vectorizable.
Parameters
----------
interim_pdf_func : callable
Function that evaluates the PDF of the interim prior
bnn : bool, optional
Whether the samples are BNN's.
If False, understood to be summary stats matched samples.
ss_name : str, optional
Summary stats name. Only used if `bnn` is False.
Default: 'N'
"""
sample_type = 'bnn' if bnn else 'ss'
if bnn:
raise NotImplementedError("Use the vectorized version for BNN!")
path = osp.join(self.matching_dir,
f'log_p_k_given_omega_int_{sample_type}_list.npy')
if osp.exists(path):
return np.load(path)
log_p_k_given_omega_int_list = []
for i in range(self.n_test):
samples_i = self.matcher.get_samples(idx=i, ss_name=ss_name,
threshold=None)
samples_i = samples_i.reshape([1, -1]) # artificial n_test of 1
log_p_i = iutils.get_log_p_k_given_omega_int_analytic(k_train=None,
k_bnn=samples_i,
interim_pdf_func=interim_pdf_func)
# log_p_i ~ [1, len(samples_i)] so squeeze
log_p_k_given_omega_int_list.append(log_p_i.squeeze())
return log_p_k_given_omega_int_list
def run_mcmc_for_omega_post(self, n_samples, n_mc_dropout,
mcmc_kwargs, interim_pdf_func,
bounds_lower=-np.inf, bounds_upper=np.inf):
"""Run EMCEE to obtain the posterior on test hyperparams, omega
Parameters
----------
n_samples : int
Number of BNN samples per MC iterate per sightline
n_mc_dropout : int
Number of MC dropout iterates
mcmc_kwargs : dict
            Config going into `infer_utils.run_mcmc`
        interim_pdf_func : callable
            Function that evaluates the PDF of the interim prior
bounds_lower : np.ndarray or float, optional
Lower bound for target quantities
bounds_upper : np.ndarray or float, optional
Upper bound for target quantities
"""
k_bnn = self.get_bnn_kappa(n_samples=n_samples,
n_mc_dropout=n_mc_dropout)
log_p_k_given_omega_int = self.get_log_p_k_given_omega_int(n_samples,
n_mc_dropout,
interim_pdf_func)
iutils.get_omega_post(k_bnn, log_p_k_given_omega_int, mcmc_kwargs,
bounds_lower, bounds_upper)
def run_mcmc_for_omega_post_summary_stats(self, ss_name,
mcmc_kwargs,
interim_pdf_func,
bounds_lower=-np.inf,
bounds_upper=np.inf):
"""Run EMCEE to obtain the posterior on test hyperparams, omega
using the matched summary statistics samples, rather than BNN
posterior samples
Parameters
----------
ss_name : str
What kind of summary stats to query (one of 'N', 'N_inv_dist')
mcmc_kwargs : dict
            Config going into `infer_utils.run_mcmc`
        interim_pdf_func : callable
            Function that evaluates the PDF of the interim prior
bounds_lower : np.ndarray or float, optional
Lower bound for target quantities
bounds_upper : np.ndarray or float, optional
Upper bound for target quantities
"""
log_p_k_given_omega_int_list = self.get_log_p_k_given_omega_int_loop(interim_pdf_func,
bnn=False,
ss_name=ss_name)
samples = []
for i in range(self.n_test):
samples_i = self.matcher.get_samples(idx=i, ss_name=ss_name,
threshold=None)
samples.append(samples_i)
iutils.get_omega_post_loop(samples, log_p_k_given_omega_int_list, mcmc_kwargs,
bounds_lower, bounds_upper)
def get_kappa_log_weights(self, idx, n_samples=None, n_mc_dropout=None,
interim_pdf_func=None, grid=None):
"""Get log weights for reweighted kappa posterior per sample
Parameters
----------
idx : int
Index of sightline in test set
n_samples : int
Number of samples per dropout, for getting kappa samples.
(May be overridden with what was used previously, if
kappa samples were already drawn and stored)
n_mc_dropout : int
Number of dropout iterates, for getting kappa samples.
(May be overridden with what was used previously, if
kappa samples were already drawn and stored)
interim_pdf_func : callable
Function that returns the density of the interim prior
grid : None, optional
            Unused but kept for consistency with `get_kappa_log_weights_grid`
Returns
-------
np.ndarray
log weights for each of the BNN samples for this sightline
"""
os.makedirs(self.reweighted_per_sample_dir, exist_ok=True)
path = osp.join(self.reweighted_per_sample_dir,
f'log_weights_{idx}.npy')
k_bnn = self.get_bnn_kappa(n_samples=n_samples,
n_mc_dropout=n_mc_dropout)
log_p_k_given_omega_int = self.get_log_p_k_given_omega_int(n_samples,
n_mc_dropout,
interim_pdf_func)
# omega_post_samples = iutils.get_mcmc_samples(chain_path, chain_kwargs)
log_weights = iutils.get_kappa_log_weights(k_bnn[idx, :],
log_p_k_given_omega_int[idx, :])
np.save(path, log_weights)
return log_weights
def get_kappa_log_weights_grid(self, idx,
grid=None,
n_samples=None,
n_mc_dropout=None,
interim_pdf_func=None):
"""Get log weights for reweighted kappa posterior, analytically
on a grid
Parameters
----------
idx : int
Index of sightline in test set
grid : np.ndarray, optional
Grid of kappa values at which to evaluate log weights
(May be overridden with what was used previously, if
kappa samples were already drawn and stored)
n_samples : int, optional
Number of samples per dropout, for getting kappa samples.
(May be overridden with what was used previously, if
kappa samples were already drawn and stored)
n_mc_dropout : int, optional
Number of dropout iterates, for getting kappa samples.
(May be overridden with what was used previously, if
kappa samples were already drawn and stored)
interim_pdf_func : callable, optional
Function that returns the density of the interim prior
Note
----
        Taking the log doesn't help with numerical stability here, since we
        divide the probabilities directly; it is kept only for consistency.
Returns
-------
np.ndarray
kappa grid, log weights for each of the BNN samples for
this sightline
"""
os.makedirs(self.reweighted_grid_dir, exist_ok=True)
path = osp.join(self.reweighted_grid_dir,
f'log_weights_{idx}.npy')
if osp.exists(path):
return np.load(path)
# Get unflattened, i.e. [n_test, 1, n_mc_dropout, n_samples]
k_bnn = self.get_bnn_kappa(n_samples=n_samples,
n_mc_dropout=n_mc_dropout,
flatten=False)
k_bnn = k_bnn[idx, 0, :, :] # [n_mc_dropout, n_samples]
n_mc_dropout, n_samples = k_bnn.shape
numer = np.zeros(grid.shape) # init numerator
# Fit a normal for each MC dropout
for d in range(n_mc_dropout):
samples_d = k_bnn[d, :]
norm_d = scipy.stats.norm(loc=samples_d.mean(),
scale=samples_d.std())
bnn_prob_d = norm_d.pdf(grid)
numer += (bnn_prob_d - numer)/(d+1) # running mean
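        # After the loop, `numer` holds the equal-weight average of the
        # per-dropout normal PDFs on the grid, i.e. a Gaussian-mixture
        # estimate of the BNN-implied density.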
# Useful for debugging
np.save(osp.join(self.reweighted_grid_dir,
f'grid_bnn_gmm_{idx}.npy'),
numer)
denom = interim_pdf_func(grid)
log_weights = np.log(numer/denom)
log_weights_grid = np.stack([grid, log_weights], axis=0)
np.save(path, log_weights_grid)
return log_weights_grid
def get_reweighted_bnn_kappa(self, n_resamples, grid_kappa_kwargs,
):
"""Get the reweighted BNN kappa samples, reweighted either on a
grid or per sample
Parameters
----------
n_resamples : int
Number of resamples from the reweighted distribution
grid_kappa_kwargs : dict
            Kwargs passed to `get_kappa_log_weights_grid` and
            `get_kappa_log_weights`
Returns
-------
tuple
            Two arrays of shape [n_test, 1, n_resamples]: the first contains
            resamples from the grid-based reweighting and the second contains
            resamples from the per-sample reweighting
"""
if osp.exists(self.reweighted_bnn_kappa_grid_path):
if osp.exists(self.reweighted_bnn_kappa_per_sample_path):
print("Reading existing reweighted BNN kappa...")
grid = np.load(self.reweighted_bnn_kappa_grid_path)
per_sample = np.load(self.reweighted_bnn_kappa_per_sample_path)
return grid, per_sample
n_test = len(self.test_dataset)
k_bnn = self.get_bnn_kappa(n_samples=grid_kappa_kwargs['n_samples'],
n_mc_dropout=grid_kappa_kwargs['n_mc_dropout'])
# Init reweighted arrays
k_reweighted_grid = np.empty([n_test, 1, n_resamples])
k_reweighted_per_sample = np.empty([n_test, 1, n_resamples])
for idx in tqdm(range(n_test), desc='evaluating, resampling'):
# On a grid
grid, log_p = self.get_kappa_log_weights_grid(idx,
**grid_kappa_kwargs)
per_grid = iutils.resample_from_pdf(grid, log_p, n_resamples)
k_reweighted_grid[idx, 0, :] = per_grid
# Per sample
log_p_sample = self.get_kappa_log_weights(idx, **grid_kappa_kwargs)
plot_path = osp.join(self.reweighted_per_sample_dir, f'kde_{idx}.png')
per_sample = iutils.resample_from_samples(k_bnn[idx],
np.exp(log_p_sample),
n_resamples,
plot_path)
k_reweighted_per_sample[idx, 0, :] = per_sample
# Grid resamples for all sightlines
np.save(self.reweighted_bnn_kappa_grid_path,
k_reweighted_grid)
# Per-sample resamples for all sightlines
np.save(self.reweighted_bnn_kappa_per_sample_path,
k_reweighted_per_sample)
return k_reweighted_grid, k_reweighted_per_sample
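    # A minimal usage sketch (hypothetical values; `my_interim_pdf` is a
    # placeholder for the interim-prior callable):
    #     grid_kwargs = dict(n_samples=200, n_mc_dropout=20,
    #                        grid=np.linspace(-0.2, 0.2, 1000),
    #                        interim_pdf_func=my_interim_pdf)
    #     k_grid, k_per_sample = infer_obj.get_reweighted_bnn_kappa(1000, grid_kwargs)
    # Both returned arrays have shape [n_test, 1, n_resamples].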
def visualize_omega_post(self, chain_path, chain_kwargs,
corner_kwargs, log_idx=None):
# MCMC samples ~ [n_omega, 2]
omega_post_samples = iutils.get_mcmc_samples(chain_path, chain_kwargs)
if log_idx is not None:
omega_post_samples[:, log_idx] = np.exp(omega_post_samples[:, log_idx])
print(f"Plotting {omega_post_samples.shape[0]} samples...")
fig = corner.corner(omega_post_samples,
**corner_kwargs)
fig.savefig(osp.join(self.out_dir, 'omega_post.pdf'))
def visualize_kappa_post(self, idx, n_samples, n_mc_dropout,
interim_pdf_func, grid=None):
log_weights = self.get_kappa_log_weights(idx,
n_samples,
n_mc_dropout,
interim_pdf_func) # [n_samples]
grid, log_w_grid = self.get_kappa_log_weights_grid(idx,
grid,
n_samples,
n_mc_dropout,
interim_pdf_func)
w_grid = np.exp(log_w_grid)
k_bnn = self.get_bnn_kappa(n_samples=n_samples,
n_mc_dropout=n_mc_dropout) # [n_test, n_samples]
true_k = self.get_true_kappa(is_train=False)
fig, ax = plt.subplots()
# Original posterior
bins = np.histogram_bin_edges(k_bnn[idx].squeeze(), bins='scott',)
ax.hist(k_bnn[idx].squeeze(),
histtype='step',
bins=bins,
density=True,
color='#8ca252',
label='original')
# Reweighted posterior, per sample
ax.hist(k_bnn[idx].squeeze(),
histtype='step',
bins=25,
density=True,
weights=np.exp(log_weights),
color='#d6616b',
label='reweighted per sample')
# Reweighted posterior, analytical
reweighted_k_bnn, _ = self.get_reweighted_bnn_kappa(None, None)
reweighted_k_bnn = reweighted_k_bnn[idx, 0, :]
bin_vals, bin_edges = np.histogram(reweighted_k_bnn, bins='scott',
density=True)
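        # Rescale the analytic reweighted curve so its peak matches the peak
        # of a density histogram of the grid resamples; visual comparison only.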
norm_factor = np.max(bin_vals)/np.max(w_grid)
ax.plot(grid, norm_factor*w_grid,
color='#d6616b',
label='reweighted on grid')
# Truth
ax.axvline(true_k[idx].squeeze(), color='k', label='truth')
ax.set_xlabel(r'$\kappa$')
ax.legend()
@property
def pre_reweighting_metrics_path(self):
return osp.join(self.out_dir, 'pre_metrics.csv')
@property
def pre_reweighting_metrics(self):
return pd.read_csv(self.pre_reweighting_metrics_path,
index_col=False)
@property
def post_reweighting_metrics_path(self):
return osp.join(self.out_dir, 'post_metrics.csv')
@property
def post_reweighting_metrics(self):
return pd.read_csv(self.post_reweighting_metrics_path,
index_col=False)
def compute_metrics(self):
"""Evaluate metrics for model selection, based on per-sample
reweighting for fair comparison to summary stats metrics
"""
columns = ['minus_sig', 'med', 'plus_sig']
columns += ['log_p', 'mad', 'mae']
        # mae = median absolute error, a robust measure of accuracy
        # mad = median absolute deviation, a robust measure of precision
# Metrics on pre-reweighting BNN posteriors
k_bnn_pre = self.get_bnn_kappa()
pre_metrics = pd.DataFrame(columns=columns)
# Metrics on post-reweighting BNN posteriors
_, k_bnn_post = self.get_reweighted_bnn_kappa(None, None)
post_metrics =
|
pd.DataFrame(columns=columns)
|
pandas.DataFrame
|
# -*-coding: utf-8 -*-
import pandas as pd
import hyperflex_recommend.enpity.DataBase as db
MEAL_TYPE = [1, 2, 3]
def load_data():
query = 'select ' \
'm.user_id, user_name, m.food_code, food_name, meal_type, eat_time ' \
'from ' \
'User u ' \
'right join ' \
'Meal m on u.user_id = m.user_id ' \
'left join ' \
'Food f on m.food_code=f.food_code;'
cur, connect = db.DataBaes().connect_db()
cur.execute(query)
data = cur.fetchall()
return data
def analyze_single_user_info(result=load_data()):
"""
:param result:
:return:
    example: {user_id: 1, meal_info: {breakfast: {food_name: dish name, times: count}}, {early_dinner: {...}}, {supper: {...}}}
"""
result =
|
pd.DataFrame(result, columns=['user_id', 'user_name', 'food_code', 'food_name', 'meal_type', 'eat_time'])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.3
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown]
# # Plot focal mechanisms using PyGMT
#
# Based on example by <NAME> at https://docs.generic-mapping-tools.org/latest/animations/anim14.html.
#
# Data are from the [Global Centroid-Moment-Tensor (CMT)](https://www.globalcmt.org/) Project:
# * <NAME>., <NAME> and <NAME>, Determination of earthquake source parameters from waveform data for studies of global and regional seismicity, J. Geophys. Res., 86, 2825-2852, 1981. doi:10.1029/JB086iB04p02825
# * <NAME>., <NAME>, and <NAME>, The global CMT project 2004-2010: Centroid-moment tensors for 13,017 earthquakes, Phys. Earth Planet. Inter., 200-201, 1-9, 2012. doi:10.1016/j.pepi.2012.04.002)
# %%
import pandas as pd
import pygmt
# %%
# Select points for the cross section
profile =
|
pd.DataFrame(data={"x": [-75.02, -63.65], "y": [-33.5, -31]})
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 11 15:05:17 2020
@author: <NAME>
"""
import pickle
import igraph as ig
import pandas as pd
import numpy as np
import networkx as nx
import osmnx as ox
import os
from shapely.geometry import Point
from shapely.ops import cascaded_union
import pyproj
from functools import partial
from shapely.ops import transform, unary_union
import geopandas as gpd
from tools.graph_operations import *
def read_pickle(path):
return pickle.load(open(path,'rb'))
def graph_from_circle(query, radius=1000, network_type='all_private', dual = False, return_igraph = False,
save_pickle=False, fname='graphs\\city_graph', osmnx_query_kws = {}):
"""
Like fetching a graph with osmnx but with a circle instead of a square.
Parameters
----------
query : string or dict
Location to query Nominatim.
radius: float
Radius of the circle.
network_type: string
see osmnx network types
dual: bool
        if True, converts the graph to its dual form
    return_igraph: bool
        if True, returns the graph as an iGraph
save_pickle: bool
if True saves file as a pickle in fname directory
fname: string
Directory to save.
osmnx_query_kws: dict
options for osmnx query. See osmnx properties at
https://osmnx.readthedocs.io/en/stable/osmnx.html.
Returns
-------
iGraph or NetworkX graph
"""
pt = ox.geocode(query)
poly = circle_from_lat_lon(*pt, radius)
G = ox.graph_from_polygon(poly, network_type=network_type, **osmnx_query_kws)
G.graph['kind'] = 'primal'
if dual:
G = get_dual(G)
if return_igraph:
G=get_full_igraph(G)
if save_pickle and return_igraph:
_save_pickle_file(G,fname, extention='ig')
elif save_pickle and not return_igraph:
_save_pickle_file(G,fname, extention='nx')
return G
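# A minimal usage sketch (hypothetical query and radius; requires network access
# for Nominatim geocoding and the OSM download):
#     G = graph_from_circle('Alexanderplatz, Berlin', radius=1500,
#                           network_type='drive')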
def graph_from_address(query, distance=1000, network_type='all_private', dual = False, return_igraph = False,
save_pickle=False, fname='graphs\\city_graph', osmnx_query_kws = {}):
"""
Like fetching a graph with osmnx but with additional functionality.
Parameters
----------
query : string or dict
Location to query Nominatim.
distance: float
distance of the sides of square from the center.
network_type: string
see osmnx network types
dual: bool
        if True, converts the graph to its dual form
    return_igraph: bool
        if True, returns the graph as an iGraph
save_pickle: bool
if True saves file as a pickle in fname directory
fname: string
Directory to save.
osmnx_query_kws: dict
options for osmnx query. See osmnx properties at
https://osmnx.readthedocs.io/en/stable/osmnx.html.
Returns
-------
iGraph or NetworkX graph
"""
G = ox.graph_from_address(query, distance=distance, network_type=network_type, **osmnx_query_kws)
G.graph['kind'] = 'primal'
if dual:
G = get_dual(G)
if return_igraph:
G=get_full_igraph(G)
if save_pickle and return_igraph:
_save_pickle_file(G,fname, extention='ig')
elif save_pickle and not return_igraph:
_save_pickle_file(G,fname, extention='nx')
return G
def graph_from_place(query, network_type='all_private', dual = False, return_igraph = False,
save_pickle=False, fname='graphs\\city_graph', osmnx_query_kws = {}):
"""
Like fetching a graph with osmnx but with additional functionality.
Parameters
----------
query : string or dict
Location to query Nominatim.
network_type: string
see osmnx network types
dual: bool
        if True, converts the graph to its dual form
    return_igraph: bool
        if True, returns the graph as an iGraph
save_pickle: bool
if True saves file as a pickle in fname directory
fname: string
Directory to save.
osmnx_query_kws: dict
options for osmnx query. See osmnx properties at
https://osmnx.readthedocs.io/en/stable/osmnx.html.
Returns
-------
iGraph or NetworkX graph
"""
G = ox.graph_from_place(query, network_type=network_type, **osmnx_query_kws)
G.graph['kind'] = 'primal'
if dual:
G = get_dual(G)
if return_igraph:
G=get_full_igraph(G)
if save_pickle and return_igraph:
_save_pickle_file(G,fname, extention='ig')
elif save_pickle and not return_igraph:
_save_pickle_file(G,fname, extention='nx')
return G
def graph_from_point(point, buffer=0, buffer_type='circle', network_type='all_private', dual = False, return_igraph = False,
save_pickle=False, fname='graphs\\city_graph', osmnx_query_kws = {}):
"""
Like fetching a graph with osmnx but with option of a circle or square.
Parameters
----------
point : float
Central point as (lat, lon).
buffer: float
Radius of the circle or half the side of the square.
network_type: string
see osmnx network types
dual: bool
        if True, converts the graph to its dual form
    return_igraph: bool
        if True, returns the graph as an iGraph
save_pickle: bool
if True saves file as a pickle in fname directory
fname: string
Directory to save.
osmnx_query_kws: dict
options for osmnx query. See osmnx properties at
https://osmnx.readthedocs.io/en/stable/osmnx.html.
Returns
-------
iGraph or NetworkX graph
"""
if buffer_type=='circle':
poly = circle_from_lat_lon(*point, buffer)
G = ox.graph_from_polygon(poly, network_type=network_type, **osmnx_query_kws)
elif buffer_type=='square':
G = ox.graph_from_point(point, distance=buffer, network_type=network_type, **osmnx_query_kws)
G.graph['kind'] = 'primal'
if dual:
G = get_dual(G)
if return_igraph:
G=get_full_igraph(G)
if save_pickle and return_igraph:
_save_pickle_file(G,fname, extention='ig')
elif save_pickle and not return_igraph:
_save_pickle_file(G,fname, extention='nx')
return G
def graph_from_traffic_zones(shp_directory, network_type='all_private', mark_traffic_zones_to_nodes = False,
zone_column=None, convex_hull=False, dual = False, return_igraph = False,
save_pickle=False, fname='graphs\\city_graph', osmnx_query_kws={}):
"""
Get graph from a polygon shapefile of traffic zones.
Parameters
----------
shp_directory : string
Shapefile directory.
network_type: string
See osmnx network types
mark_traffic_zones_to_nodes: bool
If True, add an attribute to nodes marking what zone they belong to.
    zone_column: string
        The column of the GeoDataFrame with the names of the zones. Required
        if mark_traffic_zones_to_nodes is True.
convex_hull: bool
If True, don't use only traffic zones, but the convex hull of the shapes.
This may correct imperfections in traffic zones, but may add nodes not
contained in the area analysed.
dual: bool
        If True, converts the graph to its dual form
    return_igraph: bool
        If True, returns the graph as an iGraph
save_pickle: bool
If True saves file as a pickle in fname directory
fname: string
Directory to save.
osmnx_query_kws: dict
options for osmnx query. See osmnx properties at
https://osmnx.readthedocs.io/en/stable/osmnx.html.
Returns
-------
iGraph or NetworkX graph
"""
    gdf = gpd.read_file(shp_directory)
gdf = gdf.to_crs(epsg=4326)
polygons = [poly for poly in gdf.geometry]
if convex_hull:
boundary = gpd.GeoSeries(unary_union(polygons)).convex_hull[0]
else:
boundary = gpd.GeoSeries(unary_union(polygons))[0]
G = ox.graph_from_polygon(boundary, network_type=network_type, **osmnx_query_kws)
G.graph['kind'] = 'primal'
if mark_traffic_zones_to_nodes:
        if zone_column is None:
raise ValueError('Missing zone column name. If mark_traffic_zones_to_nodes is True, must pass zone_column attribute.')
G = add_traffic_zones_to_nodes(G, gdf, zone_column)
if dual:
G = get_dual(G)
if return_igraph:
G=get_full_igraph(G)
if save_pickle and return_igraph:
_save_pickle_file(G,fname, extention='ig')
elif save_pickle and not return_igraph:
_save_pickle_file(G,fname, extention='nx')
return G
def add_traffic_zones_to_nodes(G, gdf, zone_column):
"""
Adds traffic zones to nodes of a graph.
Parameters
----------
G : NetworkX Graph or DiGraph
Graph for info to be added.
gdf: GeoDataFrame
GeoDataFrame of the traffic zones
    zone_column: string
        Name of the column of the GeoDataFrame that holds the names of the
        zones.
Returns
-------
NetworkX graph
"""
gdf_g = ox.graph_to_gdfs(G)[0]
gdf_g = gdf_g.to_crs(epsg=4326)
join = gpd.sjoin(gdf,gdf_g, op='contains')
for zone, osmid in zip(join[zone_column], join.osmid):
G.nodes[osmid]['zone'] = zone
for node in G.nodes:
        try:
            G.nodes[node]['zone']
        except KeyError:
            G.nodes[node]['zone'] = None
return G
def _reverse_bearing(x):
return x + 180 if x < 180 else x - 180
def _count_and_merge(n, bearings):
# make twice as many bins as desired, then merge them in pairs
# prevents bin-edge effects around common values like 0° and 90°
n = n * 2
bins = np.arange(n + 1) * 360 / n
count, _ = np.histogram(bearings, bins=bins)
# move the last bin to the front, so eg 0.01° and 359.99° will be binned together
count = np.roll(count, 1)
return count[::2] + count[1::2]
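# Worked example: with n=36 the function builds 72 bins of 5 degrees, rolls the
# histogram by one bin so the 355-360 degree bin moves to the front, and then
# sums adjacent pairs, so the first merged bin spans 355-5 degrees and is
# centered on 0 degrees (likewise for 90, 180 and 270).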
def get_orientation_entropy(G, weight=None):
Gu = ox.add_edge_bearings(ox.get_undirected(G))
    if weight is not None:
# weight bearings by NUMERIC attribute
city_bearings = []
for u, v, k, d in Gu.edges(keys=True, data=True):
city_bearings.extend([d['bearing']] * int(d[weight]))
b =
|
pd.Series(city_bearings)
|
pandas.Series
|
from caes import friction_coeff
import CoolProp.CoolProp as CP # http://www.coolprop.org/coolprop/HighLevelAPI.html#propssi-function
from math import pi
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# fixed inputs
T = 290 # [K]
p = 15.0 # pressures [MPa]
depth = 1420 # depth [m]
d = 0.53 # diameter [m]
# parameters to sweep
m_dots = np.arange(0.0, 420, 20) # flow rates [kg/s]
epsilons = np.arange(0.002, 0.0061, 0.002) * 1.0e-3 # roughness [m]
pressures = np.arange(10.0, 15.0, 1.0) # pressures [MPa]
# dataframe to store results
attributes = ['T', 'p', 'epsilon', 'd', 'm_dot', 'rho', 'mu', # inputs
'A', 'U', 'Re', 'f'] # results
df = pd.DataFrame(columns=attributes)
# perform parameter sweep
for m_dot in m_dots:
for epsilon in epsilons:
for p in pressures:
# fluid properties, inputs are degrees K and Pa
rho = CP.PropsSI('D', 'T', T, 'P', p * 1e6, "Air.mix") # density [kg/m3]
mu = CP.PropsSI('V', 'T', T, 'P', p * 1e6, "Air.mix") # viscosity [Pa*s]
# velocity
A = pi / 4.0 * d ** 2.0 # pipe cross-sectional area [m^2]
U = m_dot / (rho * A) # velocity [m/s]
# Reynolds number
Re = rho * d * abs(U) / mu
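            # Re = rho * d * |U| / mu, based on the pipe diameter; as a rough
            # guide, pipe flow is laminar below Re ~ 2300 and fully turbulent
            # above roughly 4000.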
f = friction_coeff(Re=Re, epsilon=epsilon, d=d)
# save results
s =
|
pd.Series(index=attributes)
|
pandas.Series
|
# -*- coding: utf-8 -*-
import collections
from functools import partial
import numpy as np
import pytest
from pandas import Series, Timestamp
from pandas.core import (
common as com,
ops,
)
def test_get_callable_name():
getname = com.get_callable_name
def fn(x):
return x
lambda_ = lambda x: x # noqa: E731
part1 = partial(fn)
part2 = partial(part1)
class somecall(object):
def __call__(self):
return x # noqa
assert getname(fn) == 'fn'
assert getname(lambda_)
assert getname(part1) == 'fn'
assert getname(part2) == 'fn'
assert getname(somecall()) == 'somecall'
assert getname(1) is None
def test_any_none():
assert (com._any_none(1, 2, 3, None))
assert (not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert (com._all_not_none(1, 2, 3, 4))
assert (not com._all_not_none(1, 2, 3, None))
assert (not com._all_not_none(None, None, None, None))
def test_random_state():
import numpy.random as npr
# Check with seed
state = com.random_state(5)
assert state.uniform() == npr.RandomState(5).uniform()
# Check with random state object
state2 = npr.RandomState(10)
assert com.random_state(state2).uniform() == npr.RandomState(10).uniform()
# check with no arg random state
assert com.random_state() is np.random
# Error for floats or strings
with pytest.raises(ValueError):
com.random_state('test')
with pytest.raises(ValueError):
com.random_state(5.5)
@pytest.mark.parametrize('left, right, expected', [
(Series([1], name='x'), Series([2], name='x'), 'x'),
(
|
Series([1], name='x')
|
pandas.Series
|
'''
This program:
1. Reads the files in a folder, keeps those that are Nielsen files, and classifies them by extension.
2. Transforms them into a single table.
3. Exports the result to a .csv file.
It is assumed that the file names follow this structure:
Nielsen - Extraccion Papel Higienico Wm_Nacional y areas a P10'20.xlsx
'''
import os
import pandas as pd
from datetime import datetime, date, timedelta
import win32com.client
import csv
import sys
from tempfile import NamedTemporaryFile
# Input and output file variables
folder = 'c:\\Users\\erick\\Desktop\\Archivos'
lista_campos = ['Periodo', 'Region', 'Country', 'Item Type']
nombre_primera_columa = 'Region'
ruta_de_salida = 'c:\\Users\\erick\\Desktop\\'
excel_password = 'password'
xlApp = win32com.client.Dispatch("Excel.Application")
def inicio_del_mes():
    # Returns the first day of the current month as a date
return datetime.now().date().replace(day=1)
def anio():
    # Current year in YY format, as text
return str((inicio_del_mes() - timedelta(days=1)).year)[-2:]
def mes():
    # Previous month in MM format, as text
return ("00" + str((inicio_del_mes() - timedelta(days=1)).month))[-2:]
def mes_ant():
    # Month before mes() in MM format; the 32-day lag guarantees a two-month offset
return ("00" + str((inicio_del_mes() - timedelta(days=32)).month))[-2:]
def valid_period(file_name):
    # Validates that the file's period falls within the previous two months
if file_name.split('.')[0][-5:][-2:] == anio() and file_name.split('.')[0][-5:][:2] in [mes(), mes_ant()]:
return True
else:
return False
def valid_extension(file_name):
    # Validates whether the file has a valid extension
if file_name.endswith(".xls") or file_name.endswith(".xlsx"):
return True
elif file_name.endswith(".xlsb"):
return True
else:
return False
def valid_file(file_name):
    # Checks that the file meets the three conditions required to be considered valid
if valid_extension(file_name) and valid_period(file_name) and file_name.startswith("Nielsen"):
return True
else:
return False
all_files = []
def list_of_files(file_path):
    # Builds a list of the directory's files that are valid by date and extension
for subfolder, folder, files in os.walk(file_path):
[all_files.append(subfolder + os.sep + file) for file in files if valid_file(file)]
def export_to_csv(df, filename):
    # Saves the DF to a CSV file at the path given by ruta_de_salida
df.to_csv(ruta_de_salida + filename, index = False)
def delete_top_rows(dataframe):
    # Finds the row that contains the first header (variable nombre_primera_columa)
top_row = dataframe.index[dataframe[dataframe.columns[0]] == nombre_primera_columa].tolist()
    # Drops the leading rows that contain no data
dataframe = dataframe.iloc[top_row[0]:]
dataframe = dataframe.reset_index(drop=True)
    # Promote the first row to column headers
new_header = dataframe.iloc[0]
dataframe = dataframe[1:]
dataframe.columns = new_header
return dataframe
def select_columns(filename, old_df):
    # Takes the last period with data from the file name
row_value = filename.split('.')[0][-6:-3] + "'" + filename.split('.')[0][-2:]
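    # e.g. for "... a P10'20.xlsx": [-6:-3] -> "P10" and [-2:] -> "20", so row_value = "P10'20"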
    # Selects the columns listed in lista_campos plus the last period
old_df['Periodo'] = row_value
old_df = old_df[lista_campos + [row_value]]
old_df.rename(columns = {row_value:'Value'}, inplace = True)
return old_df
def excel_with_password(filename):
    # Opens the workbook
xlApp = win32com.client.Dispatch("Excel.Application")
xlwb = xlApp.Workbooks.Open(filename, False, True, None, excel_password)
    # Selects the sheet of the workbook
    xlws = xlwb.Sheets(1) # Sheet index (starts at 1)
#print (xlws.Name)
#print (xlws.Cells(1, 1))
    # Creates a temporary file where the data will be dumped
f = NamedTemporaryFile(delete=False, suffix='.csv')
f.close()
os.unlink(f.name)
    # Saves the data to a csv, which is then read with pandas
    xlCSVWindows = 0x17 # CSV (Windows) file format code
    xlws.SaveAs(Filename=f.name, FileFormat=xlCSVWindows) # Save as CSV
return f.name
def main():
list_of_files(folder)
    # Empty DF to which each valid file is appended in the following for loop
data = pd.DataFrame(columns=lista_campos + ['Value'])
for file in all_files:
if file.endswith(".xlsb"):
            # Read binary Excel file
df = pd.read_excel(file, engine='pyxlsb')
elif file.endswith(".xls") or file.endswith(".xlsx"):
try:
df =
|
pd.read_excel(file)
|
pandas.read_excel
|
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
    # Remove lzo if it's not available on this platform
if not
|
tables.which_lib_version("lzo")
|
pandas.tests.io.pytables.common.tables.which_lib_version
|
#!/usr/bin/env python3
# author : <NAME>
# date : 10.01.2019
# license : BSD-3
# ==============================================================================
import os.path
import sys
import time
import argparse
import numpy as np
import pandas as pd
import sqlite3 as sql
from collections import defaultdict
from pmapper.pharmacophore import Pharmacophore
def create_parser():
parser = argparse.ArgumentParser(
description='Iteratively create ligand-based pharmacophore models.')
parser.add_argument('-adb', '--in_active_database', metavar='active.db', required=True,
help='input SQL database file with active compounds')
parser.add_argument('-idb', '--in_inactive_database', metavar='inactive.db', required=True,
                        help='input SQL database file with inactive compounds')
parser.add_argument('-ats', '--in_active_trainset', metavar='active_training_set.txt', required=True,
help='txt file with information about active models: '
'model, hash, stereo, nact, ninact, nact/ninact, conf_id, feature_ids')
parser.add_argument('-its', '--in_inactive_trainset', metavar='inactive_training_set.txt', required=True,
                        help='txt file with information about inactive models: '
'model, hash, stereo, nact, ninact, nact/ninact, conf_id, feature_ids')
parser.add_argument('-o', '--output_path', metavar='output/path', required=False, default=None,
help='output path to the models of pharmacophores. '
'If None, the path will be generated automatically.')
parser.add_argument('-tol', '--tolerance', default=0,
                        help='tolerance volume for the calculation of the stereo sign. If the volume of the '
                             'tetrahedron created by four points is less than the tolerance, then those points are '
                             'considered to lie on the same plane (flat; stereo sign is 0).')
parser.add_argument('-l', '--lower', default=4,
help='number of features of input models')
return parser
def _keep_best_models(df, df_sub_act, df_sub_inact, df_ph_act, df_ph_inact, save_files):
df_sub_act = pd.merge(df_sub_act, df[['hash']], on='hash', how='inner').reset_index(drop=True)
df_ph_act = pd.merge(df_ph_act, df_sub_act[['conf_id']].drop_duplicates(subset=['conf_id']), on='conf_id',
how='inner').reset_index(drop=True)
if not df_ph_inact.empty:
df_sub_inact = pd.merge(df_sub_inact, df[['hash']], on='hash', how='inner').reset_index(drop=True)
df_ph_inact = pd.merge(df_ph_inact, df_sub_inact[['conf_id']].drop_duplicates(subset=['conf_id']),
on='conf_id', how='inner').reset_index(drop=True)
if save_files:
path_internal = os.path.join(save_files[0], 'internal_statistics_{}_pharm{}.txt'.format(save_files[1], save_files[2]))
path_sub_act = os.path.join(save_files[0], 'ph_active_{}_pharm{}.txt'.format(save_files[1], save_files[2]))
df.to_csv(path_internal, index=None, sep='\t')
df_sub_act.to_csv(path_sub_act, index=None, sep='\t')
if not df_sub_inact.empty:
path_sub_inact = os.path.join(save_files[0], 'ph_inactive_{}_pharm{}.txt'.format(save_files[1], save_files[2]))
df_sub_inact.to_csv(path_sub_inact, index=None, sep='\t')
return df_sub_act, df_sub_inact, df_ph_act, df_ph_inact
# generator that yields mol_name, conf_id, hash, labels
def _gen_quadruplets(df_ph, lower, tol):
for mol_name, conf_id, pharm in zip(df_ph['mol_name'], df_ph['conf_id'], df_ph['pharm']):
if pharm:
for hash, labels in pharm.iterate_pharm(lower, lower, tol):
yield mol_name, conf_id, hash, labels
# generator that yields mol_name, conf_id, hash, labels
def _plus_one_feature(df_ph, df_sub):
for mol_name, conf_id, pharm in zip(df_ph['mol_name'], df_ph['conf_id'], df_ph['pharm']):
list_ids = df_sub[df_sub['conf_id'] == conf_id]
list_ids = [tuple(map(int, l.split(','))) for l in list_ids['feature_ids']]
if pharm:
for hash, labels in pharm.iterate_pharm1(list_ids):
yield mol_name, conf_id, hash, labels
# return type DataFrame: columns=['hash', 'count', 'mol_name', 'conf_id', 'feature_ids']
def gen_models(def_generator, df_0):
dct = defaultdict(list)
for mol_name, conf_id, hash, labels in def_generator:
dct['hash'].append(hash)
dct['mol_name'].append(mol_name)
dct['conf_id'].append(conf_id)
dct['feature_ids'].append(','.join(map(str, labels)))
df = pd.DataFrame(dct)
if df.empty:
return df_0, df
count_df = df.drop_duplicates(subset=['mol_name', 'hash'])
count_df = count_df.groupby(['hash'], sort=True).size().reset_index(name='count')
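    # 'count' = number of distinct molecules (mol_name) whose conformers
    # produce each pharmacophore hash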
df = pd.merge(df, count_df, on='hash', how='right')
df = df.sort_values(by=['count', 'hash'], ascending=False)
return df_0, df[['hash', 'count', 'mol_name', 'conf_id', 'feature_ids']]
# return DataFrame of pharmacophore representation molecules: columns=['mol_name', 'conf_id', 'pharm']
def load_pharmacophores(in_db, in_training_set):
mol_names = [name.strip().split('\t')[1] for name in open(in_training_set).readlines()]
confs_pharm = defaultdict(list)
with sql.connect(in_db) as con:
cur = con.cursor()
cur.execute("SELECT bin_step FROM settings")
db_bin_step = cur.fetchone()[0]
for mol_name in mol_names:
cur.execute("SELECT conf_id, feature_label, x, y, z FROM feature_coords WHERE conf_id IN "
"(SELECT conf_id from conformers WHERE mol_name = ?)", (mol_name,))
res = cur.fetchall()
confs = defaultdict(list)
for r in res:
confs[r[0]].append((r[1], tuple(r[2:]))) # dict(conf_id: (feature_label, x, y, z))
for conf_id, coord in confs.items():
p = Pharmacophore(bin_step=db_bin_step, cached=True)
p.load_from_feature_coords(coord)
confs_pharm['mol_name'].append(mol_name)
confs_pharm['conf_id'].append(conf_id)
confs_pharm['pharm'].append(p)
return
|
pd.DataFrame(confs_pharm)
|
pandas.DataFrame
|
import lxml.etree as ET
import pandas as pd
import numpy as np
import xml.sax
try:
from emeraldtriangles.io._landxml import parse
except ImportError:
class LandXMLHandler(xml.sax.ContentHandler):
chunk_size = 1024
def __init__(self):
self.path = []
self.meta = {}
self.surfaces = {}
self.content = ""
def add_meta(self, path, meta, attributes):
if len(path) == 0:
meta.update(attributes)
else:
if path[0] not in meta:
meta[path[0]] = {}
self.add_meta(path[1:], meta[path[0]], attributes)
def startElement(self, tag, attributes):
self.path.append(tag)
if self.path[:2] == ["LandXML", "Surfaces"]:
if self.path == ["LandXML", "Surfaces", "Surface"]:
self.surfaces[attributes["name"]] = self.surface = {
"vertices": [],
"triangles": []
}
self.vertices = None
self.triangles = None
self.vertice_idx = 0
self.triangle_idx = 0
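                # In LandXML surfaces, <P> elements carry vertex coordinates and
                # <F> elements carry triangle vertex indices; reset the text
                # buffer here so their content can be parsed in endElement.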
if tag in ("P", "F"):
self.content = ""
else:
self.add_meta(self.path[1:], self.meta, attributes)
def endElement(self, tag):
if tag == "P":
self.append_point([float(val) for val in self.content.strip().split(" ")])
self.content = ""
elif tag == "F":
self.append_triangle([int(val) for val in self.content.strip().split(" ")])
self.content = ""
elif tag == "Surface":
self.surface["vertices"] = pd.concat(self.surface["vertices"]).loc[:self.vertice_idx - 1]
self.surface["triangles"] =
|
pd.concat(self.surface["triangles"])
|
pandas.concat
|
"""
This is the technical indicator
Relative Strength Index (RSI)
"""
__author__ = '<EMAIL>'
import datetime
import pandas as pd
from Indicator import Indicator
class Rsi(Indicator):
def __init__(self, **kwargs):
super(Rsi, self).__init__(**kwargs)
self._data = pd.DataFrame(data=self.feed()["dataset_data"]["data"],
columns=self.feed()["dataset_data"]["column_names"])
def simulation(self, plot=None, startDate=None, endDate=None, window_length=14, sma=False):
"""
:param sma:
:param window_length:
:param plot:
:param startDate:
:param endDate:
:return:
"""
self._data['Date'] =
|
pd.to_datetime(self._data['Date'], format='%Y-%m-%d')
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 22:09:32 2018
@author: <NAME>, <NAME>, <NAME>, <NAME>
"""
import numpy as np
import surprise
from surprise import BaselineOnly
from surprise import Dataset
from surprise import get_dataset_dir
from surprise.model_selection import train_test_split
import pandas as pd
from surprise import accuracy
from surprise import PredictionImpossible
from surprise import Prediction
from operator import itemgetter
from surprise import AlgoBase
from surprise import NMF
from surprise import SVD
from surprise.model_selection.split import get_cv
#Parameter Declaration
############################################ Prediction Model #########################################
def predict(uid, iid, r_ui=None, clip=True, verbose=False):
"""Compute the rating prediction for given user and item.
The ``predict`` method converts raw ids to inner ids and then calls the
``estimate`` method which is defined in every derived class. If the
prediction is impossible (e.g. because the user and/or the item is
unkown), the prediction is set according to :meth:`default_prediction()
<surprise.prediction_algorithms.algo_base.AlgoBase.default_prediction>`.
Args:
uid: (Raw) id of the user. See :ref:`this note<raw_inner_note>`.
iid: (Raw) id of the item. See :ref:`this note<raw_inner_note>`.
r_ui(float): The true rating :math:`r_{ui}`. Optional, default is
``None``.
clip(bool): Whether to clip the estimation into the rating scale,
that was set during dataset creation. For example, if
:math:`\\hat{r}_{ui}` is :math:`5.5` while the rating scale is
:math:`[1, 5]`, then :math:`\\hat{r}_{ui}` is set to :math:`5`.
Same goes if :math:`\\hat{r}_{ui} < 1`. Default is ``True``.
verbose(bool): Whether to print details of the prediction. Default
is False.
Returns:
A :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>` object
containing:
- The (raw) user id ``uid``.
- The (raw) item id ``iid``.
- The true rating ``r_ui`` (:math:`r_{ui}`).
- The estimated rating (:math:`\\hat{r}_{ui}`).
- Some additional details about the prediction that might be useful
for later analysis.
"""
# Convert raw ids to inner ids
# print("inner ids: ", uid, ", ", iid)
try:
iuid = WholeSet.to_inner_uid(uid)
# print('uid = ',uid,'iuid = ', iuid)
except ValueError:
print("545: uid error!")
iuid = 'UKN__' + str(uid)
try:
iiid = WholeSet.to_inner_iid(iid)
except ValueError:
print("545: iid error!")
iiid = 'UKN__' + str(iid)
details = {}
try:
est = 0.0
for mm in range(m):
############################################ Estimation from Adaboost Prediction Model #########################################
est += ABPredictM[mm][iuid][iiid] * recm_w[mm]
# If the details dict was also returned
if isinstance(est, tuple):
est, details = est
details['was_impossible'] = False
except PredictionImpossible as e:
est = default_prediction()
details['was_impossible'] = True
details['reason'] = str(e)
# clip estimate into [lower_bound, higher_bound]
if clip:
lower_bound, higher_bound = trainset.rating_scale
est = min(higher_bound, est)
est = max(lower_bound, est)
pred = Prediction(uid, iid, r_ui, est, details, abs(r_ui - est))  # assumes r_ui is given; the error term is stored as an extra sixth field
if verbose:
print(pred)
return pred
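# Hedged usage sketch: predict() depends on the module-level names defined below
# (WholeSet, m, ABPredictM, recm_w, trainset), so it can only be called once the
# Adaboost loop has filled ABPredictM, e.g.
#     pred = predict('196', '302', r_ui=4.0, verbose=True)
# which blends the m per-model estimates with weights recm_w and clips the result
# into trainset.rating_scale.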
m = 1 # Number of Adaboost iterations
D = 5-1 # Rating range
yita = 0.5 # yita denotes how much the average sample error influences the update process, set 0.5 by experience
rho = 0.2 # Adaboost update rate, rho falls within [0.2,0.3,0.4,0.5,0.6]
recm_w = np.ones(m) # Adaboost weight
# Data declaration and splitting into train & test
data = Dataset.load_builtin('ml-100k')
# data = Dataset.load_builtin('ml-1m')
WholeSet = data.build_full_trainset() # Total data set for universal indexing
# choosing algorithm: ItemBased / UserBased
# sim_options = {'name': 'pearson_baseline', 'user_based': False}
sim_options = {'user_based': False}
bsl_options = {'method': 'sgd',
'learning_rate': .00005,
'reg_all': 0.02}
cv = get_cv(None)
CrossVRMSE = np.zeros(5,dtype=float)
Crossiter = 0
for (trainset, ABtestset) in cv.split(data):
# Initialize testset T_train for Adaboost iterations, it is identical with trainset
testset = [None]*trainset.n_ratings
iter = 0
for uid,iid,ratings in trainset.all_ratings():
# print("is uid,iid int or not?", isinstance(uid, int))
ruid = trainset.to_raw_uid(uid)
riid = trainset.to_raw_iid(iid)
# print("and raw ids are:",ruid,riid)
testset[iter] = [ruid,riid,ratings]
# print("testset element are:", testset[iter])
iter+=1
# Output testset to a csv file
PM = pd.DataFrame(testset)
PM.to_csv("TestSet.csv")
# Initializing algorithm with predefined options
# algo = NMF(biased = True)
algo = SVD(biased = True)
# algo = KNNBaseline()
# Initializing sizes for Adaboost parameter matrices
size_ui = (trainset.n_users + 1, trainset.n_items + 1)
size_mui = (m,trainset.n_users + 1, trainset.n_items + 1)
size_wmui = (m,WholeSet.n_users + 1, WholeSet.n_items + 1)
# Initializing weight matrix
W = np.ones(size_ui)
# Initializing Adaboost Prediction matrix from ABtestset
ABPredictM = np.zeros(size_wmui)
# Initializing weight-update Prediction matrix from T_train
PredictM = np.zeros(size_mui)
# Initializing RMSE vector to store RMSE of ABtestset from each model in Adaboost iteration
ABRMSE = np.zeros(m,dtype=float)
# Initializing Rating Matrix to store true ratings from T_train
RatingM = np.zeros(size_ui)
for uid, iid, rating in trainset.all_ratings():
RatingM[uid,iid] = rating
# Starting the main Adaboost loop
for mm in range(m):
#Obtain prediction using current W
############################################ Adaboost Step 1 #########################################
# algo = BaselineOnly(bsl_options = bsl_options)
algo.weightUpdate(W)
predictions = algo.fit(trainset).test(ABtestset)
# predictions = algo.test(ABtestset)
ABRMSE[mm] = accuracy.rmse(predictions)
for (ruid, riid, _, est, _,_) in predictions:
# print("predictM loop: ", ruid,riid,est)
uid = WholeSet.to_inner_uid(ruid)
iid = WholeSet.to_inner_iid(riid)
ABPredictM[mm, uid, iid] = est
############################################ Adaboost Step 2 #########################################
# predictions = algo.fit(trainset).test(testset)
# algo = BaselineOnly(bsl_options = bsl_options)
# algo.weightUpdate(W)
predictions = algo.fit(trainset).test(testset)
# predictions = algo.test(testset)
# PM = pd.DataFrame(predictions)
# PM.to_csv("CurrentPredictions.csv")
# accuracy.rmse(predictions) #print current RMSE accuracy
# sortedlist = sorted(predictions, key=lambda tup: tup[5], reverse=True)[:T] #Sort prediction in descending order of rating errors for the fist T element
# print("trainset size:", trainset.n_users, trainset.n_items)
# print("trainset iid:", trainset.to_inner_iid('1080'))
# print("wholeset iid:", WholeSet.to_inner_iid('1080'))
for (ruid, riid, _, est, _,_) in predictions: #Update current weight-update Prediction matrix
# print("predictM loop: ", ruid,riid,est)
uid = trainset.to_inner_uid(ruid)
iid = trainset.to_inner_iid(riid)
PredictM[mm][uid][iid] = est
UE = np.ones(size_ui) #Initializing Adaboost parameters
SGNM = (trainset.n_users+1)*[(trainset.n_items+1)*[None]]
errRate = 0
w_err_sum = 0
############################################ Adaboost Iteration loop#########################################
for ruid, riid, rating in testset:
# from raw ids to inner ids
uid = trainset.to_inner_uid(ruid)
iid = trainset.to_inner_iid(riid)
#########################################################
# Formula (11) #
#########################################################
abs_err = abs(rating - PredictM[mm][uid][iid])
########################################################
# Formula(13) #
########################################################
UE[uid][iid] = 1 + yita * rating
for mmm in range(mm+1):
########################################################
# Formula(13) #
########################################################
UE[uid][iid] -= yita * PredictM[mmm][uid][iid] / (mm+1) / D
########################################################
# Formula(11) #
########################################################
w_err_sum += W[uid][iid]
########################################################
# Formula(11) #
########################################################
errRate += W[uid][iid]*(abs_err)/D
########################################################
# Formula(11) #
########################################################
errRate = errRate / w_err_sum
recm_w[mm] = np.log((1 - errRate) / errRate ) # Calculating Adaboost Prediction Model weights
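# In summary (as implemented above): the weighted error rate is
#     errRate = sum_ui( W[u][i] * |r_ui - predicted_ui| / D ) / sum_ui( W[u][i] )
# and the weight of this round's model is recm_w[mm] = ln((1 - errRate) / errRate),
# so models with lower weighted error contribute more to the blended prediction.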
PM =
|
pd.DataFrame(UE)
|
pandas.DataFrame
|
from typing import Dict
from pint import DimensionalityError, UndefinedUnitError
from portfolyo.core.pfline import interop as io
from portfolyo.tools.nits import Q_
import pandas as pd
import numpy as np
import pytest
idx1 = pd.date_range("2020", freq="MS", periods=12)
val1 = 100 + 20 * np.random.random(len(idx1))
s1 = pd.Series(val1, idx1)
idx2 = pd.date_range("2020-08", freq="MS", periods=12)
val2 = 200 + 50 * np.random.random(len(idx2))
s2 = pd.Series(val2, idx2)
idx_i = idx1.intersection(idx2).sort_values()
s1_i = s1.loc[idx_i]
s2_i = s2.loc[idx_i]
idx_u = idx1.union(idx2).sort_values()
s1_u = pd.Series((s1.get(i) for i in idx_u), idx_u)
s2_u = pd.Series((s2.get(i) for i in idx_u), idx_u)
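# s1_i/s2_i restrict both series to the overlapping months, while s1_u/s2_u reindex them
# to the union of the two indices, so months present in only one series become NaN
# (s.get(i) returns None there).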
def id_fn(data):
if isinstance(data, Dict):
return str({key: id_fn(val) for key, val in data.items()})
if isinstance(data, pd.Series):
if isinstance(data.index, pd.DatetimeIndex):
return "ts"
else:
return f"series (idx: {''.join(str(i) for i in data.index)})"
if isinstance(data, pd.DataFrame):
return f"df (columns: {''.join(str(c) for c in data.columns)})"
if isinstance(data, io.InOp):
return ""
return str(data)
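# For example: id_fn(s1) -> "ts", id_fn(pd.Series({"w": 120.0})) -> "series (idx: w)",
# id_fn(pd.DataFrame({"w": s1})) -> "df (columns: w)", and any io.InOp collapses to "".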
@pytest.mark.parametrize(
("data_in", "expected_io", "expected_io2"),
[
# One value
# . unit-agnostic
(
23.0,
io.InOp(agn=23.0),
ValueError,
),
# . unitless
(
Q_(23.0, ""),
io.InOp(nodim=23.0),
ValueError,
),
# . known unit
(
Q_(-120.0, "MW"),
io.InOp(w=-120),
ValueError,
),
(
Q_(120e-3, "GW"),
io.InOp(w=120),
ValueError,
),
(
Q_(432e9, "J/h"),
io.InOp(w=120),
ValueError,
),
(
Q_(90_000.0, "MWh"),
io.InOp(q=90_000),
ValueError,
),
(
Q_(90.0, "GWh"),
io.InOp(q=90_000),
ValueError,
),
(
Q_(50.0, "Eur/MWh"),
io.InOp(p=50),
ValueError,
),
(
Q_(5.0, "ctEur/kWh"),
io.InOp(p=50),
ValueError,
),
(
Q_(4_500_000.0, "Eur"),
io.InOp(r=4_500_000),
ValueError,
),
(
Q_(4.5, "MEur"),
io.InOp(r=4_500_000),
ValueError,
),
# . unknown unit
(
Q_(4.5, "MWh/Eur"),
UndefinedUnitError,
None,
),
# One or several values
# . name but no unit
(
{"nodim": 120.0},
io.InOp(nodim=120),
ValueError,
),
(
pd.Series({"nodim": 120.0}),
io.InOp(nodim=120),
ValueError,
),
(
{"w": 120.0},
io.InOp(w=120),
ValueError,
),
(
pd.Series({"w": 120.0}),
io.InOp(w=120),
ValueError,
),
(
{"q": -90_000.0},
io.InOp(q=-90_000),
ValueError,
),
(
pd.Series({"q": -90_000.0}),
io.InOp(q=-90_000),
ValueError,
),
(
{"p": 50.0},
io.InOp(p=50),
ValueError,
),
(
pd.Series({"p": 50.0}),
io.InOp(p=50),
ValueError,
),
(
{"r": 4.5e6},
io.InOp(r=4_500_000),
ValueError,
),
(
pd.Series({"r": 4.5e6}),
io.InOp(r=4_500_000),
ValueError,
),
(
{"w": 120.0, "q": -90_000},
io.InOp(w=120, q=-90_000),
ValueError,
),
(
pd.Series({"w": 120.0, "q": -90_000}),
io.InOp(w=120.0, q=-90_000),
ValueError,
),
(
{"w": 120.0, "p": 50},
io.InOp(w=120.0, p=50),
ValueError,
),
(
pd.Series({"w": 120.0, "p": 50}),
io.InOp(w=120.0, p=50),
ValueError,
),
(
{"w": 120.0, "p": 50.0, "r": 4.5e6},
io.InOp(w=120.0, p=50.0, r=4.5e6),
ValueError,
),
(
pd.Series({"w": 120.0, "p": 50.0, "r": 4.5e6}),
io.InOp(w=120.0, p=50.0, r=4.5e6),
ValueError,
),
(
{"w": 120.0, "p": 50.0, "r": 4.5e6},
io.InOp(w=120.0, p=50.0, r=4.5e6),
ValueError,
),
(
pd.Series({"w": 120.0, "p": 50.0, "r": 4.5e6}),
io.InOp(w=120.0, p=50.0, r=4.5e6),
ValueError,
),
# . name and correct unit
(
{"p": Q_(50.0, "Eur/MWh")},
io.InOp(p=50),
ValueError,
),
(
pd.Series({"p": Q_(50.0, "Eur/MWh")}),
io.InOp(p=50),
ValueError,
),
(
pd.Series({"p": 50}).astype("pint[Eur/MWh]"),
io.InOp(p=50),
ValueError,
),
(
{"r": Q_(4.5, "MEur")},
io.InOp(r=4_500_000),
ValueError,
),
(
pd.Series({"r": Q_(4.5, "MEur")}),
io.InOp(r=4_500_000),
ValueError,
),
(
pd.Series({"r": 4.5}).astype("pint[MEur]"),
io.InOp(r=4_500_000),
ValueError,
),
(
{"w": 120.0, "q": Q_(-90_000.0, "MWh")},
io.InOp(w=120.0, q=-90_000),
ValueError,
),
(
pd.Series({"w": 120.0, "q": Q_(-90_000.0, "MWh")}),
io.InOp(w=120.0, q=-90_000),
ValueError,
),
(
pd.Series({"w": 120.0, "q": Q_(-90.0, "GWh")}),
io.InOp(w=120.0, q=-90_000),
ValueError,
),
# . unknown name -> KeyError
(
{"z": 28.0},
KeyError,
None,
),
(
pd.Series({"z": 28.0}),
KeyError,
None,
),
(
{"z": Q_(120.0, "MWh")},
KeyError,
None,
),
(
pd.Series({"z": Q_(120.0, "MWh")}),
KeyError,
None,
),
# . mix of know and unknown names -> KeyError
(
{"w": 120.0, "z": 28.0},
KeyError,
None,
),
(
pd.Series({"w": 120.0, "z": 28.0}),
KeyError,
None,
),
(
{"w": 120.0, "p": 50.0, "z": 28.0},
KeyError,
None,
),
(
pd.Series({"w": 120.0, "p": 50.0, "z": 28.0}),
KeyError,
None,
),
# . combination of name with incorrect unit -> error
(
{"w": Q_(90.0, "MWh")},
DimensionalityError,
None,
),
(
pd.Series({"w": Q_(90.0, "MWh")}),
DimensionalityError,
None,
),
(
pd.Series({"w": 90}).astype("pint[MWh]"),
DimensionalityError,
None,
),
(
{"p": Q_(90.0, "MWh")},
DimensionalityError,
None,
),
(
pd.Series({"p": Q_(90.0, "MWh")}),
DimensionalityError,
None,
),
(
{"p": 50.0, "w": Q_(90.0, "MWh")},
DimensionalityError,
None,
),
(
pd.Series({"p": 50.0, "w": Q_(90.0, "MWh")}),
DimensionalityError,
None,
),
# One timeseries
# . unit-agnostic
(
s1,
io.InOp(agn=s1),
io.InOp(agn=s1),
),
# . unitless
# (s1.astype("pint[dimensionless]"), io.InterOp(nodim=s1)), # TODO: fix
# . known unit
(
s1.astype("pint[MW]"),
io.InOp(w=s1),
io.InOp(w=s1),
),
(
(s1 / 1000).astype("pint[GW]"), # series with pint unit
io.InOp(w=s1),
io.InOp(w=s1),
),
(
pd.Series([Q_(v, "MW") for v in val1], idx1), # series of Quantities
io.InOp(w=s1),
io.InOp(w=s1),
),
(
s1.astype("pint[GWh]"),
io.InOp(q=s1 * 1000),
io.InOp(q=s1 * 1000),
),
(
s1.astype("pint[Eur/MWh]"),
io.InOp(p=s1),
io.InOp(p=s1),
),
(
s1.astype("pint[MEur]"),
io.InOp(r=s1 * 1e6),
io.InOp(r=s1 * 1e6),
),
# . unknown unit
(
s1.astype("pint[Wh/MEur]"),
UndefinedUnitError,
None,
),
# One or several timeseries
# . name but no unit
(
{"w": s1},
io.InOp(w=s1),
io.InOp(w=s1),
),
(
|
pd.DataFrame({"w": s1})
|
pandas.DataFrame
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import scipy.stats as stats
from matplotlib import gridspec
from matplotlib.lines import Line2D
from .util import *
import seaborn as sns
from matplotlib.ticker import FormatStrFormatter
import matplotlib.pylab as pl
import matplotlib.dates as mdates
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
import matplotlib.patheffects as pe
from .sanker import Sanker
import imageio
import h5py  # needed by get_results_sensitivity_number to read the HDF5 results file
class Visualizer():
def __init__(self, district_list, private_list, city_list, contract_list, bank_list, leiu_list):
self.district_list = district_list.copy()
self.private_list = private_list.copy()
for x in city_list:
self.private_list.append(x)
self.contract_list = contract_list
self.bank_list = bank_list
self.leiu_list = leiu_list
self.private_districts = {}
for x in self.private_list:
self.private_districts[x.name] = []
for xx in x.district_list:
self.private_districts[x.name].append(xx)
inflow_inputs = pd.read_csv('calfews_src/data/input/calfews_src-data.csv', index_col=0, parse_dates=True)
x2_results = pd.read_csv('calfews_src/data/input/x2DAYFLOW.csv', index_col=0, parse_dates=True)
self.observations = inflow_inputs.join(x2_results)
self.observations['delta_outflow'] = self.observations['delta_inflow'] + self.observations['delta_depletions'] - self.observations['HRO_pump'] - self.observations['TRP_pump']
self.index_o = self.observations.index
self.T_o = len(self.observations)
self.day_month_o = self.index_o.day
self.month_o = self.index_o.month
self.year_o = self.index_o.year
kern_bank_observations = pd.read_csv('calfews_src/data/input/kern_water_bank_historical.csv')
kern_bank_observations = kern_bank_observations.set_index('Year')
semitropic_bank_observations = pd.read_csv('calfews_src/data/input/semitropic_bank_historical.csv')
semitropic_bank_observations = semitropic_bank_observations.set_index('Year')
total_bank_kwb = np.zeros(self.T_o)
total_bank_smi = np.zeros(self.T_o)
for x in range(0, self.T_o):
if self.month_o[x] > 9:
year_str = self.year_o[x]
else:
year_str = self.year_o[x] - 1
if self.month_o[x] == 9 and self.day_month_o[x] == 30:
year_str = self.year_o[x]
total_bank_kwb[x] = kern_bank_observations.loc[year_str, 'Ag'] + kern_bank_observations.loc[year_str, 'Mixed Purpose']
deposit_history = semitropic_bank_observations[semitropic_bank_observations.index <= year_str]
total_bank_smi[x] = deposit_history['Metropolitan'].sum() + deposit_history['South Bay'].sum()
self.observations['kwb_accounts'] = pd.Series(total_bank_kwb, index=self.observations.index)
self.observations['smi_accounts'] = pd.Series(total_bank_smi, index=self.observations.index)
def get_results_sensitivity_number(self, results_file, sensitivity_number, start_month, start_year, start_day):
self.values = {}
numdays_index = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
with h5py.File(results_file, 'r') as f:
data = f['s' + sensitivity_number]
names = data.attrs['columns']
names = list(map(lambda x: str(x).split("'")[1], names))
df_data = pd.DataFrame(data[:], columns=names)
for x in df_data:
self.values[x] = df_data[x]
datetime_index = []
monthcount = start_month
yearcount = start_year
daycount = start_day
leapcount = np.remainder(start_year, 4)
for t in range(0, len(self.values[x])):
datetime_index.append(str(yearcount) + '-' + str(monthcount) + '-' + str(daycount))
daycount += 1
if leapcount == 0 and monthcount == 2:
numdays_month = numdays_index[monthcount - 1] + 1
else:
numdays_month = numdays_index[monthcount - 1]
if daycount > numdays_month:
daycount = 1
monthcount += 1
if monthcount == 13:
monthcount = 1
yearcount += 1
leapcount += 1
if leapcount == 4:
leapcount = 0
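# The loop above builds the daily calendar by hand with a simplified every-4-years leap
# rule (no century correction); for typical runs this is equivalent to
# pd.date_range(start=<first day>, periods=len(self.values[x]), freq='D').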
self.values['Datetime'] = pd.to_datetime(datetime_index)
self.values = pd.DataFrame(self.values)
self.values = self.values.set_index('Datetime')
self.index = self.values.index
self.T = len(self.values.index)
self.day_year = self.index.dayofyear
self.day_month = self.index.day
self.month = self.index.month
self.year = self.index.year
self.starting_year = self.index.year[0]
self.ending_year = self.index.year[-1]
self.number_years = self.ending_year - self.starting_year
total_kwb_sim = np.zeros(len(self.values))
total_smi_sim = np.zeros(len(self.values))
for district_partner in ['DLR', 'KCWA', 'ID4', 'SMI', 'TJC', 'WON', 'WRM']:
total_kwb_sim += self.values['kwb_' + district_partner]
self.values['kwb_total'] = pd.Series(total_kwb_sim, index = self.values.index)
for district_partner in ['SOB', 'MET']:
total_smi_sim += self.values['semitropic_' + district_partner]
self.values['smi_total'] = pd.Series(total_smi_sim, index = self.values.index)
def set_figure_params(self):
self.figure_params = {}
self.figure_params['delta_pumping'] = {}
self.figure_params['delta_pumping']['extended_simulation'] = {}
self.figure_params['delta_pumping']['extended_simulation']['outflow_list'] = ['delta_outflow', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['pump1_list'] = ['delta_HRO_pump', 'HRO_pump']
self.figure_params['delta_pumping']['extended_simulation']['pump2_list'] = ['delta_TRP_pump', 'TRP_pump']
self.figure_params['delta_pumping']['extended_simulation']['scenario_labels'] = ['Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['simulation_labels'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['observation_labels'] = ['HRO_pump', 'TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['agg_list'] = ['AS-OCT', 'AS-OCT', 'D']
self.figure_params['delta_pumping']['extended_simulation']['unit_mult'] = [1.0, 1.0, cfs_tafd]
self.figure_params['delta_pumping']['extended_simulation']['max_value_list'] = [5000, 5000, 15]
self.figure_params['delta_pumping']['extended_simulation']['use_log_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['use_cdf_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['scenario_type_list'] = ['observation', 'validation', 'scenario']
self.figure_params['delta_pumping']['extended_simulation']['x_label_list'] = ['Total Pumping, SWP Delta Pumps (tAF/year)', 'Total Pumping, CVP Delta Pumps (tAF/year)', 'Daily Exceedance Probability', '']
self.figure_params['delta_pumping']['extended_simulation']['y_label_list'] = ['Probability Density', 'Probability Density', 'Daily Delta Outflow (tAF)', 'Relative Frequency of Water-year Types within Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names1'] = ['Historical (1996-2016) Observations', 'Historical (1996-2016) Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names2'] = ['Critical', 'Dry', 'Below Normal', 'Above Normal', 'Wet']
self.figure_params['state_estimation'] = {}
for x in ['publication', 'sacramento', 'sanjoaquin', 'tulare']:
self.figure_params['state_estimation'][x] = {}
self.figure_params['state_estimation'][x]['non_log'] = ['Snowpack (SWE)',]
self.figure_params['state_estimation'][x]['predictor values'] = ['Mean Inflow, Prior 30 Days (tAF/day)','Snowpack (SWE)']
self.figure_params['state_estimation'][x]['colorbar_label_index'] = [0, 30, 60, 90, 120, 150, 180]
self.figure_params['state_estimation'][x]['colorbar_label_list'] = ['Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr']
self.figure_params['state_estimation'][x]['subplot_annotations'] = ['A', 'B', 'C', 'D']
self.figure_params['state_estimation'][x]['forecast_periods'] = [30,'SNOWMELT']
self.figure_params['state_estimation'][x]['all_cols'] = ['DOWY', 'Snowpack', '30MA']
self.figure_params['state_estimation'][x]['forecast_values'] = []
for forecast_days in self.figure_params['state_estimation'][x]['forecast_periods']:
if forecast_days == 'SNOWMELT':
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Snowmelt Season (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append('Snowmelt Flow')
else:
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Next ' + str(forecast_days) + ' Days (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append(str(forecast_days) + ' Day Flow')
self.figure_params['state_estimation']['publication']['watershed_keys'] = ['SHA', 'ORO', 'MIL', 'ISB']
self.figure_params['state_estimation']['publication']['watershed_labels'] = ['Shasta', 'Oroville', 'Millerton', 'Isabella']
self.figure_params['state_estimation']['sacramento']['watershed_keys'] = ['SHA', 'ORO', 'FOL', 'YRS']
self.figure_params['state_estimation']['sacramento']['watershed_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar']
self.figure_params['state_estimation']['sanjoaquin']['watershed_keys'] = ['NML', 'DNP', 'EXC', 'MIL']
self.figure_params['state_estimation']['sanjoaquin']['watershed_labels'] = ['New Melones', '<NAME>', 'Exchequer', 'Millerton']
self.figure_params['state_estimation']['tulare']['watershed_keys'] = ['PFT', 'KWH', 'SUC', 'ISB']
self.figure_params['state_estimation']['tulare']['watershed_labels'] = ['Pine Flat', 'Kaweah', 'Success', 'Isabella']
self.figure_params['model_validation'] = {}
for x in ['delta', 'sierra', 'sanluis', 'bank']:
self.figure_params['model_validation'][x] = {}
self.figure_params['model_validation']['delta']['title_labels'] = ['State Water Project Pumping', 'Central Valley Project Pumping', 'Delta X2 Location']
num_subplots = len(self.figure_params['model_validation']['delta']['title_labels'])
self.figure_params['model_validation']['delta']['label_name_1'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_x2']
self.figure_params['model_validation']['delta']['label_name_2'] = ['HRO_pump', 'TRP_pump', 'DAY_X2']
self.figure_params['model_validation']['delta']['unit_converstion_1'] = [1.0, 1.0, 1.0]
self.figure_params['model_validation']['delta']['unit_converstion_2'] = [cfs_tafd, cfs_tafd, 1.0]
self.figure_params['model_validation']['delta']['y_label_timeseries'] = ['Pumping (tAF/week)', 'Pumping (tAF/week)', 'X2 inland distance (km)']
self.figure_params['model_validation']['delta']['y_label_scatter'] = ['(tAF/yr)', '(tAF/yr)', '(km)']
self.figure_params['model_validation']['delta']['timeseries_timestep'] = ['W', 'W', 'W']
self.figure_params['model_validation']['delta']['scatter_timestep'] = ['AS-OCT', 'AS-OCT', 'M']
self.figure_params['model_validation']['delta']['aggregation_methods'] = ['sum', 'sum', 'mean']
self.figure_params['model_validation']['delta']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['delta']['show_legend'] = [True] * num_subplots
self.figure_params['model_validation']['sierra']['title_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar', 'New Melones', '<NAME>', 'Exchequer', 'Millerton', 'Pine Flat', 'Kaweah', 'Success', 'Isabella']
num_subplots = len(self.figure_params['model_validation']['sierra']['title_labels'])
self.figure_params['model_validation']['sierra']['label_name_1'] = ['shasta_S', 'oroville_S', 'folsom_S', 'yuba_S', 'newmelones_S', 'donpedro_S', 'exchequer_S', 'millerton_S', 'pineflat_S', 'kaweah_S', 'success_S', 'isabella_S']
self.figure_params['model_validation']['sierra']['label_name_2'] = ['SHA_storage', 'ORO_storage', 'FOL_storage', 'YRS_storage', 'NML_storage', 'DNP_storage', 'EXC_storage', 'MIL_storage', 'PFT_storage', 'KWH_storage', 'SUC_storage', 'ISB_storage']
self.figure_params['model_validation']['sierra']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sierra']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_scatter'] = []
self.figure_params['model_validation']['sierra']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sierra']['scatter_timestep'] = []
self.figure_params['model_validation']['sierra']['aggregation_methods'] = ['mean'] * num_subplots
self.figure_params['model_validation']['sierra']['notation_location'] = ['bottom'] * num_subplots
self.figure_params['model_validation']['sierra']['show_legend'] = [False] * num_subplots
counter_kaweah = self.figure_params['model_validation']['sierra']['title_labels'].index('Kaweah')
counter_success = self.figure_params['model_validation']['sierra']['title_labels'].index('Success')
counter_isabella = self.figure_params['model_validation']['sierra']['title_labels'].index('Isabella')
self.figure_params['model_validation']['sierra']['notation_location'][counter_kaweah] = 'top'
self.figure_params['model_validation']['sierra']['notation_location'][counter_success] = 'topright'
self.figure_params['model_validation']['sierra']['show_legend'][counter_isabella] = True
self.figure_params['model_validation']['sanluis']['title_labels'] = ['State (SWP) Portion, San Luis Reservoir', 'Federal (CVP) Portion, San Luis Reservoir']
num_subplots = len(self.figure_params['model_validation']['sanluis']['title_labels'])
self.figure_params['model_validation']['sanluis']['label_name_1'] = ['sanluisstate_S', 'sanluisfederal_S']
self.figure_params['model_validation']['sanluis']['label_name_2'] = ['SLS_storage', 'SLF_storage']
self.figure_params['model_validation']['sanluis']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sanluis']['scatter_timestep'] = ['M'] * num_subplots
self.figure_params['model_validation']['sanluis']['aggregation_methods'] = ['point'] * num_subplots
self.figure_params['model_validation']['sanluis']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['sanluis']['show_legend'] = [True] * num_subplots
self.figure_params['model_validation']['bank']['title_labels'] = ['Kern Water Bank Accounts', 'Semitropic Water Bank Accounts']
num_subplots = len(self.figure_params['model_validation']['bank']['title_labels'])
self.figure_params['model_validation']['bank']['label_name_1'] = ['kwb_total', 'smi_total']
self.figure_params['model_validation']['bank']['label_name_2'] = ['kwb_accounts', 'smi_accounts']
self.figure_params['model_validation']['bank']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['bank']['unit_converstion_2'] = [1.0/1000000.0, 1.0/1000.0]
self.figure_params['model_validation']['bank']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['bank']['scatter_timestep'] = ['AS-OCT'] * num_subplots
self.figure_params['model_validation']['bank']['aggregation_methods'] = ['change'] * num_subplots
self.figure_params['model_validation']['bank']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'] = [False] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'][0] = True
self.figure_params['state_response'] = {}
self.figure_params['state_response']['sanluisstate_losthills'] = {}
self.figure_params['state_response']['sanluisstate_losthills']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_losthills']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_losthills']['groundwater_account_names'] = ['LHL','WON']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'steelblue']
self.figure_params['state_response']['sanluisstate_losthills']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_losthills']['subplot_titles'] = ['State Water Project Delta Operations', 'Lost Hills Drought Management', 'San Luis Reservoir Operations', 'Lost Hills Flood Management']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharged from Contract Allocation', 'Recharge of Uncontrolled Flood Spills']
self.figure_params['state_response']['sanluisstate_wheeler'] = {}
self.figure_params['state_response']['sanluisstate_wheeler']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_wheeler']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_wheeler']['groundwater_account_names'] = ['WRM']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'lightsteelblue']
self.figure_params['state_response']['sanluisstate_wheeler']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_wheeler']['subplot_titles'] = ['State Water Project Delta Operations', 'Wheeler Ridge Drought Management', 'San Luis Reservoir Operations', 'Wheeler Ridge Flood Management']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharge of Uncontrolled Flood Spills', 'Recharged from Contract Allocation']
self.figure_params['district_water_use'] = {}
self.figure_params['district_water_use']['physical'] = {}
self.figure_params['district_water_use']['physical']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors', 'Groundwater Banks']
self.figure_params['district_water_use']['physical']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['physical']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler', 'northkern', 'kerntulare']
self.figure_params['district_water_use']['physical']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['physical']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['physical']['Groundwater Banks'] = ['stockdale', 'kernriverbed', 'poso', 'pioneer', 'kwb', 'b2800', 'irvineranch', 'northkernwb']
self.figure_params['district_water_use']['physical']['subplot columns'] = 2
self.figure_params['district_water_use']['physical']['color map'] = 'YlGnBu_r'
self.figure_params['district_water_use']['physical']['write file'] = True
self.figure_params['district_water_use']['annual'] = {}
self.figure_params['district_water_use']['annual']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors']
self.figure_params['district_water_use']['annual']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['annual']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler']
self.figure_params['district_water_use']['annual']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['annual']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['annual']['subplot columns'] = 2
self.figure_params['district_water_use']['annual']['color map'] = 'BrBG_r'
self.figure_params['district_water_use']['annual']['write file'] = True
self.figure_params['flow_diagram'] = {}
self.figure_params['flow_diagram']['tulare'] = {}
self.figure_params['flow_diagram']['tulare']['column1'] = ['Shasta', 'Folsom', 'Oroville', 'New Bullards', 'Uncontrolled']
self.figure_params['flow_diagram']['tulare']['row1'] = ['Delta Outflow', 'Carryover',]
self.figure_params['flow_diagram']['tulare']['column2'] = ['San Luis (Fed)', 'San Luis (State)', 'Millerton', 'Isabella', 'Pine Flat', 'Kaweah', 'Success']
self.figure_params['flow_diagram']['tulare']['row2'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column3'] = ['Exchange', 'CVP-Delta', 'Cross Valley', 'State Water Project', 'Friant Class 1','Friant Class 2', 'Kern River', 'Kings River', 'Kaweah River', 'Tule River', 'Flood']
self.figure_params['flow_diagram']['tulare']['row3'] = ['Private Pumping', 'GW Banks']
self.figure_params['flow_diagram']['tulare']['column4'] = ['Exchange', 'CVP-Delta', 'Urban', 'KCWA', 'CVP-Friant','Other']
self.figure_params['flow_diagram']['tulare']['row4'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column5'] = ['Irrigation', 'Urban', 'In-Lieu Recharge', 'Direct Recharge']
self.figure_params['flow_diagram']['tulare']['titles'] = ['Sacramento Basin\nSupplies', 'Tulare Basin\nSupplies', 'Surface Water\nContract Allocations', 'Contractor Groups', 'Water Use Type']
def scenario_compare(self, folder_name, figure_name, plot_name, validation_values, show_plot):
outflow_list = self.figure_params[figure_name][plot_name]['outflow_list']
pump1_list = self.figure_params[figure_name][plot_name]['pump1_list']
pump2_list = self.figure_params[figure_name][plot_name]['pump2_list']
scenario_labels = self.figure_params[figure_name][plot_name]['scenario_labels']
simulation_labels = self.figure_params[figure_name][plot_name]['simulation_labels']
observation_labels = self.figure_params[figure_name][plot_name]['observation_labels']
agg_list = self.figure_params[figure_name][plot_name]['agg_list']
unit_mult = self.figure_params[figure_name][plot_name]['unit_mult']
max_value_list = self.figure_params[figure_name][plot_name]['max_value_list']
use_log_list = self.figure_params[figure_name][plot_name]['use_log_list']
use_cdf_list = self.figure_params[figure_name][plot_name]['use_cdf_list']
scenario_type_list = self.figure_params[figure_name][plot_name]['scenario_type_list']
x_label_list = self.figure_params[figure_name][plot_name]['x_label_list']
y_label_list = self.figure_params[figure_name][plot_name]['y_label_list']
legend_label_names1 = self.figure_params[figure_name][plot_name]['legend_label_names1']
legend_label_names2 = self.figure_params[figure_name][plot_name]['legend_label_names2']
color1 = sns.color_palette('spring', n_colors = 3)
color2 = sns.color_palette('summer', n_colors = 3)
color_list = np.array([color1[0], color1[2], color2[0]])
max_y_val = np.zeros(len(simulation_labels))
fig = plt.figure(figsize = (20, 16))
gs = gridspec.GridSpec(3,2, width_ratios=[3,1], figure = fig)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0])
ax3 = plt.subplot(gs[2, 0])
ax4 = plt.subplot(gs[:, 1])
axes_list = [ax1, ax2, ax3]
counter = 0
for sim_label, obs_label, agg, max_value, use_log, use_cdf, ax_loop in zip(simulation_labels, observation_labels, agg_list, max_value_list, use_log_list, use_cdf_list, axes_list):
data_type_dict = {}
data_type_dict['scenario'] = self.values[sim_label].resample(agg).sum() * unit_mult[0]
data_type_dict['validation'] = validation_values[sim_label].resample(agg).sum() * unit_mult[1]
data_type_dict['observation'] = self.observations[obs_label].resample(agg).sum() * unit_mult[2]
if use_log:
for scen_type in scenario_type_list:
values_int = data_type_dict[scen_type]
data_type_dict[scen_type] = np.log(values_int[values_int > 0])
for scen_type in scenario_type_list:
max_y_val[counter] = max([max(data_type_dict[scen_type]), max_y_val[counter]])
counter += 1
if use_cdf:
for scen_type, color_loop in zip(scenario_type_list, color_list):
cdf_values = np.zeros(100)
values_int = data_type_dict[scen_type]
for x in range(0, 100):
x_val = int(np.ceil(max_value)) * (x/100)
cdf_values[x] = len(values_int[values_int > x_val])/len(values_int)
ax_loop.plot(cdf_values, np.arange(0, int(np.ceil(max_value)), int(np.ceil(max_value))/100), linewidth = 3, color = color_loop)
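# Note: cdf_values holds the fraction of values ABOVE each threshold, i.e. an exceedance
# curve (1 - CDF), which is what the 'Daily Exceedance Probability' axis label describes.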
else:
pos = np.linspace(0, max_value, 101)
for scen_type, color_loop in zip(scenario_type_list, color_list):
kde_est = stats.gaussian_kde(data_type_dict[scen_type])
ax_loop.fill_between(pos, kde_est(pos), edgecolor = 'black', alpha = 0.6, facecolor = color_loop)
sri_dict = {}
sri_dict['validation'] = validation_values['delta_forecastSRI']
sri_dict['scenario'] = self.values['delta_forecastSRI']
sri_cutoffs = {}
sri_cutoffs['W'] = [9.2, 100]
sri_cutoffs['AN'] = [7.8, 9.2]
sri_cutoffs['BN'] = [6.6, 7.8]
sri_cutoffs['D'] = [5.4, 6.6]
sri_cutoffs['C'] = [0.0, 5.4]
wyt_list = ['W', 'AN', 'BN', 'D', 'C']
scenario_type_list = ['validation', 'scenario']
colors = sns.color_palette('RdBu_r', n_colors = 5)
percent_years = {}
for wyt in wyt_list:
percent_years[wyt] = np.zeros(len(scenario_type_list))
for scen_cnt, scen_type in enumerate(scenario_type_list):
ann_sri = []
for x_cnt, x in enumerate(sri_dict[scen_type]):
if sri_dict[scen_type].index.month[x_cnt] == 9 and sri_dict[scen_type].index.day[x_cnt] == 30:
ann_sri.append(x)
ann_sri = np.array(ann_sri)
for x_cnt, wyt in enumerate(wyt_list):
mask_value = (ann_sri >= sri_cutoffs[wyt][0]) & (ann_sri < sri_cutoffs[wyt][1])
percent_years[wyt][scen_cnt] = len(ann_sri[mask_value])/len(ann_sri)
colors = sns.color_palette('RdBu_r', n_colors = 5)
last_type = np.zeros(len(scenario_type_list))
for cnt, x in enumerate(wyt_list):
ax4.bar(['Validated Period\n(1997-2016)', 'Extended Simulation\n(1906-2016)'], percent_years[x], alpha = 1.0, label = wyt, facecolor = colors[cnt], edgecolor = 'black', bottom = last_type)
last_type += percent_years[x]
ax1.set_xlim([0.0, 500.0* np.ceil(max_y_val[0]/500.0)])
ax2.set_xlim([0.0, 500.0* np.ceil(max_y_val[1]/500.0)])
ax3.set_xlim([0.0, 1.0])
ax4.set_ylim([0, 1.15])
ax1.set_yticklabels('')
ax2.set_yticklabels('')
label_list = []
loc_list = []
for value_x in range(0, 120, 20):
label_list.append(str(value_x) + ' %')
loc_list.append(value_x/100.0)
ax4.set_yticklabels(label_list)
ax4.set_yticks(loc_list)
ax3.set_xticklabels(label_list)
ax3.set_xticks(loc_list)
ax3.set_yticklabels(['4', '8', '16', '32', '64', '125', '250', '500', '1000', '2000', '4000'])
ax3.set_yticks([np.log(4), np.log(8), np.log(16), np.log(32), np.log(64), np.log(125), np.log(250), np.log(500), np.log(1000), np.log(2000), np.log(4000)])
ax3.set_ylim([np.log(4), np.log(4000)])
for ax, x_lab, y_lab in zip([ax1, ax2, ax3, ax4], x_label_list, y_label_list):
ax.set_xlabel(x_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.set_ylabel(y_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.grid(False)
for tick in ax.get_xticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
for tick in ax.get_yticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
legend_elements = []
for x_cnt, x in enumerate(legend_label_names1):
legend_elements.append(Patch(facecolor = color_list[x_cnt], edgecolor = 'black', label = x))
ax1.legend(handles = legend_elements, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
legend_elements_2 = []
for x_cnt, x in enumerate(legend_label_names2):
legend_elements_2.append(Patch(facecolor = colors[x_cnt], edgecolor = 'black', label = x))
ax4.legend(handles = legend_elements_2, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
plt.savefig(folder_name + figure_name + '_' + plot_name + '.png', dpi = 150, bbox_inches = 'tight', pad_inches = 0.0)
if show_plot:
plt.show()
plt.close()
def make_deliveries_by_district(self, folder_name, figure_name, plot_name, scenario_name, show_plot):
if plot_name == 'annual':
name_bridge = {}
name_bridge['semitropic'] = 'KER01'
name_bridge['westkern'] = 'KER02'
name_bridge['wheeler'] = 'KER03'
name_bridge['kerndelta'] = 'KER04'
name_bridge['arvin'] = 'KER05'
name_bridge['belridge'] = 'KER06'
name_bridge['losthills'] = 'KER07'
name_bridge['northkern'] = 'KER08'
name_bridge['northkernwb'] = 'KER08'
name_bridge['ID4'] = 'KER09'
name_bridge['sosanjoaquin'] = 'KER10'
name_bridge['berrenda'] = 'KER11'
name_bridge['buenavista'] = 'KER12'
name_bridge['cawelo'] = 'KER13'
name_bridge['rosedale'] = 'KER14'
name_bridge['shaffer'] = 'KER15'
name_bridge['henrymiller'] = 'KER16'
name_bridge['kwb'] = 'KER17'
name_bridge['b2800'] = 'KER17'
name_bridge['pioneer'] = 'KER17'
name_bridge['irvineranch'] = 'KER17'
name_bridge['kernriverbed'] = 'KER17'
name_bridge['poso'] = 'KER17'
name_bridge['stockdale'] = 'KER17'
name_bridge['delano'] = 'KeT01'
name_bridge['kerntulare'] = 'KeT02'
name_bridge['lowertule'] = 'TUL01'
name_bridge['tulare'] = 'TUL02'
name_bridge['lindmore'] = 'TUL03'
name_bridge['saucelito'] = 'TUL04'
name_bridge['porterville'] = 'TUL05'
name_bridge['lindsay'] = 'TUL06'
name_bridge['exeter'] = 'TUL07'
name_bridge['terra'] = 'TUL08'
name_bridge['teapot'] = 'TUL09'
name_bridge['bakersfield'] = 'BAK'
name_bridge['fresno'] = 'FRE'
name_bridge['southbay'] = 'SOB'
name_bridge['socal'] = 'SOC'
name_bridge['tehachapi'] = 'TEH'
name_bridge['tejon'] = 'TEJ'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'PIX'
name_bridge['chowchilla'] = 'CHW'
name_bridge['maderairr'] = 'MAD'
name_bridge['fresnoid'] = 'FSI'
name_bridge['westlands'] = 'WTL'
name_bridge['panoche'] = 'PAN'
name_bridge['sanluiswater'] = 'SLW'
name_bridge['delpuerto'] = 'DEL'
elif plot_name == 'monthly':
name_bridge = {}
name_bridge['semitropic'] = 'Semitropic Water Storage District'
name_bridge['westkern'] = 'West Kern Water District'
name_bridge['wheeler'] = 'Wheeler Ridge-Maricopa Water Storage District'
name_bridge['kerndelta'] = 'Kern Delta Water District'
name_bridge['arvin'] = 'Arvin-Edison Water Storage District'
name_bridge['belridge'] = 'Belridge Water Storage District'
name_bridge['losthills'] = 'Lost Hills Water District'
name_bridge['northkern'] = 'North Kern Water Storage District'
name_bridge['northkernwb'] = 'North Kern Water Storage District'
name_bridge['ID4'] = 'Urban'
name_bridge['sosanjoaquin'] = 'Southern San Joaquin Municipal Utility District'
name_bridge['berrenda'] = 'Berrenda Mesa Water District'
name_bridge['buenavista'] = 'Buena Vista Water Storage District'
name_bridge['cawelo'] = 'Cawelo Water District'
name_bridge['rosedale'] = 'Rosedale-Rio Bravo Water Storage District'
name_bridge['shaffer'] = 'Shafter-Wasco Irrigation District'
name_bridge['henrymiller'] = 'Henry Miller Water District'
name_bridge['kwb'] = 'Kern Water Bank Authority'
name_bridge['b2800'] = 'Kern Water Bank Authority'
name_bridge['pioneer'] = 'Kern Water Bank Authority'
name_bridge['irvineranch'] = 'Kern Water Bank Authority'
name_bridge['kernriverbed'] = 'Kern Water Bank Authority'
name_bridge['poso'] = 'Kern Water Bank Authority'
name_bridge['stockdale'] = 'Kern Water Bank Authority'
name_bridge['delano'] = 'Delano-Earlimart Irrigation District'
name_bridge['kerntulare'] = 'Kern-Tulare Water District'
name_bridge['lowertule'] = 'Lower Tule River Irrigation District'
name_bridge['tulare'] = 'Tulare Irrigation District'
name_bridge['lindmore'] = 'Lindmore Irrigation District'
name_bridge['saucelito'] = 'Saucelito Irrigation District'
name_bridge['porterville'] = 'Porterville Irrigation District'
name_bridge['lindsay'] = 'Lindsay-Strathmore Irrigation District'
name_bridge['exeter'] = 'Exeter Irrigation District'
name_bridge['terra'] = 'Terra Bella Irrigation District'
name_bridge['teapot'] = 'Tea Pot Dome Water District'
name_bridge['bakersfield'] = 'Urban'
name_bridge['fresno'] = 'Urban'
name_bridge['southbay'] = 'Urban'
name_bridge['socal'] = 'Urban'
name_bridge['tehachapi'] = 'Tehachapi - Cummings County Water District'
name_bridge['tejon'] = 'Tejon-Castac Water District'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'Pixley Irrigation District'
name_bridge['chowchilla'] = 'Chowchilla Water District'
name_bridge['maderairr'] = 'Madera Irrigation District'
name_bridge['fresnoid'] = 'Fresno Irrigation District'
name_bridge['westlands'] = 'Westlands Water District'
name_bridge['panoche'] = 'Panoche Water District'
name_bridge['sanluiswater'] = 'San Luis Water District'
name_bridge['delpuerto'] = 'Del Puerto Water District'
name_bridge['alta'] = 'Alta Irrigation District'
name_bridge['consolidated'] = 'Consolidated Irrigation District'
location_type = plot_name
self.total_irrigation = {}
self.total_recharge = {}
self.total_pumping = {}
self.total_flood_purchases = {}
self.total_recovery_rebate = {}
self.total_recharge_sales = {}
self.total_recharge_purchases = {}
self.total_recovery_sales = {}
self.total_recovery_purchases = {}
for bank in self.bank_list:
self.total_irrigation[bank.name] = np.zeros(self.number_years*12)
self.total_recharge[bank.name] = np.zeros(self.number_years*12)
self.total_pumping[bank.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[bank.name] = np.zeros(self.number_years*12)
for district in self.district_list:
self.total_irrigation[district.name] = np.zeros(self.number_years*12)
self.total_recharge[district.name] = np.zeros(self.number_years*12)
self.total_pumping[district.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[district.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[district.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[district.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[district.name] = np.zeros(self.number_years*12)
date_list_labels = []
for year_num in range(self.starting_year, 2017):
start_month = 1
end_month = 13
if year_num == self.starting_year:
start_month = 10
if year_num == 2016:
end_month = 10
for month_num in range(start_month, end_month):
date_string_start = str(year_num) + '-' + str(month_num) + '-01'
date_list_labels.append(date_string_start)
for district in self.district_list:
inleiu_name = district.name + '_inleiu_irrigation'
inleiu_recharge_name = district.name + '_inleiu_recharge'
direct_recover_name = district.name + '_recover_banked'
indirect_surface_name = district.name + '_exchanged_SW'
indirect_ground_name = district.name + '_exchanged_GW'
inleiu_pumping_name = district.name + '_leiupumping'
pumping_name = district.name + '_pumping'
recharge_name = district.name + '_' + district.key + '_recharged'
numdays_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]  # days in each calendar month, Jan-Dec
for year_num in range(0, self.number_years+1):
year_str = str(year_num + self.starting_year)
start_month = 1
end_month = 13
if year_num == 0:
start_month = 10
if year_num == self.number_years:
end_month = 10
for month_num in range(start_month, end_month):
if month_num == 1:
month_num_prev = '12'
year_str_prior = str(year_num + self.starting_year - 1)
end_day_prior = str(numdays_month[11])
else:
month_num_prev = str(month_num - 1)
year_str_prior = str(year_num + self.starting_year)
end_day_prior = str(numdays_month[month_num-2])
date_string_current = year_str + '-' + str(month_num) + '-' + str(numdays_month[month_num-1])
date_string_prior = year_str_prior + '-' + month_num_prev + '-' + end_day_prior
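# The model output timeseries appear to be cumulative within each water year (restarting
# in October), so the monthly totals below are taken as the end-of-month value minus the
# end-of-prior-month value, with October read directly.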
###GW/SW exchanges,
if indirect_surface_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_surface_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
###GW/SW exchanges,
if indirect_ground_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_ground_name].values[0]
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##In leiu deliveries for irrigation
if inleiu_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_name].values[0]
#attribute in-lieu deliveries for irrigation to the district operating the bank
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_delivery
if inleiu_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_recharge_name].values[0]
#attribute in-lieu recharge to the district operating the bank
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_recharge
#GW recovery
if direct_recover_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), direct_recover_name].values[0]
#if classifying by physical location, attribute to the district receiving water (as irrigation)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##Pumping for in-lieu recovery
if inleiu_pumping_name in self.values:
if month_num == 10:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0]
else:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_pumping_name].values[0]
#if classifying by physical location, attribute to the district operating the bank
self.total_pumping[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_sales[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_rebate[district.name][year_num*12 + month_num - 10] += total_leiupumping
#Recharge, in- and out- of district
if recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_name].values[0]
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.bank_list:
bank_recharge_name = district.name + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge[bank_name.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.leiu_list:
bank_recharge_name = district.name + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
#Contract deliveries
for contract in self.contract_list:
delivery_name = district.name + '_' + contract.name + '_delivery'
recharge_contract_name = district.name + '_' + contract.name + '_recharged'
flood_irr_name = district.name + '_' + contract.name + '_flood_irrigation'
flood_name = district.name + '_' + contract.name + '_flood'
###All deliveries made from a district's contract
if delivery_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), delivery_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
##Deliveries made for recharge are subtracted from the overall contract deliveries
if recharge_contract_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_contract_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] -= total_recharge
#flood water used for irrigation - always attribute as irrigation
if flood_irr_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_irr_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
if flood_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_name].values[0]
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##Pumping (daily values aggregated by year)
if pumping_name in self.values:
annual_pumping = 0.0
for x in range(0, len(self.index)):
monthly_index = (self.year[x] - self.starting_year)*12 + self.month[x] - 10
if self.day_month[x] == 1:
self.total_pumping[district.name][monthly_index] += annual_pumping
annual_pumping = 0.0
else:
annual_pumping += self.values.loc[self.index[x], pumping_name]
self.total_pumping[district.name][-1] += annual_pumping
#Get values for any private entities within the district
for private_name in self.private_list:
private = private_name.name
if district.key in self.private_districts[private]:
inleiu_name = private + '_' + district.key + '_inleiu_irrigation'
inleiu_recharge_name = private + '_' + district.key + '_inleiu_irrigation'
direct_recover_name = private + '_' + district.key + '_recover_banked'
indirect_surface_name = private + '_' + district.key + '_exchanged_SW'
indirect_ground_name = private + '_' + district.key + '_exchanged_GW'
inleiu_pumping_name = private + '_' + district.key + '_leiupumping'
pumping_name = private + '_' + district.key + '_pumping'
recharge_name = private + '_' + district.key + '_' + district.key + '_recharged'
for year_num in range(0, self.number_years - 1):
year_str = str(year_num + self.starting_year + 1)
start_month = 1
end_month = 13
if year_num == 0:
start_month = 10
if year_num == self.number_years - 1:
end_month = 10
for month_num in range(start_month, end_month):
if month_num == 1:
month_num_prev = '12'
year_str_prior = str(year_num + self.starting_year)
end_day_prior = str(numdays_month[11])
else:
month_num_prev = str(month_num - 1)
year_str_prior = str(year_num + self.starting_year + 1)
end_day_prior = str(numdays_month[month_num-2])
date_string_current = year_str + '-' + str(month_num) + '-' + str(numdays_month[month_num-1])
date_string_prior = year_str_prior + '-' + month_num_prev + '-' + end_day_prior
###GW/SW exchanges,
if indirect_surface_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_surface_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
###GW/SW exchanges,
if indirect_ground_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_ground_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##In leiu deliveries for irrigation
if inleiu_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_name].values[0]
#attribute in-lieu deliveries for irrigation to the district operating the bank
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_delivery
if inleiu_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_recharge_name].values[0]
#attribute in-lieu recharge to the district operating the bank
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_recharge
#GW recovery
if direct_recover_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0] - self.values.loc[
|
pd.DatetimeIndex([date_string_prior])
|
pandas.DatetimeIndex
|
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd
from matplotlib import cm
# import matplotlib
from adjustText import adjust_text
import re
import matplotlib.patheffects as pe
import scipy.stats as st
# deprecated
def plot_hist_exp_1(results, household_size, pool_size, prevalence):
fnr_indep = results[:, 0]
fnr_correlated = results[:, 1]
eff_indep = results[:, 2]
eff_correlated = results[:, 3]
test_indep = results[:, 4]
test_correlated = results[:, 5]
fig, [ax0, ax1] = plt.subplots(1,2, figsize=(10,6))
ax0.hist([fnr_indep, fnr_correlated], label=['naive pooling', 'correlated pooling'], color=['mediumaquamarine', 'mediumpurple'])
ax0.legend(loc='upper right')
ax0.set_xlabel('$FNR$')
ax0.set_ylabel('Frequency')
ax0.set_title('FNR values under naive and\ncorrelated pooling')
ax1.hist(fnr_indep - fnr_correlated, color='lightskyblue', rwidth=0.7)
ax1.set_title('difference in FNR values')
ax1.set_ylabel('Frequency')
plt.tight_layout()
plt.savefig('../figs/experiment_1/fnr_diff_pool-size={}_household-size={}_prevalence={}.pdf'.format(pool_size, household_size, prevalence))
plt.close()
fig, [ax0, ax1] = plt.subplots(1,2, figsize=(10,6))
ax0.hist([test_indep, test_correlated], label=['naive pooling', 'correlated pooling'], color=['mediumaquamarine', 'mediumpurple'])
ax0.legend(loc='upper right')
ax0.set_xlabel('$\#$ followup tests per positive identified')
ax0.set_ylabel('Frequency')
ax0.set_title('$\#$ followup tests per positive identified under\nnaive and correlated pooling')
ax1.hist(test_indep - test_correlated, color='lightskyblue', rwidth=0.7)
ax1.set_title('difference in $\#$ followup tests per positive identified')
ax1.set_ylabel('Frequency')
plt.tight_layout()
plt.savefig('../figs/experiment_1/relative_test_consumption_pool-size={}_household-size={}_prevalence={}.pdf'.format(pool_size, household_size, prevalence))
plt.close()
return
# deprecated
def generate_heatmap_plots_for_exp_1():
dir = '../results/experiment_1'
aggregate_results = {}
for filename in os.listdir(dir):
if filename == ".DS_Store" or not filename.endswith('.data'):
continue
parts = re.split('=|[.](?!\d)|_', filename)
print(parts)
household_size = int(parts[4])
prevalence = float(parts[6])
filedir = os.path.join(dir, filename)
with open(filedir) as f:
results = np.loadtxt(f)
avgs = np.mean(results, axis=0)
aggregate_results[(prevalence, household_size)] = avgs
df_agg = pd.DataFrame.from_dict(aggregate_results, orient='index', columns=['indep fnr', 'corr fnr', 'indep eff', 'corr eff', 'indep test', 'corr test'])
df_agg.index = pd.MultiIndex.from_tuples(df_agg.index, names=['prevalence', 'household size'])
df_agg = df_agg.reset_index()
df_agg = df_agg.sort_values(by=['prevalence', 'household size'])
df_agg['indep sn'] = 1 - df_agg['indep fnr']
df_agg['corr sn'] = 1 - df_agg['corr fnr']
df_agg['sn diff'] = df_agg['corr sn'] - df_agg['indep sn']
df_agg['rel test consumption'] = df_agg['corr test'] / df_agg['indep test']
fig, [ax0, ax1] = plt.subplots(1, 2, figsize=(8, 4))
table_sn = pd.pivot_table(df_agg, values='sn diff', index=['household size'], columns=['prevalence'])
print(table_sn)
heatmap = ax0.pcolor(table_sn, cmap=cm.BuPu)
ax0.set_aspect('equal')
ax0.set_yticks(np.arange(0.5, len(table_sn.index), 1))
ax0.set_yticklabels(table_sn.index)
ax0.set_xticks(np.arange(0.5, len(table_sn.columns), 1))
ax0.set_xticklabels(table_sn.columns)
ax0.set_xlabel('prevalence')
ax0.set_ylabel('household size')
ax0.set_title('Difference in FNR')
fig.colorbar(heatmap, ax=ax0, orientation="horizontal")
table_test = pd.pivot_table(df_agg, values='rel test consumption', index=['household size'], columns=['prevalence'])
heatmap = ax1.pcolor(table_test, cmap=cm.YlGn_r)
ax1.set_aspect('equal')
ax1.set_yticks(np.arange(0.5, len(table_test.index), 1))
ax1.set_yticklabels(table_test.index)
ax1.set_xticks(np.arange(0.5, len(table_test.columns), 1))
ax1.set_xticklabels(table_test.columns)
ax1.set_xlabel('prevalence')
ax1.set_ylabel('household size')
ax1.set_title('Relative test consumption')
fig.colorbar(heatmap, ax=ax1, orientation="horizontal")
fig.tight_layout()
fig.savefig('../figs/experiment_1/tmp_heapmap_for_fnr_and_test.pdf', bbox_inches='tight')
plt.clf()
return
def plot_hist_exp_2(results, param, val=None):
fnr_indep = results[:, 0]
fnr_correlated = results[:, 1]
eff_indep = results[:, 2]
eff_correlated = results[:, 3]
# print Sn (naive), Sn (correlated), Eff (naive), Eff (correlated)
num_iters = results.shape[0]
pool_size = 6.
f = open(f"../results/experiment_2/nominal_scenario_results_{num_iters}.txt", "w")
f.write(f"sensitivity: {1 - np.mean(fnr_indep):.1%} (naive), {1 - np.mean(fnr_correlated):.1%} (correlated);\
efficiency: {np.mean(eff_indep):.2f} (naive), {np.mean(eff_correlated):.2f} (correlated)\n")
f.write(f"standard error: {np.std(fnr_indep)/np.sqrt(num_iters)}, {np.std(fnr_correlated)/np.sqrt(num_iters)}, \
{np.std(eff_indep)/np.sqrt(num_iters)}, {np.std(eff_correlated)/np.sqrt(num_iters)}\n")
f.write(f"improvement: {(1 - np.mean(fnr_correlated)) / (1 - np.mean(fnr_indep))-1:.2%} (sensitivity); \
{np.mean(eff_correlated) / np.mean(eff_indep)-1:.2%} (efficiency)\n")
frac_sample_indiv_test_naive = 1 / np.mean(eff_indep) - 1 / pool_size
frac_sample_indiv_test_correlated = 1 / np.mean(eff_correlated) - 1 / pool_size
frac_positive_sample_indiv_test_naive = 0.01 * (1 - np.mean(fnr_indep)) / 0.95
frac_positive_sample_indiv_test_correlated = 0.01 * (1 - np.mean(fnr_correlated)) / 0.95
frac_negative_sample_indiv_test_naive = frac_sample_indiv_test_naive - frac_positive_sample_indiv_test_naive
frac_negative_sample_indiv_test_correlated = frac_sample_indiv_test_correlated - frac_positive_sample_indiv_test_correlated
f.write(f"fraction of samples tested individually: {frac_sample_indiv_test_naive:.2%} (naive), {frac_sample_indiv_test_correlated:.2%} (correlated)\n")
f.write(f"fraction of positive samples tested individually: {frac_positive_sample_indiv_test_naive:.2%} (naive), {frac_positive_sample_indiv_test_correlated:.2%} (correlated)\n")
f.write(f"fraction of negative samples tested individually: {frac_negative_sample_indiv_test_naive:.2%} (naive), {frac_negative_sample_indiv_test_correlated:.2%} (correlated)\n")
f.write(f"implied FPR: {frac_negative_sample_indiv_test_naive * 0.0001} (naive), {frac_negative_sample_indiv_test_correlated * 0.0001} (correlated)\n")
f.close()
ax1 = plt.subplot(111)
n, bins, patches = ax1.hist(results[:, :2], label=['naive', 'correlated'], color=['mediumaquamarine', 'mediumpurple'])
hatches = [".", '//']
for patch_set, hatch in zip(patches, hatches):
for patch in patch_set.patches:
patch.set_hatch(hatch)
patch.set_edgecolor('k')
plt.legend(loc='upper right')
plt.xlabel('False negative rate')
plt.ylabel('Frequency')
if param == 'nominal':
plt.title('Histogram of FNR values under {} scenario'.format(param))
plt.savefig('../figs/experiment_2/fnr_{}_scenario.pdf'.format(param))
else:
plt.title('Histogram of FNR values for one-stage group testing \n under {} = {}'.format(param, val))
plt.savefig('../figs/experiment_2/fnr_{}={}.pdf'.format(param, val), dpi=600)
plt.close()
ax2 = plt.subplot(111)
n, bins, patches = ax2.hist(results[:, 2:], label=['naive', 'correlated'], color=['mediumaquamarine', 'mediumpurple'])
hatches = ["..", '//']
for patch_set, hatch in zip(patches, hatches):
for patch in patch_set.patches:
patch.set_hatch(hatch)
plt.legend(loc='upper right')
plt.xlabel('Efficiency')
plt.ylabel('Frequency')
if param == 'nominal':
plt.title('Histogram of testing efficiency under {} scenario'.format(param))
plt.savefig('../figs/experiment_2/eff_{}_scenario.pdf'.format(param))
else:
plt.title('Histogram of testing efficiency for one-stage group testing \n under {} = {}'.format(param, val))
plt.savefig('../figs/experiment_2/eff_{}={}.pdf'.format(param, val), dpi=600)
plt.close()
return
def generate_sensitivity_plots(param):
dir = '../results/experiment_2/sensitivity_analysis_2000/'
fnr_indep = []
fnr_corr = []
eff_indep = []
eff_corr = []
index = []
for filename in os.listdir(dir):
if param in filename:
val = filename.split(param, 1)[1][:-5]
val = val.split('_', 1)[0][1:]
val = int(val) if param == 'pool size' else val if param == 'household dist' else float(val)
filedir = os.path.join(dir, filename)
with open(filedir) as f:
results = np.loadtxt(f)
avgs = np.mean(results, axis=0)
fnr_indep.append(avgs[0])
fnr_corr.append(avgs[1])
eff_indep.append(avgs[2])
eff_corr.append(avgs[3])
index.append(val)
df = pd.DataFrame({'FNR (naive)': fnr_indep, 'FNR (correlated)': fnr_corr, 'efficiency (naive)': eff_indep,'efficiency (correlated)': eff_corr}, index=index)
df = df.sort_index()
df = df.rename_axis(param).reset_index()
df['sensitivity (naive)'] = 1 - df['FNR (naive)']
df['sensitivity (correlated)'] = 1 - df['FNR (correlated)']
fig, ax = plt.subplots()
ax2 = ax.twinx()
#fnrs = df[['FNR (naive)', 'FNR (correlated)']].plot.bar(ax=ax, legend=False, color=['mediumaquamarine', 'mediumpurple'], alpha=1)
sns = df[['sensitivity (naive)', 'sensitivity (correlated)']].plot.bar(ax=ax, legend=False, color=['mediumaquamarine', 'mediumpurple'], alpha=1)
l = df.shape[0]
bars = ax.patches
hatches = [".."] * l + ['//'] * l
for bar, hatch in zip(bars, hatches):
bar.set_hatch(hatch)
df[['efficiency (naive)']].plot.line(ax=ax2, legend=False, marker='^', markeredgecolor='w', markeredgewidth=0, \
color=['mediumaquamarine'], path_effects=[pe.Stroke(linewidth=3, foreground='w'), pe.Normal()])
df[['efficiency (correlated)']].plot.line(ax=ax2, legend=False, marker='o', markeredgecolor='w', markeredgewidth=0, \
color=['mediumpurple'], path_effects=[pe.Stroke(linewidth=3, foreground='w'), pe.Normal()])
ax.set_xticklabels(df[param])
ax.set_ylabel('sensitivity')
ax.set_ylim(0.6)
ax2.set_ylabel('efficiency')
ax2.set_ylim(1) if param in ['prevalence', 'pool size'] else ax2.set_ylim(4.5)
if param == 'FNR':
ax.set_xlabel('population-average individual test FNR')
elif param == 'household dist':
ax.set_xlabel('household size distribution')
else:
ax.set_xlabel(param)
h, l = ax.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
ax.legend(h + h2, l + l2, loc='lower left', bbox_to_anchor=(0, 1.02, 0.6, 1.02), ncol=2)
fig.savefig('../figs/experiment_2/sensitivity_plots/sensitivity_for_{}_new.pdf'.format(param), bbox_inches='tight', dpi=600)
plt.clf()
return
def generate_pareto_fontier_plots():
dir = '../results/experiment_2/pareto_analysis_2000/'
aggregate_results = {}
for filename in os.listdir(dir):
if filename == ".DS_Store":
continue
parts = re.split('=|[.](?!\d)|_', filename)
prev = float(parts[2])
pool_size = int(parts[4])
filedir = os.path.join(dir, filename)
with open(filedir) as f:
results = np.loadtxt(f)
avgs = np.mean(results, axis=0)
aggregate_results[(prev, pool_size)] = avgs
df_agg =
|
pd.DataFrame.from_dict(aggregate_results, orient='index', columns=['fnr (naive)', 'fnr (correlated)', 'eff (naive)', 'eff (correlated)'])
|
pandas.DataFrame.from_dict
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from statistics import median
import os, json, random
def calculateTimeFromMidnight(actual_datetime):
midnight = actual_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
timesincemidnight = (actual_datetime - midnight).total_seconds()
return timesincemidnight
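# Illustrative sketch (not from the original source), showing the seconds-since-midnight value for a concrete timestamp:
#   from datetime import datetime
#   calculateTimeFromMidnight(datetime(2021, 1, 1, 1, 30))   # -> 5400.0 (1h30m after midnight)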
def createActivityFeatures(line, starttime, lastevtime, caseID, current_activity_end_date):
activityTimestamp = line[1]
activity = []
activity.append(caseID)
for feature in line[2:]:
activity.append(feature)
#add features: time from trace start, time from last_startdate_event, time from midnight, weekday
activity.append((activityTimestamp - starttime).total_seconds())
activity.append((activityTimestamp - lastevtime).total_seconds())
activity.append(calculateTimeFromMidnight(activityTimestamp))
activity.append(activityTimestamp.weekday())
# if there is also end_date add features time from last_enddate_event and event_duration
if current_activity_end_date is not None:
activity.append((current_activity_end_date - activityTimestamp).total_seconds())
# add timestamp end or start to calculate remaining time later
activity.append(current_activity_end_date)
else:
activity.append(activityTimestamp)
return activity
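# For reference, the row built above is laid out as:
#   [caseID, <original event attributes from line[2:]>,
#    seconds since trace start, seconds since previous event start,
#    seconds since midnight, weekday (0 = Monday),
#    event duration in seconds (only when an end date exists),
#    end-or-start timestamp kept last so remaining time can be computed later]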
def move_essential_columns(df, case_id_position, start_date_position):
columns = df.columns.to_list()
# move case_id column and start_date column to always know their position
case = columns[case_id_position]
start = columns[start_date_position]
columns.pop(columns.index(case))
columns.pop(columns.index(start))
df = df[[case, start] + columns]
return df
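# Illustrative sketch (not from the original source): with columns
# ['activity', 'case', 'start_ts', 'resource'], case_id_position=1 and
# start_date_position=2, the frame is reordered to
# ['case', 'start_ts', 'activity', 'resource'].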
def one_hot_encoding(df):
#case id not encoded
for column in df.columns[1:]:
# if the column is not numeric, one-hot encode it
if not np.issubdtype(df[column], np.number):
# Possible change: encode the date-type columns, or compute the difference from 1970 in seconds and then normalize
# One-hot encoding - any categorical NaNs will be ignored
one_hot = pd.get_dummies(df[column], prefix=column, prefix_sep='=')
print("Encoded column:{} - Different keys: {}".format(column, one_hot.shape[1]))
# Drop column as it is now encoded
df = df.drop(column, axis=1)
# Join the encoded df
df = df.join(one_hot)
print("Categorical columns encoded")
return df
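# Illustrative sketch (not from the original source): a categorical column
# 'resource' with values ['A', 'B'] is replaced by indicator columns
# 'resource=A' and 'resource=B' (prefix_sep='='); numeric columns and the
# case id column (skipped by df.columns[1:]) are left untouched.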
def convert_strings_to_datetime(df, date_format):
# convert string columns that contain datetime to datetime
for column in df.columns:
try:
#if a number do nothing
if np.issubdtype(df[column], np.number):
continue
df[column] = pd.to_datetime(df[column], format=date_format)
# exception means it is really a string
except (ValueError, TypeError, OverflowError):
pass
return df
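# Note: columns whose strings do not match date_format make pd.to_datetime raise,
# and the except/pass above keeps them as strings, so only genuinely date-like
# columns end up converted to datetime.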
def find_case_finish_time(trace, num_activities):
# we find the max finishtime for the actual case
for i in range(num_activities):
if i == 0:
finishtime = trace[-i-1][-1]
else:
if trace[-i-1][-1] > finishtime:
finishtime = trace[-i-1][-1]
return finishtime
def calculate_remaining_time_for_actual_case(traces, num_activities):
finishtime = find_case_finish_time(traces, num_activities)
for i in range(num_activities):
# calculate remaining time to finish the case for every activity in the actual case
traces[-(i + 1)][-1] = (finishtime - traces[-(i + 1)][-1]).total_seconds()
return traces
def fill_missing_end_dates(df, start_date_position, end_date_position):
df[df.columns[end_date_position]] = df.apply(lambda row: row[start_date_position]
if row[end_date_position] == 0 else row[end_date_position], axis=1)
return df
def convert_datetime_columns_to_seconds(df):
for column in df.columns:
try:
if np.issubdtype(df[column], np.number):
continue
df[column] = pd.to_datetime(df[column])
df[column] = (df[column] -
|
pd.to_datetime('1970-01-01 00:00:00')
|
pandas.to_datetime
|
import os
import pandas as pd
import tweepy
import sys
from danlp.download import DATASETS, download_dataset, DEFAULT_CACHE_DIR, _unzip_process_func
from danlp.utils import extract_single_file_from_zip
class EuroparlSentiment1:
"""
Class for loading the Europarl Sentiment dataset.
:param str cache_dir: the directory for storing cached models
"""
def __init__(self, cache_dir: str = DEFAULT_CACHE_DIR):
self.dataset_name = 'europarl.sentiment1'
self.file_extension = DATASETS[self.dataset_name]['file_extension']
self.dataset_dir = download_dataset(self.dataset_name, cache_dir=cache_dir)
self.file_path = os.path.join(self.dataset_dir, self.dataset_name + self.file_extension)
def load_with_pandas(self):
"""
Loads the dataset in a dataframe
and drop duplicates and nan values
:return: a dataframe
"""
df = pd.read_csv(self.file_path, sep=',', index_col=0, encoding='utf-8')
df = df[['valence', 'text']].dropna()
return df.drop_duplicates()
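# Illustrative usage sketch (not from the original source); downloads the dataset
# on first use and yields a frame with 'valence' and 'text' columns:
#   ds = EuroparlSentiment1()
#   df = ds.load_with_pandas()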
class EuroparlSentiment2:
"""
Class for loading the Europarl Sentiment dataset.
:param str cache_dir: the directory for storing cached models
"""
def __init__(self, cache_dir: str = DEFAULT_CACHE_DIR):
self.dataset_name = 'europarl.sentiment2'
self.dataset_dir = download_dataset(self.dataset_name, cache_dir=cache_dir, process_func=_unzip_process_func)
self.file_path = os.path.join(cache_dir, self.dataset_name + '.csv')
def load_with_pandas(self):
"""
Loads the dataset as a dataframe
:return: a dataframe
"""
return pd.read_csv(self.file_path, sep=',', encoding='utf-8')
class LccSentiment:
"""
Class for loading the LCC Sentiment dataset.
:param str cache_dir: the directory for storing cached models
"""
def __init__(self, cache_dir: str = DEFAULT_CACHE_DIR):
self.dataset_name1 = 'lcc1.sentiment'
self.file_extension1 = DATASETS[self.dataset_name1]['file_extension']
self.dataset_dir1 = download_dataset(self.dataset_name1, cache_dir=cache_dir)
self.file_path1 = os.path.join(self.dataset_dir1, self.dataset_name1 + self.file_extension1)
self.dataset_name2 = 'lcc2.sentiment'
self.file_extension2 = DATASETS[self.dataset_name2]['file_extension']
self.dataset_dir2 = download_dataset(self.dataset_name2, cache_dir=cache_dir)
self.file_path2 = os.path.join(self.dataset_dir2, self.dataset_name2 + self.file_extension2)
def load_with_pandas(self):
"""
Loads the dataset in a dataframe,
combines and drops duplicates and nan values
:return: a dataframe
"""
df1 = pd.read_csv(self.file_path1, sep=',', encoding='utf-8')
df2 = pd.read_csv(self.file_path2, sep=',', encoding='utf-8')
df = df1.append(df2, sort=False)
df = df[['valence', 'text']].dropna()
return df
class TwitterSent:
"""
Class for loading the Twitter Sentiment dataset.
:param str cache_dir: the directory for storing cached models
"""
def __init__(self, cache_dir: str = DEFAULT_CACHE_DIR):
self.dataset_name = 'twitter.sentiment'
self.dataset_dir = download_dataset(self.dataset_name, cache_dir=cache_dir, process_func=_twitter_data_process_func)
self.file_path = os.path.join(cache_dir, self.dataset_name + '.csv')
def load_with_pandas(self):
"""
Loads the dataset in a dataframe.
:return: a dataframe of the test set and a dataframe of the train set
"""
df=pd.read_csv(self.file_path, sep=',', encoding='utf-8')
return df[df['part'] == 'test'].drop(columns=['part']), df[df['part'] == 'train'].drop(columns=['part'])
class AngryTweets:
"""
Class for loading the AngryTweets Sentiment dataset.
:param str cache_dir: the directory for storing cached models
"""
def __init__(self, cache_dir: str = DEFAULT_CACHE_DIR):
self.dataset_name = 'angrytweets.sentiment'
self.dataset_dir = download_dataset(self.dataset_name, cache_dir=cache_dir, process_func=_twitter_data_process_func)
self.file_path = os.path.join(cache_dir, self.dataset_name + '.csv')
def load_with_pandas(self):
"""
Loads the dataset in a dataframe.
:return: a dataframe
"""
return pd.read_csv(self.file_path, sep=',', encoding='utf-8')
def _lookup_tweets(tweet_ids, api):
import tweepy
full_tweets = []
tweet_count = len(tweet_ids)
try:
for i in range(int(tweet_count/100)+1):
# Catch the last group if it is less than 100 tweets
end_loc = min((i + 1) * 100, tweet_count)
full_tweets.extend(
api.statuses_lookup(id_=tweet_ids[i * 100:end_loc], tweet_mode='extended', trim_user=True)
)
return full_tweets
except tweepy.TweepError:
print("Failed fetching tweets")
def _twitter_data_process_func(tmp_file_path: str, meta_info: dict,
cache_dir: str = DEFAULT_CACHE_DIR,
clean_up_raw_data: bool = True,
verbose: bool = True):
from zipfile import ZipFile
twitter_api = _construct_twitter_api_connection()
model_name = meta_info['name']
full_path = os.path.join(cache_dir, model_name) + meta_info['file_extension']
with ZipFile(tmp_file_path, 'r') as zip_file: # Extract files to cache_dir
file_list = zip_file.namelist()
extract_single_file_from_zip(cache_dir, file_list[0], full_path, zip_file)
file_path = os.path.join(cache_dir, model_name + '.csv')
df =
|
pd.read_csv(file_path)
|
pandas.read_csv
|
import pandas as pd
from tqdm import tqdm
from src.configs.variables_const import VariablesConsts
from src.evaluation.metrics import Metrics
class EvaluationMethod:
def __init__(self, product_ids: dict):
self.product_ids = product_ids
# TODO: Improve performance here
def _calculate_distances(self, data_dict: dict, vector_space_to_search, evaluate_column: str):
distances_df =
|
pd.DataFrame(columns=[evaluate_column, VariablesConsts.PRODUCT_ID, VariablesConsts.DISTANCE])
|
pandas.DataFrame
|
import pandas as pd
import os
from .objects import ECause, EState, trade_from_dict
from .enums import *
from decimal import ROUND_DOWN, Decimal
import logging
from logging.handlers import TimedRotatingFileHandler
import time
from .safe_operators import *
def calculate_fee(amount, fee, digit=8):
return round(safe_multiply(amount,fee), digit)
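# Illustrative sketch (not from the original source), assuming safe_multiply
# performs an exact decimal-style multiplication:
#   calculate_fee(100, 0.001)   # -> 0.1 (fee on a 100-unit amount at 0.1%)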
def time_scale_to_minute(interval: str):
seconds_per_unit = {
"m": 1,
"h": 60,
"d": 24 * 60,
"w": 7 * 24 * 60,
}
try:
return int(interval[:-1]) * seconds_per_unit[interval[-1]]
except (ValueError, KeyError):
return None
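# Illustrative sketch (not from the original source):
#   time_scale_to_minute('15m')  # -> 15
#   time_scale_to_minute('4h')   # -> 240
#   time_scale_to_minute('1d')   # -> 1440
#   time_scale_to_minute('1w')   # -> 10080
#   time_scale_to_minute('5x')   # -> None (unknown unit)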
def round_step_downward(quantity, step_size):
# NOTE: if the step_size is '1.0', 1.2389196468651802 is rounded as 1.2 instead of 1.
# Thus if the step_size is an integer then we should approach properly
if step_size.is_integer(): step_size = int(step_size)
return float(Decimal(str(quantity)).quantize(Decimal(str(step_size)), rounding=ROUND_DOWN))
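# Illustrative sketch (not from the original source):
#   round_step_downward(1.2389196468651802, 0.001)  # -> 1.238 (rounded down to the step)
#   round_step_downward(1.2389196468651802, 1.0)    # -> 1.0 (integer step size handled via int())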
def truncate(num,n):
temp = str(num)
for x in range(len(temp)):
if temp[x] == '.':
try:
return float(temp[:x+n+1])
except ValueError:
return float(temp)
return float(temp)
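# Illustrative sketch (not from the original source):
#   truncate(1.23456, 2)   # -> 1.23 (keeps 2 digits after the decimal point, no rounding)
#   truncate(7, 2)         # -> 7.0  (no decimal point, value returned unchanged as float)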
def time_scale_to_second(interval: str):
return time_scale_to_minute(interval) * 60
def time_scale_to_milisecond(interval: str):
return time_scale_to_minute(interval) * 60 * 1000
def eval_total_capital(df_balance, live_trade_list, quote_currency, max_capital_use_ratio=1):
# Total capital: Free QC + LTO_enter
free_qc = df_balance.loc[quote_currency,'free']
# NOTE: In-trade balance is calculated only by considering the LTOs of the Ikarus
# Using only the df_balance requires live updates and evaluation of each asset in terms of QC
# NOTE: If state of a TO is:
# 'closed': then the amount that is used by this TO is reflected back to main capital (df_balance in backtest (by lto_update))
# : these LTOs need to be omitted
# 'enter_expire': then it is marked to be handled by the their strategy but the balance is still locked in LTO
in_trade_qc = eval_total_capital_in_lto(live_trade_list)
total_qc = safe_sum(free_qc, in_trade_qc)
return safe_multiply(total_qc, max_capital_use_ratio)
def eval_total_capital_in_lto(trade_list):
in_trade_qc = 0
for trade in trade_list:
# Omit the LTOs that are closed, because their use of amount returned to df_balance (by broker or by lto_update of test-engine)
if trade.status != EState.CLOSED:
# NOTE: It is assumed that each object may only have 1 TYPE of exit or enter
in_trade_qc = safe_sum(in_trade_qc, trade.enter.amount)
return in_trade_qc
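# Illustrative sketch (not from the original source): with two open trades whose
# enter.amount values are 50 and 25 (in quote currency) and one CLOSED trade,
# the in-trade capital evaluates to 75; closed trades are skipped because their
# funds are already reflected back into df_balance.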
async def get_closed_hto(config, mongocli, query={'result.cause':ECause.CLOSED}):
# TODO: NEXT: All statistics need to be changed a bit to integrate market orders
# Read Database to get hist-trades and dump to a DataFrame
hto_list = await mongocli.do_find('hist-trades',query)
hto_closed = []
for hto in hto_list:
trade = trade_from_dict(hto)
hto_dict = {
"_id": trade._id,
"strategy": trade.strategy,
"decision_time": trade.decision_time,
"enterTime": trade.result.enter.time,
"enterPrice": trade.enter.price,
"exitTime": trade.result.exit.time,
"exitPrice": trade.exit.price,
"sellPrice": trade.result.exit.price
}
# NOTE: trade.result.enter.price is not used because, for both Limit and Market enters, the planned price value is used directly
hto_closed.append(hto_dict)
df = pd.DataFrame(hto_closed)
return df
async def get_enter_expire_hto(mongocli, query={'result.cause':ECause.ENTER_EXP}):
# Read Database to get hist-trades and dump to a DataFrame
hto_list = await mongocli.do_find('hist-trades',query)
hto_ent_exp_list = []
for hto in hto_list:
# NOTE: HIGH: We don't know if the exit type is limit or not
trade = trade_from_dict(hto)
hto_dict = {
"_id": trade._id,
"strategy": trade.strategy,
"decision_time": trade.decision_time,
"enterExpire": trade.enter.expire, # TODO: TYPE_LIMIT | TODO: use result enter price
"enterPrice": trade.enter.price,
}
hto_ent_exp_list.append(hto_dict)
df = pd.DataFrame(hto_ent_exp_list)
return df
async def get_exit_expire_hto(config, mongocli, query={'result.cause':STAT_EXIT_EXP}):
# Read Database to get hist-trades and dump to a DataFrame
hto_list = await mongocli.do_find('hist-trades',query)
hto_closed_list = []
for hto in hto_list:
enter_type = config['strategy'][hto['strategy']]['enter']['type']
exit_type = config['strategy'][hto['strategy']]['exit']['type']
if exit_type == TYPE_OCO: plannedPriceName = 'limitPrice'
elif exit_type == TYPE_LIMIT: plannedPriceName = 'price'
# Initial (ideal) exit module is saved to update_history list
initial_exit_module = hto['update_history'][0]
# TODO: Rename the update_history with some proper name
hto_dict = {
"_id": hto['_id'],
"strategy": hto['strategy'],
"decision_time": hto['decision_time'],
"enterTime": hto['result']['enter']['time'],
"enterPrice": hto['enter'][enter_type]['price'], # Ideally enter limit orders are executed with the exact prices
"exitPrice": initial_exit_module[plannedPriceName],
"sellPrice": hto['result']['exit']['price'],
"exitExpire": initial_exit_module['expire']
}
hto_closed_list.append(hto_dict)
df =
|
pd.DataFrame(hto_closed_list)
|
pandas.DataFrame
|
# from daily_weather_obs_chart import read_weather_obs_csv
import os
import sys
import glob
import datetime
import dateutil
from datetime import timedelta
from pathlib import Path
import functools
import time
import csv
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
import logging
logger = logging.getLogger('weather_obs_f')
trace = True
def trace_print(level, first_string, *optional_strings):
""" central logging function """
global trace
global logger
trace_out = first_string + ''.join(optional_strings)
if (trace == True):
if (level == 1):
logger.debug(trace_out)
elif (level == 2):
logger.critical(trace_out)
elif (level == 3):
logger.warning(trace_out)
elif (level == 4):
logger.info(trace_out)
else:
print("level not known: ", trace_out, flush=True)
def read_weather_obs_csv(target_csv):
""" read csv and return dataframe """
# handle no_value_provided as NAN
try:
# ignore time zone for parse here - times local to observation
def date_utc(x): return dateutil.parser.parse(x[:20], ignoretz=True)
obs1 = pd.read_csv(target_csv, parse_dates=[9], date_parser=date_utc,
dtype = { 'wind_mph': 'float64'},
na_values = "<no_value_provided>")
except OSError:
trace_print( 4, "file not found: ", target_csv)
# return empty dataframe
obs1 =
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import pytest
from pandas.core.dtypes.generic import ABCIndex
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.integer import (
Int8Dtype,
UInt32Dtype,
)
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"])
def test_preserve_dtypes(op):
# TODO(#22346): preserve Int64 dtype
# for ops that enable (mean would actually work here
# but generally it is a float return value)
df = pd.DataFrame(
{
"A": ["a", "b", "b"],
"B": [1, None, 3],
"C": pd.array([1, None, 3], dtype="Int64"),
}
)
# op
result = getattr(df.C, op)()
if op in {"sum", "prod", "min", "max"}:
assert isinstance(result, np.int64)
else:
assert isinstance(result, int)
# groupby
result = getattr(df.groupby("A"), op)()
expected = pd.DataFrame(
{"B": np.array([1.0, 3.0]), "C": pd.array([1, 3], dtype="Int64")},
index=pd.Index(["a", "b"], name="A"),
)
tm.assert_frame_equal(result, expected)
def test_astype_nansafe():
# see gh-22343
arr = pd.array([np.nan, 1, 2], dtype="Int8")
msg = "cannot convert to 'uint32'-dtype NumPy array with missing values."
with pytest.raises(ValueError, match=msg):
arr.astype("uint32")
@pytest.mark.parametrize("dropna", [True, False])
def test_construct_index(all_data, dropna):
# ensure that we do not coerce to Float64Index, rather
# keep as Index
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Index(pd.array(other, dtype=all_data.dtype))
expected = pd.Index(other, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_astype_index(all_data, dropna):
# as an int/uint index to Index
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
dtype = all_data.dtype
idx = pd.Index._with_infer(np.array(other))
assert isinstance(idx, ABCIndex)
result = idx.astype(dtype)
expected = idx.astype(object).astype(dtype)
tm.assert_index_equal(result, expected)
def test_astype(all_data):
all_data = all_data[:10]
ints = all_data[~all_data.isna()]
mixed = all_data
dtype = Int8Dtype()
# coerce to same type - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype)
expected = pd.Series(ints)
tm.assert_series_equal(result, expected)
# coerce to same other - ints
s = pd.Series(ints)
result = s.astype(dtype)
expected = pd.Series(ints, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - ints
s = pd.Series(ints)
result = s.astype(all_data.dtype.numpy_dtype)
expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype))
tm.assert_series_equal(result, expected)
# coerce to same type - mixed
s = pd.Series(mixed)
result = s.astype(all_data.dtype)
expected = pd.Series(mixed)
tm.assert_series_equal(result, expected)
# coerce to same other - mixed
s = pd.Series(mixed)
result = s.astype(dtype)
expected = pd.Series(mixed, dtype=dtype)
tm.assert_series_equal(result, expected)
# coerce to same numpy_dtype - mixed
s = pd.Series(mixed)
msg = r"cannot convert to .*-dtype NumPy array with missing values.*"
with pytest.raises(ValueError, match=msg):
s.astype(all_data.dtype.numpy_dtype)
# coerce to object
s = pd.Series(mixed)
result = s.astype("object")
expected = pd.Series(np.asarray(mixed))
tm.assert_series_equal(result, expected)
def test_astype_copy():
arr = pd.array([1, 2, 3, None], dtype="Int64")
orig = pd.array([1, 2, 3, None], dtype="Int64")
# copy=True -> ensure both data and mask are actual copies
result = arr.astype("Int64", copy=True)
assert result is not arr
assert not tm.shares_memory(result, arr)
result[0] = 10
tm.assert_extension_array_equal(arr, orig)
result[0] = pd.NA
tm.assert_extension_array_equal(arr, orig)
# copy=False
result = arr.astype("Int64", copy=False)
assert result is arr
assert np.shares_memory(result._data, arr._data)
assert np.shares_memory(result._mask, arr._mask)
result[0] = 10
assert arr[0] == 10
result[0] = pd.NA
assert arr[0] is pd.NA
# astype to different dtype -> always needs a copy -> even with copy=False
# we need to ensure that also the mask is actually copied
arr = pd.array([1, 2, 3, None], dtype="Int64")
orig = pd.array([1, 2, 3, None], dtype="Int64")
result = arr.astype("Int32", copy=False)
assert not tm.shares_memory(result, arr)
result[0] = 10
tm.assert_extension_array_equal(arr, orig)
result[0] = pd.NA
tm.assert_extension_array_equal(arr, orig)
def test_astype_to_larger_numpy():
a = pd.array([1, 2], dtype="Int32")
result = a.astype("int64")
expected = np.array([1, 2], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
a = pd.array([1, 2], dtype="UInt32")
result = a.astype("uint64")
expected = np.array([1, 2], dtype="uint64")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"])
def test_astype_specific_casting(dtype):
s = pd.Series([1, 2, 3], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
s = pd.Series([1, 2, 3, None], dtype="Int64")
result = s.astype(dtype)
expected = pd.Series([1, 2, 3, None], dtype=dtype)
tm.assert_series_equal(result, expected)
def test_astype_floating():
arr = pd.array([1, 2, None], dtype="Int64")
result = arr.astype("Float64")
expected = pd.array([1.0, 2.0, None], dtype="Float64")
tm.assert_extension_array_equal(result, expected)
def test_astype_dt64():
# GH#32435
arr = pd.array([1, 2, 3, pd.NA]) * 10 ** 9
result = arr.astype("datetime64[ns]")
expected = np.array([1, 2, 3, "NaT"], dtype="M8[s]").astype("M8[ns]")
tm.assert_numpy_array_equal(result, expected)
def test_construct_cast_invalid(dtype):
msg = "cannot safely"
arr = [1.2, 2.3, 3.7]
with pytest.raises(TypeError, match=msg):
pd.array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
arr = [1.2, 2.3, 3.7, np.nan]
with pytest.raises(TypeError, match=msg):
pd.array(arr, dtype=dtype)
with pytest.raises(TypeError, match=msg):
pd.Series(arr).astype(dtype)
@pytest.mark.parametrize("in_series", [True, False])
def test_to_numpy_na_nan(in_series):
a = pd.array([0, 1, None], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype="float64", na_value=np.nan)
expected = np.array([0.0, 1.0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="int64", na_value=-1)
expected = np.array([0, 1, -1], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = a.to_numpy(dtype="bool", na_value=False)
expected = np.array([False, True, False], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("in_series", [True, False])
@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"])
def test_to_numpy_dtype(dtype, in_series):
a = pd.array([0, 1], dtype="Int64")
if in_series:
a = pd.Series(a)
result = a.to_numpy(dtype=dtype)
expected = np.array([0, 1], dtype=dtype)
|
tm.assert_numpy_array_equal(result, expected)
|
pandas._testing.assert_numpy_array_equal
|
# updated to take info from Excel and check the corresponding image for input verification.
import cv2
import cvlib as cv
import sys
import numpy as np
import pandas as pd
import glob
# All files ending with .txt with depth of 2 folder
candidate_list =
|
pd.read_excel("ViewEnrollmentData_CutOFFDate.xlsx")
|
pandas.read_excel
|
import pytest
from xarray import DataArray
import scipy.stats as st
from numpy import (
argmin,
array,
concatenate,
dot,
exp,
eye,
kron,
nan,
reshape,
sqrt,
zeros,
)
from numpy.random import RandomState
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame
from limix.qc import normalise_covariance
from limix.qtl import scan
from limix.stats import linear_kinship, multivariate_normal as mvn
def _test_qtl_scan_st(lik):
random = RandomState(0)
n = 30
ncovariates = 3
M = random.randn(n, ncovariates)
v0 = random.rand()
v1 = random.rand()
G = random.randn(n, 4)
K = random.randn(n, n + 1)
K = normalise_covariance(K @ K.T)
beta = random.randn(ncovariates)
alpha = random.randn(G.shape[1])
m = M @ beta + G @ alpha
y = mvn(random, m, v0 * K + v1 * eye(n))
idx = [[0, 1], 2, [3]]
if lik == "poisson":
y = random.poisson(exp(y))
elif lik == "bernoulli":
y = random.binomial(1, 1 / (1 + exp(-y)))
elif lik == "probit":
y = random.binomial(1, st.norm.cdf(y))
elif lik == "binomial":
ntrials = random.randint(0, 30, len(y))
y = random.binomial(ntrials, 1 / (1 + exp(-y)))
lik = (lik, ntrials)
r = scan(G, y, lik=lik, idx=idx, K=K, M=M, verbose=False)
str(r)
str(r.stats.head())
str(r.effsizes["h2"].head())
str(r.h0.trait)
str(r.h0.likelihood)
str(r.h0.lml)
str(r.h0.effsizes)
str(r.h0.variances)
def test_qtl_scan_st():
_test_qtl_scan_st("normal")
_test_qtl_scan_st("poisson")
_test_qtl_scan_st("bernoulli")
_test_qtl_scan_st("probit")
_test_qtl_scan_st("binomial")
def test_qtl_scan_three_hypotheses_mt():
random = RandomState(0)
n = 30
ntraits = 2
ncovariates = 3
A = random.randn(ntraits, ntraits)
A = A @ A.T
M = random.randn(n, ncovariates)
C0 = random.randn(ntraits, ntraits)
C0 = C0 @ C0.T
C1 = random.randn(ntraits, ntraits)
C1 = C1 @ C1.T
G = random.randn(n, 4)
A0 = random.randn(ntraits, 1)
A1 = random.randn(ntraits, 2)
A01 = concatenate((A0, A1), axis=1)
K = random.randn(n, n + 1)
K = normalise_covariance(K @ K.T)
beta = vec(random.randn(ntraits, ncovariates))
alpha = vec(random.randn(A01.shape[1], G.shape[1]))
m = kron(A, M) @ beta + kron(A01, G) @ alpha
Y = unvec(mvn(random, m, kron(C0, K) + kron(C1, eye(n))), (n, -1))
idx = [[0, 1], 2, [3]]
r = scan(G, Y, idx=idx, K=K, M=M, A=A, A0=A0, A1=A1, verbose=False)
str(r)
def test_qtl_scan_two_hypotheses_mt():
random = RandomState(0)
n = 30
ntraits = 2
ncovariates = 3
A = random.randn(ntraits, ntraits)
A = A @ A.T
M = random.randn(n, ncovariates)
C0 = random.randn(ntraits, ntraits)
C0 = C0 @ C0.T
C1 = random.randn(ntraits, ntraits)
C1 = C1 @ C1.T
G = random.randn(n, 4)
A0 = random.randn(ntraits, 1)
A1 = random.randn(ntraits, 2)
A01 = concatenate((A0, A1), axis=1)
K = random.randn(n, n + 1)
K = normalise_covariance(K @ K.T)
beta = vec(random.randn(ntraits, ncovariates))
alpha = vec(random.randn(A01.shape[1], G.shape[1]))
m = kron(A, M) @ beta + kron(A01, G) @ alpha
Y = unvec(mvn(random, m, kron(C0, K) + kron(C1, eye(n))), (n, -1))
idx = [[0, 1], 2, [3]]
r = scan(G, Y, idx=idx, K=K, M=M, A=A, A1=A1, verbose=False)
str(r)
def test_qtl_scan_two_hypotheses_mt_A0A1_none():
random = RandomState(0)
n = 30
ntraits = 2
ncovariates = 3
A = random.randn(ntraits, ntraits)
A = A @ A.T
M = random.randn(n, ncovariates)
C0 = random.randn(ntraits, ntraits)
C0 = C0 @ C0.T
C1 = random.randn(ntraits, ntraits)
C1 = C1 @ C1.T
G = random.randn(n, 4)
A1 = eye(ntraits)
K = random.randn(n, n + 1)
K = normalise_covariance(K @ K.T)
beta = vec(random.randn(ntraits, ncovariates))
alpha = vec(random.randn(A1.shape[1], G.shape[1]))
m = kron(A, M) @ beta + kron(A1, G) @ alpha
Y = unvec(mvn(random, m, kron(C0, K) + kron(C1, eye(n))), (n, -1))
Y = DataArray(Y, dims=["sample", "trait"], coords={"trait": ["WA", "Cx"]})
idx = [[0, 1], 2, [3]]
r = scan(G, Y, idx=idx, K=K, M=M, A=A, verbose=False)
df = r.effsizes["h2"]
df = df[df["test"] == 0]
assert_array_equal(df["trait"], ["WA"] * 3 + ["Cx"] * 3 + [None] * 4)
assert_array_equal(
df["env"], [None] * 6 + ["env1_WA", "env1_WA", "env1_Cx", "env1_Cx"]
)
str(r)
def test_qtl_scan_lmm():
random = RandomState(0)
nsamples = 50
G = random.randn(50, 100)
K = linear_kinship(G[:, 0:80], verbose=False)
y = dot(G, random.randn(100)) / sqrt(100) + 0.2 * random.randn(nsamples)
M = G[:, :5]
X = G[:, 68:70]
result = scan(X, y, lik="normal", K=K, M=M, verbose=False)
pv = result.stats["pv20"]
ix_best_snp = argmin(array(pv))
M = concatenate((M, X[:, [ix_best_snp]]), axis=1)
result = scan(X, y, "normal", K, M=M, verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv[ix_best_snp], 1.0, atol=1e-6)
def test_qtl_scan_lmm_nokinship():
random = RandomState(0)
nsamples = 50
G = random.randn(50, 100)
K = linear_kinship(G[:, 0:80], verbose=False)
y = dot(G, random.randn(100)) / sqrt(100) + 0.2 * random.randn(nsamples)
M = G[:, :5]
X = G[:, 68:70]
result = scan(X, y, "normal", K, M=M, verbose=False)
pv = result.stats["pv20"].values
assert_allclose(pv[:2], [8.159539103135342e-05, 0.10807353641893498], atol=1e-5)
def test_qtl_scan_lmm_repeat_samples_by_index():
random = RandomState(0)
nsamples = 30
samples = ["sample{}".format(i) for i in range(nsamples)]
G = random.randn(nsamples, 100)
G = DataFrame(data=G, index=samples)
K = linear_kinship(G.values[:, 0:80], verbose=False)
K = DataFrame(data=K, index=samples, columns=samples)
y0 = dot(G, random.randn(100)) / sqrt(100) + 0.2 * random.randn(nsamples)
y1 = dot(G, random.randn(100)) / sqrt(100) + 0.2 * random.randn(nsamples)
y = concatenate((y0, y1))
y = DataFrame(data=y, index=samples + samples)
M = G.values[:, :5]
X = G.values[:, 68:70]
M = DataFrame(data=M, index=samples)
X = DataFrame(data=X, index=samples)
result = scan(X, y, "normal", K, M=M, verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv.values[0], 0.9920306566395604, rtol=1e-6)
ix_best_snp = argmin(array(result.stats["pv20"]))
M = concatenate((M, X.loc[:, [ix_best_snp]]), axis=1)
M = DataFrame(data=M, index=samples)
result = scan(X, y, "normal", K, M=M, verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv[ix_best_snp], 1.0, rtol=1e-6)
assert_allclose(pv.values[0], 0.6684700834450028, rtol=1e-6)
X.sort_index(inplace=True, ascending=False)
X = DataFrame(X.values, index=X.index.values)
result = scan(X, y, "normal", K, M=M, verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv[ix_best_snp], 1.0, rtol=1e-6)
assert_allclose(pv.values[0], 0.6684700834450028, rtol=1e-6)
def test_qtl_scan_lmm_different_samples_order():
random = RandomState(0)
nsamples = 50
samples = ["sample{}".format(i) for i in range(nsamples)]
G = random.randn(nsamples, 100)
G = DataFrame(data=G, index=samples)
K = linear_kinship(G.values[:, 0:80], verbose=False)
K =
|
DataFrame(data=K, index=samples, columns=samples)
|
pandas.DataFrame
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas.compat as compat
from pandas.compat import range
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, NaT, Series, bdate_range, date_range, isna)
from pandas.core import ops
import pandas.core.nanops as nanops
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
from .common import TestData
class TestSeriesLogicalOps(object):
@pytest.mark.parametrize('bool_op', [operator.and_,
operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range('1/1/2000', periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_operators_bitwise(self):
# GH#9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
with pytest.raises(TypeError):
s_1111 & 'a'
with pytest.raises(TypeError):
s_1111 & ['a', 'b', 'c', 'd']
with pytest.raises(TypeError):
s_0123 & np.NaN
with pytest.raises(TypeError):
s_0123 & 3.14
with pytest.raises(TypeError):
s_0123 & [0.1, 4, 3.14, 2]
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 &
|
Series([0.1, 4, -3.14, 2])
|
pandas.Series
|
# -*- coding: utf-8 -*-
"""
Important Variable Selection with SNPs
Created on Fri Jan 31 16:31:01 2020
@author: <NAME>
"""
# Import the libraries
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.svm import SVR
from sklearn.linear_model import MultiTaskLassoCV, MultiTaskElasticNetCV, LassoCV, ElasticNetCV, MultiTaskElasticNet, MultiTaskLasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error
# Using chunk size to read rice data
def read_x_cont():
chunksize = 100
X_ct = pd.DataFrame()
for chunk in pd.read_csv("X_cont_ls_el.csv",low_memory=False, chunksize=chunksize, memory_map=True):
X_ct = pd.concat([X_ct, chunk])
return(X_ct)
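# Note: the loop above re-concatenates the accumulated frame on every chunk,
# which copies the data repeatedly; collecting the chunks in a list and calling
# pd.concat once at the end is usually faster, e.g. (illustrative sketch):
#   chunks = list(pd.read_csv("X_cont_ls_el.csv", low_memory=False,
#                             chunksize=100, memory_map=True))
#   X_ct = pd.concat(chunks)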
# Function of data preprocessing
def process_variable(X, y):
# Drop 'IID' columns
X = X.drop('IID', axis = 1)
# Split data to training and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=10)
# Convert from integer to float
X_train= X_train.astype(float, 32)
X_test = X_test.astype(float, 32)
# Apply the same scaling to both datasets
scaler = StandardScaler()
X_train_scl = scaler.fit_transform(X_train)
X_test_scl = scaler.transform(X_test) # we transform rather than fit_transform
return(X_train_scl, X_test_scl, y_train, y_test)
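# Illustrative usage sketch (not from the original source): X is the raw SNP
# frame still containing the 'IID' column and y holds the trait value(s); the
# call returns standardized numpy arrays for train/test plus the untouched
# target splits:
#   X_train_scl, X_test_scl, y_train, y_test = process_variable(X, y)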
"""Random Forest Regressor"""
#Function to run random forest with grid search and k-fold cross-validation.
def get_rf_model(X_train, y_train, X_test, y_test):
# Hyperparameters search grid
rf_param_grid = {'bootstrap': [False, True],
'n_estimators': [60, 70, 80, 90, 100],
'max_features': [0.6, 0.65, 0.7, 0.75, 0.8],
'min_samples_leaf': [1],
'min_samples_split': [2]
}
# Instantiate random forest regressor
rf_estimator = RandomForestRegressor(random_state=None)
# Create the GridSearchCV object
rf_model = GridSearchCV(estimator=rf_estimator, param_grid=rf_param_grid, cv=10, scoring='neg_mean_squared_error', n_jobs=-1, iid = True)
# Train the regressor
rf_model.fit(X_train, y_train)
# Get the best model
rf_model_best = rf_model.best_estimator_
# Make predictions using the optimised parameters
rf_pred = rf_model_best.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, rf_pred)
# Find r-squared
r2 = r2_score(y_test, rf_pred)
best_prs = rf_model.best_params_
print("Best Parameters:\n", rf_model.best_params_)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
return(mse, r2, best_prs)
"""Support Vector Regressor"""
#Function to run support vector machine with grid search and k-fold cross-validation.
def get_svm_model(X_train, y_train, X_test, y_test):
# Parameter grid
svm_param_grid = {'C': [0.1, 1, 10, 100], 'gamma': [1, 0.1, 0.01, 0.001, 0.0001, 10], "kernel": ["rbf"]}
# Create SVM grid search regressor
svm_grid = GridSearchCV(estimator = SVR(), param_grid= svm_param_grid, cv=10, scoring='neg_mean_squared_error', n_jobs=-1, iid = True)
# Train the regressor
svm_grid.fit(X_train, y_train)
# Get the best model
svm_model_best = svm_grid.best_estimator_
# Make predictions using the optimised parameters
svm_pred = svm_model_best.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, svm_pred)
# Find r-squared
r2 = r2_score(y_test, svm_pred)
best_prs = svm_grid.best_params_
print("Best Parameters:\n", svm_grid.best_params_)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
return(mse, r2, best_prs)
"""Lasso and Multi Task Lasso"""
#Lasso
def get_lasso_cv(X_train, y_train, X_test, y_test, cols):
# Create Lasso CV
ls_grid = LassoCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
ls_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
ls_pred = ls_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, ls_pred)
# Find r-squared
r2 = r2_score(y_test, ls_pred)
best_prs = ls_grid.alpha_
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
# Get coefficients of the model
coef = pd.DataFrame(ls_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("Lasso picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
# Multi-task Lasso
def get_multitask_lasso_cv(X_train, y_train, X_test, y_test, cols):
# Create Multi-task Lasso CV
ls_grid = MultiTaskLassoCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
ls_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
ls_pred = ls_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, ls_pred)
# Find r-squared
r2 = r2_score(y_test, ls_pred)
best_prs = ls_grid.alpha_
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r2:', r2)
# Get coefficients of the model
coef = pd.DataFrame(ls_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("Multit-task Lasso picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
"""Elastic Net and Multi Task Elastic Net"""
# Elastic Net
def get_elasticnet_cv(X_train, y_train, X_test, y_test, cols):
# Create Elastic Net CV
el_grid = ElasticNetCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
el_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
el_pred = el_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, el_pred)
# Find r-squared
r2 = r2_score(y_test, el_pred)
best_prs = [el_grid.alpha_]
best_prs.append(el_grid.l1_ratio_)
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r-squared:', r2)
# Get coefficients of the model
coef = pd.DataFrame(el_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("ElasticNet picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
# Multi-task Elastic Net
def get_multitask_elasticnet_cv(X_train, y_train, X_test, y_test, cols):
# Create Multi Task Elastic Net CV
el_grid = MultiTaskElasticNetCV(cv = 10, random_state = 0, n_jobs = -1)
# Train the regressor
el_grid.fit(X_train, y_train)
# Make predictions using the optimised parameters
el_pred = el_grid.predict(X_test)
# Find mean squared error
mse = mean_squared_error(y_test, el_pred)
# Find r-squared
r2 = r2_score(y_test, el_pred)
best_prs = [el_grid.alpha_]
best_prs.append(el_grid.l1_ratio_)
print("Best Parameters:\n", best_prs)
print("Best Score:\n", 'mse:', mse, 'r-squared:', r2)
# Get coefficients of the model
coef = pd.DataFrame(el_grid.coef_.T, index = cols)
var = list(coef[coef[0] != 0].index)
print(coef.head())
print("Multi-task ElasticNet picked " + str(sum(coef[0] != 0)) + " variables and eliminated the other " + str(sum(coef[0] == 0)) + " variables")
return(mse, r2, var, best_prs)
# Evaluate each trait separately with multi-task Lasso
def eval_mtls_split_trait(alpha, X_train, Y_train, X_test, Y_test):
# Create Multi-Task Lasso
ls_tfl_grw = MultiTaskLasso(alpha, random_state = 0)
# Train the regressor
ls_tfl_grw.fit(X_train, Y_train)
# Make predictions using the optimised parameters
ls_pred = ls_tfl_grw.predict(X_test)
# Find mean squared error
mse_tfl = mean_squared_error(Y_test[:, 0], ls_pred[:, 0])
mse_grw= mean_squared_error(Y_test[:, 1], ls_pred[:, 1])
# Find r-squared
r2_tfl = r2_score(Y_test[:, 0], ls_pred[:, 0])
r2_grw = r2_score(Y_test[:, 1], ls_pred[:, 1])
return(mse_tfl, mse_grw, r2_tfl, r2_grw)
# Evaluate each trait separately with multi-task Elastic Net
def eval_mtel_split_trait(alpha, l1_ratio, X_train, Y_train, X_test, Y_test):
# Create Multi-Task Elastic Net
el_tfl_grw = MultiTaskElasticNet(alpha, l1_ratio, random_state = 0)
# Train the regressor
el_tfl_grw.fit(X_train, Y_train)
# Make predictions using the optimised parameters
el_pred = el_tfl_grw.predict(X_test)
# Find mean squared error
mse_tfl = mean_squared_error(Y_test[:, 0], el_pred[:, 0])
mse_grw= mean_squared_error(Y_test[:, 1], el_pred[:, 1])
# Find r-squared
r2_tfl = r2_score(Y_test[:, 0], el_pred[:, 0])
r2_grw = r2_score(Y_test[:, 1], el_pred[:, 1])
return(mse_tfl, mse_grw, r2_tfl, r2_grw)
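# Illustrative usage sketch (not part of the original pipeline): the helper
# functions above can be exercised on a small synthetic regression problem.
# The synthetic data, feature names and 80/20 split below are assumptions for
# demonstration only; the real inputs are the rice SNP files loaded in the
# __main__ block, and get_rf_model assumes an sklearn version that still
# accepts the iid argument used above.
def _demo_model_functions():
    from sklearn.datasets import make_regression
    from sklearn.model_selection import train_test_split
    # Toy regression problem standing in for the SNP matrices
    X, y = make_regression(n_samples=200, n_features=20, noise=0.1, random_state=0)
    cols_demo = ["snp_%d" % i for i in range(X.shape[1])]
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=0)
    # Grid-searched random forest and cross-validated Lasso on the same split
    rf_mse, rf_r2, rf_params = get_rf_model(X_tr, y_tr, X_te, y_te)
    ls_mse, ls_r2, ls_vars, ls_alpha = get_lasso_cv(X_tr, y_tr, X_te, y_te, cols_demo)
    return rf_mse, rf_r2, ls_mse, ls_r2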
if __name__ == '__main__':
print("")
print("")
print("|============================================================================|")
print("| |")
print("| ----- IMPORTANT VARIABLE SELECTION WITH SNPS ----- |")
print("| |")
print("|============================================================================|")
print("")
print("")
print("********************************* INPUT DATA *********************************")
print("")
print("Import data may take several minutes, please wait...")
print("")
# Import data
X_cont = read_x_cont()
cols = X_cont.columns[1::]
# Load data after pre-processing
y_tfl = pd.read_csv("y_tfl.csv", header=None)
y_grw = pd.read_csv("y_grw.csv", header=None)
y_tfl_grw = pd.read_csv("y_tfl_grw.csv", header=None)
X_grw_2 = pd.read_csv("X_grw_2.csv", header='infer')
X_grw_3 = pd.read_csv("X_grw_3.csv", header='infer')
X_grw_4 = pd.read_csv("X_grw_4.csv", header='infer')
X_grw_5 = pd.read_csv("X_grw_5.csv", header='infer')
X_tfl_2 = pd.read_csv("X_tfl_2.csv", header='infer')
X_tfl_3 = pd.read_csv("X_tfl_3.csv", header='infer')
X_tfl_4 = pd.read_csv("X_tfl_4.csv", header='infer')
X_tfl_5 = pd.read_csv("X_tfl_5.csv", header='infer')
X_tfl_6 = pd.read_csv("X_tfl_6.csv", header='infer')
X_tfl_grw_2 = pd.read_csv("X_tfl_grw_2.csv", header='infer')
X_tfl_grw_25 = pd.read_csv("X_tfl_grw_25.csv", header='infer')
X_tfl_grw_1 = pd.read_csv("X_tfl_grw_1.csv", header='infer')
X_tfl_grw_75 = pd.read_csv("X_tfl_grw_75.csv", header='infer')
X_tfl_grw_3 = pd.read_csv("X_tfl_grw_3.csv", header='infer')
print("")
# Convert response variables to NumPy arrays (single-trait targets are flattened)
y_tfl = y_tfl.values.ravel()
y_grw = y_grw.values.ravel()
y_tfl_grw = y_tfl_grw.values
# Split the rice data into training/testing sets and normalize it
X_grw_2_train, X_grw_2_test, y_grw_2_train, y_grw_2_test = process_variable(X_grw_2, y_grw)
X_grw_3_train, X_grw_3_test, y_grw_3_train, y_grw_3_test = process_variable(X_grw_3, y_grw)
X_grw_4_train, X_grw_4_test, y_grw_4_train, y_grw_4_test = process_variable(X_grw_4, y_grw)
X_grw_5_train, X_grw_5_test, y_grw_5_train, y_grw_5_test = process_variable(X_grw_5, y_grw)
X_tfl_2_train, X_tfl_2_test, y_tfl_2_train, y_tfl_2_test = process_variable(X_tfl_2, y_tfl)
X_tfl_3_train, X_tfl_3_test, y_tfl_3_train, y_tfl_3_test = process_variable(X_tfl_3, y_tfl)
X_tfl_4_train, X_tfl_4_test, y_tfl_4_train, y_tfl_4_test = process_variable(X_tfl_4, y_tfl)
X_tfl_5_train, X_tfl_5_test, y_tfl_5_train, y_tfl_5_test = process_variable(X_tfl_5, y_tfl)
X_tfl_6_train, X_tfl_6_test, y_tfl_6_train, y_tfl_6_test = process_variable(X_tfl_6, y_tfl)
X_tfl_grw_2_train, X_tfl_grw_2_test, y_tfl_grw_2_train, y_tfl_grw_2_test = process_variable(X_tfl_grw_2, y_tfl_grw)
X_tfl_grw_25_train, X_tfl_grw_25_test, y_tfl_grw_25_train, y_tfl_grw_25_test = process_variable(X_tfl_grw_25, y_tfl_grw)
X_tfl_grw_1_train, X_tfl_grw_1_test, y_tfl_grw_1_train, y_tfl_grw_1_test = process_variable(X_tfl_grw_1, y_tfl_grw)
X_tfl_grw_75_train, X_tfl_grw_75_test, y_tfl_grw_75_train, y_tfl_grw_75_test = process_variable(X_tfl_grw_75, y_tfl_grw)
X_tfl_grw_3_train, X_tfl_grw_3_test, y_tfl_grw_3_train, y_tfl_grw_3_test = process_variable(X_tfl_grw_3, y_tfl_grw)
X_grw_train, X_grw_test, y_grw_train, y_grw_test = process_variable(X_cont, y_grw)
X_tfl_train, X_tfl_test, y_tfl_train, y_tfl_test = process_variable(X_cont, y_tfl)
X_tfl_grw_train, X_tfl_grw_test, y_tfl_grw_train, y_tfl_grw_test = process_variable(X_cont, y_tfl_grw)
print("")
print("******************************* TRAINING MODELS *****************************")
print("")
rf_grw_mse = []
rf_grw_r2 = []
rf_tfl_mse = []
rf_tfl_r2 = []
rf_grw_prs = []
rf_tfl_prs = []
rf_tfl_grw_mse_0 = []
rf_tfl_grw_r2_0 = []
rf_tfl_grw_prs_0 = []
rf_tfl_grw_mse_1 = []
rf_tfl_grw_r2_1 = []
rf_tfl_grw_prs_1 = []
svr_grw_mse = []
svr_grw_r2 = []
svr_tfl_mse = []
svr_tfl_r2 = []
svr_grw_prs = []
svr_tfl_prs = []
svr_tfl_grw_mse_0 = []
svr_tfl_grw_r2_0 = []
svr_tfl_grw_prs_0 = []
svr_tfl_grw_mse_1 = []
svr_tfl_grw_r2_1 = []
svr_tfl_grw_prs_1 = []
# Filtering variables by p_value.
p_value = ['<=5e-6', '<=5e-5', '<=5e-4', '<=5e-3', '<=5e-2']
p_value_2 = ['<=5e-3','<=7.5e-3', '<=1e-2', '<=2.5e-2', '<=5e-2']
print("Find mse and r-squared for random forest model of grain weight...")
rf_grw_mse_2, rf_grw_r2_2, rf_grw_prs_2 = get_rf_model(X_grw_2_train, y_grw_2_train, X_grw_2_test, y_grw_2_test)
rf_grw_mse.append(rf_grw_mse_2)
rf_grw_r2.append(rf_grw_r2_2)
rf_grw_prs.append(rf_grw_prs_2)
rf_grw_mse_3, rf_grw_r2_3, rf_grw_prs_3 = get_rf_model(X_grw_3_train, y_grw_3_train, X_grw_3_test, y_grw_3_test)
rf_grw_mse.append(rf_grw_mse_3)
rf_grw_r2.append(rf_grw_r2_3)
rf_grw_prs.append(rf_grw_prs_3)
rf_grw_mse_4, rf_grw_r2_4, rf_grw_prs_4 = get_rf_model(X_grw_4_train, y_grw_4_train, X_grw_4_test, y_grw_4_test)
rf_grw_mse.append(rf_grw_mse_4)
rf_grw_r2.append(rf_grw_r2_4)
rf_grw_prs.append(rf_grw_prs_4)
rf_grw_mse_5, rf_grw_r2_5, rf_grw_prs_5 = get_rf_model(X_grw_5_train, y_grw_5_train, X_grw_5_test, y_grw_5_test)
rf_grw_mse.append(rf_grw_mse_5)
rf_grw_r2.append(rf_grw_r2_5)
rf_grw_prs.append(rf_grw_prs_5)
rf_grw = pd.DataFrame({'rf_grw_mse':rf_grw_mse[::-1], 'rf_grw_r2':rf_grw_r2[::-1], 'rf_grw_prs':rf_grw_prs[::-1]})
rf_grw.set_index(pd.Index(p_value[1:5]), 'p_value', inplace = True)
rf_grw.to_csv('rf_grw.csv')
print('RF of grain weight is saved')
print("Find mse and r-squared for random forest model of time to flowering...")
rf_tfl_mse_2, rf_tfl_r2_2, rf_tfl_prs_2 = get_rf_model(X_tfl_2_train, y_tfl_2_train, X_tfl_2_test, y_tfl_2_test)
rf_tfl_mse.append(rf_tfl_mse_2)
rf_tfl_r2.append(rf_tfl_r2_2)
rf_tfl_prs.append(rf_tfl_prs_2)
rf_tfl_mse_3, rf_tfl_r2_3, rf_tfl_prs_3 = get_rf_model(X_tfl_3_train, y_tfl_3_train, X_tfl_3_test, y_tfl_3_test)
rf_tfl_mse.append(rf_tfl_mse_3)
rf_tfl_r2.append(rf_tfl_r2_3)
rf_tfl_prs.append(rf_tfl_prs_3)
rf_tfl_mse_4, rf_tfl_r2_4, rf_tfl_prs_4 = get_rf_model(X_tfl_4_train, y_tfl_4_train, X_tfl_4_test, y_tfl_4_test)
rf_tfl_mse.append(rf_tfl_mse_4)
rf_tfl_r2.append(rf_tfl_r2_4)
rf_tfl_prs.append(rf_tfl_prs_4)
rf_tfl_mse_5, rf_tfl_r2_5, rf_tfl_prs_5 = get_rf_model(X_tfl_5_train, y_tfl_5_train, X_tfl_5_test, y_tfl_5_test)
rf_tfl_mse.append(rf_tfl_mse_5)
rf_tfl_r2.append(rf_tfl_r2_5)
rf_tfl_prs.append(rf_tfl_prs_5)
rf_tfl_mse_6, rf_tfl_r2_6, rf_tfl_prs_6 = get_rf_model(X_tfl_6_train, y_tfl_6_train, X_tfl_6_test, y_tfl_6_test)
rf_tfl_mse.append(rf_tfl_mse_6)
rf_tfl_r2.append(rf_tfl_r2_6)
rf_tfl_prs.append(rf_tfl_prs_6)
rf_tfl = pd.DataFrame({'rf_tfl_mse':rf_tfl_mse[::-1], 'rf_tfl_r2':rf_tfl_r2[::-1], 'rf_tfl_prs':rf_tfl_prs[::-1]})
rf_tfl.set_index(pd.Index(p_value), 'p_value', inplace = True)
rf_tfl.to_csv('rf_tfl.csv')
print('RF of time to flowering is saved')
print("Find mse and r-squared for random forest model of time to flowering and grain weight...")
# Output is time to flowering
rf_tfl_grw_mse_2_0, rf_tfl_grw_r2_2_0, rf_tfl_grw_prs_2_0 = get_rf_model(X_tfl_grw_2_train, y_tfl_grw_2_train[:, 0], X_tfl_grw_2_test, y_tfl_grw_2_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_2_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_2_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_2_0)
rf_tfl_grw_mse_25_0, rf_tfl_grw_r2_25_0, rf_tfl_grw_prs_25_0 = get_rf_model(X_tfl_grw_25_train, y_tfl_grw_25_train[:, 0], X_tfl_grw_25_test, y_tfl_grw_25_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_25_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_25_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_25_0)
rf_tfl_grw_mse_1_0, rf_tfl_grw_r2_1_0, rf_tfl_grw_prs_1_0 = get_rf_model(X_tfl_grw_1_train, y_tfl_grw_1_train[:, 0], X_tfl_grw_1_test, y_tfl_grw_1_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_1_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_1_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_1_0)
rf_tfl_grw_mse_75_0, rf_tfl_grw_r2_75_0, rf_tfl_grw_prs_75_0 = get_rf_model(X_tfl_grw_75_train, y_tfl_grw_75_train[:, 0], X_tfl_grw_75_test, y_tfl_grw_75_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_75_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_75_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_75_0)
rf_tfl_grw_mse_3_0, rf_tfl_grw_r2_3_0, rf_tfl_grw_prs_3_0 = get_rf_model(X_tfl_grw_3_train, y_tfl_grw_3_train[:, 0], X_tfl_grw_3_test, y_tfl_grw_3_test[:, 0])
rf_tfl_grw_mse_0.append(rf_tfl_grw_mse_3_0)
rf_tfl_grw_r2_0.append(rf_tfl_grw_r2_3_0)
rf_tfl_grw_prs_0.append(rf_tfl_grw_prs_3_0)
rf_tfl_grw_0 = pd.DataFrame({'rf_tfl_grw_mse_0':rf_tfl_grw_mse_0[::-1], 'rf_tfl_grw_r2_0':rf_tfl_grw_r2_0[::-1], 'rf_tfl_grw_prs_0':rf_tfl_grw_prs_0[::-1]})
rf_tfl_grw_0.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
rf_tfl_grw_0.to_csv('rf_tfl_grw_0.csv')
# Output is grain weight
rf_tfl_grw_mse_2_1, rf_tfl_grw_r2_2_1, rf_tfl_grw_prs_2_1 = get_rf_model(X_tfl_grw_2_train, y_tfl_grw_2_train[:, 1], X_tfl_grw_2_test, y_tfl_grw_2_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_2_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_2_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_2_1)
rf_tfl_grw_mse_25_1, rf_tfl_grw_r2_25_1, rf_tfl_grw_prs_25_1 = get_rf_model(X_tfl_grw_25_train, y_tfl_grw_25_train[:, 1], X_tfl_grw_25_test, y_tfl_grw_25_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_25_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_25_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_25_1)
rf_tfl_grw_mse_1_1, rf_tfl_grw_r2_1_1, rf_tfl_grw_prs_1_1 = get_rf_model(X_tfl_grw_1_train, y_tfl_grw_1_train[:, 1], X_tfl_grw_1_test, y_tfl_grw_1_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_1_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_1_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_1_1)
rf_tfl_grw_mse_75_1, rf_tfl_grw_r2_75_1, rf_tfl_grw_prs_75_1 = get_rf_model(X_tfl_grw_75_train, y_tfl_grw_75_train[:, 1], X_tfl_grw_75_test, y_tfl_grw_75_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_75_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_75_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_75_1)
rf_tfl_grw_mse_3_1, rf_tfl_grw_r2_3_1, rf_tfl_grw_prs_3_1 = get_rf_model(X_tfl_grw_3_train, y_tfl_grw_3_train[:, 1], X_tfl_grw_3_test, y_tfl_grw_3_test[:, 1])
rf_tfl_grw_mse_1.append(rf_tfl_grw_mse_3_1)
rf_tfl_grw_r2_1.append(rf_tfl_grw_r2_3_1)
rf_tfl_grw_prs_1.append(rf_tfl_grw_prs_3_1)
rf_tfl_grw_1 = pd.DataFrame({'rf_tfl_grw_mse_1':rf_tfl_grw_mse_1[::-1], 'rf_tfl_grw_r2_1':rf_tfl_grw_r2_1[::-1], 'rf_tfl_grw_prs_1':rf_tfl_grw_prs_1[::-1]})
rf_tfl_grw_1.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
rf_tfl_grw_1.to_csv('rf_tfl_grw_1.csv')
print('RF of time to flowering and grain weight is saved')
print("Find mse and r-squared for svm model of grain weight...")
svr_grw_mse_2, svr_grw_r2_2, svr_grw_prs_2 = get_svm_model(X_grw_2_train, y_grw_2_train, X_grw_2_test, y_grw_2_test)
svr_grw_mse.append(svr_grw_mse_2)
svr_grw_r2.append(svr_grw_r2_2)
svr_grw_prs.append(svr_grw_prs_2)
svr_grw_mse_3, svr_grw_r2_3, svr_grw_prs_3 = get_svm_model(X_grw_3_train, y_grw_3_train, X_grw_3_test, y_grw_3_test)
svr_grw_mse.append(svr_grw_mse_3)
svr_grw_r2.append(svr_grw_r2_3)
svr_grw_prs.append(svr_grw_prs_3)
svr_grw_mse_4, svr_grw_r2_4, svr_grw_prs_4 = get_svm_model(X_grw_4_train, y_grw_4_train, X_grw_4_test, y_grw_4_test)
svr_grw_mse.append(svr_grw_mse_4)
svr_grw_r2.append(svr_grw_r2_4)
svr_grw_prs.append(svr_grw_prs_4)
svr_grw_mse_5, svr_grw_r2_5, svr_grw_prs_5 = get_svm_model(X_grw_5_train, y_grw_5_train, X_grw_5_test, y_grw_5_test)
svr_grw_mse.append(svr_grw_mse_5)
svr_grw_r2.append(svr_grw_r2_5)
svr_grw_prs.append(svr_grw_prs_5)
svr_grw = pd.DataFrame({'svr_grw_mse':svr_grw_mse[::-1], 'svr_grw_r2':svr_grw_r2[::-1], 'svr_grw_prs':svr_grw_prs[::-1]})
svr_grw.set_index(pd.Index(p_value[1:5]), 'p_value', inplace = True)
svr_grw.to_csv('svr_grw.csv')
print('SVR of grain weight is saved')
print("Find mse and r-squared for svm model of time to flowering...")
svr_tfl_mse_2, svr_tfl_r2_2, svr_tfl_prs_2 = get_svm_model(X_tfl_2_train, y_tfl_2_train, X_tfl_2_test, y_tfl_2_test)
svr_tfl_mse.append(svr_tfl_mse_2)
svr_tfl_r2.append(svr_tfl_r2_2)
svr_tfl_prs.append(svr_tfl_prs_2)
svr_tfl_mse_3, svr_tfl_r2_3, svr_tfl_prs_3 = get_svm_model(X_tfl_3_train, y_tfl_3_train, X_tfl_3_test, y_tfl_3_test)
svr_tfl_mse.append(svr_tfl_mse_3)
svr_tfl_r2.append(svr_tfl_r2_3)
svr_tfl_prs.append(svr_tfl_prs_3)
svr_tfl_mse_4, svr_tfl_r2_4, svr_tfl_prs_4 = get_svm_model(X_tfl_4_train, y_tfl_4_train, X_tfl_4_test, y_tfl_4_test)
svr_tfl_mse.append(svr_tfl_mse_4)
svr_tfl_r2.append(svr_tfl_r2_4)
svr_tfl_prs.append(svr_tfl_prs_4)
svr_tfl_mse_5, svr_tfl_r2_5, svr_tfl_prs_5 = get_svm_model(X_tfl_5_train, y_tfl_5_train, X_tfl_5_test, y_tfl_5_test)
svr_tfl_mse.append(svr_tfl_mse_5)
svr_tfl_r2.append(svr_tfl_r2_5)
svr_tfl_prs.append(svr_tfl_prs_5)
svr_tfl_mse_6, svr_tfl_r2_6, svr_tfl_prs_6 = get_svm_model(X_tfl_6_train, y_tfl_6_train, X_tfl_6_test, y_tfl_6_test)
svr_tfl_mse.append(svr_tfl_mse_6)
svr_tfl_r2.append(svr_tfl_r2_6)
svr_tfl_prs.append(svr_tfl_prs_6)
svr_tfl = pd.DataFrame({'svr_tfl_mse':svr_tfl_mse[::-1], 'svr_tfl_r2':svr_tfl_r2[::-1], 'svr_tfl_prs':svr_tfl_prs[::-1]})
svr_tfl.set_index(pd.Index(p_value), 'p_value', inplace = True)
svr_tfl.to_csv('svr_tfl.csv')
print('SVR of time to flowering is saved')
print("Find mse and r-squared for svm model of time to flowering and grain weight... ")
# Output is time to flowering
svr_tfl_grw_mse_2_0, svr_tfl_grw_r2_2_0, svr_tfl_grw_prs_2_0 = get_svm_model(X_tfl_grw_2_train, y_tfl_grw_2_train[:, 0], X_tfl_grw_2_test, y_tfl_grw_2_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_2_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_2_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_2_0)
svr_tfl_grw_mse_25_0, svr_tfl_grw_r2_25_0, svr_tfl_grw_prs_25_0 = get_svm_model(X_tfl_grw_25_train, y_tfl_grw_25_train[:, 0], X_tfl_grw_25_test, y_tfl_grw_25_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_25_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_25_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_25_0)
svr_tfl_grw_mse_1_0, svr_tfl_grw_r2_1_0, svr_tfl_grw_prs_1_0 = get_svm_model(X_tfl_grw_1_train, y_tfl_grw_1_train[:, 0], X_tfl_grw_1_test, y_tfl_grw_1_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_1_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_1_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_1_0)
svr_tfl_grw_mse_75_0, svr_tfl_grw_r2_75_0, svr_tfl_grw_prs_75_0 = get_svm_model(X_tfl_grw_75_train, y_tfl_grw_75_train[:, 0], X_tfl_grw_75_test, y_tfl_grw_75_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_75_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_75_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_75_0)
svr_tfl_grw_mse_3_0, svr_tfl_grw_r2_3_0, svr_tfl_grw_prs_3_0 = get_svm_model(X_tfl_grw_3_train, y_tfl_grw_3_train[:, 0], X_tfl_grw_3_test, y_tfl_grw_3_test[:, 0])
svr_tfl_grw_mse_0.append(svr_tfl_grw_mse_3_0)
svr_tfl_grw_r2_0.append(svr_tfl_grw_r2_3_0)
svr_tfl_grw_prs_0.append(svr_tfl_grw_prs_3_0)
svr_tfl_grw_0 = pd.DataFrame({'svr_tfl_grw_mse_0':svr_tfl_grw_mse_0[::-1], 'svr_tfl_grw_r2_0':svr_tfl_grw_r2_0[::-1], 'svr_tfl_grw_prs_0':svr_tfl_grw_prs_0[::-1]})
svr_tfl_grw_0.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
svr_tfl_grw_0.to_csv('svr_tfl_grw_0.csv')
# Output is grain weight
svr_tfl_grw_mse_2_1, svr_tfl_grw_r2_2_1, svr_tfl_grw_prs_2_1 = get_svm_model(X_tfl_grw_2_train, y_tfl_grw_2_train[:, 1], X_tfl_grw_2_test, y_tfl_grw_2_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_2_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_2_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_2_1)
svr_tfl_grw_mse_25_1, svr_tfl_grw_r2_25_1, svr_tfl_grw_prs_25_1 = get_svm_model(X_tfl_grw_25_train, y_tfl_grw_25_train[:, 1], X_tfl_grw_25_test, y_tfl_grw_25_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_25_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_25_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_25_1)
svr_tfl_grw_mse_1_1, svr_tfl_grw_r2_1_1, svr_tfl_grw_prs_1_1 = get_svm_model(X_tfl_grw_1_train, y_tfl_grw_1_train[:, 1], X_tfl_grw_1_test, y_tfl_grw_1_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_1_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_1_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_1_1)
svr_tfl_grw_mse_75_1, svr_tfl_grw_r2_75_1, svr_tfl_grw_prs_75_1 = get_svm_model(X_tfl_grw_75_train, y_tfl_grw_75_train[:, 1], X_tfl_grw_75_test, y_tfl_grw_75_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_75_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_75_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_75_1)
svr_tfl_grw_mse_3_1, svr_tfl_grw_r2_3_1, svr_tfl_grw_prs_3_1 = get_svm_model(X_tfl_grw_3_train, y_tfl_grw_3_train[:, 1], X_tfl_grw_3_test, y_tfl_grw_3_test[:, 1])
svr_tfl_grw_mse_1.append(svr_tfl_grw_mse_3_1)
svr_tfl_grw_r2_1.append(svr_tfl_grw_r2_3_1)
svr_tfl_grw_prs_1.append(svr_tfl_grw_prs_3_1)
svr_tfl_grw_1 = pd.DataFrame({'svr_tfl_grw_mse_1':svr_tfl_grw_mse_1[::-1], 'svr_tfl_grw_r2_1':svr_tfl_grw_r2_1[::-1], 'svr_tfl_grw_prs_1':svr_tfl_grw_prs_1[::-1]})
svr_tfl_grw_1.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
svr_tfl_grw_1.to_csv('svr_tfl_grw_1.csv')
print("")
print("Create data frames...")
print("")
grw_mse = pd.DataFrame({'rf_grw_mse':rf_grw_mse[::-1], 'svr_grw_mse':svr_grw_mse[::-1]})
grw_mse.set_index(pd.Index(p_value[1:5]), 'p_value', inplace = True)
grw_r2 = pd.DataFrame({'rf_grw_r2':rf_grw_r2[::-1], 'svr_grw_r2':svr_grw_r2[::-1]})
grw_r2.set_index(pd.Index(p_value[1:5]), 'p_value', inplace = True)
tfl_mse = pd.DataFrame({'rf_tfl_mse':rf_tfl_mse[::-1], 'svr_tfl_mse':svr_tfl_mse[::-1]})
tfl_mse.set_index(pd.Index(p_value), 'p_value', inplace = True)
tfl_r2 = pd.DataFrame({'rf_tfl_r2':rf_tfl_r2[::-1], 'svr_tfl_r2':svr_tfl_r2[::-1]})
tfl_r2.set_index(pd.Index(p_value), 'p_value', inplace = True)
tfl_grw_mse = pd.DataFrame({'rf_tfl_mse':rf_tfl_grw_mse_0[::-1], 'rf_grw_mse':rf_tfl_grw_mse_1[::-1], 'svr_tfl_mse':svr_tfl_grw_mse_0[::-1], 'svr_grw_mse':svr_tfl_grw_mse_1[::-1]})
tfl_grw_mse.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
tfl_grw_r2 = pd.DataFrame({'rf_tfl_r2':rf_tfl_grw_r2_0[::-1], 'rf_grw_r2':rf_tfl_grw_r2_1[::-1], 'svr_tfl_r2':svr_tfl_grw_r2_0[::-1], 'svr_grw_r2':svr_tfl_grw_r2_1[::-1]})
tfl_grw_r2.set_index(pd.Index(p_value_2), 'p_value', inplace = True)
print("")
print("Find mse and r-squared for lasso and multitasklasso model...")
print("")
print("For grain weight...")
print("")
mse_grw_ls, r2_grw_ls, var_grw_ls, ls_grw_prs = get_lasso_cv(X_grw_train, y_grw_train, X_grw_test, y_grw_test, cols)
print("")
print("For time to flowering...")
print("")
mse_tfl_ls, r2_tfl_ls, var_tfl_ls, ls_tfl_prs = get_lasso_cv(X_tfl_train, y_tfl_train, X_tfl_test, y_tfl_test, cols)
print("")
print("For time to flowering and grain weight...")
print("")
mse_tfl_grw_ls, r2_tfl_grw_ls, var_tfl_grw_ls, ls_tfl_grw_prs = get_multitask_lasso_cv(X_tfl_grw_train, y_tfl_grw_train, X_tfl_grw_test, y_tfl_grw_test, cols)
print("")
print("Find mse and r-squared for elasticnet and multitaskelasticnet model...")
print("")
print("For grain weight...")
print("")
mse_grw_el, r2_grw_el, var_grw_el, el_grw_prs = get_elasticnet_cv(X_grw_train, y_grw_train, X_grw_test, y_grw_test, cols)
print("")
print("For time to flowering...")
print("")
mse_tfl_el, r2_tfl_el, var_tfl_el, el_tfl_prs = get_elasticnet_cv(X_tfl_train, y_tfl_train, X_tfl_test, y_tfl_test, cols)
print("")
print("For time to flowering and grain weight...")
print("")
mse_tfl_grw_el, r2_tfl_grw_el, var_tfl_grw_el, el_tfl_grw_prs = get_multitask_elasticnet_cv(X_tfl_grw_train, y_tfl_grw_train, X_tfl_grw_test, y_tfl_grw_test, cols)
# Mse, r2 of each trait with the multi-task problem
mtls_mse_tfl, mtls_mse_grw, mtls_r2_tfl, mtls_r2_grw = eval_mtls_split_trait(2.41812258083032, X_tfl_grw_train, y_tfl_grw_train, X_tfl_grw_test, y_tfl_grw_test)
mtel_mse_tfl, mtel_mse_grw, mtel_r2_tfl, mtel_r2_grw = eval_mtel_split_trait(4.20631940576943, 0.5, X_tfl_grw_train, y_tfl_grw_train, X_tfl_grw_test, y_tfl_grw_test)
ls_table = pd.DataFrame({'mse_grw_ls':[mse_grw_ls], 'r2_grw_ls':[r2_grw_ls],
'mse_tfl_ls':[mse_tfl_ls], 'r2_tfl_ls':[r2_tfl_ls],
'mse_tfl_grw_ls':[mse_tfl_grw_ls], 'r2_tfl_grw_ls':[r2_tfl_grw_ls],
'ls_grw_prs':[ls_grw_prs], 'ls_tfl_prs':[ls_tfl_prs], 'ls_tfl_grw_prs':[ls_tfl_grw_prs]})
el_table = pd.DataFrame({'mse_grw_el':[mse_grw_el], 'r2_grw_el':[r2_grw_el],
'mse_tfl_el':[mse_tfl_el], 'r2_tfl_el':[r2_tfl_el],
'mse_tfl_grw_el':[mse_tfl_grw_el], 'r2_tfl_grw_el':[r2_tfl_grw_el],
'el_grw_prs':[el_grw_prs], 'el_tfl_prs':[el_tfl_prs], 'el_tfl_grw_prs':[el_tfl_grw_prs]})
ls_split_trait = pd.DataFrame({'mtls_mse_tfl':[mtls_mse_tfl],'mtls_mse_grw':[mtls_mse_grw], 'mtls_r2_tfl':[mtls_r2_tfl], 'mtls_r2_grw':[mtls_r2_grw]})
el_split_trait = pd.DataFrame({'mtel_mse_tfl':[mtel_mse_tfl],'mtel_mse_grw':[mtel_mse_grw], 'mtel_r2_tfl':[mtel_r2_tfl], 'mtel_r2_grw':[mtel_r2_grw]})
var_tfl_ls = pd.DataFrame({'var_tfl_ls':var_tfl_ls})
var_grw_ls = pd.DataFrame({'var_grw_ls':var_grw_ls})
var_tfl_grw_ls = pd.DataFrame({'var_tfl_grw_ls':var_tfl_grw_ls})
var_tfl_el = pd.DataFrame({'var_tfl_el':var_tfl_el})
var_grw_el = pd.DataFrame({'var_grw_el':var_grw_el})
#Import modules
import os
import pandas as pd
import numpy as np
from pandas import DatetimeIndex
import dask
import scipy
from scipy.optimize import minimize, LinearConstraint
import time
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import pickle
#Define Column Name
indexName = 'date'
indexExpiry = 'optionExpiry'
indexTenor = 'underlyingTerm'
indexStrike = 'Strike'
indexRelStrike = 'RelativeStrike'
def getTTMFromCoordinates(dfList):
return dfList[1].applymap(lambda x : x[0])
def getMoneynessFromCoordinates(dfList):
return dfList[1].applymap(lambda x : x[1])
def readfile(file):
print("file")
print(file)
def iterateOnFolderContent(folderName):
for elt in os.scandir(folderName):
if elt.is_dir():
print("Folder")
print(elt)
iterateOnFolderContent(elt)
else :
readfile(elt)
def parseTerm(stringTerm):
if 'M' == stringTerm[-1]:
return float(stringTerm[:-1])/12
elif 'Y' == stringTerm[-1]:
return float(stringTerm[:-1])
else :
raise Exception("Can not parse term")
def parseTenor(row):
return [parseTerm(row['underlyingTerm']), parseTerm(row['optionExpiry'])]
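# Illustrative sketch (not part of the original file): quick sanity check for
# the parsing helpers above. parseTerm maps "6M" to 0.5 years and "10Y" to
# 10.0 years; parseTenor applies it to the tenor and expiry columns of a row.
def _check_term_parsing():
    assert parseTerm("6M") == 0.5
    assert parseTerm("10Y") == 10.0
    row = pd.Series({"underlyingTerm": "10Y", "optionExpiry": "6M"})
    assert parseTenor(row) == [10.0, 0.5]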
def smileFromSkew(skew):
atmVol = skew['A']
#smile = atmVol + skew[skewShift]
#return smile#.append(skew.drop(smile.index))
return atmVol + skew.drop('A')
def parseStrike(relStrike):
if relStrike.name[3] == 'A':
return relStrike['forward']
if "+" in relStrike.name[3]:
shift = int(relStrike.name[3].split("+")[1])
return relStrike['forward'] + shift/1000
if "-" in relStrike.name[3]:
shift = int(relStrike.name[3].split("-")[1])
return relStrike['forward'] - shift/1000
raise Exception(' Can not parse Strike ')
#Grid points (expiry, tenor) shared by all dates across history
def intersectionGrid(grid) :
nbDates = grid.index.get_level_values(0).unique().shape[0]
if nbDates <= 1:
return grid.index.droplevel(0)
else :
midDate = grid.index.get_level_values(0).unique()[int(nbDates/2)]
g1 = grid[grid.index.get_level_values(0) < midDate]
g2 = grid[grid.index.get_level_values(0) >= midDate]
return intersectionGrid(g1).intersection(intersectionGrid(g2))
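# Illustrative sketch (not part of the original file): for a history indexed
# by (date, expiry, tenor), intersectionGrid recursively splits the dates in
# two halves and keeps only the (expiry, tenor) pairs quoted on every date.
def _demo_intersection_grid():
    idx = pd.MultiIndex.from_tuples(
        [("2020-01-01", 1.0, 5.0), ("2020-01-01", 2.0, 10.0),
         ("2020-01-02", 1.0, 5.0), ("2020-01-02", 5.0, 10.0)],
        names=["date", "Expiry", "Tenor"])
    grid = pd.DataFrame({"Vol": [0.2, 0.21, 0.19, 0.22]}, index=idx)
    # Only (1.0, 5.0) is present on both dates
    return intersectionGrid(grid)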
def splitTrainTestDataRandomly(gridHistory, trainingSetPercentage):
nbDates = gridHistory.index.get_level_values(0).unique().shape[0]
trainingDates = np.random.choice(gridHistory.index.get_level_values(0).unique(),
replace=False,
size=int(nbDates * trainingSetPercentage))
trainingData = gridHistory.loc[pd.IndexSlice[trainingDates,:,:], :]
testingData = gridHistory.drop(trainingData.index)
trainingData.index = trainingData.index.droplevel([1,2])
testingData.index = testingData.index.droplevel([1,2])
return trainingData, testingData
def splitTrainTestDataChronologically(gridHistory, trainingSetPercentage):
firstTestingDate = int(gridHistory.index.get_level_values(0).unique().shape[0]
* trainingSetPercentage)
trainingDates = gridHistory.index.get_level_values(0).unique()[:firstTestingDate]
trainingData = gridHistory.loc[pd.IndexSlice[trainingDates,:,:], :]
testingData = gridHistory.drop(trainingData.index)
trainingData.index = trainingData.index.droplevel([1,2])
testingData.index = testingData.index.droplevel([1,2])
return trainingData, testingData
def sampleBatchOfDays(dataSet, nbDrawn):
trainingDates = np.random.choice(dataSet.index.get_level_values(0).unique(),
replace=False,
size=nbDrawn)
return dataSet.loc[trainingDates, :]
def splitHistory(history, colName):
return pd.pivot_table(history,
values = colName,
index = history.index.names,
columns=['Expiry','Tenor'])
def extractDataFromCSV(dataSetPath):
#Read csv file
data = pd.read_csv(dataSetPath)
#Parse tenor and expiry as float years
data['Tenor'],data['Expiry'] = zip(*data.apply(parseTenor,axis=1))
#Parse date as a datetime
data[indexName] = pd.to_datetime(data['businessDate'], dayfirst=True)
#Set Index as as a three dimension vector and sort observation
indexedData = data.set_index([indexExpiry, indexTenor, indexName]).sort_index()
#Keep relevant features
#Columns used for representing a Strike Value
skewShift = [shift for shift in indexedData.columns if ('A' in shift )]#and 'A' != shift
#Other Columns to keep
otherColumns = ['forward', 'Tenor', 'Expiry']
#Get columns indexed by a relative strike
skewHistory = indexedData[skewShift + otherColumns]#.apply(smileFromSkew,axis=1)
#Merge with other useful columns
#Stacking Smile
#Left outer Join on (tenor, expiry, date)
joinColumns = skewHistory.index.names
leftTable = skewHistory.drop(otherColumns, axis = 1).stack().rename("Vol")#Features depending on strike value
leftTable.index.names = [leftTable.index.names[0],
leftTable.index.names[1],
leftTable.index.names[2],
'RelativeStrike']
formattedHistory = leftTable.reset_index().merge(skewHistory[otherColumns].reset_index(),
on=joinColumns,
validate = "m:1").set_index(leftTable.index.names).sort_index()
#Convert strike shift as a float from a stringTerm
formattedHistory[indexStrike] = formattedHistory.apply(parseStrike,axis=1)
return formattedHistory
def equalDf(df1, df2):
if df1.shape == df2.shape :
if np.sum(np.isnan(df1.values)) != np.sum(np.isnan(df2.values)) :
print("Not the same number of nan")
return False
tol = 1e-6
gap = np.nansum(np.abs(df1.values - df2.values))
if gap < tol :
return True
else :
print("Large df error : ", gap)
return False
print("Not the same shape")
return False
def sampleSwaptionsToDelete(dataSet, completionRate):
return dataSet.iloc[0].sample(frac = completionRate).index
def removeSwaptionsToDelete(dataSet):
listToDelete = [(0.08333333333333333,0.25),(0.08333333333333333,10.0),
(0.08333333333333333,30.0),(0.5,2.0),(0.5,15.0),
(5.0,1.0),(5.0,20.0),(10.0,5.0)]
return dataSet.iloc[0].index.difference(listToDelete)
#Different from minmax scaler of scikit learn
#Min and Max are computed on the dataset, not column wise
class customMinMaxScale:
def __init__(self, feature_range = (0,1)):
self.min = feature_range[0]
self.max = feature_range[1]
#We can enforce the minimum if we expect smaller data in the testing set
def fit(self, dataset,
enforceDataSetMin = None,
enforceDataSetMax = None):
self.datasetMin = dataset.min().min()
if enforceDataSetMin is not None :
self.datasetMin = min(enforceDataSetMin, self.datasetMin)
self.datasetMax = dataset.max().max()
if enforceDataSetMax is not None :
self.datasetMax = max(enforceDataSetMax, self.datasetMax)
return
def transform(self, dataset):
scale = (self.max - self.min) / (self.datasetMax - self.datasetMin)
return (dataset - self.datasetMin) * scale + self.min
def inverse_transform(self, scaledDataset):
scale = (self.max - self.min) / (self.datasetMax - self.datasetMin)
return (scaledDataset - self.min) / scale + self.datasetMin
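# Illustrative sketch (not part of the original file): unlike sklearn's
# MinMaxScaler, which rescales each column independently, customMinMaxScale
# uses a single global min/max over the whole dataframe, so relative levels
# across columns are preserved.
def _demo_custom_minmax_scale():
    df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 5.0]})
    scaler = customMinMaxScale(feature_range=(0, 1))
    scaler.fit(df)                     # global min = 1.0, global max = 5.0
    scaled = scaler.transform(df)      # column "b" maps to [0.5, 1.0]
    recovered = scaler.inverse_transform(scaled)
    return scaled, recovered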
#Encapsulation class for Sklearn Standard scaling
class customMeanStdScale:
def __init__(self, feature_range = (0,1)):
self.scalerList = []
#We can enforce the minimum if we expect smaller data in the testing set
def fit(self, dataset,
enforceDataSetMin = None,
enforceDataSetMax = None):
hasTupleElt = (type(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])==type(tuple()))
if hasTupleElt :
tupleSize = len(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])
self.scalerList = [StandardScaler() for i in range(tupleSize)]
for k in range(tupleSize):
funcAccess = lambda x : x[k]
scaler = self.scalerList[k]
dfElt = dataset.applymap(funcAccess) if (type(dataset) != type(pd.Series())) else dataset.map(funcAccess)
scaler.fit(dfElt)
else :
self.scalerList = []
self.scalerList.append(StandardScaler())
self.scalerList[0].fit(dataset)
return
def transformSingleDf(self, scaler, dfElt):
totalVariance = np.sum(scaler.var_)
if totalVariance <= 1e-6 : #Avoid mean scaling for constant data
return dfElt
if type(dfElt) == type(pd.Series()):
return pd.Series(np.ravel(scaler.transform(dfElt.values.reshape(1, -1))),
index = dfElt.index).rename(dfElt.name)
return pd.DataFrame(scaler.transform(dfElt),
index = dfElt.index,
columns = dfElt.columns)
def transform(self, dataset):
hasTupleElt = (type(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])==type(tuple()))
if hasTupleElt :
tupleSize = len(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])
scaledDfList = []
for k in range(tupleSize):
funcAccess = lambda x : x[k]
dfElt = dataset.applymap(funcAccess) if (type(dataset) != type(pd.Series())) else dataset.map(funcAccess)
scaler = self.scalerList[k]
scaledDfList.append(np.ravel(self.transformSingleDf(scaler, dfElt).values))
#Flattened list of tuples
tupleList= list(zip(*scaledDfList))
#Merge all datasets into a single structure
if dataset.ndim==2 :
reshapedList = [tupleList[(i*dataset.shape[1]):((i+1)*dataset.shape[1])] for i in range(dataset.shape[0])]
return pd.DataFrame(reshapedList,
index = dataset.index,
columns = dataset.columns)
else :
reshapedList = tupleList
return pd.Series(reshapedList, index = dataset.index)
else :
return self.transformSingleDf(self.scalerList[0], dataset)
return None
def inverTransformSingleDf(self, scaler, dfElt):
totalVariance = np.sum(scaler.var_)
if totalVariance <= 1e-6 : #Avoid mean scaling for constant data
return dfElt
if type(dfElt) == type(pd.Series()):
return pd.Series(np.ravel(scaler.inverse_transform(dfElt.values.reshape(1, -1))),
index = dfElt.index).rename(dfElt.name)
return pd.DataFrame(scaler.inverse_transform(dfElt),
index = dfElt.index,
columns = dfElt.columns)
def inverse_transform(self, scaledDataset):
hasTupleElt = (type(scaledDataset.iloc[0,0] if scaledDataset.ndim==2 else scaledDataset.iloc[0])==type(tuple()))
if hasTupleElt :
tupleSize = len(scaledDataset.iloc[0,0] if scaledDataset.ndim==2 else scaledDataset.iloc[0])
scaledDfList = []
for k in range(tupleSize):
funcAccess = lambda x : x[k]
dfElt = scaledDataset.applymap(funcAccess) if (type(scaledDataset) != type(pd.Series())) else scaledDataset.map(funcAccess)
scaler = self.scalerList[k]
scaledDfList.append(np.ravel(self.inverTransformSingleDf(scaler, dfElt).values))
#Flattened list of tuples
tupleList= list(zip(*scaledDfList))
#Merge all datasets into a single structure
if scaledDataset.ndim==2 :
reshapedList = [tupleList[(i*scaledDataset.shape[1]):((i+1)*scaledDataset.shape[1])] for i in range(scaledDataset.shape[0])]
return pd.DataFrame(reshapedList,
index = scaledDataset.index,
columns = scaledDataset.columns)
else :
reshapedList = tupleList
return pd.Series(reshapedList, index = scaledDataset.index)
else :
return self.inverTransformSingleDf(self.scalerList[0], scaledDataset)
return None
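# Illustrative sketch (not part of the original file): customMeanStdScale
# transparently handles dataframes whose cells are tuples, e.g. the
# (time-to-maturity, log-moneyness) coordinates used later, by fitting one
# StandardScaler per tuple component.
def _demo_custom_meanstd_scale():
    coords = pd.DataFrame({0: [(0.5, -0.1), (0.5, 0.1)],
                           1: [(1.0, 0.0), (1.0, 0.2)]})
    scaler = customMeanStdScale()
    scaler.fit(coords)
    scaled = scaler.transform(coords)             # cells remain tuples
    recovered = scaler.inverse_transform(scaled)  # recovers the original values
    return scaled, recovered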
#Encapsulation class for Sklearn min max scaling
class standardMinMaxScale(customMeanStdScale):
def __init__(self, feature_range = (0,1)):
super().__init__()
#We can enforce the minimum if we expect smaller data in the testing set
def fit(self, dataset,
enforceDataSetMin = None,
enforceDataSetMax = None):
hasTupleElt = (type(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])==type(tuple()))
if hasTupleElt :
tupleSize = len(dataset.iloc[0,0] if dataset.ndim==2 else dataset.iloc[0])
self.scalerList = [MinMaxScaler() for i in range(tupleSize)]
for k in range(tupleSize):
funcAccess = lambda x : x[k]
scaler = self.scalerList[k]
dfElt = dataset.applymap(funcAccess) if (type(dataset) != type(pd.Series())) else dataset.map(funcAccess)
scaler.fit(dfElt)
else :
self.scalerList = []
self.scalerList.append(MinMaxScaler())
self.scalerList[0].fit(dataset)
return
def selectLessCorrelatedFeatures(featureCorr, nbPoints):
objectiveFunction = lambda x : x.T @ featureCorr.values @ x
gradient = lambda x : (featureCorr.values + featureCorr.values.T) @ x
hessian = lambda x : featureCorr.values + featureCorr.values.T
nbRestart = 5
x0s = np.random.uniform(size=(nbRestart , featureCorr.shape[1]))
x0s = x0s * nbPoints / np.sum(x0s, axis = 1, keepdims=True)
bestSol = x0s[0,:]
bestVar = featureCorr.shape[1]
bounds = [[0,1]] * featureCorr.shape[1]
budgetAllocation = LinearConstraint(np.ones((1,featureCorr.shape[1])), [nbPoints], [nbPoints], keep_feasible = True)
for k in range(nbRestart):
res = minimize(objectiveFunction, x0s[k,:],
bounds = bounds,
constraints = budgetAllocation,
method = "trust-constr",
jac = gradient,
hess = hessian)
if (res.fun < bestVar) or (k==0) :
bestSol = res.x
bestVar = res.fun
print("Attempt no ", k, " ; best solution : ", bestSol, " ; best inertia : ", bestVar)
topnbPointsValue = -(np.sort(-bestSol)[nbPoints - 1])
optimalAllocation = pd.Series(bestSol, index = featureCorr.index)
return optimalAllocation[optimalAllocation >= topnbPointsValue].index
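# Illustrative sketch (not part of the original file): the optimiser above
# minimises x' C x under the budget constraint sum(x) == nbPoints with
# 0 <= x <= 1, then keeps the nbPoints features carrying the largest weights,
# i.e. a subset of roughly mutually uncorrelated features.
def _demo_select_less_correlated():
    corr = pd.DataFrame([[1.0, 0.9, 0.1],
                         [0.9, 1.0, 0.2],
                         [0.1, 0.2, 1.0]],
                        index=["f1", "f2", "f3"], columns=["f1", "f2", "f3"])
    # Expected to favour f3 plus one of the highly correlated pair (f1, f2)
    return selectLessCorrelatedFeatures(corr, 2)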
def isCSVFile(filename):
extension = filename[-3:]
return (extension == "csv")
#These classes are responsible for :
# - passing the right data to the model for trainingData
# - converting data to the original format for plotting
class datasetATM:
def __init__(self, pathToDataset,
trainingSetPercentage,
minExpiry,
completionRate,
scaleFeatures = False):
self.trainingSetPercentage = trainingSetPercentage
self.pathToDataset = pathToDataset
self.activateScaling = scaleFeatures
self.isGridStable = True
self.testVol = None
self.trainVol = None
self.VolSerie = None
self.volScaler = None
self.scaledTrainVol = None
self.scaledTestVol = None
self.testCoordinates = None
self.trainCoordinates = None
self.CoordinatesSerie = None
self.coordinatesScaler = None
self.scaledTrainCoordinates = None
self.scaledTestCoordinates = None
self.testFwd = None
self.trainFwd = None
self.FwdSerie = None
self.fwdScaler = None
self.scaledTrainFwd = None
self.scaledTestFwd = None
self.testStrike = None
self.trainStrike = None
self.StrikeSerie = None
self.loadData()
self.scaleDataSets()
lambdaAppend = (lambda x : x[0].append(x[1]) if x[0] is not None else None)
self.fullHistory = list(map(lambdaAppend, zip(self.getTrainingDataForModel(),self.getTestingDataForModel())))
self.fullScaler = [self.volScaler, self.coordinatesScaler, self.fwdScaler, None]
self.gridSize = self.getTestingDataForModel()[0].shape[1]
return
def loadData(self):
raise NotImplementedError("Abstract class")
return
def sanityCheck(self):
print("Testing formatModelDataAsDataSet")
assert(equalDf(self.testVol.dropna(how="all").head(),
self.formatModelDataAsDataSet(self.getTestingDataForModel())[0].head()))
origData = self.formatModelDataAsDataSet(self.getTrainingDataForModel())
print("Testing coordinates")
assert(equalDf(self.trainCoordinates.head().applymap(lambda x : x[0]),
origData[1].head().applymap(lambda x : x[0])))
assert(equalDf(self.trainCoordinates.head().applymap(lambda x : x[1]),
origData[1].head().applymap(lambda x : x[1])))
print("Testing Forward")
assert(equalDf(self.getTrainingDataForModel()[2].head(),
self.convertRealDataToModelFormat(self.formatModelDataAsDataSet(self.getTrainingDataForModel()))[2].head()))
print("Testing masking function")
maskedDf = self.maskDataset(self.getTrainingDataForModel()[1]).dropna(how="all",axis=1).head()
assert(maskedDf.shape[1] == (self.gridSize - self.maskedPoints.size))
print("Testing convertRealDataToModelFormat")
assert(equalDf(self.trainVol.loc[origData[0].index].head(),
self.formatModelDataAsDataSet(self.convertRealDataToModelFormat(origData))[0].head()))
print("Success")
return
#When the grid is not fixed - i.e. the volatilities' times to maturity slide from one day to the next -
#we need to decide which instruments can be compared between two dates
def decideInvestableInstruments(self):
coordinatesDf = self.formatModelDataAsDataSet(self.getDataForModel())[1]
pairIndexHistory = []#series of pair of index
nextTTMDf = coordinatesDf.shift(-1).dropna(how = "all")
for serie in coordinatesDf.head(-1).iterrows():
currentDay = serie[1]
nextDay = nextTTMDf.loc[serie[0]]
currentRankForHedgeablePoints = currentDay.index
nextRankForHedgeablePoints = nextDay.index
pairIndexHistory.append((currentRankForHedgeablePoints, nextRankForHedgeablePoints))
pairIndexHistory.append((nextRankForHedgeablePoints, nextRankForHedgeablePoints))
pairIndexHistory = pd.Series(pairIndexHistory, index = coordinatesDf.index)
return pairIndexHistory
#List Format : First position vol, second position coordinates, third position forward, fourth position strike
def getTestingDataForModel(self):
return [self.scaledTestVol, self.scaledTestCoordinates, self.scaledTestFwd, self.testStrike]
def getTrainingDataForModel(self):
return [self.scaledTrainVol, self.scaledTrainCoordinates, self.scaledTrainFwd, self.trainStrike]
def getDataForModel(self, dates = None):
if dates is None :
return self.fullHistory
funcExtractDate = lambda x : x.loc[dates] if x is not None else None
return list(map(funcExtractDate, self.fullHistory))
#Transform synthetic surfaces into model-format data
#The name of each surface should be its date
def convertRealDataToModelFormat(self, unformattedSurface):
if(self.activateScaling):
if (type(unformattedSurface)==type(list())) and (len(unformattedSurface)==4):
lambdaTransform = lambda x : x[0] if x[1] is None else x[1].transform(x[0])
return list(map(lambdaTransform, zip(unformattedSurface, self.fullScaler)))
elif (type(unformattedSurface)!=type(list())) :
return self.volScaler.transform(unformattedSurface)
else :
raise("Can not format as model data")
return
return unformattedSurface
#Format data returned by a model back to the original dataset format
#For instance, variations are transformed into levels using yesterday's volatilities
def formatModelDataAsDataSet(self, modelData):
if(self.activateScaling):
if (type(modelData)==type(list())) and (len(modelData)==4):
lambdaTransform = lambda x : x[0] if x[1] is None else x[1].inverse_transform(x[0])
return list(map(lambdaTransform, zip(modelData, self.fullScaler)))
elif (type(modelData)!=type(list())) :
return self.volScaler.inverse_transform(modelData)
else :
raise("Can not format as model data")
return
return modelData
def scaleDataSets(self):
if(self.activateScaling):
#Define standard (mean/std) scaling for volatility
self.volScaler = customMeanStdScale() #customMinMaxScale()
self.volScaler.fit(self.trainVol, enforceDataSetMin = 0)#Positive volatilities of course
self.scaledTrainVol = self.volScaler.transform(self.trainVol)
self.scaledTestVol = self.volScaler.transform(self.testVol)
#Define standard (mean/std) scaling for the surface coordinates
self.coordinatesScaler = customMeanStdScale() #customMinMaxScale()
self.coordinatesScaler.fit(self.trainCoordinates, enforceDataSetMin = 0)#Positive coordinates (expiry and tenor)
self.scaledTrainCoordinates = self.coordinatesScaler.transform(self.trainCoordinates)
self.scaledTestCoordinates = self.coordinatesScaler.transform(self.testCoordinates)
#Define standard (mean/std) scaling for forward swap rates
self.fwdScaler = customMeanStdScale() # customMinMaxScale()
self.fwdScaler.fit(self.trainFwd)
self.scaledTrainFwd = self.fwdScaler.transform(self.trainFwd)
self.scaledTestFwd = self.fwdScaler.transform(self.testFwd)
else :
self.scaledTrainVol = self.trainVol
self.scaledTestVol = self.testVol
self.scaledTrainCoordinates = self.trainCoordinates
self.scaledTestCoordinates = self.testCoordinates
self.scaledTrainFwd = self.trainFwd
self.scaledTestFwd = self.testFwd
return
def getATMDataFromCSV(dataSetPath, trainingSetPercentage=0.8):
formattedHistory = extractDataFromCSV(dataSetPath)
#Filter only ATM volatility
ATMHistory = (formattedHistory[formattedHistory.index.get_level_values(indexRelStrike)=='A']
.reorder_levels([indexName, indexExpiry, indexTenor, indexRelStrike])
.sort_index())
#Remove strike from index as we consider only ATM
ATMHistory.index = ATMHistory.index.droplevel(3)
#Get Expiry and tenors shared by all dates
commonGridPoints = intersectionGrid(ATMHistory)
#Get indexer for multiindex
idx = pd.IndexSlice
#Filter data for Expiry and tenors common to all dates
commonATMHistory = ATMHistory.loc[idx[:,commonGridPoints.get_level_values(0),
commonGridPoints.get_level_values(1)],:]
#Feeding Data
#Take the first trainingSetPercentage of dates as the training set and the remaining ones as the testing set
trainTmp,testTmp = splitTrainTestDataChronologically(commonATMHistory,trainingSetPercentage)
#Separate features between volatility, forward rate and Strike
testVol = splitHistory(testTmp,"Vol")
trainVol = splitHistory(trainTmp,"Vol")
testFwd = splitHistory(testTmp,"forward")
trainFwd = splitHistory(trainTmp,"forward")
testStrike = None
trainStrike = None
indexFunc = lambda x : pd.Series(x.index.values,
index = x.index)
trainCoordinates = trainVol.apply(indexFunc, axis=1)
testCoordinates = testVol.apply(indexFunc, axis=1)
trainVol = pd.DataFrame(trainVol.values, index=trainVol.index)
testVol = pd.DataFrame(testVol.values, index=testVol.index)
trainCoordinates = pd.DataFrame(trainCoordinates.values, index=trainCoordinates.index)
testCoordinates = pd.DataFrame(testCoordinates.values, index=testCoordinates.index)
return testVol, trainVol, testFwd, trainFwd, testCoordinates, trainCoordinates, testStrike, trainStrike
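# Illustrative sketch (not part of the original file): the eight objects
# returned by getATMDataFromCSV are consumed by dataSetATMCSV.loadData below.
# Volatilities, forwards and coordinates are dataframes indexed by date with
# one integer column per (expiry, tenor) grid point; strikes are None for the
# ATM dataset. pathToCsv is a hypothetical file path.
def _demo_load_atm_csv(pathToCsv):
    (testVol, trainVol, testFwd, trainFwd,
     testCoordinates, trainCoordinates,
     testStrike, trainStrike) = getATMDataFromCSV(pathToCsv, trainingSetPercentage=0.8)
    assert trainVol.shape[1] == trainCoordinates.shape[1]
    return trainVol.shape, testVol.shape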
class dataSetATMCSV(datasetATM):
def __init__(self, pathToDataset,
trainingSetPercentage,
minExpiry,
completionRate,
scaleFeatures = False):
self.nbExpiry = 0
self.nbTenors = 0
self.minExpiry = minExpiry
self.expiryTenorToRankSerie = None
super().__init__(pathToDataset,
trainingSetPercentage,
minExpiry,
completionRate,
scaleFeatures = scaleFeatures)
listTokeep = [(0.08333333333333333,0.25),(0.08333333333333333,10.0),
(0.08333333333333333,30.0),(0.5,2.0),(0.5,15.0),
(5.0,1.0),(5.0,20.0),(10.0,5.0)]
self.setMaskedPoints(listTokeep)
def setMaskedPoints(self, completionPoints):
# self.maskedPoints = sampleSwaptionsToDelete(self.getTestingDataForModel(),
# completionRate)
fullObs = self.getTestingDataForModel()[1]
self.maskedPoints = fullObs.columns.difference(completionPoints)
if self.isGridStable :#Surface coordinates are the same for each day
#Matrix where True indicates that this point is completed (i.e. hidden on the market), false otherwise
maskMatrix = pd.Series(False, index = self.expiryTenorToRankSerie.index)
maskMatrix.loc[fullObs.iloc[0].loc[self.maskedPoints]] = True
self.maskSerie = pd.Series(maskMatrix.values, index = self.expiryTenorToRankSerie.values)
self.maskMatrix = maskMatrix.unstack(level=-1)
#Return a deep copy with masked values
def maskDataset(self, completeDataset):
maskedRank = self.maskedPoints
maskedDataset = completeDataset.copy()
if completeDataset.ndim == 1 :
maskedDataset.loc[maskedRank] = np.NaN
elif completeDataset.ndim == 2 :
maskedDataset[maskedRank] = np.NaN
return maskedDataset
def removeShortestExpiry(self, dataset):
if dataset is None :
return
#remove data with expiry inferior than minExpiry
hasExpiryColumn = ("Expiry" in dataset.columns.names)
columnsFilter = ((dataset.columns.get_level_values("Expiry")>=self.minExpiry) if hasExpiryColumn else
self.expiryTenorToRankSerie[self.expiryTenorToRankSerie.index.get_level_values("Expiry")>=self.minExpiry].values)
return dataset.filter(items=dataset.columns[columnsFilter])
def loadData(self):
tmp = getATMDataFromCSV(self.pathToDataset, self.trainingSetPercentage)
self.expiryTenorToRankSerie = pd.Series(tmp[4].columns,
index = pd.MultiIndex.from_tuples(tmp[4].iloc[0].values,
names=('Expiry', 'Tenor')))
self.expiryTenorToRankSerie = self.expiryTenorToRankSerie[self.expiryTenorToRankSerie.index.get_level_values("Expiry")>=self.minExpiry]
self.testVol = self.removeShortestExpiry(tmp[0])
self.trainVol = self.removeShortestExpiry(tmp[1])
self.testCoordinates = self.removeShortestExpiry(tmp[4])
self.trainCoordinates = self.removeShortestExpiry(tmp[5])
self.testFwd = self.removeShortestExpiry(tmp[2])
self.trainFwd = self.removeShortestExpiry(tmp[3])
self.testStrike = self.removeShortestExpiry(tmp[6])
self.trainStrike = self.removeShortestExpiry(tmp[7])
self.nbExpiry = self.trainFwd.columns.get_level_values("Expiry").unique().size
self.nbTenors = self.trainFwd.columns.get_level_values("Tenor").unique().size
self.gridSize = self.trainFwd.columns.size
return
def datasetSummary(self):
print("Number of days in dataset",
self.getDataForModel()[0].shape[0])
print("Number of days for testing", self.getTestingDataForModel()[0].shape[0])
print("Number of days for training", self.getTrainingDataForModel()[0].shape[0])
print("Working on ATM volatility level")
print("Number of points in the grid : ", self.gridSize)
print("Number of expiries : ", self.nbExpiry)
print("List : ", self.getTrainingDataForModel()[2].columns.get_level_values("Expiry").unique())
print("Number of tenors : ", self.nbTenors)
print("List : ", self.getTrainingDataForModel()[2].columns.get_level_values("Tenor").unique())
return
def getATMDataFromPickle(dataSetPath,
trainingSetPercentage=0.8,
minStrikeIndex = 0,
maturityStrikeIndex = 0):
with open(dataSetPath, "rb") as f :
objectRead = pickle.load(f)
def rankCalDays(dfDay):
return dfDay["nBizDays"].rank()
listRank = list(map(rankCalDays, objectRead))
dfRank = pd.concat(listRank)
dfConcat = pd.concat(objectRead)
dfConcat["Rank"] = dfRank
volDf = dfConcat.reset_index().set_index(["index", "Rank"]).drop(["Date", "Forwards", "nBizDays", "nCalDays", "diff Days"], axis=1, errors="ignore").unstack()
volDf.columns = volDf.columns.set_names("Moneyness",level=0)
volDf = volDf.dropna(how="all",axis=1).astype("float64")
fwdDf = dfConcat.reset_index().set_index(["index", "Rank"])["Forwards"].unstack()
coordinatesRankDf = dfConcat.reset_index().set_index(["index", "Rank"])["nBizDays"].unstack()
def bindBizDays(rows):
bizDays = coordinatesRankDf.loc[rows.name].astype("float64")
return pd.Series(list(zip(bizDays[rows.index.get_level_values("Rank")].values / 252.0,
np.log(rows.index.get_level_values("Moneyness").astype("float64")) )),
index = rows.index)
coordinatesDf = volDf.apply(bindBizDays, axis=1)
def getFwd(rowVol):
ttmRank = rowVol.index.get_level_values("Rank")
return pd.Series(fwdDf.loc[rowVol.name, ttmRank].values, index = rowVol.index)
#For each point in the vol dataframe, look up the corresponding forward
fwdDf = volDf.apply(getFwd, axis=1).dropna(how="all",axis=1).astype("float64")
firstTestingDate = int(volDf.index.shape[0] * trainingSetPercentage)
trainingDates = volDf.index[:firstTestingDate]
trainVol = volDf.loc[trainingDates]
testVol = volDf.drop(trainVol.index)
trainVol = pd.DataFrame(trainVol.values, index=trainVol.index)
testVol = pd.DataFrame(testVol.values, index=testVol.index)
trainFwd = fwdDf.loc[trainVol.index]
trainFwd = pd.DataFrame(trainFwd.values, index=trainFwd.index)[trainVol.columns]
testFwd = fwdDf.drop(trainVol.index)
testFwd = pd.DataFrame(testFwd.values, index=testFwd.index)[testVol.columns]
testStrike = None
trainStrike = None
trainCoordinates = coordinatesDf.loc[trainingDates]
trainCoordinates = pd.DataFrame(trainCoordinates.values, index=trainCoordinates.index)[trainVol.columns]
testCoordinates = coordinatesDf.drop(trainVol.index)
testCoordinates = pd.DataFrame(testCoordinates.values, index=testCoordinates.index)[testVol.columns]
strikeDf = trainCoordinates.applymap(lambda x : x[1]).iloc[0]
strikeList = np.sort(strikeDf.unique())
minStrike = strikeList[minStrikeIndex]
strikesKept = strikeDf[strikeDf >= minStrike].index
maturityDf = trainCoordinates.applymap(lambda x : x[0]).iloc[0][strikesKept]
maturityList = np.sort(maturityDf.unique())
minMaturity = maturityList[maturityStrikeIndex]
maturityKept = maturityDf[maturityDf >= minMaturity].index
testVol = testVol[maturityKept]
trainVol = trainVol[maturityKept]
trainCoordinates = trainCoordinates[maturityKept]
testCoordinates = testCoordinates[maturityKept]
trainFwd = trainFwd[maturityKept]
testFwd = testFwd[maturityKept]
return testVol, trainVol, testFwd, trainFwd, testCoordinates, trainCoordinates, testStrike, trainStrike
def saveInterpolationResult(pathFile, paramDf, interpDf):
pathTestFileInterp = pathFile + 'Interp'
dictPickle = {}
dictPickle["InterpParam"] = paramDf
dictPickle["InterpolatedDf"] = interpDf
with open(pathTestFileInterp, "wb") as f :
pickle.dump(dictPickle, f, protocol=3)
return
def removePointsWithInvalidCoordinates(incompleteSurface, coordinates):
#Keep only points whose coordinates are fully observed (no NaN)
def invalidCoordinates(x):
if isinstance(x, tuple):
return not any(np.isnan(x))
return not np.isnan(x)
filteredCoordinates = np.array(list(map(invalidCoordinates, coordinates)))
return incompleteSurface[filteredCoordinates], coordinates[filteredCoordinates]
def readInterpolationResult(pathFile):
pathTestFileInterp = pathFile + 'Interp'
with open(pathTestFileInterp, "rb") as f :
dictPickle = pickle.load(f)
return dictPickle["InterpParam"], dictPickle["InterpolatedDf"]
class dataSetATMPickle(datasetATM):
def __init__(self, pathToDataset,
trainingSetPercentage,
minExpiry,
completionRate,
scaleFeatures = False):
self.nbMoneyness = 0
self.MoneynessList = []
self.nbTTM = 0
self.ttmList = []
self.minTTM = None
self.isGridStable = False
self.minStrike = 4
self.minMaturity = 0
self.logTransform = True
super().__init__(pathToDataset,
trainingSetPercentage,
minExpiry,
completionRate,
scaleFeatures = scaleFeatures)
listTokeep = [1.0, 2.0, 3.0, 4.0]
self.setMaskedPoints(listTokeep)
def datasetSummary(self):
print("Number of days in dataset",
self.getDataForModel()[0].shape[0])
print("Number of days for testing", self.getTestingDataForModel()[0].shape[0])
print("Number of days for training", self.getTrainingDataForModel()[0].shape[0])
print("Working on Equity volatility level")
print("Number of points in the grid : ", self.gridSize)
print("Number of Moneyness : ", self.nbMoneyness)
print("List : ", self.MoneynessList)
print("Number of Time to maturities : ", self.nbTTM)
print("List : ", self.ttmList)
return
def loadData(self):
tmp = getATMDataFromPickle(self.pathToDataset, self.trainingSetPercentage, self.minStrike, self.minMaturity)
self.testVol = tmp[0]
self.trainVol = tmp[1]
self.testCoordinates = tmp[4]
self.trainCoordinates = tmp[5]
self.testFwd = tmp[2]
self.trainFwd = tmp[3]
self.testStrike = tmp[6]
self.trainStrike = tmp[7]
def extractSingleton(df, coordIndex):
valueList = np.unique(list(map(lambda x : x[coordIndex], np.ravel(df.values))))
return valueList[~np.isnan(valueList)]
fullCoordinatedDf = self.testCoordinates.append(self.trainCoordinates)
self.MoneynessList = extractSingleton(fullCoordinatedDf, 1)
self.ttmList = extractSingleton(fullCoordinatedDf, 0)
self.nbMoneyness = self.MoneynessList.size
self.nbTTM = self.ttmList.size
self.gridSize = self.trainVol.columns.size
return
def setMaskedPoints(self, completionPoints):
# self.maskedPoints = sampleSwaptionsToDelete(self.getTestingDataForModel(),
# completionRate)
fullObs = self.getTestingDataForModel()[0].iloc[0]
self.maskedPoints = fullObs.index.difference(completionPoints)
#Matrix where True indicates that this point is completed (i.e. hidden on the market), false otherwise
maskMatrix = pd.Series(False, index = fullObs.index)
maskMatrix.loc[self.maskedPoints] = True
self.maskSerie = maskMatrix
#self.maskMatrix = maskMatrix.unstack(level=-1)
#Return a deep copy with masked values
def maskDataset(self, completeDataset):
maskedRank = self.maskedPoints
maskedDataset = completeDataset.copy()
if completeDataset.ndim == 1 :
maskedDataset.loc[maskedRank] = np.NaN
elif completeDataset.ndim == 2 :
maskedDataset[maskedRank] = np.NaN
return maskedDataset
#When the grid is not fixed - i.e. the volatilities' times to maturity slide from one day to the next -
#we need to decide which instruments can be compared between two dates
def decideInvestableInstruments(self):
ttmDf = getTTMFromCoordinates(self.formatModelDataAsDataSet(self.getDataForModel()))
pairIndexHistory = []#series of pair of index
nextTTMDf = ttmDf.shift(-1).dropna(how = "all")
for serie in ttmDf.head(-1).iterrows():
currentDay = serie[1]
nextDay = nextTTMDf.loc[serie[0]]
currentRankForHedgeablePoints = currentDay[(currentDay - 1).isin(nextDay) & (~currentDay.isna())].index
nextRankForHedgeablePoints = nextDay[(nextDay).isin(currentDay - 1) & (~nextDay.isna())].index
if currentRankForHedgeablePoints.empty :#case where the current or next day is not counted as a business day
currentRankForHedgeablePoints = currentDay[(currentDay).isin(nextDay) & (~currentDay.isna())].index
nextRankForHedgeablePoints = nextDay[(nextDay).isin(currentDay) & (~nextDay.isna())].index
pairIndexHistory.append((currentRankForHedgeablePoints, nextRankForHedgeablePoints))
#Last day
pairIndexHistory.append((nextRankForHedgeablePoints, nextRankForHedgeablePoints))
pairIndexHistory = pd.Series(pairIndexHistory, index = ttmDf.index)
return pairIndexHistory
class datasetATMVariation(dataSetATMCSV):
def __init__(self, pathToDataset,
trainingSetPercentage,
minExpiry,
completionRate,
scaleFeatures = False):
self.trainingVolVariation = None
self.testingVolVariation = None
self.yesterdayVolSerie = None
self.trainingCoordinatesVariation = None
self.testingCoordinatesVariation = None
self.trainingFwdVariation = None
self.testingFwdVariation = None
self.yesterdayFwdSerie = None
self.trainingStrikeVariation = None
self.testingStrikeVariation = None
self.yesterdayStrikeSerie = None
#No variation
super().__init__(pathToDataset,
trainingSetPercentage,
minExpiry,
completionRate,
scaleFeatures = scaleFeatures)
def addYesterdayLevel(self, variationDataset, levelDataSet):
if variationDataset.ndim == 1 :
return variationDataset + levelDataSet.loc[variationDataset.name]
elif variationDataset.ndim == 2 :
return variationDataset + levelDataSet.loc[variationDataset.index]
raise ValueError("Incorrect tensor order !")
return None
def removeYesterdayLevel(self, todayDataset, yesterdayDataSet):
if todayDataset.ndim == 1 :
return todayDataset - yesterdayDataSet.loc[todayDataset.name]
elif todayDataset.ndim == 2 :
return todayDataset - yesterdayDataSet.loc[todayDataset.index]
raise ValueError("Incorrect tensor order !")
return None
#Apply scaling and various transforms to map real data onto the model data format
#Name of surface should be the date
def convertRealDataToModelFormat(self, unformattedSurface):
if (type(unformattedSurface)==type(list())) and (len(unformattedSurface)==4):
date = unformattedSurface[0].index
variation = [unformattedSurface[0] - self.yesterdayVolSerie.loc[date],
unformattedSurface[1],
unformattedSurface[2] - self.yesterdayFwdSerie.loc[unformattedSurface[2].index],
unformattedSurface[3]]
if(self.activateScaling):
lambdaTransform = lambda x : x[0] if x[1] is None else x[1].transform(x[0])
return list(map(lambdaTransform, zip(variation, self.fullScaler)))
else :
return variation
elif (type(unformattedSurface)!=type(list())) :
date = unformattedSurface.name
variation = unformattedSurface - self.yesterdayVolSerie.loc[date]
if(self.activateScaling):
return self.volScaler.transform(variation)
else :
return variation
else :
raise("Can not format as model data")
return None
#Format data returned by a model back to the dataset format
#For instance, variations are transformed into levels using yesterday's values
#(see the illustrative sketch after this class)
def formatModelDataAsDataSet(self,modelData):
unScaledModelData = super().formatModelDataAsDataSet(modelData)
if (type(modelData)==type(list())) and (len(modelData)==4):
originalFormat = [self.addYesterdayLevel(unScaledModelData[0], self.yesterdayVolSerie),
unScaledModelData[1],
self.addYesterdayLevel(unScaledModelData[2], self.yesterdayFwdSerie),
unScaledModelData[3]]
elif (type(modelData)!=type(list())) :
originalFormat = self.addYesterdayLevel(unScaledModelData, self.yesterdayVolSerie)
else :
raise("Can not format as model data")
return originalFormat
def formatDataAsVariation(self, trainingDataSet, testingDataSet):
trainingVariation = trainingDataSet.diff().dropna(how='all')
testingVariation = testingDataSet.diff()
testingVariation.iloc[0] = testingDataSet.iloc[0] - trainingDataSet.iloc[-1]
#Shift dates to obtain a series of past values
yesterdayTraining = trainingDataSet.shift().dropna(how='all')
yesterdayTesting = testingDataSet.shift()
yesterdayTesting.iloc[0] = trainingDataSet.iloc[-1]
return trainingVariation, testingVariation, yesterdayTraining.append(yesterdayTesting)
def loadData(self):
super().loadData()
tmp1 = self.formatDataAsVariation(self.trainVol, self.testVol)
self.trainingVolVariation = tmp1[0]
self.testingVolVariation = tmp1[1]
self.yesterdayVolSerie = tmp1[2]
#Coordinates are not formatted as variations
self.trainingCoordinatesVariation = self.trainCoordinates.loc[self.trainingVolVariation.index]
self.testingCoordinatesVariation = self.testCoordinates.loc[self.testingVolVariation.index]
tmp2 = self.formatDataAsVariation(self.trainFwd, self.testFwd)
self.trainingFwdVariation = tmp2[0]
self.testingFwdVariation = tmp2[1]
self.yesterdayFwdSerie = tmp2[2]
# tmp3 = self.formatDataAsVariation(self.trainStrike, self.testStrike)
# self.trainingStrikeVariation = tmp3[0]
# self.testingStrikeVariation = tmp3[1]
# self.yesterdayStrikeSerie = tmp3[2]
return
def scaleDataSets(self):
if(self.activateScaling):
#Define MinMax scaling for volatility
self.volScaler = customMeanStdScale() #customMinMaxScale()
self.volScaler.fit(self.trainingVolVariation)#Positive volatilities of course
self.scaledTrainVol = self.volScaler.transform(self.trainingVolVariation)
self.scaledTestVol = self.volScaler.transform(self.testingVolVariation)
#Define scaling for coordinates
self.coordinatesScaler = customMeanStdScale() #customMinMaxScale()
self.coordinatesScaler.fit(self.trainCoordinates, enforceDataSetMin = 0)#Coordinates are non-negative
self.scaledTrainCoordinates = self.coordinatesScaler.transform(self.trainingCoordinatesVariation)
self.scaledTestCoordinates = self.coordinatesScaler.transform(self.testingCoordinatesVariation)
#Define MinMax scaling for forward swap rates
self.fwdScaler = customMeanStdScale() #customMinMaxScale()
self.fwdScaler.fit(self.trainingFwdVariation)
self.scaledTrainFwd = self.fwdScaler.transform(self.trainingFwdVariation)
self.scaledTestFwd = self.fwdScaler.transform(self.testingFwdVariation)
else :
self.scaledTrainVol = self.trainingVolVariation
self.scaledTestVol = self.testingVolVariation
self.scaledTrainCoordinates = self.trainingCoordinatesVariation
self.scaledTestCoordinates = self.testingCoordinatesVariation
self.scaledTrainFwd = self.trainingFwdVariation
self.scaledTestFwd = self.testingFwdVariation
return
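#Illustration only (not used by the classes above): a minimal, self-contained sketch
#of the variation/level round trip performed by formatDataAsVariation and
#addYesterdayLevel. All values are invented; call the helper manually to check it.
def _variation_roundtrip_example():
    import pandas as pd
    levels = pd.DataFrame({"vol": [0.20, 0.22, 0.21, 0.25]},
                          index=pd.date_range("2020-01-01", periods=4))
    train, test = levels.iloc[:3], levels.iloc[3:]
    #Variation: the first testing variation bridges the last training level
    testVariation = test.diff()
    testVariation.iloc[0] = test.iloc[0] - train.iloc[-1]
    #Yesterday's levels, aligned on the testing dates
    yesterday = test.shift()
    yesterday.iloc[0] = train.iloc[-1]
    #Adding yesterday's level recovers today's level
    recovered = testVariation + yesterday
    assert (recovered - test).abs().max().max() < 1e-12
    return recovered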
def getSkewDataFromCSV(dataSetPath, trainingSetPercentage=0.8):
formattedHistory = (extractDataFromCSV(dataSetPath)
.reorder_levels([indexName, indexExpiry, indexTenor, indexRelStrike])
.sort_index())
#Get Expiry and tenors shared by all dates
commonGridPoints = intersectionGrid(formattedHistory)
#Get indexer for multiindex
idx = pd.IndexSlice
#Filter data for Expiry, Tenors and Strike common to all dates
commonHistory = formattedHistory.loc[idx[:,commonGridPoints.get_level_values(0),
commonGridPoints.get_level_values(1),
commonGridPoints.get_level_values(2)],:]
#Feeding Data
#Take the first 80% dates as training set and the remaining ones as testing set
trainTmp,testTmp = splitTrainTestDataChronologically(commonHistory,trainingSetPercentage)
#Separate features between volatility, forward rate and Strike
testVol = splitHistory(testTmp,"Vol")
trainVol = splitHistory(trainTmp,"Vol")
trainVol = pd.DataFrame(trainVol.values, index=trainVol.index)
testVol = pd.DataFrame(testVol.values, index=testVol.index)
testFwd = splitHistory(testTmp,"forward")
trainFwd = splitHistory(trainTmp,"forward")
testStrike = splitHistory(testTmp,indexStrike)
trainStrike = splitHistory(trainTmp,indexStrike)
indexFunc = lambda x : pd.Series(x.index.values,
index = x.index)
trainCoordinates = trainVol.apply(indexFunc, axis=1)
testCoordinates = testVol.apply(indexFunc, axis=1)
trainCoordinates = pd.DataFrame(trainCoordinates.values, index=trainCoordinates.index)
testCoordinates =
|
pd.DataFrame(testCoordinates.values, index=testCoordinates.index)
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# # Assignment_7
#
# Submitted By - <NAME>
# In[18]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import patches
# In[19]:
df=
|
pd.read_csv("pima-indians-diabetes.csv")
|
pandas.read_csv
|
from dataset import Dataset
from dataset_csv import Dataset_CSV
from dataset_ravdess import Ravdess
from feats_opensmile import Opensmileset
from runmanager import Runmanager
from util import Util
import glob_conf
from plots import Plots
from demo_predictor import Demo_predictor
import ast # To convert strings to objects
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from scaler import Scaler
import pickle
class Experiment:
"""Main class specifying an experiment"""
def __init__(self, config_obj):
"""
Parameters
----------
config_obj : a config parser object that sets the experiment parameters; it is installed as a global object.
"""
self.set_globals(config_obj)
self.name = glob_conf.config['EXP']['name']
self.util = Util()
def set_globals(self, config_obj):
"""install a config object in the global space"""
glob_conf.init_config(config_obj)
def load_datasets(self):
"""Load all databases specified in the configuration and map the labels"""
ds = ast.literal_eval(glob_conf.config['DATA']['databases'])
self.datasets = {}
for d in ds:
if d == 'ravdess':
data = Ravdess()
else:
ds_type = self.util.config_val('DATA', d+'.type', 'audformat')
if ds_type == 'audformat':
data = Dataset(d)
elif ds_type == 'csv':
data = Dataset_CSV(d)
else:
self.util.error(f'unknown data type: {ds_type}')
data.load()
self.datasets.update({d: data})
self.target = self.util.config_val('DATA', 'target', 'emotion')
def fill_train_and_tests(self):
"""Set up train and development sets. The method should be specified in the config."""
self.df_train, self.df_test =
|
pd.DataFrame()
|
pandas.DataFrame
|
from typing import Dict
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
frequencies = ["D", "15min"]
DistributionDict = Dict[str, pd.DataFrame]
@pytest.fixture(params=frequencies, ids=frequencies)
def date_range(request) -> pd.DatetimeIndex:
"""Create pd.Series with range of dates."""
freq = request.param
dtr = pd.date_range(start="2020-01-01", end="2020-03-01", freq=freq)
return dtr
@pytest.fixture
def all_date_present_df(date_range: pd.DatetimeIndex) -> pd.DataFrame:
"""Create pd.DataFrame that contains some target on given range of dates without gaps."""
df = pd.DataFrame({"timestamp": date_range})
df["target"] = [i for i in range(len(df))]
df.set_index("timestamp", inplace=True)
return df
@pytest.fixture
def all_date_present_df_two_segments(all_date_present_df: pd.DataFrame) -> pd.DataFrame:
"""Create pd.DataFrame that contains two segments with some targets on given range of dates without gaps."""
df_1 = all_date_present_df.reset_index()
df_2 = all_date_present_df.copy().reset_index()
df_1["segment"] = "segment_1"
df_2["segment"] = "segment_2"
classic_df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(classic_df)
return df
@pytest.fixture
def df_with_missing_value_x_index(random_seed, all_date_present_df: pd.DataFrame) -> Tuple[pd.DataFrame, int]:
"""Create pd.DataFrame that contains some target on given range of dates with one gap."""
# index cannot be first or last value,
# because Imputer should know starting and ending dates
timestamps = sorted(all_date_present_df.index)[1:-1]
idx = np.random.choice(timestamps)
df = all_date_present_df
df.loc[idx, "target"] = np.NaN
return df, idx
@pytest.fixture
def df_with_missing_range_x_index(all_date_present_df: pd.DataFrame) -> Tuple[pd.DataFrame, list]:
"""Create pd.DataFrame that contains some target on given range of dates with range of gaps."""
timestamps = sorted(all_date_present_df.index)
rng = timestamps[2:7]
df = all_date_present_df
df.loc[rng, "target"] = np.NaN
return df, rng
@pytest.fixture
def df_with_missing_range_x_index_two_segments(
df_with_missing_range_x_index: pd.DataFrame,
) -> Tuple[pd.DataFrame, list]:
"""Create pd.DataFrame that contains some target on given range of dates with range of gaps."""
df_one_segment, rng = df_with_missing_range_x_index
df_1 = df_one_segment.reset_index()
df_2 = df_one_segment.copy().reset_index()
df_1["segment"] = "segment_1"
df_2["segment"] = "segment_2"
classic_df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(classic_df)
return df, rng
@pytest.fixture
def df_all_missing(all_date_present_df: pd.DataFrame) -> pd.DataFrame:
"""Create pd.DataFrame with all values set to nan."""
all_date_present_df.loc[:, :] = np.NaN
return all_date_present_df
@pytest.fixture
def df_all_missing_two_segments(all_date_present_df_two_segments: pd.DataFrame) -> pd.DataFrame:
"""Create pd.DataFrame with all values set to nan."""
all_date_present_df_two_segments.loc[:, :] = np.NaN
return all_date_present_df_two_segments
@pytest.fixture
def daily_exog_ts() -> Dict[str, Union[TSDataset, DistributionDict]]:
df1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_1",
"target": 1,
}
)
df2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_2",
"target": [1] + 23 * [0] + [1] + 23 * [0],
}
)
df = pd.concat([df1, df2], ignore_index=True)
df_exog1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="D", periods=3),
"segment": "segment_1",
"regressor_exog": 2,
}
)
df_exog2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="D", periods=3),
"segment": "segment_2",
"regressor_exog": 40,
}
)
df_exog = pd.concat([df_exog1, df_exog2], ignore_index=True)
target1 = pd.DataFrame(
{
"fold": list(range(24)),
"distribution": 1 / 24,
}
)
target2 = pd.DataFrame(
{
"fold": list(range(24)),
"distribution": [1] + 23 * [0],
}
)
ts = TSDataset(df=TSDataset.to_dataset(df), freq="H", df_exog=TSDataset.to_dataset(df_exog), known_future="all")
distribution = {"segment_1": target1, "segment_2": target2}
return {"ts": ts, "distribution": distribution}
@pytest.fixture()
def daily_exog_ts_diff_endings(daily_exog_ts):
ts = daily_exog_ts["ts"]
ts.loc[ts.index[-5] :, pd.IndexSlice["segment_1", "target"]] = np.NAN
return ts
@pytest.fixture
def inplace_resampled_daily_exog_ts() -> TSDataset:
df1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_1",
"target": 1,
}
)
df2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_2",
"target": [1] + 23 * [0] + [1] + 23 * [0],
}
)
df = pd.concat([df1, df2], ignore_index=True)
df_exog1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=72),
"segment": "segment_1",
"regressor_exog": 2 / 24,
}
)
df_exog2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=72),
"segment": "segment_2",
"regressor_exog": [40] + 23 * [0] + [40] + 23 * [0] + [40] + 23 * [0],
}
)
df_exog = pd.concat([df_exog1, df_exog2], ignore_index=True)
ts = TSDataset(df=TSDataset.to_dataset(df), freq="H", df_exog=TSDataset.to_dataset(df_exog), known_future="all")
return ts
@pytest.fixture
def noninplace_resampled_daily_exog_ts() -> TSDataset:
df1 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_1",
"target": 1,
}
)
df2 = pd.DataFrame(
{
"timestamp": pd.date_range(start="2020-01-05", freq="H", periods=48),
"segment": "segment_2",
"target": [1] + 23 * [0] + [1] + 23 * [0],
}
)
df =
|
pd.concat([df1, df2], ignore_index=True)
|
pandas.concat
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import wisps
import numpy as np
import matplotlib.pyplot as plt
import wisps.simulations as wispsim
import pandas as pd
from tqdm import tqdm
import seaborn as sns
from matplotlib.colors import Normalize
import astropy.units as u
import wisps.simulations.effective_numbers as eff
import seaborn as sns
import matplotlib
import popsims
import itertools
#plt.style.use('dark_background')
#get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
wispsim.MAG_LIMITS
# In[3]:
import popsims
import splat
# In[4]:
sgrid=wispsim.SPGRID
pnts=pd.read_pickle(wisps.OUTPUT_FILES+'/pointings_correctedf110.pkl')
corr_pols=wisps.POLYNOMIAL_RELATIONS['mag_limit_corrections']
klf=pd.read_csv('/users/caganze/research/wisps/data/kirkpatricklf.txt', delimiter=',')
klf['bin_center']=np.mean(np.array([klf.t0.values, klf.tf.values]), axis=0)
klf=klf.replace(0.0,np.nan)
ucds=pd.read_pickle(wisps.LIBRARIES+'/new_real_ucds.pkl')
#cands=cands[(cands.spt >=17) & (cands.snr1>=3)].reset_index(drop=True)
cands=(ucds[ucds.selection!='']).reset_index(drop=True)
tab=wisps.Annotator.reformat_table(cands)
pnt_names=[x.name for x in pnts]
# In[5]:
#spgrid
# In[6]:
#cmap= sns.color_palette("coolwarm", 8, as_cmap=True)
cmap=matplotlib.cm.get_cmap('coolwarm')
cnorm=Normalize(wispsim.HS[0], (wispsim.HS[-1]))
# In[7]:
kirkpatrick2020LF={'bin_center':np.flip(np.array([2025, 1875, 1725, 1575, 1425, 1275, 1125 , 975, 825, 675, 525])),
'values':np.flip(np.array([0.72, 0.50,0.78, 0.81,0.94, 1.95, 1.11, 1.72, 1.99, 2.80, 4.24])),
'unc':np.flip(([0.18, 0.17, 0.20,0.20, 0.22, 0.3, 0.25, 0.3, 0.32, 0.37, 0.70]))}
# In[8]:
MODEL_NAMES=['burrows1997', 'burrows2001', 'baraffe2003', 'saumon2008', 'marley2019', 'phillips2020']
MODEL_SHORT_NAMES=['B97', 'B01', 'B03', 'SM08', 'M19', 'P20']
# In[9]:
def bin_by_spt_bin(sp_types, number, ltonly=False):
ranges=[[17, 20], [20, 25], [25, 30], [30, 35], [35, 40]]
if ltonly:
ranges=[[17, 20], [20, 30], [30, 41]]
numbers=[]
for r in ranges:
idx= np.logical_and((r[0]<=sp_types), (r[1]>sp_types))
numbers.append(np.nansum(number[idx]))
return numbers
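# Illustration only (toy counts): with one object per spectral type from 17 to 41,
# the default bins above give [3, 5, 5, 5, 5]; types 40 and 41 fall outside the
# last bin edge.
print(bin_by_spt_bin(np.arange(17, 42), np.ones(25)))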
def get_all_numbers():
#Build every (model, scale height) parameter pair and evaluate each one sequentially
func=lambda x, y: get_simulated_number_model(y, x)
paramlist=[(i, j) for i, j in itertools.product(MODEL_NAMES, wispsim.HS)]
res = [func(x, y) for x,y in tqdm(paramlist)]
nbrs = {}
for k in MODEL_NAMES:
ds0={}
for j in res:
if k in j.keys():
key=[x for x in j[k].keys()][0]
ds0.update({key: [(j[k][key])[yi] for yi in wispsim.SPGRID]})
#print (ds0)
nbrs[k]=np.array([ds0[k] for k in wispsim.HS])
return nbrs
def get_pointing(grism_id):
if grism_id.startswith('par'):
pntname=grism_id.lower().split('-')[0]
else:
pntname=grism_id.lower().split('-g141')[0]
loc=pnt_names.index(pntname)
return np.array(pnts)[loc]
def iswithin_mag_limits(mags, pnt, spt):
#mags is a dictionary of magnitudes keyed by filter
flags=[]
for k in pnt.mag_limits.keys():
if k =='F110' and pnt.survey =='hst3d':
flags.append(True)
else:
flags.append(mags[k] <= pnt.mag_limits[k]+ (corr_pols[k+'W'][0])(spt))
return np.logical_or.reduce(flags)
def get_simulated_number_model(hidx, model):
#hidx is a scale height, model is evolutionary model
df=pd.read_hdf(wisps.OUTPUT_FILES+'/final_simulated_sample_cut_binaries.h5', key=str(model)+str(hidx)+str('spt_abs_mag'))
cutdf=(df[~df.is_cut]).rename(columns={'temperature': 'teff', 'slprob': 'sl'})
#cutdf=pd.read_hdf(wisps.OUTPUT_FILES+'/final_simulated_sample_cut.h5', key=str(model)+str('h')+str(hidx)+'F110_corrected')
#scl_dict=pd.read_pickle(wisps.OUTPUT_FILES+'/lf_scales.pkl')
#scales=scl_dict[model]
scale=[cutdf.scale.mean(), cutdf.scale_unc.mean(), cutdf.scale_times_model.mean()]
#scale=scale_lf_teff(cutdf.teff)
NSIM=dict(zip(wispsim.SPGRID,np.zeros((len(wispsim.SPGRID), 2))))
cutdf['spt_r']=cutdf.spt.apply(np.round)
for g in cutdf.groupby('spt_r'):
sn= len(cutdf.teff[np.logical_and(cutdf.teff>=450, cutdf.teff<=2100)])
n0=scale[-1]/scale[0]
#print (n0)
scln=np.array([scale[0]*n0/sn, (scale[1]*scale[-1])/(sn*scale[0])])
#scln=np.array(scale)
#assert scln[0] > scale[0]
NSIM[g[0]]=np.nansum(g[1].sl)*scln
del cutdf
return {model: {hidx:NSIM}}
# In[10]:
MODEL_NAMES
# In[ ]:
# In[11]:
#cutdf.scale
# In[12]:
def plot_one(NUMBERS, VOLUMES, filename='/oberved_numbers_one_panel.pdf'):
data_to_save={}
# In[ ]:
nall=wisps.custom_histogram(cands.spt.apply(wisps.make_spt_number), sgrid, 1)
y2=bin_by_spt_bin(wispsim.SPGRID,nobs, ltonly=False)-THICK
yall=bin_by_spt_bin(wispsim.SPGRID,nall, ltonly=False)-THICK
dy2=np.sqrt(y2)
dyall=np.sqrt(yall)
#add this to the dictionary
data_to_save['nall']=nall
data_to_save['nobs']=nobs
data_to_save['yall']=yall
data_to_save['y2']=y2
fig, a=plt.subplots(figsize=(8, 6))
#for model, a in zip(['baraffe2003', 'saumon2008', 'marley2019', 'phillips2020'], np.concatenate(ax)):
model='baraffe2003'
for idx, h in enumerate(wispsim.HS):
ns=None
ns=((NUMBERS[model])[idx])[:,0]*VOLUMES[idx]
nuncs=((NUMBERS[model])[idx])[:,1]*VOLUMES[idx]
a.plot(spgrid2, bin_by_spt_bin(wispsim.SPGRID,ns, ltonly=False),
color= cmap(cnorm(h)),
linewidth=3, drawstyle="steps-mid")
a.fill_between(spgrid2, bin_by_spt_bin(wispsim.SPGRID,ns+nuncs, ltonly=False),
bin_by_spt_bin(wispsim.SPGRID,ns-nuncs, ltonly=False), alpha=0.5,
color= cmap(cnorm(h/100)), step="mid")
a.set_yscale('log')
#a.errorbar(spgrid2,y2, yerr=dy2,fmt='o', color='#111111')
#a.errorbar(spgrid2,yall, yerr=dyall,color='#B10DC9', mfc='white', fmt='o')
a.set_xlabel('SpT',fontsize=18)
a.set_ylabel('N',fontsize=18)
a.minorticks_on()
#a.set_title('Model= SM08', fontsize=18)
a.set_title('Model= B03', fontsize=18)
#a.set_title('Model= M19', fontsize=18)
#a.set_title('Model= P20', fontsize=18)
a.errorbar(spgrid2,y2, yerr=dy2,fmt='o', label='Mag Limited')
#a.errorbar(spgrid2,yall, yerr=dyall, fmt='o', label='All Observations')
cax = fig.add_axes([.5, 0.7, .3, 0.03])
mp=matplotlib.cm.ScalarMappable(norm=cnorm, cmap=cmap)
cbar=plt.colorbar(mp, cax=cax, orientation='horizontal')
cbar.ax.set_xlabel(r'Scaleheight (H)', fontsize=18)
#cbar.ax.set_yticks([1, 3, 5, 10])
#a.legend(fontsize=14, loc='upper left')
plt.tight_layout()
plt.savefig(wisps.OUTPUT_FIGURES+filename, bbox_inches='tight')
# In[13]:
#d=pd.read_pickle(wisps.OUTPUT_FILES+'/distance_samples{}'.format(h))
# In[14]:
#expected counts from the thick disk
THICK=np.array([8.79798048, 2.30571423, 0.14145726, 0.08853498, 0.01784511])
# In[15]:
tab['pnt']=tab['grism_id'].apply(get_pointing)
tab['spt_val']=np.vstack(tab.spt.values)[:,0]
obsmgs=tab[['F140W', 'F110W', 'F160W']].rename(columns={"F110W": "F110",
"F140W": "F140",
"F160W": "F160"}).to_dict('records')
flags=[iswithin_mag_limits(x, y, z) for x, y, z in zip(obsmgs, tab.pnt.values,tab.spt.values )]
#let's see what happens if we include all objects
#flags=np.ones(len(flags)).astype(bool)
cdf_to_use=tab[flags]
nobs=wisps.custom_histogram(cdf_to_use.spt_val.apply(wisps.make_spt_number), sgrid, 1)
spgrid2=['M7-L0', 'L0-L5', 'L5-T0', 'T0-T5', 'T5-Y0']
spgrid3=['Late M', 'L', 'T']
# In[16]:
sgrid,
# In[17]:
#for k in ['F140', 'F110', 'F160']:
# tab['lim_{}'.format(k)]=tab.pnt.apply(lambda x: x.mag_limits[k])
# tab['detected_{}'.format(k)]= tab[k+'W'] < tab['lim_{}'.format(k)]
# In[18]:
flags=np.array(flags)
# In[19]:
spgrid=np.arange(17, 42)
# In[20]:
fig, ax=plt.subplots(figsize=(12, 4), ncols=3)
ax[0].errorbar(tab.spt[flags], tab.F110W[flags], xerr=tab.spt_er[flags], yerr=tab.F110W_er[flags], fmt='o', c='k')
ax[0].errorbar(tab.spt[~flags], tab.F110W[~flags], xerr=tab.spt_er[~flags], yerr=tab.F110W_er[~flags], mfc='white', fmt='o')
ax[1].errorbar(tab.spt[flags], tab.F140W[flags], xerr=tab.spt_er[flags], yerr=tab.F140W_er[flags], fmt='o', c='k')
ax[1].errorbar(tab.spt[~flags], tab.F140W[~flags], xerr=tab.spt_er[~flags], yerr=tab.F140W_er[~flags], mfc='white', fmt='o')
ax[-1].errorbar(tab.spt[flags], tab.F160W[flags], xerr=tab.spt_er[flags], yerr=tab.F160W_er[flags], fmt='o', c='k')
ax[-1].errorbar(tab.spt[~flags], tab.F160W[~flags], xerr=tab.spt_er[~flags], yerr=tab.F160W_er[~flags], mfc='white', fmt='o')
for p in pnts:
ax[0].plot(spgrid, p.mag_limits['F110']+(corr_pols['F110'+'W'][0])(spgrid), alpha=0.01, c='b')
ax[1].plot(spgrid, p.mag_limits['F140']+(corr_pols['F140'+'W'][0])(spgrid), alpha=0.01, c='b')
ax[-1].plot(spgrid, p.mag_limits['F160']+(corr_pols['F160'+'W'][0])(spgrid), alpha=0.01, c='b')
ax[0].set_xlabel('F110')
ax[1].set_xlabel('F140')
ax[-1].set_xlabel('F160')
ax[0].set_ylabel('SpT')
ax[1].set_ylabel('SpT')
ax[-1].set_ylabel('SpT')
for a in ax:
a.minorticks_on()
plt.tight_layout()
# In[21]:
#wisps.POLYNOMIALS
# In[22]:
#.MAG_LIMITS
# In[23]:
subtab=(tab[tab.spt.between(30, 35)]).reset_index(drop=True)
# In[24]:
#with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
# print( subtab[['F140W', 'F160W', 'lim_F140', 'lim_F160', 'detected_F140', 'detected_F160', 'grism_id',
# 'spt']])
# In[ ]:
# In[25]:
#NUMBERS=pd.read_pickle(wisps.OUTPUT_FILES+'/numbers_simulated.pkl')
NUMBERS=get_all_numbers()
# In[26]:
NUMBERS.keys()
# In[27]:
#plt.hist(np.log10(NUMBERS['baraffe2003'][0][:,1]))
# In[28]:
volumes=[]
for pnt in pnts:
vs=[]
for h in wispsim.HS:
vsx=[]
for g in wispsim.SPGRID:
vsx.append((pnt.volumes[h])[g])
vs.append(vsx)
volumes.append(vs)
volumes=np.array(volumes)
VOLUMES=(np.nansum(volumes, axis=0))*4.1*(u.arcmin**2).to(u.radian**2)
# In[29]:
MODEL_NAMES, MODEL_SHORT_NAMES
# In[30]:
def plot(NUMBERS, VOLUMES, filename='/oberved_numbers.pdf'):
# In[ ]:
nall=wisps.custom_histogram(cands.spt.apply(wisps.make_spt_number), sgrid, 1)
y2=bin_by_spt_bin(wispsim.SPGRID,nobs, ltonly=False)-THICK
yall=bin_by_spt_bin(wispsim.SPGRID,nall, ltonly=False)
dy2=np.sqrt(y2)
dyall=np.sqrt(yall)
fig, ax=plt.subplots(figsize=(14, 8), ncols=3, nrows=2, sharey=True, sharex=False)
for model, name, a in zip(MODEL_NAMES, MODEL_SHORT_NAMES, np.concatenate(ax)):
for idx, h in enumerate(wispsim.HS):
ns=None
ns=((NUMBERS[model])[idx])[:,0]*VOLUMES[idx]
nuncs=((NUMBERS[model])[idx])[:,1]*VOLUMES[idx]
a.plot(spgrid2, bin_by_spt_bin(wispsim.SPGRID,ns, ltonly=False),
color= cmap(cnorm(h)),
linewidth=3, drawstyle="steps-mid")
#a.fill_between(spgrid2, bin_by_spt_bin(wispsim.SPGRID,ns+nuncs, ltonly=False),
# bin_by_spt_bin(wispsim.SPGRID,ns-nuncs, ltonly=False), alpha=0.5,
# color= cmap(cnorm(h/100)), step="mid")
a.set_yscale('log')
a.errorbar(spgrid2,y2, yerr=dy2,fmt='o', color='#111111')
a.errorbar(spgrid2,yall, yerr=dyall,color='#B10DC9', mfc='white', fmt='o')
a.set_xlabel('SpT',fontsize=18)
a.set_ylabel('N',fontsize=18)
a.minorticks_on()
a.set_title('Model= {}'.format(name), fontsize=18)
ax[1][-2].errorbar(spgrid2,y2, yerr=dy2,fmt='o', label='Mag Limited', color='#111111')
ax[1][-2].errorbar(spgrid2,yall, yerr=dyall,color='#B10DC9', fmt='o', mfc='white', label='All Observations')
#ax[-1][-2].legend(fontsize=14, bbox_to_anchor=(1.05, 1), loc='upper left')
#fig.delaxes(np.concatenate(ax)[-1])
ax[1][-2].legend( fontsize=14, loc='upper right')
cax = fig.add_axes([1.01, 0.25, .015, 0.5])
mp=matplotlib.cm.ScalarMappable(norm=cnorm, cmap=cmap)
cbar=plt.colorbar(mp, cax=cax, orientation='vertical')
cbar.ax.set_ylabel(r'Scaleheight (H, pc)', fontsize=18)
#cbar.ax.set_yticks([1, 3, 5, 10])
#np.concatenate(ax)[-2].legend(loc='center left', bbox_to_anchor=(1, 1.5), fontsize=14)
plt.tight_layout()
plt.savefig(wisps.OUTPUT_FIGURES+filename, bbox_inches='tight')
# In[31]:
plot(NUMBERS, VOLUMES, filename='/obs_numbers_plus_binaries.pdf')
# In[32]:
#save into pickle file
#NUMBERS
counts_numbers={'volumes': VOLUMES, 'densities': NUMBERS, 'scaleheights': wispsim.HS, 'nobs': nobs}
import pickle
with open(wisps.OUTPUT_FILES+'/expected_numbers_wisps_plus_binaries.pkl', 'wb') as file:
pickle.dump(counts_numbers,file)
# In[33]:
nall=wisps.custom_histogram(cands.spt.apply(wisps.make_spt_number), sgrid, 1)
y2=bin_by_spt_bin(wispsim.SPGRID,nobs, ltonly=False)-THICK
# In[34]:
def asymetric_errors(vals):
if len(vals)<1:
return [np.nan, np.nan]
else:
med= np.nanmedian(vals)
up= np.nanpercentile(vals, 86)
dn= np.nanpercentile(vals, 14)
return np.array([med-dn, up-med])
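# Illustration only: for a large standard-normal sample the 14th/86th percentiles
# sit roughly 1.08 sigma either side of the median, so both error bars come out
# near 1.08 (lower, upper).
print(asymetric_errors(np.random.normal(0, 1, 100000)))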
# In[35]:
np.nanpercentile(wispsim.HS, 10)
# In[ ]:
# In[36]:
#just for L dwarfs and T dwarfs
y3=bin_by_spt_bin(wispsim.SPGRID,nall, ltonly=False)-THICK
y4=bin_by_spt_bin(wispsim.SPGRID,nobs, ltonly=True)#-THICK
y5= np.nansum(y4)
print ('all ----- {}'.format(y3))
print ('used ----- {}'.format(y2))
print ('MLT ----{}'.format(y4))
print ('All ----{}'.format(y5))
# In[37]:
#PRINT THE BEST FIT NUMBER
#best_fit={}
numbers_fit={} #predictions for all
numbers_fit_lt={} #predictions for M, L, T
#numbers_fit_total={} #predictions for total number counts
for model in MODEL_NAMES:
model_number_lt={}
model_number={}
for idx, h in enumerate(wispsim.HS):
ns=None
ns=((NUMBERS[model])[idx])[:,0]*VOLUMES[idx]
nuncs=((NUMBERS[model])[idx])[:,1]*VOLUMES[idx]
binned=np.array(bin_by_spt_bin(wispsim.SPGRID,ns, ltonly=False))
binned_lt= np.array(bin_by_spt_bin(wispsim.SPGRID,ns, ltonly=True))
#binned_unc=np.array(bin_by_spt_bin(wispsim.SPGRID,nuncs, ltonly=False))
#add L and
#compute chi-squared
#print (ns)
#chisq= abs((y2-binned)**2/(y2))
#model_fit.update({h: chisq})
#binned_total=np.append(binned, binned_lt)
#binned_total=np.append(binned, binned_lt)
model_number.update({h: binned})
model_number_lt.update({h: binned_lt})
# best_fit.update({model: model_fit})
numbers_fit.update({model: model_number})
numbers_fit_lt.update({model: model_number_lt})
# In[38]:
#chisq_dicts=pd.DataFrame.from_records(best_fit)
pred_number_dicts=pd.DataFrame.from_records(numbers_fit)
pred_number_lt_dicts=
|
pd.DataFrame.from_records(numbers_fit_lt)
|
pandas.DataFrame.from_records
|
import pandas as pd
import numpy as np
from KLS_Data import name_set_list, blamed_list, karachi_ls_df, kls_df
def name_set_length_lister(name_set_list):
'''Returns a list of the lengths of a passed iterable.'''
name_set_length_list = [len(name_set) for name_set in name_set_list]
return name_set_length_list
kls_df.T.info()
blamed_percent = pd.DataFrame([kls_df.loc[blamed].value_counts(normalize = True) for blamed in kls_df.T])
blamed_percent.plot.bar()
# Find the length of the name_set_list
name_set_lengths = name_set_length_lister(name_set_list)
# Create a dictionary of the blamed entities and the number of times each entity is blamed
blamed_dict = dict(zip(blamed_list, name_set_lengths))
print(blamed_dict)
# Create a pandas Series out of the dictionary
times_blamed =
|
pd.Series(blamed_dict)
|
pandas.Series
|
import argparse
import json
import os
import pandas as pd
import requests
def get_parser():
parser = argparse.ArgumentParser(description=__doc__)
input_group = parser.add_mutually_exclusive_group(required=True)
input_group.add_argument('-i', "--infile", action='store',
help="""Path to .txt file containing accessions of experiments to process. The txt file must contain two columns with 1 header row, one labeled 'accession' and another labeled 'align_only'. It can optionally include 'custom_message' and 'custom_crop_length' columns.""")
parser.add_argument('-o', '--outputpath', action='store', default='',
help="""Optional path to output folder. Defaults to current path.""")
parser.add_argument('-g', '--gcpath', action='store', default='',
help="""Optional path where the input.json will be uploaded to the Google Cloud instance. Only affects the list of caper commands that is generated.""")
parser.add_argument('--wdl', action='store', default=False,
help="""Path to .wdl file.""")
parser.add_argument('-s', '--server', action='store', default='https://www.encodeproject.org',
help="""Optional specification of server using the full URL. Defaults to production server.""")
parser.add_argument('--use-s3-uris', action='store_true', default=False,
help="""Optional flag to use s3_uri links. Otherwise, defaults to using @@download links from the ENCODE portal.""")
input_group.add_argument("--accessions", action='store',
help="""List of accessions separated by commas.""")
parser.add_argument('--align-only', action='store', default=False,
help="""Pipeline will end after alignments step if True.""")
parser.add_argument('--custom-message', action='store',
help="""An additional custom string to be appended to the messages in the caper submit commands.""")
parser.add_argument('--caper-commands-file-message', action='store', default='',
help="""An additional custom string to be appended to the file name of the caper submit commands.""")
parser.add_argument('--custom-crop-length', action='store', default='',
help="""Custom value for the crop length.""")
parser.add_argument('--multiple-controls', action='store', default='',
help="""Pipeline will assume multiple controls should be used.""")
parser.add_argument('--force-se', action='store', default='',
help="""Pipeline will map as single-ended regardless of input fastqs.""")
parser.add_argument('--redacted', action='store', default='',
help="""Control experiment has redacted alignments.""")
return parser
def check_path_trailing_slash(path):
if path.endswith('/'):
return path.rstrip('/')
else:
return path
def build_experiment_report_query(experiment_list, server):
joined_list = '&accession='.join(experiment_list)
return server + '/report/?type=Experiment' + \
f'&accession={joined_list}' + \
'&field=@id' + \
'&field=accession' + \
'&field=assay_title' + \
'&field=control_type' + \
'&field=possible_controls' + \
'&field=replicates.antibody.targets' + \
'&field=files.s3_uri' + \
'&field=files.href' + \
'&field=replicates.library.biosample.organism.scientific_name' + \
'&limit=all' + \
'&format=json'
def build_file_report_query(experiment_list, server, file_format):
joined_list = '&dataset='.join(experiment_list)
if file_format == 'fastq':
format_parameter = '&file_format=fastq'
award_parameter = ''
output_type_parameter = '&output_type=reads'
elif file_format == 'bam':
format_parameter = '&file_format=bam'
award_parameter = '&award.rfa=ENCODE4'
output_type_parameter = '&output_type=alignments&output_type=redacted alignments'
return server + '/report/?type=File' + \
f'&dataset={joined_list}' + \
'&status=released' + \
'&status=in+progress' + \
award_parameter + \
'&assembly!=hg19' + \
'&assembly!=mm9' + \
format_parameter + \
output_type_parameter + \
'&field=@id' + \
'&field=dataset' + \
'&field=file_format' + \
'&field=biological_replicates' + \
'&field=paired_end' + \
'&field=paired_with' + \
'&field=run_type' + \
'&field=mapped_run_type' + \
'&field=read_length' + \
'&field=cropped_read_length' + \
'&field=cropped_read_length_tolerance' + \
'&field=status' + \
'&field=s3_uri' + \
'&field=href' + \
'&field=replicate.status' + \
'&limit=all' + \
'&format=json'
def parse_infile(infile):
try:
infile_df = pd.read_csv(infile, sep='\t')
infile_df['align_only'] = infile_df['align_only'].astype('bool')
infile_df['multiple_controls'] = infile_df['multiple_controls'].astype('bool')
infile_df['force_se'] = infile_df['force_se'].astype('bool')
return infile_df
except FileNotFoundError as e:
print(e)
exit()
except KeyError:
print('Missing a required column (align_only, multiple_controls, or force_se) in the input file.')
exit()
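# Illustration only (placeholder accessions): parse_infile expects a tab-separated
# table. Besides 'accession' and 'align_only' it also casts 'multiple_controls'
# and 'force_se', so a minimal input looks like:
#
# accession<TAB>align_only<TAB>multiple_controls<TAB>force_se
# ENCSRAAAAAA<TAB>True<TAB>False<TAB>False
# ENCSRBBBBBB<TAB>False<TAB>False<TAB>True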
def strs2bool(strings):
out = []
for string in strings:
if string == "True":
out.append(True)
elif string == "False":
out.append(False)
return out
def get_data_from_portal(infile_df, server, keypair, link_prefix, link_src):
# Retrieve experiment report view json with necessary fields and store as DataFrame.
experiment_input_df = pd.DataFrame()
experiment_accessions = infile_df['accession'].tolist()
# Chunk the list to avoid sending queries longer than the character limit
chunked_experiment_accessions = [experiment_accessions[x:x+100] for x in range(0, len(experiment_accessions), 100)]
for chunk in chunked_experiment_accessions:
experiment_report = requests.get(
build_experiment_report_query(chunk, server),
auth=keypair,
headers={'content-type': 'application/json'})
experiment_report_json = json.loads(experiment_report.text)
experiment_df_temp = pd.json_normalize(experiment_report_json['@graph'])
experiment_input_df = experiment_input_df.append(experiment_df_temp, ignore_index=True, sort=True)
experiment_input_df.sort_values(by=['accession'], inplace=True)
# Fill in columns that may be missing
if 'control_type' not in experiment_input_df:
experiment_input_df['control_type'] = None
# Retrieve list of wildtype controls
wildtype_ctl_query_res = requests.get(
link_prefix+'/search/?type=Experiment&assay_title=Control+ChIP-seq&replicates.library.biosample.applied_modifications%21=%2A&limit=all',
auth=keypair,
headers={'content-type': 'application/json'})
wildtype_ctl_ids = [ctl['@id'] for ctl in json.loads(wildtype_ctl_query_res.text)['@graph']]
# Gather list of controls from the list of experiments to query for their files.
datasets_to_retrieve = experiment_input_df.get('@id').tolist()
for ctl in experiment_input_df.get('possible_controls'):
for item in ctl:
datasets_to_retrieve.append(item['@id'])
# Retrieve file report view json with necessary fields and store as DataFrame.
file_input_df = pd.DataFrame()
chunked_dataset_accessions = [datasets_to_retrieve[x:x+100] for x in range(0, len(datasets_to_retrieve), 100)]
for chunk in chunked_dataset_accessions:
for file_format in ['fastq', 'bam']:
file_report = requests.get(
build_file_report_query(chunk, server, file_format),
auth=keypair,
headers={'content-type': 'application/json'})
file_report_json = json.loads(file_report.text)
file_df_temp = pd.json_normalize(file_report_json['@graph'])
file_input_df = file_input_df.append(file_df_temp, ignore_index=True, sort=True)
file_input_df.set_index(link_src, inplace=True)
file_df_required_fields = ['paired_end', 'paired_with', 'mapped_run_type']
for field in file_df_required_fields:
if field not in file_input_df:
file_input_df[field] = None
file_input_df['biorep_scalar'] = [x[0] for x in file_input_df['biological_replicates']]
return experiment_input_df, wildtype_ctl_ids, file_input_df
# Simple function to count the number of replicates per input.json
def count_reps(row):
x = 0
for value in row:
if None in value or value == []:
continue
else:
x = x+1
return x
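# Illustration only: count_reps treats an empty list or a list containing None as
# "no replicate present", so a row of per-replicate fastq lists such as
# [['rep1_R1.fastq.gz'], ['rep2_R1.fastq.gz'], [], [None]] counts as 2 replicates.
# The file names above are invented.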
def main():
keypair = (os.environ.get('DCC_API_KEY'), os.environ.get('DCC_SECRET_KEY'))
parser = get_parser()
args = parser.parse_args()
allowed_statuses = ['released', 'in progress']
output_path = check_path_trailing_slash(args.outputpath)
wdl_path = args.wdl
gc_path = args.gcpath
caper_commands_file_message = args.caper_commands_file_message
server = check_path_trailing_slash(args.server)
use_s3 = args.use_s3_uris
if use_s3:
link_prefix = ''
link_src = 's3_uri'
else:
link_prefix = server
link_src = 'href'
if args.infile:
infile_df = parse_infile(args.infile)
infile_df.sort_values(by=['accession'], inplace=True)
infile_df.drop_duplicates(subset=['accession'],inplace=True)
elif args.accessions:
accession_list = args.accessions.split(',')
align_only = strs2bool(args.align_only.split(','))
message = args.custom_message.split(',')
custom_crop_length = args.custom_crop_length.split(',')
multiple_controls = strs2bool(args.multiple_controls.split(','))
force_se = strs2bool(args.force_se.split(','))
redacted = strs2bool(args.redacted.split(','))
infile_df = pd.DataFrame({
'accession': accession_list,
'align_only': align_only,
'custom_message': message,
'crop_length': custom_crop_length,
'multiple_controls': multiple_controls,
'force_se': force_se,
'redacted': redacted
})
infile_df.sort_values(by=['accession'], inplace=True)
use_custom_crop_length_flag = False
if 'custom_crop_length' in infile_df:
use_custom_crop_length_flag = True
custom_crop_lengths = infile_df['custom_crop_length'].tolist()
else:
custom_crop_lengths = [None] * len(infile_df['accession'])
force_se_flag = False
if 'force_se' in infile_df:
force_se_flag = True
force_ses = infile_df['force_se'].tolist()
else:
force_ses = [False] * len(infile_df['accession'])
if 'redacted' in infile_df:
redacted_flags = [x if x is True else None for x in infile_df['redacted'].tolist()]
else:
redacted_flags = [None] * len(infile_df['accession'])
if 'multiple_controls' in infile_df:
multiple_controls = infile_df['multiple_controls'].tolist()
else:
multiple_controls = [False] * len(infile_df['accession'])
# Arrays to store lists of potential errors.
ERROR_no_fastqs = []
ERROR_missing_fastq_pairs = []
ERROR_control_error_detected = []
ERROR_not_matching_endedness = []
# Fetch data from the ENCODE portal
experiment_input_df, wildtype_ctl_ids, file_input_df = get_data_from_portal(infile_df, server, keypair, link_prefix, link_src)
# Create output_df to store all data for the final input.json files.
output_df = pd.DataFrame()
output_df['chip.title'] = infile_df['accession']
output_df['chip.align_only'] = infile_df['align_only']
if 'custom_message' in infile_df:
output_df['custom_message'] = infile_df['custom_message']
output_df['custom_message'].fillna('', inplace=True)
else:
output_df['custom_message'] = ''
output_df.set_index('chip.title', inplace=True, drop=False)
output_df['assay_title'] = experiment_input_df['assay_title'].to_list()
'''
Experiment sorting section
'''
# Assign blacklist(s) and genome reference file.
blacklist = []
blacklist2 = []
genome_tsv = []
chrom_sizes = []
ref_fa = []
bowtie2 = []
# Only (human) Mint-ChIP-seq should have bwa_idx_tar value.
bwa_index = []
for assay, replicates in zip(experiment_input_df.get('assay_title'), experiment_input_df.get('replicates')):
organism = set()
for rep in replicates:
organism.add(rep['library']['biosample']['organism']['scientific_name'])
if ''.join(organism) == 'Homo sapiens':
genome_tsv.append('https://storage.googleapis.com/encode-pipeline-genome-data/genome_tsv/v3/hg38.tsv')
chrom_sizes.append('https://www.encodeproject.org/files/GRCh38_EBV.chrom.sizes/@@download/GRCh38_EBV.chrom.sizes.tsv')
ref_fa.append('https://www.encodeproject.org/files/GRCh38_no_alt_analysis_set_GCA_000001405.15/@@download/GRCh38_no_alt_analysis_set_GCA_000001405.15.fasta.gz')
if assay in ['Mint-ChIP-seq', 'Control Mint-ChIP-seq']:
blacklist.append('https://www.encodeproject.org/files/ENCFF356LFX/@@download/ENCFF356LFX.bed.gz')
blacklist2.append('https://www.encodeproject.org/files/ENCFF023CZC/@@download/ENCFF023CZC.bed.gz')
bowtie2.append(None)
bwa_index.append('https://www.encodeproject.org/files/ENCFF643CGH/@@download/ENCFF643CGH.tar.gz')
elif assay in ['Histone ChIP-seq', 'TF ChIP-seq', 'Control ChIP-seq']:
blacklist.append('https://www.encodeproject.org/files/ENCFF356LFX/@@download/ENCFF356LFX.bed.gz')
blacklist2.append(None)
bowtie2.append('https://www.encodeproject.org/files/ENCFF110MCL/@@download/ENCFF110MCL.tar.gz')
bwa_index.append(None)
elif ''.join(organism) == 'Mus musculus':
genome_tsv.append('https://storage.googleapis.com/encode-pipeline-genome-data/genome_tsv/v3/mm10.tsv')
chrom_sizes.append('https://www.encodeproject.org/files/mm10_no_alt.chrom.sizes/@@download/mm10_no_alt.chrom.sizes.tsv')
ref_fa.append('https://www.encodeproject.org/files/mm10_no_alt_analysis_set_ENCODE/@@download/mm10_no_alt_analysis_set_ENCODE.fasta.gz')
if assay in ['Mint-ChIP-seq', 'Control Mint-ChIP-seq']:
blacklist.append(None)
blacklist2.append(None)
bowtie2.append(None)
bwa_index.append(None)
elif assay in ['Histone ChIP-seq', 'TF ChIP-seq', 'Control ChIP-seq']:
blacklist.append('https://www.encodeproject.org/files/ENCFF547MET/@@download/ENCFF547MET.bed.gz')
blacklist2.append(None)
bowtie2.append('https://www.encodeproject.org/files/ENCFF309GLL/@@download/ENCFF309GLL.tar.gz')
bwa_index.append(None)
output_df['chip.blacklist'] = blacklist
output_df['chip.blacklist2'] = blacklist2
output_df['chip.genome_tsv'] = genome_tsv
output_df['chip.chrsz'] = chrom_sizes
output_df['chip.ref_fa'] = ref_fa
output_df['chip.bowtie2_idx_tar'] = bowtie2
output_df['chip.bwa_idx_tar'] = bwa_index
# Determine pipeline types and bwa related properties for Mint
pipeline_types = []
aligners = []
use_bwa_mem_for_pes = []
bwa_mem_read_len_limits = []
for assay, ctl_type in zip(experiment_input_df.get('assay_title'), experiment_input_df.get('control_type')):
if
|
pd.notna(ctl_type)
|
pandas.notna
|
import pandas as pd
from prep import get_ratio
from datetime import datetime, timedelta
stime, etime = 'formatted_start_time', 'formatted_end_time'
dformat = '%Y-%m-%d %H:%M:%S'
def get_between_with_duration(df0, target, merge_way, s, e, duration=None):
df = df0.copy()
df[stime] = pd.to_datetime(df[stime])
df[etime] = pd.to_datetime(df[etime])
df_avg = None
if duration is None:
duration = 'manual_duration'
df[duration] = (df[etime] - df[stime]).apply(lambda x: x.total_seconds())
df = df[['timestamps', stime, etime, 'enSN', target, duration]]
df_avg = pd.DataFrame(columns=df.columns)
df1 = df[(df[etime] >= s) & (df[stime] < e)].reset_index(drop=True).copy()
if len(df1) == 0:
df1 = pd.DataFrame({'timestamps': df0['timestamps'].iat[0], 'enSN': df0['enSN'].iat[0],
target: 0.0, duration: 0.0}, index=[0])
else:
ratios = df1.apply(lambda r: get_ratio(r[stime], r[etime], s, e), axis=1)
df1[duration] *= ratios
if merge_way == 'mean':
df1[target] = df1[target] * df1[duration]
df1 = df1.groupby(['timestamps', 'enSN'])[[target, duration]].sum().reset_index()
df1[target] = df1[target] / df1[duration]
if merge_way == 'sum':
df1[target] *= ratios
df1 = df1.groupby(['timestamps', 'enSN'])[[target, duration]].sum().reset_index()
df1[stime], df1[etime]= s, e
df_avg = df_avg.append(df1, ignore_index=True)
df_avg[target] = pd.to_numeric(df_avg[target])
#df_avg.drop(columns=[duration], inplace=True)
df_avg[duration] = pd.to_numeric(df_avg[duration])
#df_avg[stime] = df_avg[stime].apply(lambda s: datetime.strftime(s, dformat))
#df_avg[etime] = df_avg[etime].apply(lambda s: datetime.strftime(s, dformat))
return df_avg[target].iat[0], df_avg[duration].iat[0]
def get_between(df0, target, merge_way, s, e, duration=None):
targ, _ = get_between_with_duration(df0, target, merge_way, s, e, duration)
return targ
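# Illustration only: assuming get_ratio returns the fraction of each record that
# falls inside [s, e], merge_way='mean' is a duration-weighted average. For two
# records fully inside the window, 5 min at value 10 and 10 min at value 4,
# the result is (10*300 + 4*600) / (300 + 600) = 6.0.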
def get_point(df0, target, t, default_value=0):
df = df0.copy()
df[stime] = pd.to_datetime(df[stime])
df[etime] = pd.to_datetime(df[etime])
df1 = df[(df[etime] >= t) & (df[stime] <= t)]
if len(df1) == 0:
return default_value
else:
return df1[target].iat[0]
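# Illustration only (toy timestamps and a made-up 'brightness' column):
# toy = pd.DataFrame({'formatted_start_time': ['2021-01-01 00:00:00'],
#                     'formatted_end_time':   ['2021-01-01 00:10:00'],
#                     'brightness': [42]})
# get_point(toy, 'brightness', pd.Timestamp('2021-01-01 00:05:00'))  -> 42
# get_point(toy, 'brightness', pd.Timestamp('2021-01-01 01:00:00'))  -> 0 (the default)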
def get_mode(df, target, t, default_value=1):
d = {1: [1, 0, 0, 0], 2: [0, 1, 0, 0], 4: [0, 0, 1, 0], 5: [0, 0, 0, 1]}
if int(get_point(df, target, t, default_value)) == 0:
print(target, t)
return d[int(get_point(df, target, t, default_value))]
def get_bright_session_num(df, t, duration=10):
t_before = t - timedelta(minutes=duration)
return len(df[(df['last_bright_start_time'] <= t) & (df['last_bright_start_time'] >= t_before)])
def get_app_num(df_disp, t, duration=10):
df_disp = df_disp.copy()
t_before = t - timedelta(minutes=duration)
df_disp[stime] =
|
pd.to_datetime(df_disp[stime])
|
pandas.to_datetime
|
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
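# Illustration only (invented sid/estimate values): for
#   create_expected_df_for_factor_compute(pd.Timestamp('2015-01-05'), [0, 1],
#       [(0, 1., pd.Timestamp('2015-01-05')), (0, 2., pd.Timestamp('2015-01-07'))],
#       pd.Timestamp('2015-01-08'))
# the result is indexed by (at_date=2015-01-08 UTC, knowledge_date 01-05..01-08 UTC),
# with sid 0 forward-filled as [1., 1., 2., 2.] and sid 1 (no data) all NaN.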
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that calculates estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
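        # For example, sids [0, 10, 20] expand to every integer sid from 0
        # through 20; the in-between sids have no event data of their own.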
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
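        # For example, a start_date of 2015-01-09 with window_test_start_date
        # of 2015-01-05 gives a window of 5 trading days (assuming no market
        # holidays fall in that span).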
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200,
|
pd.Timestamp('2015-01-12')
|
pandas.Timestamp
|
import argparse
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from grid_resolution import grid_resolution
from postpro_tools import postpro_tools
import matplotlib
matplotlib.rcParams["mathtext.fontset"] = "stix"
matplotlib.rcParams["font.family"] = "STIXGeneral"
matplotlib.rcParams.update({"font.size": 14})
sns.set_style("darkgrid")
model_vars = ["PSG0", "PSG1", "TG0", "TG1", "TRG0", "TRG1", "UG0", "UG1", "VG0", "VG1"]
var_codes = {
"TG0": "T_0",
"UG0": "u_0",
"VG0": "v_0",
"TRG0": "Hq_0",
"TG1": "T_1",
"UG1": "u_1",
"VG1": "v_1",
"TRG1": "Hq_1",
"PSG0": "PS_0",
"PSG1": "PS_1",
}
pslvl = [30, 100, 200, 300, 500, 700, 850, 925]
def single_error_plotter(analysis, background, var, lvl, ppt, plots_path):
lvl_str = str(lvl)
zero = pd.Series(background[lvl_str].values[0])
data_analysis_by_level = np.log(zero.append(analysis[lvl_str]))
    data_background_by_level = np.log(zero.append(background[lvl_str]))
    fr = np.log(ppt.noda[var][lvl, :])
    plt.figure(figsize=(9, 4))
    plt.title(rf"$\mathrm{{{var_codes[var]}}}\ at\ {pslvl[lvl]}\,mb$")
    plt.plot(data_analysis_by_level, color="r", label="Analysis")
    plt.plot(data_background_by_level, color="b", label="Background")
plt.plot(fr, color="k", label="NODA")
plt.legend()
plt.ylabel(r"$log(\mathcal{l}_2)$")
plt.xlabel(r"$\mathrm{Assimilation\;Step}$")
plt.legend(loc="best", prop={"size": 14})
plt.tight_layout()
plt.autoscale()
plt.savefig(plots_path / f"single_error_{var}_{lvl}.png", bbox_inches="tight")
plt.close()
def main_general_plotter(df_params):
# input_file = sys.argv[1]
root_path = Path.cwd()
exp_pth = root_path.parents[0] / "runs"
for _, row in df_params.iterrows():
method_path = exp_pth / row["exp_path"]
variables = row["variable"]
levels = row["level"]
grid_res = row["resolution"]
        if pd.isna(variables):
variables = model_vars
else:
variables = variables.strip().split(",")
        if pd.isna(levels):
levels = range(8)
else:
levels = levels.strip().split(",")
levels = [int(v) for v in levels]
plots_path = method_path / "plots" / "errors"
gs = grid_resolution(grid_res)
ppt = postpro_tools(grid_res, gs, method_path, 30)
ppt.compute_NODA()
Path(plots_path).mkdir(parents=True, exist_ok=True)
for var in variables:
analysis = pd.read_csv(method_path / "results" / f"{var}_ana.csv")
bckg = pd.read_csv(method_path / "results" / f"{var}_bck.csv")
for lvl in levels:
if ("PSG" in var) and lvl > 0:
break
if ("TRG" in var) and lvl < 2:
continue
single_error_plotter(analysis, bckg, var, lvl, ppt, plots_path)
print(f"* ENDJ - Plot {var} {lvl} Finished")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Creates a heatmap for a defined set of parameters "
+ "using the specified configurations.\n"
+ "Remember the config file must be a CSV containing the headers:\n"
+ "setting,method,infla,mask,variable,level\n"
+ "If no level or variable is set, default values will be used",
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"file", help="The name of the CSV file containing the configuration"
)
args = parser.parse_args()
input_file = args.file
print("* STARTJ - Reading input file {0}".format(input_file))
df_params =
|
pd.read_csv(input_file)
|
pandas.read_csv
|
import numpy as np
import datetime
import glob2
import xarray as xr
import pandas as pd
import re
#plt.close("all")
pd.options.display.max_columns = None
pd.options.display.max_rows = None
import plotly.io as pio
import plotly.express as px
pio.renderers.default='browser'
dircInput1 = 'C:/Users/Chenxi/OneDrive/phd/age_and_fire/data/01_raw/03_HALO-DB_mission_116/'
dircInput2 = 'C:/Users/Chenxi/OneDrive/phd/age_and_fire/data/03_cleaned/03_HALO-DB_mission_116_cleaned/'
#file names
fn_amica_database = glob2.glob(dircInput1 + 'AMICA/' + '*.ames')
fn_amica_wiki = glob2.glob(dircInput1 + 'AMICA/' + '*.dat')
fn_bahamas = glob2.glob(dircInput1 + 'BAHAMAS/' + '*.nc')
fn_fairo = glob2.glob(dircInput1 + 'FAIRO/' + '*FAIRO_O3*.ames')
fn_fairoCI = glob2.glob(dircInput1 + 'FAIRO/' + '*FAIROCI_O3*.ames')
fn_fish = glob2.glob(dircInput1 + 'FISH/' + '*FISH_H2O.ames')
fn_umaq = glob2.glob(dircInput1 + 'UMAQ/' + '*.ames')
fn_aeneas = glob2.glob(dircInput1 + 'AENEAS/' + '*.ames')
fn_hagar_li = glob2.glob(dircInput1+'*HAGARV_LI_prelim.ames')
fn_hagar_ecd = glob2.glob(dircInput1+'*HAGARV_ECD_preliminary.ames')
fn_ghost = glob2.glob(dircInput1+'*GhOST_MS_preliminary.ames')
fn_Met_V2 = glob2.glob(dircInput1 + 'CLAMS_Met/' + '*CLAMS_Met_V2.nc')
fn_agespec_HN2 = glob2.glob(dircInput1+'*CLAMS_agespec_HN2.nc')
# fn_backtraj_nr_ST1_cfc_clim = glob2.glob(dircInput1+'clams_at_halo/'+'*backtraj_nr_ST1_cfc_clim.nc')
# fn_backtraj_nr_ST1_clim = glob2.glob(dircInput1+'clams_at_halo/'+'*backtraj_nr_ST1_clim.nc')
fn_sfctracer_F02 = glob2.glob(dircInput1+'*CLAMS_sfctracer_F02.nc')
fn_chem_V1 = glob2.glob(dircInput1+'*CLAMS_chem_V1.nc')
#fn_gloria_kit = glob2.glob(dircInput1+'*GLORIA_chemistry_mode_KIT.nc')
#fn_gloria_fzj = glob2.glob(dircInput1+'*GLORIAFZJ_L1V0002preL2V00pre.nc')
############################in-situ measurements###############################
def clean_bahamas(res, fn = fn_bahamas):
frame = []
for filename in fn:
df = xr.open_dataset(filename).to_dataframe()
df.rename(columns = {'TIME': "time",
'IRS_ALT': 'ALT',
'IRS_LAT': 'LAT',
'IRS_LON': 'LON',
}, inplace = True)
df.set_index('time', inplace = True)
df = df.resample(f'{res}S').mean()
df['flight'] = df.index[0].strftime('%Y-%m-%d')
frame.append(df)
output = pd.concat(frame)
output.sort_index(inplace = True)
return output[['ALT','LAT','LON','THETA','flight']]
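# Usage note (added): e.g. clean_bahamas(10) averages the avionics data onto a
# common 10-second grid, so it can be merged by time with the other instruments
# cleaned below; the same resample-to-res-seconds pattern is used throughout.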
def clean_amica(res, fn = fn_amica_wiki):
frame = []
for filename in fn:
df = pd.read_csv(
filename,
delimiter = ',',
skiprows = [1],
header = [0],
parse_dates = [0],
infer_datetime_format = True,
#names=['time','AMICA:OCS','AMICA:CO','AMICA:H2O']
)
df.set_index('time', inplace = True)
df.sort_index(inplace = True)
new_names = {
'CO': 'AMICA_CO',
'H2O': 'AMICA_H2O',
}
df.rename(columns = new_names, inplace = True)
frame.append(df)
output = pd.concat(frame)
output.sort_index(inplace = True)
output.rename(columns = {'O3': 'AMICA_O3',}, inplace = True)
return output#[list(new_names.values())+ ['OCS', 'AMICA_O3']]
def clean_fairo(res, fn = fn_fairo):
frame = []
for filename in fn:
        date = pd.to_datetime(re.findall(r"(\d+)", filename)[-3])
df = pd.read_csv(
filename,
delimiter='\t',
skiprows=27,
header=0
)
df['time'] = df['UTC_seconds'].apply(lambda x: datetime.timedelta(seconds = x) + date)
df.drop(columns = ['UTC_seconds'], inplace = True)
df.set_index('time', inplace = True)
df.replace(-9999, np.nan, inplace=True)
df = df.resample(f'{res}S').mean()
df.rename(columns = {'Ozone[ppb]': 'FAIRO_O3'}, inplace = True)
frame.append(df)
output = pd.concat(frame)
output.sort_index(inplace = True)
return output
def clean_fish(res, fn = fn_fish):
frame=[]
for filename in fn:
        date=pd.to_datetime(re.findall(r"(\d+)", filename)[-2])
df = pd.read_csv(
filename,
skiprows=19,
delimiter=' ',
names=['UTC_seconds','H2O_tot','H2O_tot_err']
)
df['time']=df['UTC_seconds'].apply(lambda x: datetime.timedelta(seconds=x)+date )
#,parse_dates=[0])
df.set_index('time', inplace = True)
df = df.resample(f'{res}S').mean()
df.rename(columns = {'H2O_tot': 'FISH_H2O'}, inplace = True)
frame.append(df)
output = pd.concat(frame)
output.sort_index(inplace = True)
return output[['FISH_H2O']]
def clean_umaq(res, fn = fn_umaq):
frame=[]
for filename in fn:
        date=pd.to_datetime(re.findall(r"(\d+)", filename)[-5])
df = pd.read_csv(
filename,
skiprows=36,
delimiter=' ',
header=0,
)
df['time']=df['UTC_seconds'].apply(lambda x: datetime.timedelta(seconds=x)+date )
#,parse_dates=[0])
df.drop(columns = ['UTC_seconds'], inplace = True)
df.set_index('time', inplace = True)
df.replace(99999.00, np.nan, inplace = True)
df = df.resample(f'{res}S').mean()
df.rename(columns = {'UMAQS_CH4_ppbv': 'UMAQS_CH4',
'UMAQS_N2O_ppbv': 'UMAQS_N2O',
'UMAQS_CO2_ppmv': 'UMAQS_CO2',
'UMAQS_CO_ppbv': 'UMAQS_CO',
}, inplace = True)
frame.append(df)
output = pd.concat(frame)
output.sort_index(inplace = True)
return output
def clean_aeneas(res, fn = fn_aeneas):
frame=[]
for filename in fn:
        date=pd.to_datetime(''.join(re.findall(r"(\d+)", filename)[-3:-1]))
df = pd.read_csv(
filename,
skiprows=20,
delimiter=' ',
names=['UTC_seconds', 'AENEAS_NO', 'AENEAS_NOy'],
)
df['time']=df['UTC_seconds'].apply(lambda x: datetime.timedelta(seconds=x)+date )
#,parse_dates=[0])
df.drop(columns = ['UTC_seconds'], inplace = True)
df.set_index('time', inplace = True)
df.replace(-9999, np.nan, inplace = True)
df = df.resample(f'{res}S').mean()
frame.append(df)
output = pd.concat(frame)
output.sort_index(inplace = True)
return output
#####################################CLaMS#####################################
def clean_Met_V2(res, fn = fn_Met_V2):
frame=[]
for filename in fn:
df = xr.open_dataset(filename).to_dataframe()
df = df.resample(f'{res}S').mean()
# df=df.rename(columns={"TIME": "time"})
# df=df.set_index('time')
frame.append(df)
output =
|
pd.concat(frame)
|
pandas.concat
|
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
np.warnings.filterwarnings('ignore')
titanic = pd.read_csv('data/titanic/train.csv')
titanic['Embarked'] = titanic['Embarked'].fillna(value='S')
# Overall exploration before cleanup
print('\n\n********************************** Dataset BEFORE Cleanup ***************************************')
print(titanic.head(4).to_string())
# Encoding categorical variables using dummy variables
print('\n\n********************************** Dummy Encoded ***************************************')
encoded_sex = pd.get_dummies(titanic['Sex'], drop_first=True)
encoded_embarked =
|
pd.get_dummies(titanic['Embarked'], drop_first=True)
|
pandas.get_dummies
|
import os
import csv
import pandas as pd
dataset = "./data-csv/"
keys = ['Дата измерения', 'Температура', 'Влажность', 'СО2', 'ЛОС',
'Пыль pm 1.0', 'Пыль pm 2.5', 'Пыль pm 10', 'Давление', 'AQI', 'Формальдегид']
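# (added note) The CSV column keys above are Russian: measurement date,
# temperature, humidity, CO2, VOC, dust pm 1.0 / 2.5 / 10, pressure, AQI and
# formaldehyde; they are kept verbatim because they must match the data files.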
if not os.path.exists(dataset):
os.system("./download_data.sh")
files = os.listdir(dataset)
files.sort()
dataframes = []
for file in files:
full_path = os.path.join(dataset, file)
name, tail = file.split("_")
sensor_n, tail = tail.split(".")
name = name.strip()
sensor_n = int(sensor_n)
print(name, "|", sensor_n)
df = pd.read_csv(full_path)
dataframes.append({"name": name, "number": sensor_n, "df": df})
# with open(full_path) as csvfile:
# reader = csv.DictReader(csvfile)
# for row in reader:
# date = row['Дата измерения']
# temp = row['Температура']
# humid = row['Влажность']
# co2 = row['СО2']
# los = row['ЛОС']
# pm1_0 = row['Пыль pm 1.0']
# pm2_5 = row['Пыль pm 2.5']
# pm10 = row['Пыль pm 10']
# pressure = row['Давление']
# AQI = row['AQI']
# formald = row['Формальдегид']
# # print(row)
# print(dataframes)
all_df =
|
pd.DataFrame(dataframes)
|
pandas.DataFrame
|
#---------------------------------------------------------------
#__main__.py
#this script collates measurements from individual csv outputs of
#the morphometriX GUI
#the csvs can be saved either all in one folder or within each individual
#animals folder.
#this version includes a safety net that recalculates the measurement using
#accurate altitude and focal lengths that the user must provide in csvs.
# this version uses PyQt5 instead of easygui (used in v2.0)
#created by: <NAME> (<EMAIL>), March 2020
#updated by: <NAME>, June 2021
#----------------------------------------------------------------
#import modules
import pandas as pd
import numpy as np
import os, sys
import math
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit, QFileDialog, QMessageBox, QLabel, QVBoxLayout
from PyQt5.QtGui import QIcon
import collatrix.collatrix_functions
from collatrix.collatrix_functions import anydup, readfile, fheader, lmeas, wmeas, setup, pull_data, safe_data, end_concat, df_formatting
from collatrix.collatrix_functions import collate_v4and5, collate_v6
class App(QWidget):
def __init__(self):
super().__init__()
self.title = 'close box to end script'
self.left = 10
self.top = 10
self.width = 640
self.height = 480
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.show()
#add message box with link to github documentation
msgBox = QMessageBox()
msgBox.setWindowTitle("For detailed input info click link below")
msgBox.setTextFormat(QtCore.Qt.RichText)
msgBox.setText('<a href = "https://github.com/cbirdferrer/collatrix#inputs">CLICK HERE</a> for detailed input instructions, \n then click on OK button to continue')
x = msgBox.exec_()
#do you want the Animal ID to be assigned based on the name of the folder
items = ('yes', 'no')
anFold, okPressed = QInputDialog.getItem(self,"Input #1", "Do you want the Animal ID to be assigned based on the name of the folder? \n yes or no",items,0,False)
if okPressed and anFold:
print("{0} Animal ID in folder name".format(anFold))
        #ask if they want safety net
items = ('yes', 'no')
safety, okPressed = QInputDialog.getItem(self,"Input #2", "Do you want to use the safety? \n Yes or No?",items,0,False)
if okPressed and safety:
print("{0} safety".format(safety))
#if safety yes, ask for file
if safety == 'yes':
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
safe_csv, _ = QFileDialog.getOpenFileName(self,"2.1 Safety File: Image list with altitudes and other information.", "","All Files (*);;csv files (*.csv)", options=options)
print("safety csv = {0}".format(safe_csv))
elif safety == 'no':
pass
#animal id list?
items = ('no','yes')
idchoice, okPressed = QInputDialog.getItem(self, "Input #3", "Do you want output to only contain certain individuals? \n Yes or No?",items,0,False)
if idchoice and okPressed:
print("{0} subset list".format(idchoice))
if idchoice == 'yes':
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
idsCSV, _ = QFileDialog.getOpenFileName(self,"3.1 File containing ID list", "","All Files (*);;csv files (*.csv)", options=options)
if idsCSV:
print("ID list file = {0}".format(idsCSV))
elif idchoice == 'no':
pass
#ask for name of output
outname, okPressed = QInputDialog.getText(self, "Input #4", "Prefix for output file",QLineEdit.Normal,"")
#import safety csv if safety selected
if safety == 'yes':
dfList = pd.read_csv(safe_csv, sep = ",")
dfList = dfList.dropna(how="all",axis='rows').reset_index()
df_L = dfList.groupby('Image').first().reset_index()
df_L['Image'] = [x.strip() for x in df_L['Image']]
elif safety == 'no':
df_L = "no safety"
#get folders
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
GUIfold = QFileDialog.getExistingDirectory(None, "Input 5. Folder containing MorphoMetriX outputs",options=options)
saveFold = QFileDialog.getExistingDirectory(None,"Input 6. Folder where output should be saved",options = options)
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
#make lists
#for csvs
csvs_all = []
csvs = []
not_mmx = []
#for measurements
measurements = []
nonPercMeas = []
#walk through all folders in GUI folder and collect all csvs
for root,dirs,files in os.walk(GUIfold):
csvs_all += [os.path.join(root,f) for f in files if f.endswith('.csv')]
#make sure the csvs are morphometrix outputs by checking first row
csvs += [c for c in csvs_all if 'Image ID' in
|
pd.read_csv(c,sep='^',header=None,prefix='X',engine = 'python',quoting=3, na_values = ['""','"'],encoding_errors = "ignore")
|
pandas.read_csv
|
"CMIP6 data read comparison code"
import sys
import json
from pprint import pprint
import logging
import time
import os
import pandas as pd
import s3fs
import argparse
from random import randint
parser = argparse.ArgumentParser(description='Gather variables from command line', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'config',
help = "Config file location",
)
parser.add_argument(
'-l', '--log-level',
action = 'store',
dest = 'log_level',
help = 'Set the log level of console output (CRITICAL, ERROR, WARNING, INFO, DEBUG). Default: INFO',
required = False,
default = 'INFO',
)
class ConfigError(Exception):
"Raised on config error"
pass
class ReadError(Exception):
"Raised on problem with the test's read"
class RunError(Exception):
"Raised on problem with running test"
class CMIPRead:
def __init__(self, config):
self.config = config
self.results = {'test': '{}-{}-{}'.format(config['method'],
config['source'], config['read_pattern']),
'config_name': config['config_name'],
'run_at': time.ctime(),
'config_file': config['config_file'],
'config_modified_at': time.ctime(os.stat(config['config_file']).st_mtime) if config['config_file'] else None,
'repeats': config['repeats'],
'read_pattern': config['read_pattern']
}
def get_zarr_store(self):
caringo_s3 = s3fs.S3FileSystem(anon=True,
client_kwargs={'endpoint_url': self.config['endpoint']})
zarr_path = os.path.join(self.config['path'], self.config['file'])
store = s3fs.S3Map(root=zarr_path, s3=caringo_s3)
return store
def save_results(self):
        # check if there's a saved results dataframe (JSON) already on disk
logging.info('Saving data...')
try:
df =
|
pd.read_json('results_df.json')
|
pandas.read_json
|
'''
The master decomposition file used to decompose raw accelerometer data.
The following steps are executed in this order:
Magnitudes from three axes of accelerometers are calculated
Magnitudes are highpass filtered at 1 hertz
    RMS values are calculated and a motion threshold slightly
    above the noise floor is created
    Data is transformed with an FFT, 200 points (10 seconds) at a time
All the remaining tsfresh decomposition source code is ran
https://tsfresh.readthedocs.io/en/latest/text/list_of_features.html
Decomposed datasets are saved for each baby
'''
import pandas as pd
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import os
plt.style.use('ggplot')
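# Hedged, standalone sketch (added; not part of the pipeline below) of the first
# steps listed in the module docstring, using the numpy/scipy imports above:
# axis magnitude, a 1 Hz highpass, RMS, and an FFT over 200-sample (10 s)
# windows. The 20 Hz sample rate is inferred from "200 points (10 seconds)" and
# is an assumption here.
def _decomposition_sketch(x, y, z, fs=20.0):
    mag = np.sqrt(x ** 2 + y ** 2 + z ** 2)           # combined magnitude
    b, a = signal.butter(4, 1.0 / (fs / 2.0), btype="highpass")
    filtered = signal.filtfilt(b, a, mag)             # remove gravity / drift
    rms = np.sqrt(np.mean(filtered ** 2))             # motion-threshold basis
    n = (filtered.size // 200) * 200
    windows = filtered[:n].reshape(-1, 200)           # 10 s windows at 20 Hz
    spectra = np.abs(np.fft.rfft(windows, axis=1))    # per-window FFT magnitude
    return rms, spectra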
class noise:
def __init__(self, n):
self.df_comp = pd.read_csv(str(n) + '.csv')
self.cols = ['LL','LA','C','RA','RL']
self.df =
|
pd.DataFrame()
|
pandas.DataFrame
|
from datetime import datetime
import unittest
import numpy as np
import pandas.core.datetools as datetools
from pandas.core.daterange import DateRange, XDateRange
####
## XDateRange Tests
####
def eqXDateRange(kwargs, expected):
assert(np.array_equal(list(XDateRange(**kwargs)), expected))
def testXDateRange1():
eqXDateRange(dict(start = datetime(2009, 3, 25),
nPeriods = 2),
[datetime(2009, 3, 25), datetime(2009, 3, 26)])
def testXDateRange2():
eqXDateRange(dict(start = datetime(2008, 1, 1),
end = datetime(2008, 1, 3)),
[datetime(2008, 1, 1),
datetime(2008, 1, 2),
datetime(2008, 1, 3)])
def testXDateRange3():
eqXDateRange(dict(start = datetime(2008, 1, 5),
end = datetime(2008, 1, 6)),
[])
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestXDateRange(unittest.TestCase):
def test_constructor(self):
rng = XDateRange(START, END, offset=datetools.bday)
self.assertEquals(rng.timeRule, 'WEEKDAY')
rng = XDateRange(START, END, timeRule='WEEKDAY')
self.assertEquals(rng.offset, datetools.bday)
class TestDateRange(unittest.TestCase):
def setUp(self):
self.rng = DateRange(START, END, offset=datetools.bday)
def test_constructor(self):
rng = DateRange(START, END, offset=datetools.bday)
rng = DateRange(START, periods=20, offset=datetools.bday)
rng = DateRange(end=START, periods=20, offset=datetools.bday)
def test_getCachedRange(self):
rng =
|
DateRange.getCachedRange(START, END, offset=datetools.bday)
|
pandas.core.daterange.DateRange.getCachedRange
|
"""Utilities for visualization tools."""
import itertools
import pandas
__all__ = ["assign_tree_color", "check_angle", "check_location",
"check_orientation"]
def check_orientation(orientation):
"""Check ``orientation`` parameter and return as `bool`.
Parameters
----------
    orientation : {'vertical', 'horizontal'}
Returns
-------
is_vertical : bool
Raises
------
ValueError
"""
if orientation == "vertical":
is_vertical = True
elif orientation == "horizontal":
is_vertical = False
else:
raise ValueError("'orientation' must be 'vertical' or 'horizontal'")
return is_vertical
def check_angle(angle):
"""Check ``angle`` parameter and return as `int`.
Parameters
----------
angle : {0, 90, -90}
Returns
-------
int
Raises
------
ValueError
"""
if angle not in [0, 90, -90]:
raise ValueError("'angle' must be 0, 90, or -90")
return int(angle)
def check_location(location):
"""Check ``location`` parameter and return itself.
Parameters
----------
location : {'first', 'last', 'inner', 'outer'}
Returns
-------
str
Raises
------
ValueError
"""
if location not in ["first", "last", "inner", "outer"]:
raise ValueError("'location' must be 'first', 'last', 'inner', or "
"'outer'")
return location
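# Usage note (added): each validator either returns a normalized value or
# raises ValueError, e.g. check_orientation("horizontal") -> False,
# check_angle(90) -> 90, check_location("inner") -> "inner".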
def assign_tree_color(tree_data, palette, default_color):
"""Assign colors to tree lines.
Parameters
----------
tree_data : pandas.DataFrame
palette : list[str]
default_color : str
Notes
-----
``tree_data`` is modified in-place.
"""
groups = tree_data["group"].copy()
if not groups.isna().all():
groups.sort_values(inplace=True)
groups = groups.unique()
it = itertools.cycle(palette)
colors = {group: next(it) for group in groups if not
|
pandas.isna(group)
|
pandas.isna
|
import os
from concurrent.futures import ProcessPoolExecutor
import itertools
import yaml
import numpy as np
import pandas as pd
from lib.constants import *
from lib.utils import *
TOP_N = 15
config = yaml.load(open('config.yaml'), Loader=yaml.FullLoader)
parameters = {k: [v['default']] for k, v in config['parameters'].items()}
to_update = {
"cross_policy": ['BLXa','BLXab'],
"elitism": [False,True],
"num_pop": [25,50,100],
"num_generations": [25,50,100],
"cross_rate": [0.6,0.8,1.0],
"mutation_rate": [0.01,0.05,0.1],
"eid": list(range(1,NUM_EXECUTIONS+1)),
}
parameters.update(to_update)
parameters_names = list(parameters.keys())
combinations = itertools.product(*list(parameters.values()))
# args = [('python genetic_algorithm.py '+' '.join([f'--{k}={v}' for k,v in zip(parameters_names,combination)]),)
# for combination in combinations]
result_df = pd.DataFrame(columns=parameters_names)
for i,combination in enumerate(combinations):
p = {k:v for k,v in zip(parameters_names,combination)}
name = get_parameters_name(p)
# print(DIRS['DATA']+name+'.json')
df =
|
pd.read_json(DIRS['DATA']+name+'.json')
|
pandas.read_json
|
import pandas as pd
rawData = {'t': 1525019820000, 'T': 1525019879999, 'o': '0.07282300', 'c': '0.07290700', 'h': '0.07293300', 'l': '0.07279800', 'v': '48.57300000'}
df =
|
pd.DataFrame([rawData])
|
pandas.DataFrame
|
# coding: utf-8
# # Process data
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import os
import pandas as pd
import numpy as np
import random
import glob
import umap
import seaborn as sns
import matplotlib.pyplot as plt
from ggplot import *
from numpy.random import seed
randomState = 123
seed(randomState)
# ## Data
#
# Data downloaded from ADAGE repository [ADAGE](https://github.com/greenelab/adage).
#
# ```
# data
# ```
# 1. Raw data was downloaded from ArrayExpress using Affymetrix GeneChip
# 2. Use [RMA](https://www.rdocumentation.org/packages/affy/versions/1.50.0/topics/rma) bioconductor library to convert raw array data to log 2 gene expression data.
# 3. Only keep PA genes, remove control genes
#
# ```
# normalized_data
# ```
# 1. Use data from above
# 2. Normalize each gene to be between 0 and 1
# ## About Affymetrix GeneChip processing
#
# **Measurements**
# mRNA samples are labeled with fluorescence and hybridized to a GeneChip probe array. The probe array is then scanned and the fluorescence intensity of each probe (or feature) is measured. A transcript is represented by a probe set (~11-20 pairs of probes - see explanation of pairs below). The probe set intensity forms the expression measure for a given transcript.
#
# **Array Design**
# Two probes: 1) probe is completely complementary to target sequence, perfect match probe (PM) and 2) probe contains a single mismatch to the target sequence in the middle of the probe, mismatch probe(MM). A probe pair is (PM, MM)
#
# from [The Affymetrix GeneChip Platform: An Overview](https://www.sciencedirect.com/science/article/pii/S0076687906100014?via%3Dihub#fig0001)
#
# **Robust multiarray average (rma)**
#
# 1. Assuming PM = background + signal, correct for the background signal by computing E[signal | background + signal], with signal ~ exponential and background ~ normal.
# 2. Quantile normalization is used to make the distribution of probe intensities the same across arrays. The steps are: 1) for each array, rank the probe intensities from lowest to highest, 2) for each array, rearrange the probe intensity values from lowest to highest, 3) take the average across arrays at each rank and assign it to that rank, 4) replace the ranks from (1) with these mean values. Example from [Quantile Normalization wiki](https://en.wikipedia.org/wiki/Quantile_normalization)
# 3. Calculate the probe set intensity Y by averaging PM-MM across probes in the probe set and log2 transforming, then fit the regression model: Y (probe set intensity) = probe affinity effect + *log scale expression level* + error
#
# from [Exploration, normalization, and summaries of high density oligonucleotide array probe level data](https://academic.oup.com/biostatistics/article/4/2/249/245074)
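# Hedged illustration (added; not part of the original notebook): the quantile
# normalization steps described above, applied to a toy probe-by-array frame
# using the pandas import from the first cell. The values are invented purely
# for illustration.
_toy = pd.DataFrame({'array_1': [5.0, 2.0, 3.0, 4.0],
                     'array_2': [4.0, 1.0, 4.0, 2.0]})
# steps 1-3: rank within each array, then average the sorted values across arrays
_rank_mean = _toy.stack().groupby(
    _toy.rank(method='first').stack().astype(int)).mean()
# step 4: replace each value with the mean value for its rank
_toy_qnorm = _toy.rank(method='min').stack().astype(int).map(_rank_mean).unstack()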
#
# **Alternative normalization methods**
# In[2]:
# Load arguments
data_file = f"{os.path.dirname(os.getcwd())}/data/pseudomonas/Pa_compendium_02.22.2014.pcl"
normalized_data_file = f"{os.path.dirname(os.getcwd())}/data/pseudomonas/train_set_normalized.pcl"
metadata_file = f"{os.path.dirname(os.getcwd())}/metadata/sample_annotations.tsv"
# In[3]:
# Read in data
data = pd.read_table(data_file, header=0, sep='\t', index_col=0).T
data.head(5)
# In[4]:
# Read in data
normalized_data = pd.read_table(normalized_data_file, header=0, sep='\t', index_col=0).T
normalized_data.head(5)
# In[5]:
# Read in metadata
metadata =
|
pd.read_table(metadata_file, header=0, sep='\t', index_col='ml_data_source')
|
pandas.read_table
|
import slack
from flask import Response
import pandas as pd
import numpy as np
from numpy import nan
import re
import os
import networkx as nx
from pyvis.network import Network
from dotenv import load_dotenv, dotenv_values
from statsmodels.tsa.arima.model import ARIMA
# load environment variables
# config = dotenv_values(".env")
load_dotenv()
SLACK_TOKEN = os.getenv('SLACK_TOKEN')
# define slack client
client = slack.WebClient(token=SLACK_TOKEN)
# function to retrieve the display name of the user based on user id
def get_name(user_id):
try:
out = client.users_info(user=user_id)["user"]["profile"]["real_name"]
except:
out = None
return out
# function to get the channels that a user is active in
def get_user_channels(user_id):
return client.users_conversations(user=user_id)["channels"]
# send response message to user
def send_response_message(user_id):
# define message to be posted
message = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': (
":sparkles: Hey, check out our latest analysis of your team here: <https://network-analysis.azurewebsites.net|LINK> :sparkles:"
)
}
}
client.chat_postMessage(channel=user_id, blocks=[message])
# function used to retrieve network analysis data for a specific channel
def get_slack_data(user_id, text):
# define channel id
try:
channel_id = [channel["id"] for channel in get_user_channels(user_id) if channel["name"] == text][0]
except:
channel_id = "C01T6GGTBQD"
# get channel history
result = client.conversations_history(channel=channel_id, limit=1000)
# retrieve messages
conversation_history = result["messages"]
# create DataFrame
messages = pd.DataFrame(conversation_history)
# add channel id to df
messages["user_id"] = str(user_id)
# convert timestamp to datetime object
messages['date'] = pd.to_datetime(messages['ts'], unit="s").dt.date
messages['ts'] = pd.to_datetime(messages['ts'], unit="s")
# clean text column from quotation marks
messages["text"] = messages["text"].apply(lambda x: re.sub(r"\"", "", x))
# replace user ids with names of users
# messages["reply_users"] = messages["reply_users"].apply(get_name)
# messages["user"] = messages["user"].apply(get_name)
# select columns to save
messages = messages[["client_msg_id", "user_id", "reply_users", "user", "text", "date"]]
# def find_reaction_users(col):
# try:
# return col[0]["users"]
# except:
# return np.nan
# find user ids in the reactions column
#messages["reactions"] = messages["reactions"].apply(find_reaction_users)
# explode the reply_users column to get senders of replies
# messages = messages.explode("reply_users")
# explode the reactions column to get senders of reactions
#messages = messages.explode("reactions")
messages.dropna(inplace=True)
# convert reply users to string for database
messages["reply_users"] = messages["reply_users"].astype(str)
return messages
def time_series_analysis(df):
# df = df[df["reply_users"] != "nan"]
df['reply_users'] = pd.eval(df['reply_users'])
df = df.explode("reply_users")
df['week'] = pd.to_datetime(df['ts']).dt.to_period('W')
df = df.groupby(["week"]).size().reset_index().rename(columns={0: 'count'})
df.set_index("week", inplace=True)
df.index = df.index.to_timestamp()
df.sort_index(inplace=True)
df = df.resample('W').first()
df.fillna(0, inplace=True)
# # messages per day
# df = df.groupby(["ts"]).size().reset_index().rename(columns={0: 'count'})
# # convert column to datetime
# df['ts'] = pd.to_datetime(df['ts'])
# # make ts column into index
# df.index = pd.DatetimeIndex(df['ts'], freq='infer')
# # make sure we count per day and fill empty days
# df = df.asfreq('D')
# # remove ts column
# df.drop(["ts"], axis=1, inplace=True)
# # fill NaN values with 0
# df.fillna(0, inplace=True)
# fit model
model = ARIMA(df["count"], order=(5,1,0))
model_fit = model.fit()
#make prediction
predictions = model_fit.predict(start=len(df)-1, end=len(df)+3)
return {"data": df, "predictions": predictions}
def network_analysis(messages):
#messages = messages[messages["reply_users"] != "nan"]
messages['reply_users'] = pd.eval(messages['reply_users'])
messages = messages.explode("reply_users")
# get number of messages per user
df = messages.groupby(["reply_users", "user"]).size().reset_index().rename(columns={0: 'count'})
# rename columns
df.rename({"reply_users": "source", "user": "target"}, inplace=True, axis=1)
# create graph
Q = nx.from_pandas_edgelist(df, source="source", target="target", edge_attr="count")
# create vis network
net = Network(height='750px', width='100%', bgcolor='white', font_color='black')
net.barnes_hut()
sources = df['source']
targets = df['target']
weights = df['count']
edge_data = zip(sources, targets, weights)
for e in edge_data:
src = e[0]
dst = e[1]
w = e[2]
net.add_node(src, src, title=src)
net.add_node(dst, dst, title=dst)
net.add_edge(src, dst, value=w)
net.show('application/templates/graph_data.html')
# get the degree of centrality for the graph object
degree_centrality = nx.degree_centrality(Q)
# get the closeness of centrality for the graph object
closeness_centrality = nx.closeness_centrality(Q)
# get the betweenness of centrality for the graph object
betweenness_centrality = nx.betweenness_centrality(Q)
# the density of the network
# density = nx.density(Q)
# make network analysis result to dataframe
# network_res = pd.DataFrame({"degree_centrality": degree_centrality, "closeness_centrality": closeness_centrality, "betweenness_centrality": betweenness_centrality}).reset_index()
# number of messages sent
messages_sent = messages.groupby(["reply_users"]).size().reset_index().rename(columns={0: 'count_sent'})
# number of messages received
messages_received = messages.groupby(["user"]).size().reset_index().rename(columns={0: 'count_received'})
print(messages_sent.dtypes)
print(messages_received.dtypes)
# rename index
messages_received.rename({"user": "reply_users"}, inplace=True, axis=1)
# network_res.rename({"index": "reply_users"}, inplace=True, axis=1)
# res = messages_sent[["reply_users", "count_sent"]].merge(network_res, left_on="reply_users", right_on="reply_users")
res = messages_sent[["reply_users", "count_sent"]].merge(messages_received, left_on="reply_users", right_on="reply_users")
return res.values.tolist()
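# Minimal illustration (toy graph, not real Slack data): the three networkx centrality
# measures used above, computed on a hypothetical four-person reply network.
def _example_centralities():
    import networkx as nx
    G = nx.Graph([("alice", "bob"), ("bob", "carol"), ("carol", "alice"), ("carol", "dan")])
    # degree: share of possible neighbours; closeness: inverse average distance;
    # betweenness: share of shortest paths passing through a node
    return (nx.degree_centrality(G),
            nx.closeness_centrality(G),
            nx.betweenness_centrality(G))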
def graph_data():
# load csv
df =
|
pd.read_csv("message_data.csv")
|
pandas.read_csv
|
# dedicated to the penguin
import pandas as pd
import numpy as np
import dask.array.gufunc
import dask.array as da
from dask.diagnostics import ProgressBar
from dask.distributed import Client
from . import io, nexus, tools
from .stream_parser import StreamParser
# from .map_image import MapImage
import h5py
from typing import Union, Dict, Optional, List, Tuple, Callable
import copy
from collections import defaultdict
from warnings import warn, catch_warnings, simplefilter
from tables import NaturalNameWarning
from concurrent.futures import ProcessPoolExecutor, wait, FIRST_EXCEPTION
from contextlib import contextmanager
import os
# top-level helper functions for chunking operations
# ...to be refactored into tools or compute later...
def _check_commensurate(init: Union[list, tuple, np.ndarray], final: Union[list, tuple, np.ndarray],
equal_size: bool = False):
'''Check if blocks with sizes in init are commensurate with (i.e. have boundaries aligned with)
blocks in final, and (optionally) if the final blocks are equally-sized within each initial block.
Useful to check if a dask rechunk operation would act across boundaries of existing chunks,
which is usually something you want to avoid (and might be a sign that something is going wrong).
Blocks in final can hence be no larger than those in init, i.e. len(final) >= len(init),
and of course: sum(final) == sum(init).
Returns whether the blocks are commensurate, and (if so) the number of
final blocks in each of the initial blocks.'''
#TODO consider using numba jit
final_inv = list(final)[::-1] # invert for faster popping
init = list(init)
if sum(init) != sum(final):
raise ValueError('Sum of init and final must be identical.')
blocksize = []
if equal_size:
for s0 in init:
# iterate over initial blocks
n_final_in_initial = s0 // final_inv[-1]
for _ in range(n_final_in_initial):
# iterate over final blocks within initial
if (s0 % final_inv.pop()) != 0:
return False, None
blocksize.append(n_final_in_initial)
else:
for s0 in init:
# iterate over initial blocks
# n_rem = copy.copy(s0)
n_rem = s0
b_num = 0
while n_rem != 0:
n_rem -= final_inv.pop()
b_num += 1
if n_rem < 0:
# incommensurate block found!
return False, None
blocksize.append(b_num)
assert len(final_inv) == 0
return True, blocksize
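# Minimal usage sketch (made-up block sizes, not from any real dataset): two initial blocks
# of 10 split into four blocks of 5 are commensurate; splitting them 6+6+8 crosses the
# first block boundary and is not.
def _example_check_commensurate():
    ok, blocksize = _check_commensurate([10, 10], [5, 5, 5, 5])
    assert ok and blocksize == [2, 2]      # 2 final blocks per initial block
    ok, _ = _check_commensurate([10, 10], [6, 6, 8])
    assert not ok                          # 6 + 6 overshoots the first block of 10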
def _agg_groups(stack: np.ndarray, labels: Union[np.ndarray, list, tuple], agg_function: callable, *args, **kwargs):
'''Apply aggregating function to a numpy stack group-by-group, with groups defined by unique labels,
and return the concatenated results; i.e., the length of the result along the aggregation
axis equals the number of unique labels.
'''
res_list = []
labels = labels.squeeze()
for lbl in np.unique(labels):
res_list.append(agg_function(stack[labels == lbl,...], *args, **kwargs))
return np.concatenate(res_list)
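# Minimal usage sketch (toy arrays): _agg_groups reduces each labelled group of rows to a
# single row, here by summing along axis 0.
def _example_agg_groups():
    stack = np.arange(12).reshape(6, 2)        # 6 "frames" of length 2
    labels = np.array([0, 0, 0, 1, 1, 1])      # two groups of three frames
    return _agg_groups(stack, labels, np.sum, axis=0, keepdims=True)
    # -> array([[ 6,  9],
    #           [24, 27]])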
def _map_sub_blocks(stack: da.Array, labels: Union[np.ndarray, list, tuple], func: callable, aggregating: bool = True,
*args, **kwargs):
'''Wrapper for da.map_blocks, which instead of applying the function chunk-by-chunk can apply it also to sub-groups
within each chunk, as identified by unique labels (e.g. integers). Useful if you want to use large chunks to have fast computation, but
want to apply the function to smaller blocks. Obvious example: you want to sum frames from a diffraction
movie, but have many diffraction movies stored in each single chunk, as otherwise the chunk number would be too large.
The input stack must be chunked along its 0th axis only, and len(labels) must equal the height of the stack.
If aggregating=True, func is assumed to reduce the sub-block height to 1 (like summing all stack frames), whereas
aggregating=False assumes func to leave the sub-block sizes as is (e.g. for cumulative summing).'''
chunked_labels = da.from_array(labels.reshape((-1,1,1)), chunks=(stack.chunks[0],-1,-1), name='sub_block_label')
cc_out = _check_commensurate(stack.chunks[0], np.unique(labels, return_counts=True)[1], equal_size=False)
if not cc_out[0]:
raise ValueError('Mismatched chunk structure: each mapping group must lie within a single chunk')
if 'chunks' in kwargs:
final_chunks = kwargs['chunks']
else:
final_chunks = (tuple(cc_out[1]), ) + stack.chunks[1:] if aggregating else stack.chunks
return da.map_blocks(_agg_groups, stack, chunked_labels,
agg_function=func, chunks=final_chunks, *args, **kwargs)
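# Minimal usage sketch (synthetic data, illustrative only): sum groups of frames inside each
# chunk, e.g. 8 frames stored in chunks of 4, with 2 frames per "movie" so that movie
# boundaries stay within single chunks (as required by the commensurability check above).
def _example_map_sub_blocks():
    stack = da.ones((8, 4, 4), chunks=(4, 4, 4))   # 8 frames, chunked 4 at a time
    labels = np.repeat(np.arange(4), 2)            # 2 frames per label, aligned with chunks
    summed = _map_sub_blocks(stack, labels,
                             lambda x: x.sum(axis=0, keepdims=True), aggregating=True)
    return summed.compute()                        # shape (4, 4, 4), all values 2.0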
class Dataset:
def __init__(self):
self._shots_changed = False
self._peaks_changed = False
self._predict_changed = False
self._features_changed = False
# HDF5 file addresses
self.data_pattern: str = '/%/data'
'''Path to data stacks in HDF5 files. % can be used as placeholder (as in CrystFEL). Default /%/data'''
self.shots_pattern: str = '/%/shots'
'''Path to shot table data in HDF5 files. % can be used as placeholder (as in CrystFEL). Default /%/shots'''
self._fallback_shots_pattern: str = '/%/data/shots'
self.result_pattern: str = '/%/results'
'''Path to result data (peaks, predictions) in HDF5 files. % can be used as placeholder (as in CrystFEL).
Default /%/results. **Note that storing results in this way is discouraged and deprecated.**'''
self.map_pattern: str = '/%/map'
'''Path to map and feature data in HDF5 files. % can be used as placeholder (as in CrystFEL). Default /%/map'''
self.instrument_pattern: str = '/%/instrument'
'''Path to instrument metadata in HDF5 files. % can be used as placeholder (as in CrystFEL). Default /%/instrument'''
self.parallel_io: bool = True
'''Toggles if parallel I/O is attempted for datasets spanning many files. Note that this is independent
from `dask.distributed`-based parallelization as in `store_stack_fast`. Default True, which is overridden
if the Dataset comprises a single file only.'''
# internal stuff
self._file_handles = {}
self._stacks = {}
self._shot_id_cols = ['file', 'Event']
self._feature_id_cols = ['crystal_id', 'region', 'sample']
self._diff_stack_label = ''
# tables: accessed via properties!
self._shots =
|
pd.DataFrame(columns=self._shot_id_cols + self._feature_id_cols + ['selected'])
|
pandas.DataFrame
|
# General Packages
from math import atan2, degrees
from datetime import datetime
from pathlib import Path
import time
import pprint
import numpy as np
import pandas as pd
import pickle
# Plotting
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from matplotlib.dates import date2num
import seaborn as sns
# Scaling
from sklearn.preprocessing import StandardScaler
settings = {
#
# audit settings
'data_name': 'credit',
'method_name': 'logreg',
'normalize_data': True,
'force_rational_actions': False,
#
# script flags
'audit_recourse': True,
'plot_audits': True,
'print_flag': True,
'save_flag': True,
'randomseed': 2338,
#
# placeholders
'method_suffixes': [''],
'audit_suffixes': [''],
}
# Paths
repo_dir = Path(__file__).absolute().parent.parent
paper_dir = repo_dir / 'paper/' # directory containing paper related info
data_dir = paper_dir / 'data/' # directory containing data files
results_dir = paper_dir / 'results/' # directory containing results
# create directories that don't exist
for d in [data_dir, results_dir]:
d.mkdir(exist_ok = True)
# Formatting Options
np.set_printoptions(precision = 4, suppress = False)
pd.set_option('display.max_columns', 30)
pd.options.mode.chained_assignment = None
pp = pprint.PrettyPrinter(indent = 4)
# Plotting Settings
sns.set(style="white", palette="muted", color_codes = True)
plt.rcParams['font.size'] = 20
plt.rcParams['axes.labelsize'] = 24
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.spines.right'] = False
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
plt.rc('legend', fontsize = 20)
# file names
output_dir = results_dir / settings['data_name']
output_dir.mkdir(exist_ok = True)
if settings['normalize_data']:
settings['method_suffixes'].append('normalized')
if settings['force_rational_actions']:
settings['audit_suffixes'].append('rational')
# set file header
settings['dataset_file'] = '%s/%s_processed.csv' % (data_dir, settings['data_name'])
settings['file_header'] = '%s/%s_%s%s' % (output_dir, settings['data_name'], settings['method_name'], '_'.join(settings['method_suffixes']))
settings['audit_file_header'] = '%s%s' % (settings['file_header'], '_'.join(settings['audit_suffixes']))
settings['model_file'] = '%s_models.pkl' % settings['file_header']
settings['audit_file'] = '%s_audit_results.pkl' % settings['audit_file_header']
# Recourse Objects
from recourse.action_set import ActionSet
from recourse.builder import RecourseBuilder
from recourse.auditor import RecourseAuditor
from recourse.flipset import Flipset
### Helper Functions for Experimental Script
def load_data():
"""Helper function to load in data, and output that and optionally a scaler object:
Output:
data: dict with the following fields
outcome_name: Name of the outcome variable (inferred as the first column.)
variable_names: A list of names indicating input columns.
X: The input features for our model.
y: The column of the dataframe indicating our outcome variable.
scaler: The sklearn StandardScaler used to normalize the dataset, if we wish to scale.
X_scaled: Scaled version of X, if we wish to scale
X_train: The training set: set to the whole dataset if not scaled. Set to X_scaled if we do scale.
scaler:
Object used to scale the data; returned as None if normalization is disabled in the settings.
"""
# data set
data_df = pd.read_csv(settings['dataset_file'])
data = {
'outcome_name': data_df.columns[0],
'variable_names': data_df.columns[1:].tolist(),
'X': data_df.iloc[:, 1:],
'y': data_df.iloc[:, 0]
}
scaler = None
data['X_train'] = data['X']
data['scaler'] = None
if settings['normalize_data']:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler(copy=True, with_mean=True, with_std=True)
data['X_scaled'] = pd.DataFrame(scaler.fit_transform(data['X'].to_numpy(dtype=float), data['y'].values),
columns=data['X'].columns)
data['X_train'] = data['X_scaled']
data['scaler'] = scaler
return data, scaler
def undo_coefficient_scaling(clf = None, coefficients = None, intercept = 0.0, scaler = None):
"""
given coefficients and data for scaled data, returns coefficients and intercept for unnormalized data
w = w_scaled / sigma
b = b_scaled - (w_scaled / sigma).dot(mu) = b_scaled - w.dot(mu)
:param clf: sklearn linear classifier
:param coefficients: vector of coefficients
:param intercept: scalar for the intercept function
:param scaler: fitted sklearn StandardScaler, or None if the data were not normalized
:return: coefficients and intercept for unnormalized data
"""
if coefficients is None:
assert clf is not None
assert intercept == 0.0
assert hasattr(clf, 'coef_')
coefficients = clf.coef_
intercept = clf.intercept_ if hasattr(clf, 'intercept_') else 0.0
if scaler is None:
w = np.array(coefficients)
b = float(intercept)
else:
assert isinstance(scaler, StandardScaler)
x_shift = np.array(scaler.mean_)
x_scale = np.sqrt(scaler.var_)
w = coefficients / x_scale
b = intercept - np.dot(w, x_shift)
w = np.array(w).flatten()
b = float(b)
return w, b
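# Hedged worked example (synthetic data, not from the paper's experiments): checks that the
# unscaled coefficients reproduce the scaled model's scores on the raw features, i.e.
# X.dot(w) + b == X_scaled.dot(w_scaled) + b_scaled.
def _example_undo_coefficient_scaling():
    from sklearn.linear_model import LogisticRegression
    rng = np.random.RandomState(0)
    X = rng.normal(size=(200, 3)) * [1.0, 5.0, 0.1] + [0.0, 2.0, -1.0]
    y = (X[:, 0] + 0.2 * X[:, 1] > 0).astype(int)
    scaler = StandardScaler().fit(X)
    clf = LogisticRegression().fit(scaler.transform(X), y)
    w, b = undo_coefficient_scaling(clf=clf, scaler=scaler)
    assert np.allclose(X.dot(w) + b,
                       scaler.transform(X).dot(clf.coef_.flatten()) + clf.intercept_[0])
    return w, b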
def get_coefficient_df(model_dict, variable_names = None, scaler = None):
"""
extract coefficients of all models and store them into a data.frame
:param model_dict: dictionary of models
:param variable_names:
:return:
"""
# get the coefficient values
assert isinstance(model_dict, dict)
coef_df = []
for k in sorted(model_dict.keys()):
coef_vals = model_dict[k].coef_.flatten()
intercept_val = model_dict[k].intercept_[0]
coef_vals, intercept_val = undo_coefficient_scaling(coefficients = coef_vals, intercept = intercept_val, scaler = scaler)
if variable_names is None:
coef_vals = (pd.Series(coef_vals, index = ['x%d' % j for j in range(len(coef_vals))]).to_frame(k))
else:
coef_vals = (pd.Series(coef_vals, index = variable_names).to_frame(k))
coef_df.append(coef_vals)
return pd.concat(coef_df, axis = 1)
def format_gridsearch_df(grid_search_df, settings, n_coefficients, invert_C = True):
"""
Take a fitted GridSearchCV and return:
model_stats_df: data frame containing 1 row per (fold, free parameter) combination.
columns include:
- 'data_name',
- 'method_name',
- 'free_parameter_name',
- 'free_parameter_value' (for each item in free parameter),
- training error,
- testing error,
- n_coefficients
:param grid_search_df:
:param n_coefficients: size of input dataset
:param invert_C: if C is a parameter, invert it (C = 1/lambda in l1 regression)
:return:
"""
train_score_df = (grid_search_df
.loc[:, filter(lambda x: 'train_score' in x and 'split' in x, grid_search_df.columns)]
.unstack()
.reset_index()
.rename(columns={'level_0': 'split_num', 0: 'train_score'})
.set_index('level_1')
.assign(split_num=lambda df: df.apply(lambda x: x['split_num'].replace('_train_score', ''), axis=1))
)
test_score_df = (grid_search_df
.loc[:, filter(lambda x: 'test_score' in x and 'split' in x, grid_search_df.columns)]
.unstack()
.reset_index()
.rename(columns={'level_0': 'split_num', 0: 'test_score'})
.set_index('level_1')
.assign(split_num=lambda df: df.apply(lambda x: x['split_num'].replace('_test_score', ''), axis=1)))
model_stats_df= pd.concat([train_score_df, test_score_df.drop('split_num', axis=1)], axis=1)
model_stats_df['dataname'] = settings['data_name']
param_df = (grid_search_df['params']
.apply(pd.Series))
if invert_C:
param_df['C'] = 1 / param_df['C']
param_df = (param_df.rename(
columns={col: 'param %d: %s' % (idx, col) for idx, col in enumerate(param_df.columns)})
).assign(key=grid_search_df['key'])
model_stats_df = (model_stats_df
.merge(param_df, left_index=True, right_index=True)
)
return model_stats_df.assign(n_coefficients=n_coefficients)
def get_flipset_solutions(model, data, action_set, mip_cost_type = 'max', scaler = None, print_flag = True):
"""
Run a basic audit of a model on the training dataset.
:param model:
:param data:
:param action_set:
:param mip_cost_type:
:param scaler:
:return:
"""
if scaler is not None:
yhat = model.predict(data['X_scaled'])
coefficients, intercept = undo_coefficient_scaling(coefficients=np.array(model.coef_).flatten(), intercept = model.intercept_[0], scaler = scaler)
else:
yhat = model.predict(data['X'])
coefficients, intercept = np.array(model.coef_).flatten(), model.intercept_[0]
action_set.align(coefficients)
# get defaults
audit_results = []
predicted_neg = np.flatnonzero(yhat < 1)
if predicted_neg.size > 0:
U = data['X'].iloc[predicted_neg].values
fb = RecourseBuilder(coefficients = coefficients, intercept = intercept, action_set = action_set, x = U[0], mip_cost_type = mip_cost_type)
# basic audit
start_time = time.time()
if print_flag:
for i, u in enumerate(U):
fb.x = u
info = fb.fit()
audit_results.append(info)
print_log('cost[%06d] = %1.2f' % (i, info['total_cost']))
else:
for i, u in enumerate(U):
fb.x = u
audit_results.append(fb.fit())
print_log('runtime: solved %i IPs in %1.1f seconds' % (i + 1, time.time() - start_time))
return audit_results
def print_score_function(names, coefficients, intercept):
s = ['score function =']
s += ['%1.6f' % intercept]
for n, w in zip(names, coefficients):
if w >= 0.0:
s += ['+\t%1.6f * %s' % (np.abs(w), n)]
else:
s += ['-\t%1.6f * %s' % (np.abs(w), n)]
return '\n'.join(s)
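# Minimal example (made-up coefficients): print_score_function renders a linear score as text.
def _example_print_score_function():
    return print_score_function(names=['Age', 'Income'], coefficients=[0.25, -1.5], intercept=2.0)
    # -> 'score function =\n2.000000\n+\t0.250000 * Age\n-\t1.500000 * Income'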
#### PLOTS
def create_data_summary_plot(data_df, subplot_side_length = 3.0, subplot_font_scale = 0.5, max_title_length = 30):
df =
|
pd.DataFrame(data_df)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import scrapy # needed to scrape
import xlrd # used to easily import xlsx file
import json
import re
import pandas as pd
import numpy as np
from openpyxl import load_workbook
import datetime
from datetime import timedelta
class StoreDataCurrent(scrapy.Spider):
name = 'Ercotbot'
### Get Dates: Today, Yesterday, Last Day in "MASTER-Ercot"
dateToday = str(datetime.datetime.today().strftime('%Y%m%d')) # Today, as an Integer
dateYesterday = str(datetime.date.fromordinal(datetime.date.today().toordinal()-1)).replace('-', '')
### Create dates for URL
df = pd.read_excel('MASTER-Ercot.xlsx', sheet_name = 'Master Data')
df = pd.DataFrame(df)
lastDate = str(df.iat[df.shape[0] - 1, 0]) # get the last scraped date in "MASTER-Ercot"
splitDate = lastDate.split('/') # split up the date and get rid of "/"
SD0 = splitDate[0]; SD1 = splitDate[1]; SD2 = splitDate[2]
splitDate[0] = SD2; splitDate[1] = SD0; splitDate[2] = SD1
lastDate = str(''.join(splitDate))
# lastDate = str(20180810)
print('dateToday: ', dateToday)
print('dateYesterday: ', dateYesterday)
print('lastDate: ', lastDate)
print('Appended? ', int(lastDate) != int(dateYesterday))
allowed_domains = ['ercot.com']
start_urls = ['http://ercot.com/content/cdr/html/{}_real_time_spp'.format(dateYesterday)]
print("``````````````````````````````````````````````````````````````````````````````")
################################################################################################
################################################################################################
###
# Scrape Daily Ercot data and Append to "MASTER-Ercot" file
###
def parse(self, response):
self.logger.info('A response has arrived from %s', response.url)
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
### Scrape table Headers and Values (energy prices)
#headers = response.css(".headerValueClass::text").extract()
values = response.css(".labelClassCenter::text").extract()
### convert to a data frame and append Header
values = np.array(values) # turn ercot data into an numpy array
values = np.reshape(values, (int(len(values)/16), 16)) #reshape the array to be in the same format as it is online
#frameHeaders = pd.DataFrame(data = values[0:][0:], columns = headers) # --> makes it easier to write to xlsx file
frame = pd.DataFrame(data = values[1:][0:], columns = values[0][0:]) # --> makes it easier to write to xlsx file
### Get Dates: Today, Yesterday, Last Day in "MASTER-Ercot"
dateToday = str(datetime.datetime.today().strftime('%Y%m%d')) # Today, as an Integer
dateYesterday = str(datetime.date.fromordinal(datetime.date.today().toordinal()-1)).replace('-', '')
df = pd.read_excel('MASTER-Ercot.xlsx', sheet_name = 'Master Data')
df = pd.DataFrame(df)
lastDate = str(df.iat[df.shape[0] - 1, 0]) # get the last scraped date in "MASTER-Ercot"
splitDate = lastDate.split('/') # split up the date and get rid of "/"
SD0 = splitDate[0]; SD1 = splitDate[1]; SD2 = splitDate[2]
splitDate[0] = SD2; splitDate[1] = SD0; splitDate[2] = SD1
lastDate = str(''.join(splitDate))
# lastDate = str(20180810)
print('dateToday: ', dateToday)
print('dateYesterday: ', dateYesterday)
print('lastDate: ', lastDate)
print('Appended? ', int(lastDate) != int(dateYesterday))
### Append new data to the "MASTER-Ercot" spreadsheet
# If we already appended yesterdays data --> do not append again
if ( int(lastDate) != int(dateYesterday) ): # this prevents us from writing the same data to the spreadsheet multiple times
# Write to Current Working Directory
writer = pd.ExcelWriter('MASTER-Ercot.xlsx', engine='openpyxl')
book = load_workbook('MASTER-Ercot.xlsx')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
frame.to_excel(writer, startrow=len(df)+1 , index=False, sheet_name = 'Master Data')
writer.save()
writer.close()
# Write to Dropbox
out_path = r"/Users/YoungFreeesh/Dropbox/Ercot Data/MASTER-Ercot.xlsx" # the `r` prefix means raw string
writer =
|
pd.ExcelWriter(out_path, engine='openpyxl')
|
pandas.ExcelWriter
|
from __future__ import division
import empyrical as ep
import numpy as np
import pandas as pd
from . import pos
def daily_txns_with_bar_data(transactions, market_data):
"""
Sums the absolute value of shares traded in each name on each day.
Adds columns containing the closing price and total daily volume for
each day-ticker combination.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet
market_data : pd.Panel
Contains "volume" and "price" DataFrames for the tickers
in the passed positions DataFrames
Returns
-------
txn_daily : pd.DataFrame
Daily totals for transacted shares in each traded name.
price and volume columns for close price and daily volume for
the corresponding ticker, respectively.
"""
transactions.index.name = 'date'
txn_daily = pd.DataFrame(transactions.assign(
amount=abs(transactions.amount)).groupby(
['symbol', pd.Grouper(freq='D')]).sum()['amount'])
txn_daily['price'] = market_data.xs('price', level='market_data').unstack()
txn_daily['volume'] = market_data.xs('volume',
level='market_data').unstack()
txn_daily = txn_daily.reset_index().set_index('date')
return txn_daily
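# Hedged usage sketch (toy data; assumes market_data is a DataFrame indexed by
# ('market_data', date) with one column per symbol, matching the .xs calls above).
def _example_daily_txns_with_bar_data():
    dates = pd.date_range('2020-01-01', periods=3, freq='D')
    transactions = pd.DataFrame({'symbol': ['AAPL', 'AAPL', 'MSFT'],
                                 'amount': [10, -5, 20],
                                 'price': [300.0, 301.0, 160.0]},
                                index=dates[[0, 0, 1]])
    bars = {'price': pd.DataFrame({'AAPL': 300.0, 'MSFT': 160.0}, index=dates),
            'volume': pd.DataFrame({'AAPL': 1e6, 'MSFT': 2e6}, index=dates)}
    market_data = pd.concat(bars, names=['market_data'])
    # AAPL trades 15 shares in total on day 1, MSFT 20 shares on day 2,
    # each with close price and daily volume attached
    return daily_txns_with_bar_data(transactions, market_data)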
def days_to_liquidate_positions(positions, market_data,
max_bar_consumption=0.2,
capital_base=1e6,
mean_volume_window=5):
"""
Compute the number of days that would have been required
to fully liquidate each position on each day based on the
trailing n day mean daily bar volume and a limit on the proportion
of a daily bar that we are allowed to consume.
This analysis uses portfolio allocations and a provided capital base
rather than the dollar values in the positions DataFrame to remove the
effect of compounding on days to liquidate. In other words, this function
assumes that the net liquidation portfolio value will always remain
constant at capital_base.
Parameters
----------
positions: pd.DataFrame
Contains daily position values including cash
- See full explanation in tears.create_full_tear_sheet
market_data : pd.Panel
Panel with items axis of 'price' and 'volume' DataFrames.
The major and minor axes should match those of the
passed positions DataFrame (same dates and symbols).
max_bar_consumption : float
Max proportion of a daily bar that can be consumed in the
process of liquidating a position.
capital_base : integer
Capital base multiplied by portfolio allocation to compute
position value that needs liquidating.
mean_volume_window : float
Trailing window to use in mean volume calculation.
Returns
-------
days_to_liquidate : pd.DataFrame
Number of days required to fully liquidate daily positions.
Datetime index, symbols as columns.
"""
dv = market_data.xs('volume', level='market_data') * \
market_data.xs('price', level='market_data')
roll_mean_dv = dv.rolling(window=mean_volume_window,
center=False).mean().shift()
roll_mean_dv = roll_mean_dv.replace(0, np.nan)
positions_alloc = pos.get_percent_alloc(positions)
positions_alloc = positions_alloc.drop('cash', axis=1)
days_to_liquidate = (positions_alloc * capital_base) / \
(max_bar_consumption * roll_mean_dv)
return days_to_liquidate.iloc[mean_volume_window:]
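# Hedged worked example (made-up numbers): with a 10% allocation of a $1M capital base
# (a $100k position), a $5M trailing mean daily dollar volume and a 20% bar-consumption
# cap, roughly 100k / (0.2 * 5M) = 0.1 days are needed to liquidate the position.
def _example_days_to_liquidate_arithmetic():
    position_value = 0.10 * 1e6                  # positions_alloc * capital_base
    consumable_per_day = 0.2 * 5e6               # max_bar_consumption * rolling mean dollar volume
    return position_value / consumable_per_day   # -> 0.1 days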
def get_max_days_to_liquidate_by_ticker(positions, market_data,
max_bar_consumption=0.2,
capital_base=1e6,
mean_volume_window=5,
last_n_days=None):
"""
Finds the longest estimated liquidation time for each traded
name over the course of backtest (or last n days of the backtest).
Parameters
----------
positions: pd.DataFrame
Contains daily position values including cash
- See full explanation in tears.create_full_tear_sheet
market_data : pd.Panel
Panel with items axis of 'price' and 'volume' DataFrames.
The major and minor axes should match those of the
passed positions DataFrame (same dates and symbols).
max_bar_consumption : float
Max proportion of a daily bar that can be consumed in the
process of liquidating a position.
capital_base : integer
Capital base multiplied by portfolio allocation to compute
position value that needs liquidating.
mean_volume_window : float
Trailing window to use in mean volume calculation.
last_n_days : integer
Compute for only the last n days of the passed backtest data.
Returns
-------
days_to_liquidate : pd.DataFrame
Max Number of days required to fully liquidate each traded name.
Index of symbols. Columns for days_to_liquidate and the corresponding
date and position_alloc on that day.
"""
dtlp = days_to_liquidate_positions(positions, market_data,
max_bar_consumption=max_bar_consumption,
capital_base=capital_base,
mean_volume_window=mean_volume_window)
if last_n_days is not None:
dtlp = dtlp.loc[dtlp.index.max() -
|
pd.Timedelta(days=last_n_days)
|
pandas.Timedelta
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 3 11:22:49 2017
@author: tkc
"""
import pandas as pd
import os
import datetime
import sys
import numpy as np
import pkg.SC_messaging_functions as SCmess
import pkg.SC_schedule_functions as SCsch
import pkg.SC_config as cnf # specifies input/output file directories
#%%
from importlib import reload
reload(SCsch)
reload(SCmess)
#%% Download from google sheets Cabrini basketball schedule
sheetID = '1-uX2XfX5Jw-WPw3YBm-Ao8d2DOzou18Upw-Jb6UiPWg'
rangeName = 'Cabrini!A:G'
cabsched = SCapi.downloadSheet(sheetID, rangeName)
#%% Load of other commonly needed info sheets
teams=pd.read_csv(cnf._INPUT_DIR +'\\Teams_2019.csv', encoding='cp437')
coaches=pd.read_csv(cnf._INPUT_DIR +'\\coaches.csv', encoding='cp437')
fields=pd.read_csv(cnf._INPUT_DIR+'\\fields.csv', encoding='cp437')
Mastersignups =
|
pd.read_csv(cnf._INPUT_DIR +'\\master_signups.csv', encoding='cp437')
|
pandas.read_csv
|
# benchmark_tools.creteil
# Copyright 2019 Fondation Medecins Sans Frontières https://fondation.msf.fr/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is part of the ASTapp image processing library
# Author: <NAME>
from ...interfaces import *
from collections.abc import Mapping
from os import path
import pandas as pd
class AST_annotation_Creteil(AST_annotation):
"""Class to access one line (an AST) of the annotation files of Creteil.
This class behaves like a dictionary whose keys are extended antibiotic names.
"""
def __init__(self, guiID, diams, sirs):
meta = sirs[0:3]
tests = diams[4:9]
species = sirs[3]
diameters = diams[9:]
sir_values = sirs[9:]
atb_names = diams.index[9:]
self.ast_id = guiID
self.species = species
self.expert_system_status = []
for k in tests.index:
value = tests[k]
if not pd.isna(value):
self.expert_system_status.append(
{'name': k, 'value': value, 'input': False})
self.sample_date = meta[2]
self.atb_names = atb_names.tolist()
self.sir_values = sir_values.tolist()
self.raw_sir_values = None
self.diameters = diameters.tolist()
self.sample_type = None
class Annotations_set_Creteil(Annotations_set):
"""Help accessing and combining SIR annotation results.
It beheaves like a dictionary which keys are the guiID (id of the pictures)"""
def __init__(self, annotation_folder):
self.files = {
"diam": path.join(annotation_folder, "annotations_diam.csv"),
"sir": path.join(annotation_folder, "annotations_SIR.csv")
}
self.diam_df = Annotations_set_Creteil.read_annotation_file(
self.files["diam"])
self.sir_df = Annotations_set_Creteil.read_annotation_file(
self.files["sir"])
assert len(self.diam_df) == len(self.sir_df)
self.atb_names = self.diam_df.keys()[9:]
self.ast_ids = list(self.diam_df.index)
@staticmethod
def read_annotation_file(path):
df =
|
pd.read_csv(path, sep=';', index_col="guiID")
|
pandas.read_csv
|
import ee
from geemap import ee_to_geopandas
import pandas as pd
import boto3
from sklearn.utils import shuffle
def points_to_df(pts_geo):
"""Converts a feature collection into a pandas dataframe
Args:
pts_geo: collection of pixels on an image
Returns:
df_geo: dataframe containing bands and coordinates of pixels
"""
df_geo = ee_to_geopandas(pts_geo)
df_geo = df_geo.drop_duplicates()
df_geo["x"] = df_geo["geometry"].x
df_geo["y"] = df_geo["geometry"].y
df_geo = df_geo.drop("geometry", axis=1)
return df_geo
def satellite_data(collection, region_pt, date_range):
"""Returns image data from a landsat collection.
Args:
collection: dataset name
region_pt: coordinates of location the image must contain
date_range: first and last dates to use
Returns:
object: a single satellite image
"""
return (
ee.ImageCollection(collection)
.filterBounds(ee.Geometry.Point(region_pt))
.filterDate(date_range[0], date_range[1])
.sort("CLOUD_COVER")
.first()
)
def sample_points(image, region, scale, num_pix, geom=True, seed=1234):
"""Sample points from dataset
Args:
image: image to sample from
region: region to sample from
scale: pixel size in meters
num_pix: number of pixels to be sampled
geom: whether to add the center of the sampled pixel as property
seed: random seed used for sampling
Returns:
object: ee.FeatureCollection
"""
return image.sample(
**{
"region": region,
"scale": scale,
"numPixels": num_pix,
"seed": seed,
"geometries": geom,
}
)
def get_masks(base_image):
"""Returns image masks corresponding to mangrove and non-mangrove pixels
Args:
base_image: earth engine image to create masks from
Returns:
objects: ee.Image, ee.Image
"""
img_mangrove = get_mangrove_data()
mangrove_mask = base_image.updateMask(img_mangrove.eq(1))
non_mangrove_mask = base_image.updateMask(mangrove_mask.unmask().Not())
return mangrove_mask, non_mangrove_mask
def get_data_by_zone_year(
area_of_int,
date_range,
base_dataset,
bands,
scale=30,
num_pix={"minor": 10000, "major": 1000},
):
"""Returns sampled data points from an area of interest
Args:
area_of_int: tuple containing (longitude, latitude) of the point of interest
date_range: list of two strings of format yyyy-mm-dd
base_dataset: name of satellite data to sample points from
bands: satellite image bands to keep in dataset
scale: pixel size in meters for sampling points
num_pix: dictionary containing number of pixels to sample for two classes
Returns:
object: dict
"""
base_image = satellite_data(base_dataset, area_of_int, date_range)
base_image = base_image.select(bands)
mangrove_mask, non_mangrove_mask = get_masks(base_image)
# sample points from mangrove area
pts_mangrove = sample_points(
mangrove_mask, mangrove_mask.geometry(), scale, num_pix["minor"]
)
mangrove_gdf = points_to_df(pts_mangrove)
mangrove_gdf["label"] = 1
# sample points from non-mangrove area
pts_non_mangrove = sample_points(
non_mangrove_mask, non_mangrove_mask.geometry(), scale, num_pix["major"]
)
non_mangrove_gdf = points_to_df(pts_non_mangrove)
non_mangrove_gdf["label"] = 0
return {
"base_image": base_image,
"mangrove_points": pts_mangrove,
"other_points": pts_non_mangrove,
"df_mangrove": mangrove_gdf,
"df_other": non_mangrove_gdf,
}
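# Hedged usage sketch (illustrative only): requires an authenticated Earth Engine session and
# assumes get_mangrove_data() is defined elsewhere in this module; the dataset name, bands and
# coordinates below are examples, not project defaults.
def _example_get_data_by_zone_year():
    ee.Initialize()
    result = get_data_by_zone_year(
        area_of_int=(89.0, 21.9),                     # (longitude, latitude) in the Sundarbans
        date_range=["2019-01-01", "2019-12-31"],
        base_dataset="LANDSAT/LC08/C01/T1_SR",
        bands=["B2", "B3", "B4", "B5", "B6", "B7"],
    )
    return result["df_mangrove"].head(), result["df_other"].head()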
def save_regional_data(data_dict, meta_dict, bucket):
"""Uploads the labeled data for a region to s3
Args:
data_dict: dictionary containing base image, mangrove and non-mangrove data frames
meta_dict: dictionary containing metadata
bucket: s3 bucket name
"""
df_training = pd.concat([data_dict["df_mangrove"], data_dict["df_other"]], axis=0)
df_training = shuffle(df_training)
# fname = f"{meta_dict['src_dataset']}_year{meta_dict['year']}_{meta_dict['poi']}.csv"
fname = f"{meta_dict['src_dataset']}/Year{meta_dict['year']}/{meta_dict['poi']}.csv"
df_training.to_csv(f"s3://{bucket}/{fname}", index=False)
num_rows = df_training.label.value_counts()
print(
f"rows: {len(df_training)}, rows_mangrove = {num_rows[1]}, rows_other = {num_rows[0]}"
)
def split_dataset(test_set_names, bucket, folder):
"""Splits S3 dataset into training and test by region
Args:
test_set_names: list of region names for test dataset
folder: folder name within S3 bucket
bucket: S3 bucket name
"""
s3_client = boto3.client("s3")
items = s3_client.list_objects_v2(Bucket=bucket, Prefix=folder)
list_train = []
list_test = []
for item in items["Contents"]:
file = item["Key"].split("/")[-1]
if file.endswith(".csv"):
list_train.append(file)
# iterate over a copy so that items can be removed from list_train safely
for file_name in list(list_train):
    for pattern in test_set_names:
        if pattern in file_name:
            list_test.append(file_name)
            list_train.remove(file_name)
            break
df_train = pd.concat(
[
|
pd.read_csv(f"s3://{bucket}/{folder}/{item}")
|
pandas.read_csv
|
import glob
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from matplotlib.lines import Line2D
def plot_metrics(names='default', logs='training', save_name='default', timesteps=1000000, ci='sd', rolling=10):
name = save_name
names_print = names
if logs == 'training':
rolling = rolling
if logs == 'evaluation':
rolling = None
logging_dir = []
for i in names:
logging_dir.append('./logs/benchmark/' + i)
all_in_one = []
ci = ci
y = 0
timestep_lim = timesteps
for z in logging_dir:
if logs == 'training':
path = glob.glob(os.path.join(z, "*", "monitor*"))
if logs == 'evaluation':
path = glob.glob(os.path.join(z, "*", "*", "monitor*"))
path.sort()
all_in_one.append(pd.DataFrame({"reward": [], "timestep": [], "crash": [], "supervised": [], "intervention": [],
"safelyDone": [], "timeout": [], "time": []}))
for i in range(0, len(path)):
run_reward = pd.read_csv(path[i], skiprows=2, names=["reward", "timestep", "crash", "supervised",
"intervention", "safelyDone", "timeout", "time", "TF",
"distance"])
run_reward.loc[:, "repl"] = i
run_reward.loc[:, "intervention"] = run_reward.loc[:, "intervention"] / run_reward.loc[:, "timestep"]
run_reward.loc[:, "timestep2"] = run_reward.loc[:, "timestep"]
run_reward.loc[:, "crash2"] = run_reward.loc[:, "crash"]
run_reward.loc[:, "Episode ended by"] = run_reward.loc[:, "crash"]
run_reward.loc[:, "timestepSafe"] = run_reward.loc[:, "timestep"] * run_reward.loc[:, "safelyDone"]
for jj in range(0, len(run_reward)):
if run_reward.loc[jj, "timestepSafe"] == 0:
run_reward.loc[jj, "timestepSafe"] = np.nan
for jj in range(0, len(run_reward)):
if run_reward.loc[jj, "Episode ended by"] == 0:
if run_reward.loc[jj, "safelyDone"]:
run_reward.loc[jj, "Episode ended by"] = "Reach target"
if run_reward.loc[jj, "Episode ended by"] == 1:
run_reward.loc[jj, "Episode ended by"] = "Crash"
if run_reward.loc[jj, "Episode ended by"] == 0:
run_reward.loc[jj, "Episode ended by"] = "Time out"
run_reward = run_reward.reset_index()
del run_reward['index']
for jj in range(0, len(run_reward)-1):
run_reward.loc[jj+1, "timestep"] += run_reward.loc[jj, "timestep"]
for jj in range(0, len(run_reward)-1):
run_reward.loc[jj+1, "crash2"] += run_reward.loc[jj, "crash2"]
all_in_one[y] = all_in_one[y].append(run_reward)
all_in_one[y] = all_in_one[y].sort_values(by=["timestep"])
if rolling is not None:
all_in_one[y]["reward"] = all_in_one[y]["reward"].rolling(rolling).mean()
all_in_one[y]["crash"] = all_in_one[y]["crash"].rolling(rolling).mean()
all_in_one[y]["crash2"] = all_in_one[y]["crash2"].rolling(rolling).mean()
all_in_one[y]["supervised"] = all_in_one[y]["supervised"].rolling(rolling).mean()
all_in_one[y]["timestep2"] = all_in_one[y]["timestep2"].rolling(rolling).mean()
all_in_one[y]["timestepSafe"] = all_in_one[y]["timestepSafe"].rolling(rolling).mean()
all_in_one[y]["intervention"] = all_in_one[y]["intervention"].rolling(rolling).mean()
all_in_one[y]["safelyDone"] = all_in_one[y]["safelyDone"].rolling(rolling).mean()
all_in_one[y]["timeout"] = all_in_one[y]["timeout"].rolling(rolling).mean()
all_in_one[y]["timestep"] = (all_in_one[y]["timestep"] / 20000)
all_in_one[y]["timestep"] = all_in_one[y]["timestep"].astype(int) * 20000
y += 1
if logs == 'training':
#y = 0
#for data in all_in_one:
# plt.figure()
# sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
# sns.kdeplot(data=data, x="timestep", hue="Episode ended by", multiple="fill")
# plt.xlabel('Time step')
# plt.ylabel('Density')
# plt.xlim(0, timestep_lim)
# plt.legend(loc=4, borderaxespad=0, labels=['Reach Target', 'Time out', 'Crash'], title='Episode ended by')
# plt.savefig("art/plots/3in1/" + names[y] + ".png", dpi=100, transparent=True)
# plt.show()
# y += 1
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
for data in all_in_one:
sns.lineplot(data=data, x="timestep", y="crash2", err_style='band', ci=ci, estimator='mean')
plt.xlabel('Time step')
plt.ylabel('Total number of crashes')
plt.legend(labels=names_print)
plt.xlim(0, timestep_lim)
plt.ylim(0, 6000)
plt.savefig("art/plots/total_crashes" + name + ".png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
for data in all_in_one:
sns.lineplot(data=data, x="timestep", y="intervention", err_style='band', ci=ci, estimator='mean')
plt.xlabel('Time step')
plt.ylabel('Probability of a supervisor intervention per time step')
plt.legend(labels=names)
plt.ylim(bottom=-0.05, top=1.05)
plt.xlim(0, timestep_lim)
plt.savefig("art/plots/intervention" + name + ".png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
for data in all_in_one:
sns.lineplot(data=data, x="timestep", y="timestep2", err_style='band', ci=ci, estimator='mean')
plt.xlabel('time step')
plt.ylabel('Number of time steps until episode ends')
plt.legend(labels=names)
plt.xlim(0, timestep_lim)
plt.savefig("art/plots/timestepepisode" + name + ".png", dpi=100, transparent=True)
plt.show()
#plt.figure()
#sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
#for data in all_in_one:
# sns.lineplot(data=data, x="timestep", y="timestepSafe", err_style='band', ci=ci, estimator='mean')
#plt.xlabel('time step')
#plt.ylabel('Number of time steps until episode ends safely')
#plt.legend(labels=names)
#plt.xlim(0, timestep_lim)
#plt.savefig("art/plots/timestepSafeEpisode.png", dpi=100, transparent=True)
#plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
for data in all_in_one:
sns.lineplot(data=data, x="timestep", y="supervised", err_style='band', ci=ci, estimator='mean')
plt.xlabel('time step')
plt.ylabel('Probability of a supervisor intervention')
plt.legend(labels=names)
plt.ylim(bottom=-0.05, top=1.05)
plt.xlim(0, timestep_lim)
plt.savefig("art/plots/supervisor" + name + ".png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True,
'axes.edgecolor': 'black'})
for data in all_in_one:
sns.lineplot(data=data, x="timestep", y="crash", err_style='band', ci=ci)
plt.xlabel('time step')
plt.ylabel('probability of crashing')
plt.legend(labels=names_print)
plt.ylim(bottom=-0.05, top=1.05)
plt.xlim(0, timestep_lim)
plt.savefig("art/plots/crash" + name + ".png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True,
'axes.edgecolor': 'black'})
for data in all_in_one:
sns.lineplot(data=data, x="timestep", y="safelyDone", err_style='band', ci=ci)
plt.xlabel('time step')
plt.ylabel('probability of reaching the target safely')
plt.legend(labels=names_print)
plt.ylim(bottom=-0.05, top=1.05)
plt.xlim(0, timestep_lim)
plt.savefig("art/plots/reachtarget" + name + ".png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True,
'axes.edgecolor': 'black'})
for data in all_in_one:
sns.lineplot(data=data, x="timestep", y="timeout", err_style='band', ci=ci)
plt.xlabel('time step')
plt.ylabel('probability of reaching the max # of time steps')
plt.legend(labels=names_print)
plt.ylim(bottom=-0.05, top=1.05)
plt.xlim(0, timestep_lim)
plt.savefig("art/plots/timeout" + name + ".png", dpi=100, transparent=True)
plt.show()
if logs == 'evaluation':
for i in range(0, len(logging_dir)):
ended_by_all = []
ended_by = np.array([all_in_one[i]["Episode ended by"], all_in_one[i]["repl"],
[names[i]]*len(all_in_one[i]["Episode ended by"])])
ended_by = np.transpose(ended_by)
ended_by_all.append(ended_by)
ended_by_all = np.concatenate(ended_by_all)
ended_by_all = pd.DataFrame(ended_by_all)
ended_by_all = ended_by_all.groupby(0)[1].value_counts().unstack()
ended_by_all = np.transpose(ended_by_all)
number_episodes = ended_by_all.sum(axis=1)
ended_by_all['Crash'] = ended_by_all['Crash'] / number_episodes
ended_by_all['Reach target'] = ended_by_all['Reach target'] / number_episodes
ended_by_all['Time out'] = ended_by_all['Time out'] / number_episodes
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
sns.boxplot(data=ended_by_all)
plt.xlabel(names[i])
plt.ylim(bottom=0, top=1)
plt.ylabel('Density')
plt.savefig("art/plots/3in1/eval_" + names[i] + '_' + name + ".png", dpi=100, transparent=True)
plt.show()
def plot_mbo(name=None):
front = pd.read_csv('./logs/mbo/' + name + '/iterations/front.csv', header=None)
front = front[(len(front) - 100):len(front)]
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
front[1] = front[1] * (-1)
sns.lineplot(data=front, x=0, y=1)
front1 = pd.read_csv('./logs/mbo/' + name + '/initial_design/initial_design.csv', header=None)
front2 = pd.read_csv('./logs/mbo/' + name + '/iterations/mbo_result.csv', header=None)
frames = [front1, front2]
front =
|
pd.concat(frames)
|
pandas.concat
|
import MSfingerprinter.decoder as decoder
import MSfingerprinter.preprocessing as preprocessing
import MSfingerprinter.pysax as SAX
import MSfingerprinter.periodicityfinder as periodicityfinder
import MSfingerprinter.maxsubpatterntree as maxsubpatterntree
import MSfingerprinter.datacube as datacube
import MSfingerprinter.miningperiodicpatterns as miningperiodicpatterns
import MSfingerprinter.maxsubpatternhitset as maxsubpatternhitset
import MSfingerprinter.reactiontreecompletespace as reactiontreecompletespace
import MSfingerprinter.postprocessing as postprocessing
import MSfingerprinter.comparetrees as comparetrees
import matplotlib.pyplot as plt
import multiprocessing
import os
import math
import traceback
import sys
import ntpath
import csv
import itertools
import re
import sys
import pandas as pd
import treelib
import numpy as np
from collections import defaultdict
import gc
import timeit
# np.set_printoptions(threshold=np.inf)
class MSfingerprinter(object):
# MSfingerprinter object instance, constructor method
def __init__(self):
super(MSfingerprinter, self).__init__()
############################################################
#
# Postprocessing
#
###############################################################
def doPostProcessingSingleAlldiffs(self, resultfile1, repeatunitarray):
cwd = os.getcwd()
directory = cwd + '/MSfingerprinter/resultsPeriodicity/'
# outputfiles postprocessing
outfilemasspatterns = os.path.join(directory, 'postprocessedpatternsMASS' + resultfile1.rstrip('.txt') + 'MASSPATTERNS.csv')
# outfile massdifferences between and within patterns Mass and Freq
outfilepatternswithinmass = os.path.join('postprocessed' + resultfile1.rstrip('.txt') + 'DIFFWITHINPATTERNSMASS.txt')
# save similarity/dissimilarity of freq vs. mass space to file
periodicmasses, periodicfreqs = postprocessing.openresultssingle(resultfile1)
# gets from results periodicmasses and periodicfreqs
postperiodicmasses = postprocessing.retrieveperiodicmasses(periodicmasses)
return postperiodicmasses
def searchinTreesgroundtruthstart(self, meaningfuldiffpatterns, path, extensions):
# for later analysis saves patterns and rootnodes of trees found in initiatortrees
patternsplusrootnodes = []
nodesdetected = 0
treefilearray = list(comparetrees.find_files(path, extensions))
# for each tree
for tree in treefilearray:
nametree = ntpath.basename(tree).rstrip('.json')
data = comparetrees.getdata(tree)
nodesfound = []
counter = 0
parent = None
child = None
reactiontreeinstance = treelib.Tree()
reactiontreeinstance = comparetrees.retrievenodes(data, counter, parent, child, reactiontreeinstance)
for i in range(len(meaningfuldiffpatterns)):
# get for each meaningfulpattern the root and target
rootsubtree = round(meaningfuldiffpatterns[i][3], 6)
targetsubtree = round(meaningfuldiffpatterns[i][1],6)
pattern = meaningfuldiffpatterns[i][4]
try:
rootnode, stoichiometformula = comparetrees.searchMasspatterngroundtruth(reactiontreeinstance, rootsubtree, targetsubtree, pattern, nametree)
if rootnode != None and stoichiometformula != None:
print('original root value without rounding')
print(meaningfuldiffpatterns[i][3])
print('original target value without rounding')
print(meaningfuldiffpatterns[i][1])
patternsplusrootnodes.append([rootnode, rootsubtree, targetsubtree, stoichiometformula])
nodesfound.append(pattern)
except:
continue
print('all trees in directory : ' + path.rstrip('completeInitiatortrees/') + 'resultsubtrees/')
return patternsplusrootnodes
def searchinTreesgroundtruth(self, meaningfuldiffpatterns, path, extensions):
# for later analysis saves patterns and rootnodes of trees found in initiatortrees
patternsplusrootnodes = []
nodesdetected = 0
treefilearray = list(comparetrees.find_files(path, extensions))
# for each tree
for tree in treefilearray:
nametree = ntpath.basename(tree).rstrip('.json')
data = comparetrees.getdata(tree)
nodesfound = []
counter = 0
parent = None
child = None
reactiontreeinstance = treelib.Tree()
reactiontreeinstance = comparetrees.retrievenodes(data, counter, parent, child, reactiontreeinstance)
for i in range(len(meaningfuldiffpatterns)):
# get for each meaningfulpattern the root and target
rootsubtree = round(meaningfuldiffpatterns[i][1], 6)
targetsubtree = round(meaningfuldiffpatterns[i][3],6)
pattern = meaningfuldiffpatterns[i][4]
try:
rootnode, stoichiometformula = comparetrees.searchMasspatterngroundtruth(reactiontreeinstance, rootsubtree, targetsubtree, pattern, nametree)
if rootnode != None and stoichiometformula != None:
print('original root value without rounding')
print(meaningfuldiffpatterns[i][1])
print('original target value without rounding')
print(meaningfuldiffpatterns[i][3])
patternsplusrootnodes.append([rootnode, rootsubtree, targetsubtree, stoichiometformula])
nodesfound.append(pattern)
except:
continue
print('all trees in directory : ' + path.rstrip('completeInitiatortrees/') + 'resultsubtrees/')
return patternsplusrootnodes
def constructInitiatorTreescompletespace(self, Initiator, nodelist, massrange):
counter = 0
previousname = None
currenttrees = []
boolean = True
initiatorname = Initiator[0]
maxlevel, maxleveltwo = reactiontreecompletespace.getmaxtreelevel(nodelist, massrange)
treelen = len(Initiator)
for i in Initiator:
trees = reactiontreecompletespace.createRootsInitiators(Initiator)
counter += 1
while boolean == True:
for i in trees:
if len(currenttrees) == treelen:
trees = currenttrees
currenttrees = []
continue
else:
reactiontreeinstance, counter, previousname = reactiontreecompletespace.createnextlevelinitiator(nodelist, i, counter, maxlevel, maxleveltwo, previousname, initiatorname)
if reactiontreeinstance != None and treelen > 0:
currenttrees.append(reactiontreeinstance)
else:
treelen = treelen - 1
return
return
###############################################################################
#
# Preprocessing
#
#################################################################################3
# preprocessing data for Periodicityfindingalgorithms, returns function (i.e. intensity values)
# masspoints corresponding m/z values, Mass space
def preprocessMSSpectraMass(self, filename):
print('preprocessing raw data of ...'+ filename)
originaldata, sampleID = decoder.readdataframe(filename)
masspoints, function = preprocessing.FunctionMSMass(originaldata)
return function, masspoints
# preprocessing data for Periodicityfindingalgorithms, returns function (i.e. intensity values)
# Frequencyspace
def preprocessMSSpectraFreq(self, filename):
print('preprocessing raw data of ...'+ filename)
originaldata, sampleID = decoder.readdataframe(filename)
freqpoints, function = preprocessing.FunctionMSFreq(originaldata)
return function, freqpoints
def preprocess_directoryCSVfreqsampling(self, path, extensions, sampling, nprocesses=None):
filenames_to_preprocess = []
colnames = []
counter = 0
# Try to use the maximum amount of processes if not given.
try:
nprocesses = nprocesses or multiprocessing.cpu_count()
except NotImplementedError:
nprocesses = 1
else:
nprocesses = 1 if nprocesses <= 0 else nprocesses
for filename, _ in decoder.find_files(path, extensions):
filenames_to_preprocess.append(filename)
try:
filename = filename
except ValueError:
pass
# print('preprocessing raw data of ...'+ filename)
originaldata, sampleID = decoder.readdataframecsvfreqsampling(filename, sampling)
colnames.append(sampleID)
# three digits to make join possible
# originaldata['freq'] = originaldata['freq'].round(0)
if counter == 0:
originaldataframe = originaldata
counter+= 1
else:
originaldata = originaldata
merged = pd.concat([originaldataframe, originaldata], axis=1)
# merged = pd.merge(originaldataframe, originaldata, on='freq', how='outer')
originaldataframe = merged
originaldataframe.fillna(value=np.nan, inplace=True)
originaldataframe.columns = colnames
# originaldataframe.sort(columns='freq', inplace=True)
originaldataframe = originaldataframe.apply(np.log)
return originaldataframe
def preprocess_directoryCSVmasssampling(self, path, extensions, sampling, nprocesses=None):
filenames_to_preprocess = []
colnames = []
counter = 0
# Try to use the maximum amount of processes if not given.
try:
nprocesses = nprocesses or multiprocessing.cpu_count()
except NotImplementedError:
nprocesses = 1
else:
nprocesses = 1 if nprocesses <= 0 else nprocesses
for filename, _ in decoder.find_files(path, extensions):
filenames_to_preprocess.append(filename)
try:
filename = filename
except ValueError:
pass
# print('preprocessing raw data of ...'+ filename)
originaldata, sampleID = decoder.readdataframecsvmasssampling(filename, sampling)
colnames.append(sampleID)
# three digits to make join possible
# originaldata['mass'] = originaldata['mass'].round(3)
if counter == 0:
originaldataframe = originaldata
counter+= 1
else:
originaldata = originaldata
merged = pd.concat([originaldataframe, originaldata], axis=1)
# merged = pd.merge(originaldataframe, originaldata, on='freq', how='outer')
originaldataframe = merged
originaldataframe.fillna(value=999, inplace=True)
originaldataframe.columns = colnames
# originaldataframe.sort(columns='freq', inplace=True)
originaldataframe = originaldataframe.apply(np.log)
return originaldataframe
def getmaxminmass(self, path, extensions, nprocesses=None):
filenames_to_preprocess = []
minimum = 10000000000000000000000
maximum = 0
counter = 0
# Try to use the maximum amount of processes if not given.
try:
nprocesses = nprocesses or multiprocessing.cpu_count()
except NotImplementedError:
nprocesses = 1
else:
nprocesses = 1 if nprocesses <= 0 else nprocesses
for filename, _ in decoder.find_files(path, extensions):
filenames_to_preprocess.append(filename)
try:
filename = filename
except ValueError:
pass
print('preprocessing raw data of ...'+ filename)
originaldata, sampleID = decoder.readdataframe(filename)
# three digits to make join possible
originaldata['mass'] = originaldata['mass'].round(3)
maxmass = max(originaldata['mass'])
minmass = min(originaldata['mass'])
if maxmass > maximum:
maximum = maxmass
if minmass < minimum:
minimum = minmass
return maximum, minimum
#######################################################3
#
# Entropy based feature ranking
#
###############################################
# each feature is removed and total entropy is calculated
def RankFeatures(self, clusteringinputdata, rangenum):
featurenames = np.arange(len(clusteringinputdata))
bestfeatures = []
# produces a matrix with each column being one feature
X = np.array(clusteringinputdata)
totallengthfeatures = range(len(featurenames)//rangenum)
rangestart = 0
for i in totallengthfeatures:
rangeend = rangestart+rangenum
Xslice = X[rangestart:rangeend,0:10]
featurenamesslice = featurenames[rangestart:rangeend]
rankedfeatures = clustering.doRankfeatures(Xslice, featurenamesslice, rangestart, rangeend)
rangestart = rangeend
bestfeatureofsubset = rankedfeatures[-1]
bestfeatures.append(bestfeatureofsubset)
return bestfeatures
def doStats(self, entropyvectormass, entropyvectorfreq):
freqstats = clustering.statsentropy(entropyvectorfreq, 'freq')
massstats = clustering.statsentropy(entropyvectormass, 'mass')
correlatefreqmassentropy(rankedentropyvectormass, rankedentropyvectorfreq)
return freqstats, massstats
######################################################
#
# Time series standardization with SAX
#
########################################################
def standardizeTimeSeries(self, MSarray, timepoints):
# this uses the Symbolic aggregate approximation for time series discretization
# https://github.com/dolaameng/pysax/blob/master/Tutorial-SAX%20(Symbolic%20Aggregate%20Approximation).ipynb
# MS = decoder.readMStimedomaintransient(filename)
# MSarray = np.asarray(MS.columns.tolist(), dtype=float)
print('length original data')
print(len(MSarray))
# this does symbolization for each window, no overlap of windows
sax = SAX.SAXModel()
# standardizes the time series whiten accross windows
normalizedMSarray = sax.whiten(MSarray)
print('mean and standarddeviation of standardized MS: ')
print(normalizedMSarray.mean(), normalizedMSarray.std())
# saves MSarray (TS) to csv for later cube generation
dforiginalTS = pd.DataFrame(normalizedMSarray)
# time points are m/z values of the function
timepoints = range(len(normalizedMSarray))
dftime =
|
pd.DataFrame(timepoints)
|
pandas.DataFrame
|
import pandas as pd
import tensorflow
from tensorflow import keras
from keras import optimizers
from keras import regularizers
from keras.models import Sequential
from keras.models import Model
from keras.layers import Dense
from keras.layers import Dropout
from keras.callbacks import ModelCheckpoint
data_test = 'test.csv'
data_train = 'train.csv'
df_train = pd.read_csv(data_train)
df_test = pd.read_csv(data_test)
season_train = pd.get_dummies(df_train.season, prefix ='season')
mnth_train = pd.get_dummies(df_train.mnth, prefix ='mnth')
weekday_train = pd.get_dummies(df_train.weekday, prefix ='weekday')
hr_train = pd.get_dummies(df_train.hr, prefix ='hr')
yr_train = pd.get_dummies(df_train.yr, prefix ='yr')
weathersit_train = pd.get_dummies(df_train.weathersit, prefix ='weathersit')
workingday_train = pd.get_dummies(df_train.workingday, prefix ='workingday')
holiday_train = pd.get_dummies(df_train.holiday, prefix ='holiday')
season_test = pd.get_dummies(df_test.season, prefix ='season')
mnth_test = pd.get_dummies(df_test.mnth, prefix ='mnth')
weekday_test = pd.get_dummies(df_test.weekday, prefix ='weekday')
hr_test = pd.get_dummies(df_test.hr, prefix ='hr')
yr_test = pd.get_dummies(df_test.yr, prefix ='yr')
weathersit_test = pd.get_dummies(df_test.weathersit, prefix ='weathersit')
workingday_test = pd.get_dummies(df_test.workingday, prefix ='workingday')
holiday_test = pd.get_dummies(df_test.holiday, prefix ='holiday')
atmosphere_train = df_train[['temp','atemp','hum','windspeed']]
atmosphere_test = df_test[['temp','atemp','hum','windspeed']]
df_test['weathersit_4']=0
missing_weathersit = df_test[['weathersit_4']]
ohe_test = pd.concat([hr_test,weekday_test,mnth_test,yr_test,season_test,holiday_test,workingday_test,weathersit_test,missing_weathersit,atmosphere_test], axis=1)
ohe_train =
|
pd.concat([hr_train,weekday_train,mnth_train,yr_train,season_train,holiday_train,workingday_train,weathersit_train,atmosphere_train], axis=1)
|
pandas.concat
|
## 1. Introduction ##
import pandas as pd
happiness2015 = pd.read_csv("World_Happiness_2015.csv")
happiness2016 = pd.read_csv("World_Happiness_2016.csv")
happiness2017= pd.read_csv("World_Happiness_2017.csv")
happiness2015['Year'] = 2015
happiness2016['Year'] = 2016
happiness2017['Year'] = 2017
## 2. Combining Dataframes with the Concat Function ##
head_2015 = happiness2015[['Country','Happiness Score', 'Year']].head(3)
head_2016 = happiness2016[['Country','Happiness Score', 'Year']].head(3)
concat_axis0 = pd.concat([head_2015, head_2016], axis=0)
concat_axis1 = pd.concat([head_2015, head_2016], axis=1)
print(concat_axis0)
print(concat_axis1)
question1 = concat_axis0.shape[0]
print(question1)
question2 = concat_axis1.shape[0]
print(question2)
## 3. Combining Dataframes with the Concat Function Continued ##
head_2015 = happiness2015[['Year','Country','Happiness Score', 'Standard Error']].head(4)
head_2016 = happiness2016[['Country','Happiness Score', 'Year']].head(3)
concat_axis0 = pd.concat([head_2015, head_2016])
print(concat_axis0)
rows = concat_axis0.shape[0]
columns = concat_axis0.shape[1]
print(rows)
print(columns)
## 4. Combining Dataframes with Different Shapes Using the Concat Function ##
head_2015 = happiness2015[['Year','Country','Happiness Score', 'Standard Error']].head(4)
head_2016 = happiness2016[['Country','Happiness Score', 'Year']].head(3)
concat_update_index = pd.concat([head_2015, head_2016], ignore_index=True)
print(concat_update_index)
## 5. Joining Dataframes with the Merge Function ##
three_2015 = happiness2015[['Country','Happiness Rank','Year']].iloc[2:5]
three_2016 = happiness2016[['Country','Happiness Rank','Year']].iloc[2:5]
merged = pd.merge(left=three_2015, right= three_2016, on='Country')
print(merged)
## 6. Joining on Columns with the Merge Function ##
three_2015 = happiness2015[['Country','Happiness Rank','Year']].iloc[2:5]
three_2016 = happiness2016[['Country','Happiness Rank','Year']].iloc[2:5]
merged = pd.merge(left=three_2015, right=three_2016, on='Country')
merged_left = pd.merge(left=three_2015, right=three_2016, on='Country', how='left')
merged_left_updated = pd.merge(left=three_2016, right= three_2015, on='Country', how='left')
print(merged_left)
print(merged_left_updated)
## 7. Left Joins with the Merge Function ##
three_2015 = happiness2015[['Country','Happiness Rank','Year']].iloc[2:5]
three_2016 = happiness2016[['Country','Happiness Rank','Year']].iloc[2:5]
merged =
|
pd.merge(left=three_2015, right=three_2016, how='left', on='Country')
|
pandas.merge
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from experiment_handler.object_recognition.object_detection_reader import read_filtered_object_detection_results, read_object_detections
from experiment_handler.label_data_reader import read_experiment_phases, read_location_labels, read_activity_labels_from_eyetracker_labelling
def object_rec_by_label_per_location(exp_root, model):
phases = read_experiment_phases(exp_root)
start = phases['assembly'][0]
end = phases['disassembly'][1]
object_recognitions = read_filtered_object_detection_results(exp_root, model, start, end, "video")
loc_labels = read_location_labels(exp_root)
print(loc_labels)
infos = []
for p in loc_labels.keys():
detections_for_person = object_recognitions.loc[object_recognitions["person_id"] == p]
for label in loc_labels[p]:
during_label = detections_for_person.loc[detections_for_person["timestamp"].between(label["start"], label["end"])]
current = {
"person_id": p,
"location": label["location"],
"duration": label["end"] - label["start"],
"screwdriver": during_label[during_label["label"] == "screwdriver"].size,
"power_drill": during_label[during_label["label"] == "power_drill"].size
}
infos.append(current)
infos =
|
pd.DataFrame(infos)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import itertools
from datetime import datetime
import numpy as np
import pandas as pd
import peewee as pw
from api_helpers.api_processing_helpers import process_text
# Training NMF residual statistics
# - from 8_*.ipynb (after last histogram of section 8)
training_residuals_stats = {
"mean": 0.8877953271898069,
"25%": 0.8537005649638325,
"50%": 0.9072421433186082,
"max": 0.9948739031036548,
}
# Acceptable percent diff between training and inference residual statistics
training_residual_stat_margin = 5
def add_datepart(df):
df[["year", "month", "day"]] = df["url"].str.extract(
r"/(\d{4})/([a-z]{3})/(\d{2})/"
)
d = {"jan": 1, "feb": 2, "nov": 11, "dec": 12, "sep": 9, "oct": 10}
df["month"] = df["month"].map(d).astype(int)
df["date"] = pd.to_datetime(df[["year", "month", "day"]])
df["month"] = df["month"].map({v: k.title() for k, v in d.items()})
df["weekday"] = df["date"].dt.day_name()
df["week_of_month"] = df["date"].apply(lambda d: (d.day - 1) // 7 + 1)
return df
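# Hedged usage sketch (added for illustration, not part of the original module):
# exercises add_datepart on a single hypothetical URL in the /YYYY/mon/DD/ form
# that the regex above expects. _demo_add_datepart is an assumed helper name.
def _demo_add_datepart():
    example_df = pd.DataFrame(
        {"url": ["https://example.com/2020/jan/05/some-article"]}
    )
    example_df = add_datepart(example_df)
    # Expect date 2020-01-05, month label 'Jan', weekday 'Sunday', week_of_month 1.
    return example_df[["date", "month", "weekday", "week_of_month"]]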
def get_residual(pipe, new_texts):
A = pipe.named_steps["vectorizer"].transform(new_texts)
W = pipe.named_steps["nmf"].components_
H = pipe.named_steps["nmf"].transform(A)
f"A={A.toarray().shape}, W={W.shape}, H={H.shape}"
r = np.zeros(A.shape[0])
for row in range(A.shape[0]):
r[row] = np.linalg.norm(A[row, :] - H[row, :].dot(W), "fro")
return r
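# Hedged sketch (added): a minimal pipeline with the step names "vectorizer" and
# "nmf" that get_residual expects. The toy corpus, hyperparameters and the helper
# name _demo_get_residual are assumptions, not part of the original project.
def _demo_get_residual():
    # Local imports keep this optional sketch self-contained.
    from sklearn.decomposition import NMF
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.pipeline import Pipeline

    corpus = [
        "football match final score",
        "parliament election vote count",
        "striker scores twice in the match",
    ]
    pipe = Pipeline(
        [
            ("vectorizer", TfidfVectorizer()),
            ("nmf", NMF(n_components=2, init="nndsvda", max_iter=500)),
        ]
    )
    pipe.fit(corpus)
    # One residual per unseen document; larger values mean the document is
    # reconstructed poorly by the learned topics.
    return get_residual(pipe, ["late goal decides the match"])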
def get_documentwise_residuals(
pipe, df, training_res_stats, training_residual_stat_margin
):
# Get residuals
unseen_residuals_series = pd.Series(
get_residual(pipe, df["processed_text"].tolist()), index=df.index
)
# if len(df) == 1:
# unseen_residuals_valid_series = pd.Series(
# [False] * len(df), index=df.index
# )
# print(
# f"Single unseen observation detected. "
# "Set placeholder value for residual."
# )
# return [unseen_residuals_valid_series, unseen_residuals_series]
# Get statistics on residuals
training_res_stats = pd.DataFrame.from_dict(
training_residuals_stats, orient="index"
).rename(columns={0: "training"})
unseen_res_stats = (
unseen_residuals_series.describe()
.loc[["mean", "25%", "50%", "max"]]
.rename("unseen")
.to_frame()
)
df_residual_stats = training_res_stats.merge(
unseen_res_stats, left_index=True, right_index=True
)
df_residual_stats["diff_pct"] = percentage_change(
df_residual_stats["training"], df_residual_stats["unseen"]
)
# Sanity check
try:
assert (
df_residual_stats["diff_pct"] <= training_residual_stat_margin
).all()
unseen_residuals_valid_series = pd.Series(
[True] * len(df), index=df.index
)
print(
"Unseen data residual distribution within bounds compared to "
"training."
)
except AssertionError as e:
unseen_residuals_valid_series = pd.Series(
[False] * len(df), index=df.index
)
print(
f"{str(e)} - Unseen data residuals OOB compared to training. "
"Set placeholder value."
)
return [unseen_residuals_valid_series, unseen_residuals_series]
def percentage_change(col1, col2):
return ((col2 - col1) / col1) * 100
def get_top_words_per_topic(row, n_top_words=5):
return row.nlargest(n_top_words).index.tolist()
def make_predictions(df, pipe, n_top_words, n_topics_wanted, df_named_topics):
df["processed_text"] = df["text"].apply(process_text)
# Transform unseen texts with the trained ML pipeline
doc_topic = pipe.transform(df["processed_text"])
topic_words = pd.DataFrame(
pipe.named_steps["nmf"].components_,
index=[str(k) for k in range(n_topics_wanted)],
columns=pipe.named_steps["vectorizer"].get_feature_names(),
)
topic_df = (
pd.DataFrame(
topic_words.apply(
lambda x: get_top_words_per_topic(x, n_top_words), axis=1
).tolist(),
index=topic_words.index,
)
.reset_index()
.rename(columns={"index": "topic"})
.assign(topic_num=range(n_topics_wanted))
)
# for k, v in topic_df.iterrows():
# print(k, ",".join(v[1:-1]))
df_temp = (
|
pd.DataFrame(doc_topic)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 03 15:01:31 2017
@author: jdkern
"""
from __future__ import division
from sklearn import linear_model
import pandas as pd
import numpy as np
#==============================================================================
df_mwh1 = pd.read_csv('mwh_1.csv',header=0)
df_mwh2 = pd.read_csv('mwh_2.csv',header=0)
df_mwh3 = pd.read_csv('mwh_3.csv',header=0)
df_gen = pd.read_csv('generators.csv',header=0)
last_hour = df_mwh1['Time'].iloc[-1]
zonal_prices = np.zeros((last_hour,4))
zones = ['PGE_valley','PGE_bay','SCE','SDGE']
for z in zones:
z_index = zones.index(z)
z1 = df_mwh1.loc[df_mwh1['Zones']==z]
z2 = df_mwh2.loc[df_mwh2['Zones']==z]
z3 = df_mwh3.loc[df_mwh3['Zones']==z]
for i in range(0,last_hour):
h1 = z1.loc[z1['Time']==i+1]
h2 = z2.loc[z2['Time']==i+1]
h3 = z3.loc[z3['Time']==i+1]
o1 = h1.loc[h1['Value']>0]
o2 = h2.loc[h2['Value']>0]
o3 = h3.loc[h3['Value']>0]
m1 = np.max(o1.loc[:,'$/MWh'])
m2 = np.max(o2.loc[:,'$/MWh'])
m3 = np.max(o3.loc[:,'$/MWh'])
if np.isnan(m1):
m1 = 0
if np.isnan(m2):
m2 = 0
if np.isnan(m3):
m3 = 0
zonal_prices[i,z_index] = np.max((m1,m2,m3))
no_hours = last_hour
no_days = int(no_hours/24)
daily_prices = np.zeros((no_days,4))
for i in range(0,no_days):
for z in zones:
z_index = zones.index(z)
daily_prices[i,z_index] = np.mean(zonal_prices[i*24:i*24+24,z_index])
hourly =
|
pd.DataFrame(zonal_prices)
|
pandas.DataFrame
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import warnings
from collections import defaultdict
from datetime import datetime
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
List,
Optional,
Sequence,
TextIO,
Tuple,
Union,
)
import numpy as np
import pandas as pd # type: ignore
from elasticsearch.exceptions import NotFoundError
from eland.actions import PostProcessingAction
from eland.common import (
DEFAULT_PAGINATION_SIZE,
DEFAULT_PIT_KEEP_ALIVE,
DEFAULT_PROGRESS_REPORTING_NUM_ROWS,
DEFAULT_SEARCH_SIZE,
SortOrder,
build_pd_series,
elasticsearch_date_to_pandas_date,
es_api_compat,
es_version,
)
from eland.index import Index
from eland.query import Query
from eland.tasks import (
RESOLVED_TASK_TYPE,
ArithmeticOpFieldsTask,
BooleanFilterTask,
HeadTask,
QueryIdsTask,
QueryTermsTask,
SampleTask,
SizeTask,
TailTask,
)
if TYPE_CHECKING:
from numpy.typing import DTypeLike
from eland.arithmetics import ArithmeticSeries
from eland.field_mappings import Field
from eland.filter import BooleanFilter
from eland.query_compiler import QueryCompiler
from eland.tasks import Task
class QueryParams:
def __init__(self) -> None:
self.query: Query = Query()
self.sort_field: Optional[str] = None
self.sort_order: Optional[SortOrder] = None
self.size: Optional[int] = None
self.fields: Optional[List[str]] = None
self.script_fields: Optional[Dict[str, Dict[str, Any]]] = None
class Operations:
"""
A collector of the queries and selectors we apply to queries to return the appropriate results.
For example,
- a list of the field_names in the DataFrame (a subset of field_names in the index)
- a size limit on the results (e.g. for head(n=5))
- a query to filter the results (e.g. df.A > 10)
This is maintained as a 'task graph' (inspired by dask)
(see https://docs.dask.org/en/latest/spec.html)
"""
def __init__(
self,
tasks: Optional[List["Task"]] = None,
arithmetic_op_fields_task: Optional["ArithmeticOpFieldsTask"] = None,
) -> None:
self._tasks: List["Task"]
if tasks is None:
self._tasks = []
else:
self._tasks = tasks
self._arithmetic_op_fields_task = arithmetic_op_fields_task
def __constructor__(
self,
*args: Any,
**kwargs: Any,
) -> "Operations":
return type(self)(*args, **kwargs)
def copy(self) -> "Operations":
return self.__constructor__(
tasks=copy.deepcopy(self._tasks),
arithmetic_op_fields_task=copy.deepcopy(self._arithmetic_op_fields_task),
)
def head(self, index: "Index", n: int) -> None:
# Add a task that is an ascending sort with size=n
task = HeadTask(index, n)
self._tasks.append(task)
def tail(self, index: "Index", n: int) -> None:
# Add a task that is descending sort with size=n
task = TailTask(index, n)
self._tasks.append(task)
def sample(self, index: "Index", n: int, random_state: int) -> None:
task = SampleTask(index, n, random_state)
self._tasks.append(task)
def arithmetic_op_fields(
self, display_name: str, arithmetic_series: "ArithmeticSeries"
) -> None:
if self._arithmetic_op_fields_task is None:
self._arithmetic_op_fields_task = ArithmeticOpFieldsTask(
display_name, arithmetic_series
)
else:
self._arithmetic_op_fields_task.update(display_name, arithmetic_series)
def get_arithmetic_op_fields(self) -> Optional[ArithmeticOpFieldsTask]:
# get an ArithmeticOpFieldsTask if it exists
return self._arithmetic_op_fields_task
def __repr__(self) -> str:
return repr(self._tasks)
def count(self, query_compiler: "QueryCompiler") -> pd.Series:
query_params, post_processing = self._resolve_tasks(query_compiler)
# Elasticsearch _count is very efficient and so used to return results here. This means that
# data frames that have restricted size or sort params will not return valid results
# (_count doesn't support size).
# Longer term we may fall back to pandas, but this may result in loading all index into memory.
if self._size(query_params, post_processing) is not None:
raise NotImplementedError(
f"Requesting count with additional query and processing parameters "
f"not supported {query_params} {post_processing}"
)
# Only return requested field_names
fields = query_compiler.get_field_names(include_scripted_fields=False)
counts = {}
for field in fields:
body = Query(query_params.query)
body.exists(field, must=True)
field_exists_count = query_compiler._client.count(
index=query_compiler._index_pattern, body=body.to_count_body()
)["count"]
counts[field] = field_exists_count
return build_pd_series(data=counts, index=fields)
def _metric_agg_series(
self,
query_compiler: "QueryCompiler",
agg: List["str"],
numeric_only: Optional[bool] = None,
) -> pd.Series:
results = self._metric_aggs(query_compiler, agg, numeric_only=numeric_only)
if numeric_only:
return build_pd_series(results, index=results.keys(), dtype=np.float64)
else:
# If all results are float convert into float64
if all(isinstance(i, float) for i in results.values()):
dtype: "DTypeLike" = np.float64
# If all results are int convert into int64
elif all(isinstance(i, int) for i in results.values()):
dtype = np.int64
# If single result is present consider that datatype instead of object
elif len(results) <= 1:
dtype = None
else:
dtype = "object"
return build_pd_series(results, index=results.keys(), dtype=dtype)
def value_counts(self, query_compiler: "QueryCompiler", es_size: int) -> pd.Series:
return self._terms_aggs(query_compiler, "terms", es_size)
def hist(
self, query_compiler: "QueryCompiler", bins: int
) -> Tuple[pd.DataFrame, pd.DataFrame]:
return self._hist_aggs(query_compiler, bins)
def idx(
self, query_compiler: "QueryCompiler", axis: int, sort_order: str
) -> pd.Series:
if axis == 1:
# Fetch idx on Columns
raise NotImplementedError(
"This feature is not implemented yet for 'axis = 1'"
)
# Fetch idx on Index
query_params, post_processing = self._resolve_tasks(query_compiler)
fields = query_compiler._mappings.all_source_fields()
# Consider only Numeric fields
fields = [field for field in fields if (field.is_numeric)]
body = Query(query_params.query)
for field in fields:
body.top_hits_agg(
name=f"top_hits_{field.es_field_name}",
source_columns=[field.es_field_name],
sort_order=sort_order,
size=1,
)
# Fetch Response
response = query_compiler._client.search(
index=query_compiler._index_pattern, size=0, body=body.to_search_body()
)
response = response["aggregations"]
results = {}
for field in fields:
res = response[f"top_hits_{field.es_field_name}"]["hits"]
if not res["total"]["value"] > 0:
raise ValueError("Empty Index with no rows")
if not res["hits"][0]["_source"]:
# This means there are NaN Values, we skip them
# Implement this when skipna is implemented
continue
else:
results[field.es_field_name] = res["hits"][0]["_id"]
return pd.Series(results)
def aggs(
self,
query_compiler: "QueryCompiler",
pd_aggs: List[str],
numeric_only: Optional[bool] = None,
) -> pd.DataFrame:
results = self._metric_aggs(
query_compiler, pd_aggs, numeric_only=numeric_only, is_dataframe_agg=True
)
return pd.DataFrame(
results, index=pd_aggs, dtype=(np.float64 if numeric_only else None)
)
def mode(
self,
query_compiler: "QueryCompiler",
pd_aggs: List[str],
is_dataframe: bool,
es_size: int,
numeric_only: bool = False,
dropna: bool = True,
) -> Union[pd.DataFrame, pd.Series]:
results = self._metric_aggs(
query_compiler,
pd_aggs=pd_aggs,
numeric_only=numeric_only,
dropna=dropna,
es_mode_size=es_size,
)
pd_dict: Dict[str, Any] = {}
row_diff: Optional[int] = None
if is_dataframe:
# If multiple values of mode is returned for a particular column
# find the maximum length and use that to fill dataframe with NaN/NaT
rows_len = max(len(value) for value in results.values())
for key, values in results.items():
row_diff = rows_len - len(values)
# Convert np.ndarray to list
values = list(values)
if row_diff:
if isinstance(values[0], pd.Timestamp):
values.extend([pd.NaT] * row_diff)
else:
values.extend([np.NaN] * row_diff)
pd_dict[key] = values
return
|
pd.DataFrame(pd_dict)
|
pandas.DataFrame
|
import logging
import os
import time
import warnings
from datetime import date, datetime, timedelta
from io import StringIO
from typing import Dict, Iterable, List, Optional, Union
from urllib.parse import urljoin
import numpy as np
import pandas as pd
import requests
import tables
from pvoutput.consts import (
BASE_URL,
CONFIG_FILENAME,
ONE_DAY,
PV_OUTPUT_DATE_FORMAT,
RATE_LIMIT_PARAMS_TO_API_HEADERS,
)
from pvoutput.daterange import DateRange, merge_date_ranges_to_years
from pvoutput.exceptions import NoStatusFound, RateLimitExceeded
from pvoutput.utils import (
_get_param_from_config_file,
_get_response,
_print_and_log,
get_date_ranges_to_download,
sort_and_de_dupe_pv_system,
system_id_to_hdf_key,
)
_LOG = logging.getLogger("pvoutput")
class PVOutput:
"""
Attributes:
api_key
system_id
rate_limit_remaining
rate_limit_total
rate_limit_reset_time
data_service_url
"""
def __init__(
self,
api_key: str = None,
system_id: str = None,
config_filename: Optional[str] = CONFIG_FILENAME,
data_service_url: Optional[str] = None,
):
"""
Args:
api_key: Your API key from PVOutput.org.
system_id: Your system ID from PVOutput.org. If you don't have a
PV system then you can register with PVOutput.org and select
the 'energy consumption only' box.
config_filename: Optional, the filename of the .yml config file.
data_service_url: Optional. If you have subscribed to
PVOutput.org's data service then add the data service URL here.
This string must end in '.org'.
"""
self.api_key = api_key
self.system_id = system_id
self.rate_limit_remaining = None
self.rate_limit_total = None
self.rate_limit_reset_time = None
self.data_service_url = data_service_url
# Set from config file if None
for param_name in ["api_key", "system_id"]:
if getattr(self, param_name) is None:
try:
param_value_from_config = _get_param_from_config_file(
param_name, config_filename
)
except Exception as e:
msg = (
"Error loading configuration parameter {param_name}"
" from config file {filename}. Either pass"
" {param_name} into PVOutput constructor, or create"
" config file {filename}. {exception}".format(
param_name=param_name, filename=CONFIG_FILENAME, exception=e
)
)
print(msg)
_LOG.exception(msg)
raise
setattr(self, param_name, param_value_from_config)
# Convert to strings
setattr(self, param_name, str(getattr(self, param_name)))
# Check for data_service_url
if self.data_service_url is None:
try:
self.data_service_url = _get_param_from_config_file(
"data_service_url", config_filename
)
except KeyError:
pass
except FileNotFoundError:
pass
if self.data_service_url is not None:
if not self.data_service_url.strip("/").endswith(".org"):
raise ValueError("data_service_url must end in '.org'")
def search(
self,
query: str,
lat: Optional[float] = None,
lon: Optional[float] = None,
include_country: bool = True,
**kwargs
) -> pd.DataFrame:
"""Search for PV systems.
Some quirks of the PVOutput.org API:
- The maximum number of results returned by PVOutput.org is 30.
If the number of returned results is 30, then there is no
indication of whether there are exactly 30 search results,
or if there are more than 30. Also, there is no way to
request additional 'pages' of search results.
- The maximum search radius is 25km
Args:
query: string, see https://pvoutput.org/help.html#search
e.g. '5km'.
lat: float, e.g. 52.0668589
lon: float, e.g. -1.3484038
include_country: bool, whether or not to include the country name
with the returned postcode.
Returns:
pd.DataFrame, one row per search results. Index is PV system ID.
Columns:
name,
system_DC_capacity_W,
address, # If `include_country` is True then address is
# 'country> <postcode>',
# else address is '<postcode>'.
orientation,
num_outputs,
last_output,
panel,
inverter,
distance_km,
latitude,
longitude
"""
api_params = {"q": query, "country": int(include_country)}
if lat is not None and lon is not None:
api_params["ll"] = "{:f},{:f}".format(lat, lon)
pv_systems_text = self._api_query(service="search", api_params=api_params, **kwargs)
pv_systems = pd.read_csv(
StringIO(pv_systems_text),
names=[
"name",
"system_DC_capacity_W",
"address",
"orientation",
"num_outputs",
"last_output",
"system_id",
"panel",
"inverter",
"distance_km",
"latitude",
"longitude",
],
index_col="system_id",
)
return pv_systems
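# Hedged usage sketch (added): how the class might be driven end to end, assuming
# a valid API key and system id (the values below are placeholders).
#   pv = PVOutput(api_key="YOUR_API_KEY", system_id="12345")
#   nearby = pv.search("5km", lat=52.0668589, lon=-1.3484038)
#   status = pv.get_status(pv_system_id=nearby.index[0], date="20190101")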
def get_status(
self, pv_system_id: int, date: Union[str, datetime], historic: bool = True, **kwargs
) -> pd.DataFrame:
"""Get PV system status (e.g. power generation) for one day.
The returned DataFrame will be empty if the PVOutput API
returns 'status 400: No status found'.
Args:
pv_system_id: int
date: str in format YYYYMMDD; or datetime
(localtime of the PV system)
Returns:
pd.DataFrame:
index: datetime (DatetimeIndex, localtime of the PV system)
columns: (all np.float64):
cumulative_energy_gen_Wh,
energy_efficiency_kWh_per_kW,
instantaneous_power_gen_W,
average_power_gen_W,
power_gen_normalised,
energy_consumption_Wh,
power_demand_W,
temperature_C,
voltage
"""
_LOG.info("system_id %d: Requesting system status for %s", pv_system_id, date)
date = date_to_pvoutput_str(date)
_check_date(date)
api_params = {
"d": date, # date, YYYYMMDD, localtime of the PV system
"h": int(historic == True), # We want historical data.
"limit": 288, # API limit is 288 (num of 5-min periods per day).
"ext": 0, # Extended data; we don't want extended data.
"sid1": pv_system_id, # SystemID.
}
try:
pv_system_status_text = self._api_query(
service="getstatus", api_params=api_params, **kwargs
)
except NoStatusFound:
_LOG.info("system_id %d: No status found for date %s", pv_system_id, date)
pv_system_status_text = ""
# See https://pvoutput.org/help.html#api-getstatus but make sure
# you read the 'History Query' subsection, as a historical query
# has slightly different return columns compared to a non-historical
# query!
columns = (
[
"cumulative_energy_gen_Wh",
"energy_efficiency_kWh_per_kW",
"instantaneous_power_gen_W",
"average_power_gen_W",
"power_gen_normalised",
"energy_consumption_Wh",
"power_demand_W",
"temperature_C",
"voltage",
]
if historic
else [
"cumulative_energy_gen_Wh",
"instantaneous_power_gen_W",
"energy_consumption_Wh",
"power_demand_W",
"power_gen_normalised",
"temperature_C",
"voltage",
]
)
pv_system_status = pd.read_csv(
StringIO(pv_system_status_text),
lineterminator=";",
names=["date", "time"] + columns,
parse_dates={"datetime": ["date", "time"]},
index_col=["datetime"],
dtype={col: np.float64 for col in columns},
).sort_index()
return pv_system_status
def get_batch_status(
self,
pv_system_id: int,
date_to: Optional[Union[str, datetime]] = None,
max_retries: Optional[int] = 1000,
**kwargs
) -> Union[None, pd.DataFrame]:
"""Get batch PV system status (e.g. power generation).
The returned DataFrame will be empty if the PVOutput API
returns 'status 400: No status found'.
Data returned is limited to the last 366 days per request.
To retrieve older data, use the date_to parameter.
The PVOutput getbatchstatus API is asynchronous. When it's first
called, it replies to say 'accepted'. This function will then
wait a minute and call the API again to see if the data is ready.
Set `max_retries` to 1 if you want to return immediately, even
if data isn't ready yet (and hence this function will return None)
https://pvoutput.org/help.html#dataservice-getbatchstatus
Args:
pv_system_id: int
date_to: str in format YYYYMMDD; or datetime
(localtime of the PV system). The returned timeseries will
include 366 days of data: from YYYY-1MMDD to YYYYMMDD inclusive
max_retries: int, number of times to retry after receiving
a '202 Accepted' request. Set `max_retries` to 1 if you want
to return immediately, even if data isn't ready yet (and hence
this function will return None).
Returns:
None (if data isn't ready after retrying max_retries times) or
pd.DataFrame:
index: datetime (DatetimeIndex, localtime of the PV system)
columns: (all np.float64):
cumulative_energy_gen_Wh,
instantaneous_power_gen_W,
temperature_C,
voltage
"""
api_params = {"sid1": pv_system_id}
_set_date_param(date_to, api_params, "dt")
for retry in range(max_retries):
try:
pv_system_status_text = self._api_query(
service="getbatchstatus", api_params=api_params, use_data_service=True, **kwargs
)
except NoStatusFound:
_LOG.info("system_id %d: No status found for date_to %s", pv_system_id, date_to)
pv_system_status_text = ""
break
if "Accepted 202" in pv_system_status_text:
if retry == 0:
_print_and_log("Request accepted.")
if retry < max_retries - 1:
_print_and_log("Sleeping for 1 minute.")
time.sleep(60)
else:
_print_and_log(
"Call get_batch_status again in a minute to see if" " results are ready."
)
else:
break
else:
return
return _process_batch_status(pv_system_status_text)
def get_metadata(self, pv_system_id: int, **kwargs) -> pd.Series:
"""Get metadata for a single PV system.
Args:
pv_system_id: int
Returns:
pd.Series. Index is:
name,
system_DC_capacity_W,
address,
num_panels,
panel_capacity_W_each,
panel_brand,
num_inverters,
inverter_capacity_W,
inverter_brand,
orientation,
array_tilt_degrees,
shade,
install_date,
latitude,
longitude,
status_interval_minutes,
secondary_num_panels,
secondary_panel_capacity_W_each,
secondary_orientation,
secondary_array_tilt_degrees
"""
pv_metadata_text = self._api_query(
service="getsystem",
api_params={
"array2": 1, # Provide data about secondary array, if present.
"tariffs": 0,
"teams": 0,
"est": 0,
"donations": 0,
"sid1": pv_system_id, # SystemID
"ext": 0, # Include extended data?
},
**kwargs
)
pv_metadata = pd.read_csv(
StringIO(pv_metadata_text),
lineterminator=";",
names=[
"name",
"system_DC_capacity_W",
"address",
"num_panels",
"panel_capacity_W_each",
"panel_brand",
"num_inverters",
"inverter_capacity_W",
"inverter_brand",
"orientation",
"array_tilt_degrees",
"shade",
"install_date",
"latitude",
"longitude",
"status_interval_minutes",
"secondary_num_panels",
"secondary_panel_capacity_W_each",
"secondary_orientation",
"secondary_array_tilt_degrees",
],
parse_dates=["install_date"],
nrows=1,
).squeeze()
pv_metadata["system_id"] = pv_system_id
pv_metadata.name = pv_system_id
return pv_metadata
def get_statistic(
self,
pv_system_id: int,
date_from: Optional[Union[str, date]] = None,
date_to: Optional[Union[str, date]] = None,
**kwargs
) -> pd.DataFrame:
"""Get summary stats for a single PV system.
Args:
pv_system_id: int
date_from
date_to
Returns:
pd.DataFrame:
total_energy_gen_Wh,
energy_exported_Wh,
average_daily_energy_gen_Wh,
minimum_daily_energy_gen_Wh,
maximum_daily_energy_gen_Wh,
average_efficiency_kWh_per_kW,
num_outputs, # The number of days for which there's >= 1 val.
actual_date_from,
actual_date_to,
record_efficiency_kWh_per_kW,
record_efficiency_date,
query_date_from,
query_date_to
"""
if date_from and not date_to:
date_to = pd.Timestamp.now().date()
if date_to and not date_from:
date_from = pd.Timestamp("1900-01-01").date()
api_params = {
"c": 0, # consumption and import
"crdr": 0, # credits / debits
"sid1": pv_system_id, # SystemID
}
_set_date_param(date_from, api_params, "df")
_set_date_param(date_to, api_params, "dt")
try:
pv_metadata_text = self._api_query(
service="getstatistic", api_params=api_params, **kwargs
)
except NoStatusFound:
pv_metadata_text = ""
columns = [
"total_energy_gen_Wh",
"energy_exported_Wh",
"average_daily_energy_gen_Wh",
"minimum_daily_energy_gen_Wh",
"maximum_daily_energy_gen_Wh",
"average_efficiency_kWh_per_kW",
"num_outputs",
"actual_date_from",
"actual_date_to",
"record_efficiency_kWh_per_kW",
"record_efficiency_date",
]
date_cols = ["actual_date_from", "actual_date_to", "record_efficiency_date"]
numeric_cols = set(columns) - set(date_cols)
pv_metadata = pd.read_csv(
StringIO(pv_metadata_text),
names=columns,
dtype={col: np.float32 for col in numeric_cols},
parse_dates=date_cols,
)
if pv_metadata.empty:
data = {col: np.float32(np.NaN) for col in numeric_cols}
data.update({col: pd.NaT for col in date_cols})
pv_metadata = pd.DataFrame(data, index=[pv_system_id])
else:
pv_metadata.index = [pv_system_id]
pv_metadata["query_date_from"] = pd.Timestamp(date_from) if date_from else pd.NaT
pv_metadata["query_date_to"] = pd.Timestamp(date_to) if date_to else pd.Timestamp.now()
return pv_metadata
def _get_statistic_with_cache(
self,
store_filename: str,
pv_system_id: int,
date_from: Optional[Union[str, date]] = None,
date_to: Optional[Union[str, date]] = None,
**kwargs
) -> pd.Series:
"""Will try to get stats from store_filename['statistics']. If this
fails, or if date_to > query_date_to, or if
date_from < query_date_from, then will call the API. Note that the aim
of this function is just to find the relevant actual_date_from and
actual_date_to, so this function does not respect the other params.
"""
if date_from:
date_from = pd.Timestamp(date_from).date()
if date_to:
date_to = pd.Timestamp(date_to).date()
def _get_fresh_statistic():
_LOG.info("pv_system %d: Getting fresh statistic.", pv_system_id)
stats = self.get_statistic(pv_system_id, **kwargs)
with pd.HDFStore(store_filename, mode="a") as store:
try:
store.remove(key="statistics", where="index=pv_system_id")
except KeyError:
pass
store.append(key="statistics", value=stats)
return stats
try:
stats = pd.read_hdf(store_filename, key="statistics", where="index=pv_system_id")
except (FileNotFoundError, KeyError):
return _get_fresh_statistic()
if stats.empty:
return _get_fresh_statistic()
query_date_from = stats.iloc[0]["query_date_from"]
query_date_to = stats.iloc[0]["query_date_to"]
if (
not pd.isnull(date_from)
and not pd.isnull(query_date_from)
and date_from < query_date_from.date()
):
return _get_fresh_statistic()
if not pd.isnull(date_to) and date_to > query_date_to.date():
return _get_fresh_statistic()
return stats
def download_multiple_systems_to_disk(
self,
system_ids: Iterable[int],
start_date: datetime,
end_date: datetime,
output_filename: str,
timezone: Optional[str] = None,
min_data_availability: Optional[float] = 0.5,
use_get_batch_status_if_available: Optional[bool] = True,
):
"""Download multiple PV system IDs to disk.
Data is saved to `output_filename` in HDF5 format. The exact data
format is documented in
https://github.com/openclimatefix/pvoutput/blob/master/docs/dataset.md
This function is designed to be run for days (!) downloading
gigabytes of PV data :) As such, this function can be safely
interrupted and re-started. All the state required to re-start
is stored in the HDF5 file.
Add appropriate handlers the Python logger `pvoutput` to see progress.
Args:
system_ids: List of PV system IDs to download.
start_date: Start of date range to download.
end_date: End of date range to download.
output_filename: HDF5 filename to write data to.
timezone: String representation of timezone of timeseries data.
e.g. 'Europe/London'.
min_data_availability: A float in the range [0, 1]. 1 means only
accept PV systems which have no days of missing data. 0 means
accept all PV systems, no matter if they have missing data.
Note that the data availability is measured against the date
range for which the PV system has data available, not from
the date range passed into this function.
use_get_batch_status_if_available: Bool. If true then will use
PVOutput's getbatchstatus API (which must be paid for, and
`data_service_url` must be set in `~/.pvoutput.yml` or when
initialising the PVOutput object).
"""
n = len(system_ids)
for i, pv_system_id in enumerate(system_ids):
_LOG.info("**********************")
msg = "system_id {:d}: {:d} of {:d} ({:%})".format(pv_system_id, i + 1, n, (i + 1) / n)
_LOG.info(msg)
print("\r", msg, end="", flush=True)
# Sorted list of DateRange objects. For each DateRange,
# we need to download from start_date to end_date inclusive.
date_ranges_to_download = get_date_ranges_to_download(
output_filename, pv_system_id, start_date, end_date
)
# How much data is actually available?
date_ranges_to_download = self._filter_date_range(
output_filename, pv_system_id, date_ranges_to_download, min_data_availability
)
if not date_ranges_to_download:
_LOG.info("system_id %d: No data left to download :)", pv_system_id)
continue
_LOG.info(
"system_id %d: Will download these date ranges: %s",
pv_system_id,
date_ranges_to_download,
)
if use_get_batch_status_if_available:
if self.data_service_url:
self._download_multiple_using_get_batch_status(
output_filename, pv_system_id, date_ranges_to_download, timezone
)
else:
raise ValueError("data_service_url is not set!")
else:
self._download_multiple_using_get_status(
output_filename, pv_system_id, date_ranges_to_download, timezone
)
def get_insolation_forecast(
self,
date: Union[str, datetime],
pv_system_id: Optional[int] = None,
timezone: Optional[str] = None,
lat: Optional[float] = None,
lon: Optional[float] = None,
**kwargs
):
"""Get Insolation data for a given site, or a given location defined by
longitude and latitude. This is the estimated output for the site
based on ideal weather conditions. Also factors in site age, reducing
ouput by 1% each year, shade and orientation. Need donation mode enabled.
See https://pvoutput.org/help.html#api-getinsolation
Args:
date: str in format YYYYMMDD; or datetime
(localtime of the PV system)
pv_system_id: int
timezone: str
lat: float e.g. -27.4676
lon: float e.g. 153.0279
**kwargs:
Returns:
"""
date = date_to_pvoutput_str(date)
_check_date(date, prediction=True)
api_params = {
"d": date, # date, YYYYMMDD, localtime of the PV system
"sid1": pv_system_id, # SystemID.
"tz": timezone, # defaults to configured timezone of system otherwise GMT
}
if lat is not None and lon is not None:
api_params["ll"] = "{:f},{:f}".format(lat, lon)
try:
pv_insolation_text = self._api_query(
service="getinsolation", api_params=api_params, **kwargs
)
except NoStatusFound:
_LOG.info("system_id %d: No status found for date %s", pv_system_id, date)
pv_insolation_text = ""
columns = ["predicted_power_gen_W", "predicted_cumulative_energy_gen_Wh"]
pv_insolation = pd.read_csv(
StringIO(pv_insolation_text),
lineterminator=";",
names=["time"] + columns,
dtype={col: np.float64 for col in columns},
).sort_index()
pv_insolation.index = pd.to_datetime(
date + " " + pv_insolation.time, format="%Y-%m-%d %H:%M"
)
pv_insolation.drop("time", axis=1, inplace=True)
return pv_insolation
def _filter_date_range(
self,
store_filename: str,
system_id: int,
date_ranges: Iterable[DateRange],
min_data_availability: Optional[float] = 0.5,
) -> List[DateRange]:
"""Check getstatistic to see if system_id has data for all date ranges.
Args:
system_id: PV system ID.
store_filename: HDF5 filename to cache statistics to / from.
date_ranges: List of DateRange objects.
min_data_availability: A float in the range [0, 1]. 1 means only
accept PV systems which have no days of missing data. 0 means
accept all PV systems, no matter if they have missing data.
"""
if not date_ranges:
return date_ranges
stats = self._get_statistic_with_cache(
store_filename,
system_id,
date_to=date_ranges[-1].end_date,
wait_if_rate_limit_exceeded=True,
).squeeze()
if pd.isnull(stats["actual_date_from"]) or pd.isnull(stats["actual_date_to"]):
_LOG.info("system_id %d: Stats say there is no data!", system_id)
return []
timeseries_date_range = DateRange(stats["actual_date_from"], stats["actual_date_to"])
data_availability = stats["num_outputs"] / (timeseries_date_range.total_days() + 1)
if data_availability < min_data_availability:
_LOG.info(
"system_id %d: Data availability too low! Only %.0f %%.",
system_id,
data_availability * 100,
)
return []
new_date_ranges = []
for date_range in date_ranges:
new_date_range = date_range.intersection(timeseries_date_range)
if new_date_range:
new_date_ranges.append(new_date_range)
return new_date_ranges
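# Worked example (added, hypothetical numbers): stats reporting num_outputs=300
# over an actual recorded span of 365 days give availability 300 / (365 + 1) ≈ 0.82,
# which passes the default min_data_availability of 0.5; 150 outputs over the same
# span (≈ 0.41) would cause the system to be skipped.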
def _download_multiple_using_get_batch_status(
self, output_filename, pv_system_id, date_ranges_to_download, timezone: Optional[str] = None
):
years = merge_date_ranges_to_years(date_ranges_to_download)
dates_to = [year.end_date for year in years]
total_rows = self._download_multiple_worker(
output_filename, pv_system_id, dates_to, timezone, use_get_status=False
)
# Re-load data, sort, remove duplicate indices, append back
if total_rows:
with pd.HDFStore(output_filename, mode="a", complevel=9) as store:
sort_and_de_dupe_pv_system(store, pv_system_id)
def _download_multiple_using_get_status(
self, output_filename, pv_system_id, date_ranges_to_download, timezone: Optional[str] = None
):
for date_range in date_ranges_to_download:
dates = date_range.date_range()
self._download_multiple_worker(
output_filename, pv_system_id, dates, timezone, use_get_status=True
)
def _download_multiple_worker(
self, output_filename, pv_system_id, dates, timezone, use_get_status
) -> int:
"""
Returns:
total number of rows downloaded
"""
total_rows = 0
for date_to_load in dates:
_LOG.info("system_id %d: Requesting date: %s", pv_system_id, date_to_load)
datetime_of_api_request = pd.Timestamp.utcnow()
if use_get_status:
timeseries = self.get_status(
pv_system_id, date_to_load, wait_if_rate_limit_exceeded=True
)
else:
timeseries = self.get_batch_status(pv_system_id, date_to=date_to_load)
if timeseries.empty:
_LOG.info(
"system_id %d: Got empty timeseries back for %s", pv_system_id, date_to_load
)
if use_get_status:
_append_missing_date_range(
output_filename,
pv_system_id,
date_to_load,
date_to_load,
datetime_of_api_request,
)
else:
_append_missing_date_range(
output_filename,
pv_system_id,
date_to_load - timedelta(days=365),
date_to_load,
datetime_of_api_request,
)
else:
total_rows += len(timeseries)
timeseries = timeseries.tz_localize(timezone)
_LOG.info(
"system_id: %d: %d rows retrieved: %s to %s",
pv_system_id,
len(timeseries),
timeseries.index[0],
timeseries.index[-1],
)
if use_get_status:
check_pv_system_status(timeseries, date_to_load)
else:
_record_gaps(
output_filename,
pv_system_id,
date_to_load,
timeseries,
datetime_of_api_request,
)
timeseries["datetime_of_API_request"] = datetime_of_api_request
timeseries["query_date"] = pd.Timestamp(date_to_load)
key = system_id_to_hdf_key(pv_system_id)
with pd.HDFStore(output_filename, mode="a", complevel=9) as store:
with warnings.catch_warnings():
warnings.simplefilter("ignore", tables.NaturalNameWarning)
store.append(key=key, value=timeseries, data_columns=True)
_LOG.info("system_id %d: %d total rows downloaded", pv_system_id, total_rows)
return total_rows
def _api_query(
self,
service: str,
api_params: Dict,
wait_if_rate_limit_exceeded: bool = False,
use_data_service: bool = False,
) -> str:
"""Send API request to PVOutput.org and return content text.
Args:
service: string, e.g. 'search' or 'getstatus'
api_params: dict
wait_if_rate_limit_exceeded: bool
use_data_service: bool
Raises:
NoStatusFound
RateLimitExceeded
"""
get_response_func = (
self._get_data_service_response if use_data_service else self._get_api_response
)
try:
response = get_response_func(service, api_params)
except Exception as e:
_LOG.exception(e)
raise
try:
return self._process_api_response(response)
except RateLimitExceeded:
msg = "PVOutput.org API rate limit exceeded!" " Rate limit will be reset at {}".format(
self.rate_limit_reset_time
)
_print_and_log(msg)
if wait_if_rate_limit_exceeded:
self.wait_for_rate_limit_reset()
return self._api_query(service, api_params, wait_if_rate_limit_exceeded=False)
raise RateLimitExceeded(response, msg)
def _get_api_response(self, service: str, api_params: Dict) -> requests.Response:
"""
Args:
service: string, e.g. 'search', 'getstatus'
api_params: dict
"""
self._check_api_params()
# Create request headers
headers = {
"X-Rate-Limit": "1",
"X-Pvoutput-Apikey": self.api_key,
"X-Pvoutput-SystemId": self.system_id,
}
api_url = urljoin(BASE_URL, "service/r2/{}.jsp".format(service))
return _get_response(api_url, api_params, headers)
def _get_data_service_response(self, service: str, api_params: Dict) -> requests.Response:
"""
Args:
service: string, e.g. 'getbatchstatus'
api_params: dict
"""
self._check_api_params()
if self.data_service_url is None:
raise ValueError("data_service_url must be set to use the data service!")
headers = {"X-Rate-Limit": "1"}
api_params = api_params.copy()
api_params["key"] = self.api_key
api_params["sid"] = self.system_id
api_url = urljoin(self.data_service_url, "service/r2/{}.jsp".format(service))
return _get_response(api_url, api_params, headers)
def _check_api_params(self):
# Check we have relevant login details:
for param_name in ["api_key", "system_id"]:
if getattr(self, param_name) is None:
raise ValueError("Please set the {} parameter.".format(param_name))
def _set_rate_limit_params(self, headers):
for param_name, header_key in RATE_LIMIT_PARAMS_TO_API_HEADERS.items():
header_value = int(headers[header_key])
setattr(self, param_name, header_value)
self.rate_limit_reset_time = pd.Timestamp.utcfromtimestamp(self.rate_limit_reset_time)
self.rate_limit_reset_time = self.rate_limit_reset_time.tz_localize("utc")
_LOG.debug("%s", self.rate_limit_info())
def rate_limit_info(self) -> Dict:
info = {}
for param_name in RATE_LIMIT_PARAMS_TO_API_HEADERS:
info[param_name] = getattr(self, param_name)
return info
def _process_api_response(self, response: requests.Response) -> str:
"""Turns an API response into text.
Args:
response: from _get_api_response()
Returns:
content of the response.
Raises:
UnicodeDecodeError
NoStatusFound
RateLimitExceeded
"""
if response.status_code == 400:
raise NoStatusFound(response=response)
if response.status_code != 403:
try:
response.raise_for_status()
except Exception as e:
msg = "Bad status code! Response content = {}. Exception = {}".format(
response.content, e
)
_LOG.exception(msg)
raise e.__class__(msg)
self._set_rate_limit_params(response.headers)
# Did we overshoot our quota?
if response.status_code == 403 and self.rate_limit_remaining <= 0:
raise RateLimitExceeded(response=response)
try:
content = response.content.decode("latin1").strip()
except Exception as e:
msg = "Error decoding this string: {}\n{}".format(response.content, e)
_LOG.exception(msg)
raise
# If we get to here then the content is valid :)
return content
def wait_for_rate_limit_reset(self):
utc_now = pd.Timestamp.utcnow()
timedelta_to_wait = self.rate_limit_reset_time - utc_now
timedelta_to_wait += timedelta(minutes=3) # Just for safety
secs_to_wait = timedelta_to_wait.total_seconds()
retry_time_utc = utc_now + timedelta_to_wait
_print_and_log(
"Waiting {:.0f} seconds. Will retry at {}".format(secs_to_wait, retry_time_utc)
)
time.sleep(secs_to_wait)
def date_to_pvoutput_str(date: Union[str, datetime]) -> str:
"""Convert datetime to date string for PVOutput.org in YYYYMMDD format."""
if isinstance(date, str):
try:
datetime.strptime(date, PV_OUTPUT_DATE_FORMAT)
except ValueError:
return pd.Timestamp(date).strftime(PV_OUTPUT_DATE_FORMAT)
else:
return date
return date.strftime(PV_OUTPUT_DATE_FORMAT)
def _check_date(date: str, prediction=False):
"""Check that date string conforms to YYYYMMDD format,
and that the date isn't in the future.
Raises:
ValueError if the date is 'bad'.
"""
dt = datetime.strptime(date, PV_OUTPUT_DATE_FORMAT)
if dt > datetime.now() and not prediction:
raise ValueError(
""
"date should not be in the future. Got {}. Current date is {}.".format(
date, datetime.now()
)
)
def _set_date_param(dt, api_params, key):
if dt is not None:
dt = date_to_pvoutput_str(dt)
_check_date(dt)
api_params[key] = dt
def check_pv_system_status(pv_system_status: pd.DataFrame, requested_date: date):
"""Checks the DataFrame returned by get_pv_system_status.
Args:
pv_system_status: DataFrame returned by get_pv_system_status
requested_date: date.
Raises:
ValueError if the DataFrame is incorrect.
"""
if not isinstance(pv_system_status, pd.DataFrame):
raise ValueError("pv_system_status must be a dataframe")
if not pv_system_status.empty:
index = pv_system_status.index
for d in [index[0], index[-1]]:
if not requested_date <= d.date() <= requested_date + ONE_DAY:
raise ValueError(
"A date in the index is outside the expected range."
" Date from index={}, requested_date={}".format(d, requested_date)
)
def _process_batch_status(pv_system_status_text):
# See https://pvoutput.org/help.html#dataservice-getbatchstatus
# PVOutput uses a non-standard format for the data. The text
# needs some processing before it can be read as a CSV.
processed_lines = []
for line in pv_system_status_text.split("\n"):
line_sections = line.split(";")
date = line_sections[0]
time_and_data = line_sections[1:]
processed_line = [
"{date},{payload}".format(date=date, payload=payload) for payload in time_and_data
]
processed_lines.extend(processed_line)
if processed_lines:
first_line = processed_lines[0]
num_cols = len(first_line.split(","))
if num_cols >= 8:
raise NotImplementedError("Handling of consumption data is not implemented!")
processed_text = "\n".join(processed_lines)
del processed_lines
columns = ["cumulative_energy_gen_Wh", "instantaneous_power_gen_W", "temperature_C", "voltage"]
pv_system_status = pd.read_csv(
StringIO(processed_text),
names=["date", "time"] + columns,
parse_dates={"datetime": ["date", "time"]},
index_col=["datetime"],
dtype={col: np.float64 for col in columns},
).sort_index()
return pv_system_status
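# Added illustration (hypothetical values): a raw getbatchstatus line groups one
# date with many "time,energy,power,temperature,voltage" tuples separated by ';',
# e.g. "20190101;10:00,100,250,20.1,240.0;10:05,120,260,20.3,240.1".
# _process_batch_status expands that into one "date,time,..." CSV row per tuple
# before handing it to pandas.read_csv.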
def _append_missing_date_range(
output_filename, pv_system_id, missing_start_date, missing_end_date, datetime_of_api_request
):
data = {
"missing_start_date_PV_localtime":
|
pd.Timestamp(missing_start_date)
|
pandas.Timestamp
|
import pandas as pd
import re
from thsData.fetchDataFromTHS import CFetchDataFromTHS
import datetime
import logging
DAILY_COLUMNS_MAP = {
'股票代码': '^股票代码',  # stock code
'股票简称': '^股票简称',  # stock short name
'涨跌幅': "^涨跌幅:前复权",  # price change (%), forward-adjusted
'开盘价': '^开盘价:前复权',  # open price, forward-adjusted
'收盘价': '^收盘价:前复权',  # close price, forward-adjusted
'最高价': '^最高价:前复权',  # high price, forward-adjusted
'最低价': '^最低价:前复权',  # low price, forward-adjusted
'成交量': '^成交量',  # trading volume
'成交额': '^成交额',  # turnover (trading value)
'上市日期': '^上市日期',  # listing date
'所属概念': '^所属概念$',  # concept sectors
"所属概念数量": "^所属概念数量",  # number of concept sectors
'行业': '^所属同花顺行业',  # THS industry classification
'流通市值': "^a股流通市值",  # A-share float market capitalization
'上市天数': "^上市天数",  # days since listing
}
logger = logging.getLogger()
class CFetchDailyDataFromTHS(object):
def __init__(self,cookie,v):
self.dataFrame = None
self.date = None
self.keyWords = '前复权开盘价,前复权收盘价,前复权最高价,前复权最低价,前复权涨跌幅, 成交量,成交额,上市天数,所属概念'
self.url = 'http://x.iwencai.com/stockpick/search?ts=1&f=1&qs=stockhome_topbar_click&w=%E5%89%8D%E5%A4%8D%E6%9D%83%E5%BC%80%E7%9B%98%E4%BB%B7%EF%BC%8C%E5%89%8D%E5%A4%8D%E6%9D%83%E6%94%B6%E7%9B%98%E4%BB%B7%EF%BC%8C%E5%89%8D%E5%A4%8D%E6%9D%83%E6%9C%80%E9%AB%98%E4%BB%B7%EF%BC%8C%E5%89%8D%E5%A4%8D%E6%9D%83%E6%9C%80%E4%BD%8E%E4%BB%B7%EF%BC%8C%E5%89%8D%E5%A4%8D%E6%9D%83%E6%B6%A8%E8%B7%8C%E5%B9%85,%20%E6%88%90%E4%BA%A4%E9%87%8F%EF%BC%8C%E6%88%90%E4%BA%A4%E9%A2%9D%EF%BC%8C%E4%B8%8A%E5%B8%82%E5%A4%A9%E6%95%B0,%E6%89%80%E5%B1%9E%E6%A6%82%E5%BF%B5'
self.referer = 'http://x.iwencai.com/stockpick/search?typed=1&preParams=&ts=1&f=1&qs=result_rewrite&selfsectsn=&querytype=stock&searchfilter=&tid=stockpick&w=%E5%89%8D%E5%A4%8D%E6%9D%83%E5%BC%80%E7%9B%98%E4%BB%B7%EF%BC%8C%E5%89%8D%E5%A4%8D%E6%9D%83%E6%94%B6%E7%9B%98%E4%BB%B7%EF%BC%8C%E5%89%8D%E5%A4%8D%E6%9D%83%E6%9C%80%E9%AB%98%E4%BB%B7%EF%BC%8C%E5%89%8D%E5%A4%8D%E6%9D%83%E6%9C%80%E4%BD%8E%E4%BB%B7%EF%BC%8C%E5%89%8D%E5%A4%8D%E6%9D%83%E6%B6%A8%E8%B7%8C%E5%B9%85%2C%20%E6%88%90%E4%BA%A4%E9%87%8F%EF%BC%8C%E6%88%90%E4%BA%A4%E9%A2%9D%EF%BC%8C%E4%B8%8A%E5%B8%82%E6%97%A5%E6%9C%9F%2C%E6%89%80%E5%B1%9E%E6%A6%82%E5%BF%B5&queryarea='
self.cookie = cookie
self.v = v
def GetDailyData(self):
fetcher = CFetchDataFromTHS(self.cookie,self.url, self.referer, self.v)
result = fetcher.FetchAllInOne()
map = self.keywordTranslator(result)
self.dataFrame =
|
pd.DataFrame()
|
pandas.DataFrame
|
from __future__ import print_function
from builtins import str
from builtins import range
import sys
sys.path.insert(0, '/home/mike/git/streampulse/server_copy/sp')
import rrcf
import pandas as pd
from helpers import email_msg
import numpy as np
# import logging
# logging.getLogger('fbprophet').setLevel(logging.ERROR)
# import warnings
# warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
# matplotlib.use('TkAgg')
# matplotlib.use('GTKAgg')
import math
import copy
import re
script_name, notificationEmail, tmpcode, region, site = sys.argv
# userID=35; tmpfile='e076b8930278'
# region='NC'; site='FF'; notificationEmail='<EMAIL>'
# dumpfile = '../spdumps/confirmcolumns' + tmpfile + '.json'
# # dumpfile = '/home/mike/git/streampulse/server_copy/spdumps/confirmcolumns35_AQ_WB_e076b8930278.json'
# with open(dumpfile) as d:
# up_data = json.load(d)
# %% trained outl detector
q = np.load('/home/mike/Downloads/telemanom_hmm/data/test/F-2.npy')
qq = pd.DataFrame(q)
qq.to_csv('/home/mike/temp/arse.csv')
trainR = pd.read_csv('~/Downloads/telemanom/training_dev/train.csv',
index_col='solar.time')
testR = pd.read_csv('~/Downloads/telemanom/training_dev/test.csv',
index_col='solar.time')
testR = pd.read_csv('~/Downloads/telemanom/training_dev/testAT.csv',
index_col='solar.time')
dd =
|
pd.concat([trainR, testR])
|
pandas.concat
|
import PySimpleGUI as sg
from PIL import Image
import os
import io
from typing import Literal, Tuple
import pandas as pd
import json
import numpy as np
CACHE = "cachefile.json"
ANNOTATION = "annotation/"
INDENT = 4
SPACE = " "
NEWLINE = "\n"
# This program includes software developed by jterrace and <NAME>
# in https://stackoverflow.com/questions/10097477/python-json-array-newlines
# Huge thanks to them!
# Changed basestring to str, and dict uses items() instead of iteritems().
def to_json(o, level=0):
ret = ""
if isinstance(o, dict):
ret += "{" + NEWLINE
comma = ""
for k, v in o.items():
ret += comma
comma = ",\n"
ret += SPACE * INDENT * (level + 1)
ret += '"' + str(k) + '":' + SPACE
ret += to_json(v, level + 1)
ret += NEWLINE + SPACE * INDENT * level + "}"
elif isinstance(o, str):
ret += '"' + o + '"'
elif isinstance(o, list):
ret += "[" + ",".join([to_json(e, level + 1) for e in o]) + "]"
# Tuples are interpreted as lists
elif isinstance(o, tuple):
ret += "[" + ",".join(to_json(e, level + 1) for e in o) + "]"
elif isinstance(o, bool):
ret += "true" if o else "false"
elif isinstance(o, int):
ret += str(o)
elif isinstance(o, float):
ret += '%.7g' % o
elif isinstance(o, np.ndarray) and np.issubdtype(o.dtype, np.integer):
ret += "[" + ','.join(map(str, o.flatten().tolist())) + "]"
elif isinstance(o, np.ndarray) and np.issubdtype(o.dtype, np.inexact):
ret += "[" + ','.join(map(lambda x: '%.7g' % x,
o.flatten().tolist())) + "]"
elif o is None:
ret += 'null'
else:
raise TypeError("Unknown type '%s' for json serialization" %
str(type(o)))
return ret
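# Hedged usage sketch (added, not part of the original tool): serializes a small
# mixed payload, including a numpy landmark array that the stock json module
# would reject. The example values and the name _demo_to_json are made up.
def _demo_to_json():
    payload = {
        "image": "img_001.png",
        "xy": np.array([[10, 20], [30, 40]]),
        "done": False,
    }
    # Returns a pretty-printed JSON string with the array flattened onto one line.
    return to_json(payload)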
def inspect_annotation_json(
Dir, num_lm, WINDOW_LOC=(None, None)) -> Tuple[str, bool]:
annotation_csv = os.path.join(ANNOTATION, os.path.basename(Dir) + ".csv")
annotation_json = os.path.join(ANNOTATION, os.path.basename(Dir) + ".json")
if not os.path.isfile(annotation_json) or not os.path.isfile(
annotation_csv):
# Create empty json file
pretty_dump({}, annotation_json)
# If csv exist, load json from csv.
# Since we don't know window size yet, only load "xy".
# Will load "mouse_xy" once StateMachine is initiated.
if os.path.isfile(annotation_csv):
dic = {}
df = pd.read_csv(annotation_csv, header=0)
n = len(df)
for i in range(n):
row = df.iloc[i]
xy_data = []
j = 1
row_keys = list(row.keys())
while True:
if f"x{j}" not in row_keys or
|
pd.isnull(row[f"x{j}"])
|
pandas.isnull
|
import datetime as dt
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.datamodel import Observation
from solarforecastarbiter.validation import tasks, validator
from solarforecastarbiter.validation.quality_mapping import (
LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING,
DAILY_VALIDATION_FLAG)
@pytest.fixture()
def make_observation(single_site):
def f(variable):
return Observation(
name='test', variable=variable, interval_value_type='mean',
interval_length=pd.Timedelta('1hr'), interval_label='beginning',
site=single_site, uncertainty=0.1, observation_id='OBSID',
provider='Organization 1', extra_parameters='')
return f
@pytest.fixture()
def default_index(single_site):
return [pd.Timestamp('2019-01-01T08:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T09:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T10:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T11:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T13:00:00', tz=single_site.timezone)]
@pytest.fixture()
def daily_index(single_site):
out = pd.date_range(start='2019-01-01T08:00:00',
end='2019-01-01T19:00:00',
freq='1h',
tz=single_site.timezone)
return out.append(
pd.Index([pd.Timestamp('2019-01-02T09:00:00',
tz=single_site.timezone)]))
def test_validate_ghi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi')
data = pd.Series([10, 1000, -100, 500, 300], index=default_index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_validate_mostly_clear(mocker, make_observation):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi').replace(interval_length=pd.Timedelta('5min'))
index = pd.date_range(start='2019-04-01T11:00', freq='5min',
tz=obs.site.timezone, periods=11)
data = pd.Series([742, 749, 756, 763, 769, 774, 779, 784, 789, 793, 700],
index=index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
|
pd.Series(0, index=data.index)
|
pandas.Series
|