import glob
import os
import sys
# these path insertions and imports need to stay in this order
sys.path.insert(0, "../")
sys.path.insert(0, "TP_model")
sys.path.insert(0, "TP_model/fit_and_forecast")
from Reff_functions import *
from Reff_constants import *
from sys import argv
from datetime import timedelta, datetime
from scipy.special import expit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
def forecast_TP(data_date):
from scenarios import scenarios, scenario_dates
from params import (
num_forecast_days,
alpha_start_date,
delta_start_date,
omicron_start_date,
truncation_days,
start_date,
sim_start_date,
third_start_date,
mob_samples,
)
data_date = pd.to_datetime(data_date)
# Define inputs
sim_start_date = pd.to_datetime(sim_start_date)
# Add 3 days buffer to mobility forecast
num_forecast_days = num_forecast_days + 3
# data_date = pd.to_datetime('2022-01-25')
print("============")
print("Generating forecasts using data from", data_date)
print("============")
# convert third start date to the correct format
third_start_date = pd.to_datetime(third_start_date)
third_end_date = data_date - timedelta(truncation_days)
# a different end date to deal with issues in fitting
third_end_date_diff = data_date - timedelta(18 + 7 + 7)
third_states = sorted(["NSW", "VIC", "ACT", "QLD", "SA", "TAS", "NT", "WA"])
# third_states = sorted(['NSW', 'VIC', 'ACT', 'QLD', 'SA', 'NT'])
# choose dates for each state for third wave
# NOTE: These need to be in date sorted order
third_date_range = {
"ACT": pd.date_range(start="2021-08-15", end=third_end_date).values,
"NSW": pd.date_range(start="2021-06-25", end=third_end_date).values,
"NT": pd.date_range(start="2021-12-20", end=third_end_date).values,
"QLD": pd.date_range(start="2021-07-30", end=third_end_date).values,
"SA": pd.date_range(start="2021-12-10", end=third_end_date).values,
"TAS": pd.date_range(start="2021-12-20", end=third_end_date).values,
"VIC": pd.date_range(start="2021-07-10", end=third_end_date).values,
"WA": pd.date_range(start="2022-01-01", end=third_end_date).values,
}
# Get Google Data - Don't use the smoothed data?
df_google_all = read_in_google(Aus_only=True, moving=True, local=True)
third_end_date = pd.to_datetime(data_date) - pd.Timedelta(days=truncation_days)
results_dir = (
"results/"
+ data_date.strftime("%Y-%m-%d")
+ "/"
)
# Load in vaccination data by state and date. This should have the same data date as the
# NNDSS/linelist data and uses the inferred VE.
vaccination_by_state_delta = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_delta = vaccination_by_state_delta[["state", "date", "effect"]]
vaccination_by_state_delta = vaccination_by_state_delta.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_delta_array = vaccination_by_state_delta.to_numpy()
vaccination_by_state_omicron = pd.read_csv(
results_dir + "adjusted_vaccine_ts_omicron" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
vaccination_by_state_omicron = vaccination_by_state_omicron[["state", "date", "effect"]]
vaccination_by_state_omicron = vaccination_by_state_omicron.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# Convert to simple array for indexing
vaccination_by_state_omicron_array = vaccination_by_state_omicron.to_numpy()
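# each vaccination_by_state_* frame is a (state x date) matrix of the inferred VE effect;
# the *_array copies are plain numpy arrays kept for positional indexing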
# Get survey data
surveys = pd.DataFrame()
path = "data/md/Barometer wave*.csv"
for file in glob.glob(path):
surveys = surveys.append(pd.read_csv(file, parse_dates=["date"]))
surveys = surveys.sort_values(by="date")
print("Latest microdistancing survey is {}".format(surveys.date.values[-1]))
surveys.loc[surveys.state != "ACT", "state"] = (
surveys.loc[surveys.state != "ACT", "state"]
.map(states_initials)
.fillna(surveys.loc[surveys.state != "ACT", "state"])
)
surveys["proportion"] = surveys["count"] / surveys.respondents
surveys.date = pd.to_datetime(surveys.date)
always = surveys.loc[surveys.response == "Always"].set_index(["state", "date"])
always = always.unstack(["state"])
# fill in date range
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
always = always.reindex(idx, fill_value=np.nan)
always.index.name = "date"
always = always.fillna(method="bfill")
always = always.stack(["state"])
# Zero out before first survey 20th March
always = always.reset_index().set_index("date")
always.loc[:"2020-03-20", "count"] = 0
always.loc[:"2020-03-20", "respondents"] = 0
always.loc[:"2020-03-20", "proportion"] = 0
always = always.reset_index().set_index(["state", "date"])
survey_X = pd.pivot_table(
data=always, index="date", columns="state", values="proportion"
)
prop_all = survey_X
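# prop_all is a (date x state) matrix of the proportion of respondents always
# microdistancing, back-filled across survey gaps and zeroed before the first survey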
## read in and process mask wearing data
mask_wearing = pd.DataFrame()
path = "data/face_coverings/face_covering_*_.csv"
for file in glob.glob(path):
mask_wearing = mask_wearing.append(pd.read_csv(file, parse_dates=["date"]))
mask_wearing = mask_wearing.sort_values(by="date")
print("Latest mask wearing survey is {}".format(mask_wearing.date.values[-1]))
# mask_wearing['state'] = mask_wearing['state'].map(states_initials).fillna(mask_wearing['state'])
mask_wearing.loc[mask_wearing.state != "ACT", "state"] = (
mask_wearing.loc[mask_wearing.state != "ACT", "state"]
.map(states_initials)
.fillna(mask_wearing.loc[mask_wearing.state != "ACT", "state"])
)
mask_wearing["proportion"] = mask_wearing["count"] / mask_wearing.respondents
mask_wearing.date = pd.to_datetime(mask_wearing.date)
mask_wearing_always = mask_wearing.loc[
mask_wearing.face_covering == "Always"
].set_index(["state", "date"])
mask_wearing_always = mask_wearing_always.unstack(["state"])
idx = pd.date_range("2020-03-01", pd.to_datetime("today"))
mask_wearing_always = mask_wearing_always.reindex(idx, fill_value=np.nan)
mask_wearing_always.index.name = "date"
# back-fill to earlier dates and between survey weeks;
# assume a survey on day x applies to all days back to x - 6
mask_wearing_always = mask_wearing_always.fillna(method="bfill")
mask_wearing_always = mask_wearing_always.stack(["state"])
# Zero out before first survey 20th March
mask_wearing_always = mask_wearing_always.reset_index().set_index("date")
mask_wearing_always.loc[:"2020-03-20", "count"] = 0
mask_wearing_always.loc[:"2020-03-20", "respondents"] = 0
mask_wearing_always.loc[:"2020-03-20", "proportion"] = 0
mask_wearing_X = pd.pivot_table(
data=mask_wearing_always, index="date", columns="state", values="proportion"
)
mask_wearing_all = mask_wearing_X
# Get posterior
df_samples = read_in_posterior(
date=data_date.strftime("%Y-%m-%d"),
)
states = sorted(["NSW", "QLD", "SA", "VIC", "TAS", "WA", "ACT", "NT"])
plot_states = states.copy()
one_month = data_date + timedelta(days=num_forecast_days)
days_from_March = (one_month - pd.to_datetime(start_date)).days
# filter out future info
prop = prop_all.loc[:data_date]
masks = mask_wearing_all.loc[:data_date]
df_google = df_google_all.loc[df_google_all.date <= data_date]
# trick: save the Google data to CSV and reload it so the datetime
# values become plain strings
df_google.to_csv("results/test_google_data.csv")
df_google = pd.read_csv("results/test_google_data.csv")
# remove the temporary file
# os.remove("results/test_google_data.csv")
# Simple interpolation for missing values in Google data
df_google = df_google.interpolate(method="linear", axis=0)
df_google.date = pd.to_datetime(df_google.date)
# forecast time parameters
today = data_date.strftime("%Y-%m-%d")
# add days to forecast if we are missing data
if df_google.date.values[-1] < data_date:
n_forecast = num_forecast_days + (data_date - df_google.date.values[-1]).days
else:
n_forecast = num_forecast_days
training_start_date = datetime(2020, 3, 1, 0, 0)
print(
"Forecast ends at {} days after 1st March".format(
(pd.to_datetime(today) - pd.to_datetime(training_start_date)).days
+ num_forecast_days
)
)
print(
"Final date is {}".format(pd.to_datetime(today) + timedelta(days=num_forecast_days))
)
df_google = df_google.loc[df_google.date >= training_start_date]
outdata = {"date": [], "type": [], "state": [], "mean": [], "std": []}
predictors = mov_values.copy()
# predictors.remove("residential_7days")
# Setup Figures
axes = []
figs = []
for var in predictors:
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
# fig.suptitle(var)
figs.append(fig)
# extra fig for microdistancing
var = "Proportion people always microdistancing"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# # extra fig for mask wearing
var = "Proportion people always wearing masks"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
var = "Reduction in Reff due to vaccination"
fig, ax_states = plt.subplots(figsize=(7, 8), nrows=4, ncols=2, sharex=True)
axes.append(ax_states)
figs.append(fig)
# Forecasting Params
n_training = 21 # Period to examine trend
n_baseline = 150 # Period to create baseline
n_training_vaccination = 30 # period to create trend for vaccination
# since this can be useful, predictor ordering is:
# [
# 'retail_and_recreation_7days',
# 'grocery_and_pharmacy_7days',
# 'parks_7days',
# 'transit_stations_7days',
# 'workplaces_7days'
# ]
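# A sketch of the baseline (non-scenario) mobility update used below, per predictor:
#     p_i = (n_forecast - i) / n_forecast
#     x_{i+1} = x_i + p_i * trend_i + (1 - p_i) * reversion_i
# where trend_i ~ MVN(mu, cov) of the last n_training daily differences and
# reversion_i ~ MVN(0.05 * (baseline_mean - x_i), cov), with baseline_mean the average
# over the last n_baseline days; results are clipped to [minRmed, maxRmed].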
# Loop through states and run forecasting.
print("============")
print("Forecasting macro, micro and vaccination")
print("============")
state_Rmed = {}
state_sims = {}
for i, state in enumerate(states):
rownum = int(i / 2)
colnum = np.mod(i, 2)
rows = df_google.loc[df_google.state == state].shape[0]
# Rmed currently a list, needs to be a matrix
Rmed_array = np.zeros(shape=(rows, len(predictors), mob_samples))
for j, var in enumerate(predictors):
for n in range(mob_samples):
# historically we want a little more noise. In the actual forecasting of trends
# we don't want this to be quite that prominent.
Rmed_array[:, j, n] = df_google[df_google["state"] == state][
var
].values.T + np.random.normal(
loc=0, scale=df_google[df_google["state"] == state][var + "_std"]
)
dates = df_google[df_google["state"] == state]["date"]
# cap the min and max at the historical extremes or at (-50, 10), whichever is wider
# 1 by predictors by mob_samples size
minRmed_array = np.minimum(-50, np.amin(Rmed_array, axis=0))
maxRmed_array = np.maximum(10, np.amax(Rmed_array, axis=0))
# days by predictors by samples
sims = np.zeros(shape=(n_forecast, len(predictors), mob_samples))
for n in range(mob_samples): # Loop through simulations
Rmed = Rmed_array[:, :, n]
minRmed = minRmed_array[:, n]
maxRmed = maxRmed_array[:, n]
if maxRmed[1] < 20:
maxRmed[1] = 50
R_baseline_mean = np.mean(Rmed[-n_baseline:, :], axis=0)
if state not in {"WA"}:
R_baseline_mean[-1] = 0
R_diffs = np.diff(Rmed[-n_training:, :], axis=0)
mu = np.mean(R_diffs, axis=0)
cov = np.cov(R_diffs, rowvar=False) # columns are vars, rows are obs
# Forecast mobility forward sequentially by day.
# current = np.mean(Rmed[-9:-2, :], axis=0) # Start from last valid days
# current = np.mean(Rmed[-1, :], axis=0) # Start from last valid days
current = Rmed[-1, :] # Start from last valid days
for i in range(n_forecast):
# ## SCENARIO MODELLING
# This code chunk allows you to manually set the distancing params for a state for scenario modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast - i) / (n_forecast)
# Generate a single forward realisation of trend
trend_force = np.random.multivariate_normal(mu, cov)
# Generate a single forward realisation of baseline regression
# regression to baseline force stronger in standard forecasting
regression_to_baseline_force = np.random.multivariate_normal(
0.05 * (R_baseline_mean - current), cov
)
new_forcast_points = (
current + p_force * trend_force + (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] != "":
# Make baseline cov for generating points
cov_baseline = np.cov(Rmed[-42:-28, :], rowvar=False)
mu_current = Rmed[-1, :]
mu_victoria = np.array(
[
-55.35057887,
-22.80891056,
-46.59531636,
-75.99942378,
-44.71119293,
]
)
mu_baseline = np.mean(Rmed[-42:-28, :], axis=0)
# mu_baseline = 0*np.mean(Rmed[-42:-28, :], axis=0)
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + (n_forecast - 42)
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# take a continuous median to account for noise in recent observations (such as sunny days)
# mu_current = np.mean(Rmed[-7:, :], axis=0)
# cov_baseline = np.cov(Rmed[-28:, :], rowvar=False)
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
elif scenarios[state] == "no_reversion_continuous_lockdown":
# add the new scenario here
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
# No Lockdown
elif scenarios[state] == "full_reversion":
# a full reversion scenario changes the social mobility and microdistancing
# behaviours at the scenario change date and then applies a return to baseline force
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within lockdown period so take a new baseline of 0's and trend towards this
R_baseline_0 = np.zeros_like(R_baseline_mean)
R_baseline_0 = mu_baseline
# set adjusted baselines by eye for now; this needs to be automated
# R_baseline_0[1] = 10 # baseline of +10% for Grocery based on other jurisdictions
# # apply specific baselines to the jurisdictions progressing towards normal restrictions
# if state == 'NSW':
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'ACT':
# R_baseline_0[1] = 20 # baseline of +20% for Grocery based on other jurisdictions
# R_baseline_0[3] = -25 # baseline of -25% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# elif state == 'VIC':
# R_baseline_0[0] = -15 # baseline of -15% for R&R based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[3] = -30 # baseline of -30% for Transit based on 2021-April to 2021-July (pre-third-wave lockdowns)
# R_baseline_0[4] = -15 # baseline of -15% for workplaces based on 2021-April to 2021-July (pre-third-wave lockdowns)
# the force with which we trend towards the baseline above
p_force = (n_forecast - i) / (n_forecast)
trend_force = np.random.multivariate_normal(
mu, cov
) # Generate a single forward realisation of trend
# baseline scalar is smaller for this as we want slow returns
adjusted_baseline_drift_mean = R_baseline_0 - current
# we purposely scale the transit measure so that we increase a little more quickly
# tmp = 0.05 * adjusted_baseline_drift_mean[3]
adjusted_baseline_drift_mean *= 0.005
# adjusted_baseline_drift_mean[3] = tmp
regression_to_baseline_force = np.random.multivariate_normal(
adjusted_baseline_drift_mean, cov
) # Generate a single forward realisation of baseline regression
new_forcast_points = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Find overall simulation step
# new_forcast_points = current + regression_to_baseline_force # Find overall simulation step
# Apply minimum and maximum
new_forcast_points = np.maximum(minRmed, new_forcast_points)
new_forcast_points = np.minimum(maxRmed, new_forcast_points)
current = new_forcast_points
elif scenarios[state] == "immediately_baseline":
# this scenario is used to return instantly to the baseline levels
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
# baseline is within the lockdown period so take a new baseline of 0's and jump straight to it
R_baseline_0 = np.zeros_like(R_baseline_mean)
# jump immediately to baseline
new_forcast_points = np.random.multivariate_normal(
R_baseline_0, cov_baseline
)
# Temporary Lockdown
elif scenarios[state] == "half_reversion":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
(mu_current + mu_baseline) / 2, cov_baseline
)
# Stage 4
elif scenarios[state] == "stage4":
if i < scenario_change_point:
new_forcast_points = np.random.multivariate_normal(
mu_current, cov_baseline
)
else:
new_forcast_points = np.random.multivariate_normal(
mu_victoria, cov_baseline
)
# Set this day in this simulation to the forecast realisation
sims[i, :, n] = new_forcast_points
dd = [dates.tolist()[-1] + timedelta(days=x) for x in range(1, n_forecast + 1)]
sims_med = np.median(sims, axis=2) # N by predictors
sims_q25 = np.percentile(sims, 25, axis=2)
sims_q75 = np.percentile(sims, 75, axis=2)
# forecast microdistancing
# Get a baseline value of microdistancing
mu_overall = np.mean(prop[state].values[-n_baseline:])
md_diffs = np.diff(prop[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_md = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(prop[state].index.values[-1])
).days
# Set all values to current value.
current = [prop[state].values[-1]] * mob_samples
new_md_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_md):
# SCENARIO MODELLING
# This code chunk allows you to manually set the distancing params for a state for scenario modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (n_forecast + extra_days_md)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.05 * (mu_overall - current), std_diffs
)
current = (
current
+ p_force * trend_force
+ (1 - p_force) * regression_to_baseline_force
) # Balance forces
# current = current+p_force*trend_force # Balance forces
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(prop[state].values[-42:-28])
mu_baseline = np.mean(prop[state].values[-42:-28], axis=0)
mu_current = prop[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_md
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(prop[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_md - i) / (
n_forecast + extra_days_md
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(prop[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_md_forecast.append(current)
md_sims = np.vstack(new_md_forecast) # Put forecast days together
md_sims = np.minimum(1, md_sims)
md_sims = np.maximum(0, md_sims)
dd_md = [
prop[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_md + 1)
]
## currently not forecasting masks; this may return in the future but will need to be assessed.
# forecast mask wearing compliance
# Get a baseline value of mask wearing
mu_overall = np.mean(masks[state].values[-n_baseline:])
md_diffs = np.diff(masks[state].values[-n_training:])
mu_diffs = np.mean(md_diffs)
std_diffs = np.std(md_diffs)
extra_days_masks = (
pd.to_datetime(df_google.date.values[-1])
- pd.to_datetime(masks[state].index.values[-1])
).days
# Set all values to current value.
current = [masks[state].values[-1]] * mob_samples
new_masks_forecast = []
# Forecast mobility forward sequentially by day.
for i in range(n_forecast + extra_days_masks):
# SCENARIO MODELLING
# This code chunk allows you to manually set the distancing params for a state for scenario modelling.
if scenarios[state] == "":
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_diffs, size=mob_samples)
# Generate realisations that draw closer to baseline
# regression_to_baseline_force = np.random.normal(0.05*(mu_overall - current), std_diffs)
# current = current + p_force*trend_force + (1-p_force)*regression_to_baseline_force # Balance forces
current = current + trend_force
elif scenarios[state] != "":
current = np.array(current)
# Make baseline cov for generating points
std_baseline = np.std(masks[state].values[-42:-28])
mu_baseline = np.mean(masks[state].values[-42:-28], axis=0)
mu_current = masks[state].values[-1]
if scenario_dates[state] != "":
scenario_change_point = (
pd.to_datetime(scenario_dates[state]) - data_date
).days + extra_days_masks
# Constant Lockdown
if (
scenarios[state] == "no_reversion"
or scenarios[state] == "school_opening_2022"
):
# use only more recent data to forecast under a no-reversion scenario
# std_lockdown = np.std(masks[state].values[-24:-4])
# current = np.random.normal(mu_current, std_lockdown)
current = np.random.normal(mu_current, std_baseline)
# No Lockdown
elif scenarios[state] == "full_reversion":
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# Proportion of trend_force to regression_to_baseline_force
p_force = (n_forecast + extra_days_masks - i) / (
n_forecast + extra_days_masks
)
# take a mean of the differences over the last 2 weeks
mu_diffs = np.mean(np.diff(masks[state].values[-14:]))
# Generate step realisations in training trend direction
trend_force = np.random.normal(mu_diffs, std_baseline)
# Generate realisations that draw closer to baseline
regression_to_baseline_force = np.random.normal(
0.005 * (mu_baseline_0 - current), std_baseline
)
current = current + regression_to_baseline_force # Balance forces
elif scenarios[state] == "immediately_baseline":
# this scenario is an immediate return to baseline values
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
mu_baseline_0 = 0.2
# jump immediately to baseline
current = np.random.normal(mu_baseline_0, std_baseline)
# Temporary Lockdown
elif scenarios[state] == "half_reversion": # No Lockdown
if i < scenario_change_point:
current = np.random.normal(mu_current, std_baseline)
else:
# Revert to values halfway between the before and after
current = np.random.normal(
(mu_current + mu_baseline) / 2, std_baseline
)
new_masks_forecast.append(current)
masks_sims = np.vstack(new_masks_forecast) # Put forecast days together
masks_sims = np.minimum(1, masks_sims)
masks_sims = np.maximum(0, masks_sims)
dd_masks = [
masks[state].index[-1] + timedelta(days=x)
for x in range(1, n_forecast + extra_days_masks + 1)
]
# Forecasting vaccine effect
# if state == "WA":
# last_fit_date = pd.to_datetime(third_end_date)
# else:
last_fit_date = pd.to_datetime(third_date_range[state][-1])
extra_days_vacc = (pd.to_datetime(df_google.date.values[-1]) - last_fit_date).days
total_forecasting_days = n_forecast + extra_days_vacc
# get the VE on the last day
mean_delta = vaccination_by_state_delta.loc[state][last_fit_date + timedelta(1)]
mean_omicron = vaccination_by_state_omicron.loc[state][last_fit_date + timedelta(1)]
current = np.zeros_like(mob_samples)
new_delta = []
new_omicron = []
# variance on the vaccine forecasts is equivalent to what we use in the fitting
var_vax = 0.00005
a_vax = np.zeros_like(mob_samples)
b_vax = np.zeros_like(mob_samples)
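# The VE forecast below adds Beta-distributed noise around the mean VE. For a Beta(a, b)
# with mean m and variance v, moment matching gives
#     a = m * (m * (1 - m) / v - 1)    and    b = (1 - m) * (m * (1 - m) / v - 1),
# which is how a_vax and b_vax are formed with m = mean_delta (or mean_omicron) and v = var_vax.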
for d in pd.date_range(
last_fit_date + timedelta(1),
pd.to_datetime(today) + timedelta(days=num_forecast_days),
):
mean_delta = vaccination_by_state_delta.loc[state][d]
a_vax = mean_delta * (mean_delta * (1 - mean_delta) / var_vax - 1)
b_vax = (1 - mean_delta) * (mean_delta * (1 - mean_delta) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_delta.append(current.tolist())
mean_omicron = vaccination_by_state_omicron.loc[state][d]
a_vax = mean_omicron * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
b_vax = (1 - mean_omicron) * (mean_omicron * (1 - mean_omicron) / var_vax - 1)
current = np.random.beta(a_vax, b_vax, mob_samples)
new_omicron.append(current.tolist())
vacc_sims_delta = np.vstack(new_delta)
vacc_sims_omicron = np.vstack(new_omicron)
dd_vacc = [
last_fit_date + timedelta(days=x)
for x in range(1, n_forecast + extra_days_vacc + 1)
]
for j, var in enumerate(
predictors
+ ["md_prop"]
+ ["masks_prop"]
+ ["vaccination_delta"]
+ ["vaccination_omicron"]
):
# Record data
axs = axes[j]
if (state == "AUS") and (var == "md_prop"):
continue
if var == "md_prop":
outdata["type"].extend([var] * len(dd_md))
outdata["state"].extend([state] * len(dd_md))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_md])
outdata["mean"].extend(np.mean(md_sims, axis=1))
outdata["std"].extend(np.std(md_sims, axis=1))
elif var == "masks_prop":
outdata["type"].extend([var] * len(dd_masks))
outdata["state"].extend([state] * len(dd_masks))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_masks])
outdata["mean"].extend(np.mean(masks_sims, axis=1))
outdata["std"].extend(np.std(masks_sims, axis=1))
elif var == "vaccination_delta":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_delta, axis=1))
outdata["std"].extend(np.std(vacc_sims_delta, axis=1))
elif var == "vaccination_omicron":
outdata["type"].extend([var] * len(dd_vacc))
outdata["state"].extend([state] * len(dd_vacc))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd_vacc])
outdata["mean"].extend(np.mean(vacc_sims_omicron, axis=1))
outdata["std"].extend(np.std(vacc_sims_omicron, axis=1))
else:
outdata["type"].extend([var] * len(dd))
outdata["state"].extend([state] * len(dd))
outdata["date"].extend([d.strftime("%Y-%m-%d") for d in dd])
outdata["mean"].extend(np.mean(sims[:, j, :], axis=1))
outdata["std"].extend(np.std(sims[:, j, :], axis=1))
if state in plot_states:
if var == "md_prop":
# md plot
axs[rownum, colnum].plot(prop[state].index, prop[state].values, lw=1)
axs[rownum, colnum].plot(dd_md, np.median(md_sims, axis=1), "k", lw=1)
axs[rownum, colnum].fill_between(
dd_md,
np.quantile(md_sims, 0.25, axis=1),
np.quantile(md_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "masks_prop":
# masks plot
axs[rownum, colnum].plot(masks[state].index, masks[state].values, lw=1)
axs[rownum, colnum].plot(
dd_masks, np.median(masks_sims, axis=1), "k", lw=1
)
axs[rownum, colnum].fill_between(
dd_masks,
np.quantile(masks_sims, 0.25, axis=1),
np.quantile(masks_sims, 0.75, axis=1),
color="k",
alpha=0.1,
)
elif var == "vaccination_delta":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].index,
vaccination_by_state_delta.loc[
state, ~vaccination_by_state_delta.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_delta, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_delta, 0.25, axis=1),
np.quantile(vacc_sims_delta, 0.75, axis=1),
color="C1",
alpha=0.1,
)
elif var == "vaccination_omicron":
# vaccination plot
axs[rownum, colnum].plot(
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].index,
vaccination_by_state_omicron.loc[
state, ~vaccination_by_state_omicron.loc[state].isna()
].values,
lw=1,
)
axs[rownum, colnum].plot(
dd_vacc, np.median(vacc_sims_omicron, axis=1), color="C1", lw=1
)
axs[rownum, colnum].fill_between(
dd_vacc,
np.quantile(vacc_sims_omicron, 0.25, axis=1),
np.quantile(vacc_sims_omicron, 0.75, axis=1),
color="C1",
alpha=0.1,
)
else:
# all other predictors
axs[rownum, colnum].plot(
dates, df_google[df_google["state"] == state][var].values, lw=1
)
axs[rownum, colnum].fill_between(
dates,
np.percentile(Rmed_array[:, j, :], 25, axis=1),
np.percentile(Rmed_array[:, j, :], 75, axis=1),
alpha=0.5,
)
axs[rownum, colnum].plot(dd, sims_med[:, j], color="C1", lw=1)
axs[rownum, colnum].fill_between(
dd, sims_q25[:, j], sims_q75[:, j], color="C1", alpha=0.1
)
# axs[rownum,colnum].axvline(dd[-num_forecast_days], ls = '--', color = 'black', lw=1) # plotting a vertical line at the end of the data date
# axs[rownum,colnum].axvline(dd[-(num_forecast_days+truncation_days)], ls = '-.', color='grey', lw=1) # plotting a vertical line at the forecast date
axs[rownum, colnum].set_title(state)
# plotting horizontal line at 1
axs[rownum, colnum].axhline(1, ls="--", c="k", lw=1)
axs[rownum, colnum].set_title(state)
axs[rownum, colnum].tick_params("x", rotation=90)
axs[rownum, colnum].tick_params("both", labelsize=8)
# plot the start date of the data and indicators of the data we are actually fitting to (in grey)
axs[rownum, colnum].axvline(data_date, ls="-.", color="black", lw=1)
if j < len(predictors):
axs[rownum, colnum].set_ylabel(
predictors[j].replace("_", " ")[:-5], fontsize=7
)
elif var == "md_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n micro-distancing", fontsize=7
)
elif var == "masks_prop":
axs[rownum, colnum].set_ylabel(
"Proportion of respondents\n wearing masks", fontsize=7
)
elif var == "vaccination_delta" or var == "vaccination_omicron":
axs[rownum, colnum].set_ylabel(
"Reduction in TP \n from vaccination", fontsize=7
)
# historically we want to store the higher variance mobilities
state_Rmed[state] = Rmed_array
state_sims[state] = sims
os.makedirs(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts",
exist_ok=True,
)
for i, fig in enumerate(figs):
fig.text(0.5, 0.02, "Date", ha="center", va="center", fontsize=15)
if i < len(predictors): # this plots the google mobility forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/"
+ str(predictors[i])
+ ".png",
dpi=400,
)
elif i == len(predictors): # this plots the microdistancing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/micro_dist.png",
dpi=400,
)
elif i == len(predictors) + 1: # this plots the mask wearing forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing.png",
dpi=400,
)
elif i == len(predictors) + 2: # finally this plots the delta VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/delta_vaccination.png",
dpi=400,
)
else: # finally this plots the omicron VE forecasts
fig.tight_layout()
fig.savefig(
"figs/mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/omicron_vaccination.png",
dpi=400,
)
df_out = pd.DataFrame.from_dict(outdata)
df_md = df_out.loc[df_out.type == "md_prop"]
df_masks = df_out.loc[df_out.type == "masks_prop"]
df_out = df_out.loc[df_out.type != "vaccination_delta"]
df_out = df_out.loc[df_out.type != "vaccination_omicron"]
df_out = df_out.loc[df_out.type != "md_prop"]
df_out = df_out.loc[df_out.type != "masks_prop"]
df_forecast = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["mean"]
)
df_std = pd.pivot_table(
df_out, columns=["type"], index=["date", "state"], values=["std"]
)
df_forecast_md = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_md_std = pd.pivot_table(
df_md, columns=["state"], index=["date"], values=["std"]
)
df_forecast_masks = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["mean"]
)
df_forecast_masks_std = pd.pivot_table(
df_masks, columns=["state"], index=["date"], values=["std"]
)
# align with google order in columns
df_forecast = df_forecast.reindex([("mean", val) for val in predictors], axis=1)
df_std = df_std.reindex([("std", val) for val in predictors], axis=1)
df_forecast.columns = predictors # remove the tuple name of columns
df_std.columns = predictors
df_forecast = df_forecast.reset_index()
df_std = df_std.reset_index()
df_forecast.date = pd.to_datetime(df_forecast.date)
df_std.date = pd.to_datetime(df_std.date)
df_forecast_md = df_forecast_md.reindex([("mean", state) for state in states], axis=1)
df_forecast_md_std = df_forecast_md_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_md.columns = states
df_forecast_md_std.columns = states
df_forecast_md = df_forecast_md.reset_index()
df_forecast_md_std = df_forecast_md_std.reset_index()
df_forecast_md.date = pd.to_datetime(df_forecast_md.date)
df_forecast_md_std.date = pd.to_datetime(df_forecast_md_std.date)
df_forecast_masks = df_forecast_masks.reindex(
[("mean", state) for state in states], axis=1
)
df_forecast_masks_std = df_forecast_masks_std.reindex(
[("std", state) for state in states], axis=1
)
df_forecast_masks.columns = states
df_forecast_masks_std.columns = states
df_forecast_masks = df_forecast_masks.reset_index()
df_forecast_masks_std = df_forecast_masks_std.reset_index()
df_forecast_masks.date = pd.to_datetime(df_forecast_masks.date)
df_forecast_masks_std.date = pd.to_datetime(df_forecast_masks_std.date)
df_R = df_google[["date", "state"] + mov_values + [val + "_std" for val in mov_values]]
df_R = pd.concat([df_R, df_forecast], ignore_index=True, sort=False)
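# the policy indicator flags dates on/after 2020-03-20 (the "ban" date used in the TP loop below)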
df_R["policy"] = (df_R.date >= "2020-03-20").astype("int8")
df_md = pd.concat([prop, df_forecast_md.set_index("date")])
df_masks = pd.concat([masks, df_forecast_masks.set_index("date")])
# now we read in the ve time series and create an adjusted timeseries from March 1st
# that includes no effect prior
vaccination_by_state = pd.read_csv(
results_dir + "adjusted_vaccine_ts_delta" + data_date.strftime("%Y-%m-%d") + ".csv",
parse_dates=["date"],
)
# there are a couple of NAs early in the time series, likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_delta = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, vaccination_by_state.columns[0] - timedelta(days=1), freq="d"
)
# this is just a single row of ones covering all the pre-vaccination dates
# (reused below for each of the 8 jurisdictions)
before_vacc_Reff_reduction = pd.DataFrame(np.ones((1, len(before_vacc_dates))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = [state]
# merge the vaccine data and the 1's dataframes
df_ve_delta[state] = pd.concat(
[before_vacc_Reff_reduction.loc[state].T, vaccination_by_state.loc[state].T]
)
# clip off extra days
df_ve_delta = df_ve_delta[
df_ve_delta.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_delta.to_csv(
results_dir
+ "forecasted_vaccination_delta"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
vaccination_by_state = pd.read_csv(
results_dir
+ "adjusted_vaccine_ts_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv",
parse_dates=["date"],
)
# there are a couple of NAs early in the time series, likely due to slightly different start dates
vaccination_by_state.fillna(1, inplace=True)
vaccination_by_state = vaccination_by_state[["state", "date", "effect"]]
vaccination_by_state = vaccination_by_state.pivot(
index="state", columns="date", values="effect"
) # Convert to matrix form
# initialise a complete dataframe which will be the full VE timeseries plus the forecasted VE
df_ve_omicron = pd.DataFrame()
# loop over states and get the offset components of the full VE
before_vacc_dates = pd.date_range(
start_date, pd.to_datetime(omicron_start_date) - timedelta(days=1), freq="d"
)
# this is just a single row of ones covering all the pre-Omicron dates
# (reused below for each of the 8 jurisdictions)
before_vacc_Reff_reduction = pd.DataFrame(np.ones((1, len(before_vacc_dates))))
before_vacc_Reff_reduction.columns = before_vacc_dates
for state in states:
before_vacc_Reff_reduction.index = [state]
# merge the vaccine data and the 1's dataframes
df_ve_omicron[state] = pd.concat(
[
before_vacc_Reff_reduction.loc[state].T,
vaccination_by_state.loc[state][
vaccination_by_state.loc[state].index
>= pd.to_datetime(omicron_start_date)
],
]
)
df_ve_omicron = df_ve_omicron[
df_ve_omicron.index <= pd.to_datetime(today) + timedelta(days=num_forecast_days)
]
# save the forecasted vaccination line
df_ve_omicron.to_csv(
results_dir
+ "forecasted_vaccination_omicron"
+ data_date.strftime("%Y-%m-%d")
+ ".csv"
)
print("============")
print("Plotting forecasted estimates")
print("============")
expo_decay = True
theta_md = np.tile(df_samples["theta_md"].values, (df_md["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
prop_sim = df_md[state].values
if expo_decay:
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
else:
md = 2 * expit(-1 * theta_md * prop_sim[:, np.newaxis])
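# Note: with expo_decay=True the factor is (1 + theta_md) ** (-prop), equal to 1 when the
# surveyed proportion is 0 and decaying geometrically as it rises; the alternative branch
# uses a scaled logistic, 2 * expit(-theta_md * prop).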
row = i // 2
col = i % 2
ax[row, col].plot(
df_md[state].index, np.median(md, axis=1), label="Microdistancing"
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.25, axis=1),
np.quantile(md, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_md[state].index,
np.quantile(md, 0.05, axis=1),
np.quantile(md, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_md[state].index.values[-n_forecast - extra_days_md]],
minor=True,
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of micro-distancing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/md_factor.png",
dpi=144,
)
theta_masks = np.tile(df_samples["theta_masks"].values, (df_masks["NSW"].shape[0], 1))
fig, ax = plt.subplots(figsize=(12, 9), nrows=4, ncols=2, sharex=True, sharey=True)
for i, state in enumerate(plot_states):
# np.random.normal(df_md[state].values, df_md_std.values)
masks_prop_sim = df_masks[state].values
if expo_decay:
mask_wearing_factor = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
else:
mask_wearing_factor = 2 * expit(
-1 * theta_masks * masks_prop_sim[:, np.newaxis]
)
row = i // 2
col = i % 2
ax[row, col].plot(
df_masks[state].index,
np.median(mask_wearing_factor, axis=1),
label="Microdistancing",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.25, axis=1),
np.quantile(mask_wearing_factor, 0.75, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].fill_between(
df_masks[state].index,
np.quantile(mask_wearing_factor, 0.05, axis=1),
np.quantile(mask_wearing_factor, 0.95, axis=1),
label="Microdistancing",
alpha=0.4,
color="C0",
)
ax[row, col].set_title(state)
ax[row, col].tick_params("x", rotation=45)
ax[row, col].set_xticks(
[df_masks[state].index.values[-n_forecast - extra_days_masks]], minor=True
)
ax[row, col].xaxis.grid(which="minor", linestyle="-.", color="grey", linewidth=1)
fig.text(
0.03,
0.5,
"Multiplicative effect \n of mask-wearing $M_d$",
ha="center",
va="center",
rotation="vertical",
fontsize=20,
)
fig.text(0.5, 0.04, "Date", ha="center", va="center", fontsize=20)
plt.tight_layout(rect=[0.05, 0.04, 1, 1])
fig.savefig(
"figs/"
+ "mobility_forecasts/"
+ data_date.strftime("%Y-%m-%d")
+ "_mobility_forecasts/mask_wearing_factor.png",
dpi=144,
)
df_R = df_R.sort_values("date")
# samples = df_samples.sample(n_samples) # test on sample of 2
# keep all samples
samples = df_samples.iloc[:mob_samples, :]
# for strain in ("Delta", "Omicron"):
# samples = df_samples
# flags for advanced scenario modelling
advanced_scenario_modelling = False
save_for_SA = False
# since this can be useful, predictor ordering is:
# ['retail_and_recreation_7days', 'grocery_and_pharmacy_7days', 'parks_7days', 'transit_stations_7days', 'workplaces_7days']
typ = "R_L"
forecast_type = ["R_L"]
for strain in ("Delta", "Omicron"):
print("============")
print("Calculating", strain, "TP")
print("============")
state_Rs = {
"state": [],
"date": [],
"type": [],
"median": [],
"lower": [],
"upper": [],
"bottom": [],
"top": [],
"mean": [],
"std": [],
}
ban = "2020-03-20"
# VIC and NSW allow gatherings of up to 20 people, other jurisdictions allow for
new_pol = "2020-06-01"
expo_decay = True
# start and end date for the third wave
# Subtract truncation_days to avoid right truncation
third_end_date = data_date - pd.Timedelta(days=truncation_days)
typ_state_R = {}
mob_forecast_date = df_forecast.date.min()
state_key = {
"ACT": "1",
"NSW": "2",
"NT": "3",
"QLD": "4",
"SA": "5",
"TAS": "6",
"VIC": "7",
"WA": "8",
}
total_N_p_third_omicron = 0
for v in third_date_range.values():
tmp = sum(v >= pd.to_datetime(omicron_start_date))
# only count days at or after the Omicron start date (the else 0 handles jurisdictions with no such days)
total_N_p_third_omicron += tmp if tmp > 0 else 0
state_R = {}
for (kk, state) in enumerate(states):
# df_R is sorted by date above, so for this state the rows are dates and the columns are predictors
df_state = df_R.loc[df_R.state == state]
dd = df_state.date
post_values = samples[predictors].values.T
prop_sim = df_md[state].values
# use this state's mask-wearing proportions (otherwise masks_prop_sim would be left over
# from the plotting loop above)
masks_prop_sim = df_masks[state].values
# grab vaccination data
vacc_ts_delta = df_ve_delta[state]
vacc_ts_omicron = df_ve_omicron[state]
# tile the posterior samples so theta_md/theta_masks are (n_dates, mob_samples)
theta_md = np.tile(samples["theta_md"].values, (df_state.shape[0], 1))
theta_masks = np.tile(samples["theta_masks"].values, (df_state.shape[0], 1))
r = samples["r[" + str(kk + 1) + "]"].values
tau = samples["tau[" + str(kk + 1) + "]"].values
m0 = samples["m0[" + str(kk + 1) + "]"].values
m1 = samples["m1[" + str(kk + 1) + "]"].values
# m1 = 1.0
md = ((1 + theta_md).T ** (-1 * prop_sim)).T
masks = ((1 + theta_masks).T ** (-1 * masks_prop_sim)).T
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
third_days = {k: v.shape[0] for (k, v) in third_date_range.items()}
third_days_cumulative = np.append(
[0], np.cumsum([v for v in third_days.values()])
)
vax_idx_ranges = {
k: range(third_days_cumulative[i], third_days_cumulative[i + 1])
for (i, k) in enumerate(third_days.keys())
}
third_days_tot = sum(v for v in third_days.values())
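# vax_idx_ranges maps each third-wave state to its contiguous block of rows in the
# flattened ve_delta posterior (states are concatenated in the order of third_date_range,
# hence the cumulative day counts)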
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = samples[
["ve_delta[" + str(j + 1) + "]" for j in range(third_days_tot)]
].T
vacc_tmp = sampled_vax_effects_all.iloc[vax_idx_ranges[state], :]
# now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index < third_date_range[state][0]]]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[vacc_ts_delta.loc[vacc_ts_delta.index > third_date_range[state][-1]]]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_delta = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# construct a range of dates for omicron which starts at the maximum of the start date for that state or the Omicron start date
third_omicron_date_range = {
k: pd.date_range(
start=max(v[0], pd.to_datetime(omicron_start_date)), end=v[-1]
).values
for (k, v) in third_date_range.items()
}
third_omicron_days = {
k: v.shape[0] for (k, v) in third_omicron_date_range.items()
}
third_omicron_days_cumulative = np.append(
[0], np.cumsum([v for v in third_omicron_days.values()])
)
omicron_ve_idx_ranges = {
k: range(
third_omicron_days_cumulative[i],
third_omicron_days_cumulative[i + 1],
)
for (i, k) in enumerate(third_omicron_days.keys())
}
third_omicron_days_tot = sum(v for v in third_omicron_days.values())
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = (
samples[
["ve_omicron[" + str(j + 1) + "]" for j in range(third_omicron_days_tot)]
].T
)
vacc_tmp = sampled_vax_effects_all.iloc[omicron_ve_idx_ranges[state], :]
# now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index < third_omicron_date_range[state][0]
]
]
* mob_samples,
axis=1,
).to_numpy()
vacc_ts_data_after = pd.concat(
[
vacc_ts_omicron.loc[
vacc_ts_omicron.index > third_date_range[state][-1]
]
]
* mob_samples,
axis=1,
).to_numpy()
# merge in order
vacc_ts_omicron = np.vstack(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after]
)
# setup some variables for handling the omicron starts
third_states_indices = {
state: index + 1 for (index, state) in enumerate(third_states)
}
omicron_start_day = (
pd.to_datetime(omicron_start_date) - pd.to_datetime(start_date)
).days
days_into_omicron = np.cumsum(
np.append(
[0],
[
(v >= pd.to_datetime(omicron_start_date)).sum()
for v in third_date_range.values()
],
)
)
idx = {}
kk = 0
for k in third_date_range.keys():
idx[k] = range(days_into_omicron[kk], days_into_omicron[kk + 1])
kk += 1
# tile the reduction in vaccination effect for omicron (i.e. VE is (1+r)*VE)
voc_vacc_product = np.zeros_like(vacc_ts_delta)
# calculate the voc effects
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# sample the right R_L
sim_R = samples["R_Li[" + state_key[state] + "]"].values
for n in range(mob_samples):
# add gaussian noise to predictors before forecast
# df_state.loc[
df_state.loc[df_state.date < mob_forecast_date, predictors] = (
state_Rmed[state][:, :, n] / 100
)
# add gaussian noise to predictors after forecast
df_state.loc[df_state.date >= mob_forecast_date, predictors] = (
state_sims[state][:, :, n] / 100
)
## ADVANCED SCENARIO MODELLING - USE ONLY FOR POINT ESTIMATES
# set non-grocery values to 0
if advanced_scenario_modelling:
df_state.loc[:, predictors[0]] = 0
df_state.loc[:, predictors[2]] = 0
df_state.loc[:, predictors[3]] = 0
df_state.loc[:, predictors[4]] = 0
df1 = df_state.loc[df_state.date <= ban]
X1 = df1[predictors] # N by K
md[: X1.shape[0], :] = 1
if n == 0:
# initialise arrays (logodds)
# N by K times (Nsamples by K )^T = Ndate by Nsamples
logodds = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
logodds = np.append(logodds, X2 @ post_values[:, n], axis=0)
logodds = np.append(logodds, X3 @ post_values[:, n], axis=0)
else:
# concatenate to the pre-existing logodds matrix
logodds1 = X1 @ post_values[:, n]
df2 = df_state.loc[
(df_state.date > ban) & (df_state.date < new_pol)
]
df3 = df_state.loc[df_state.date >= new_pol]
X2 = df2[predictors]
X3 = df3[predictors]
prop2 = df_md.loc[ban:new_pol, state].values
prop3 = df_md.loc[new_pol:, state].values
logodds2 = X2 @ post_values[:, n]
logodds3 = X3 @ post_values[:, n]
logodds_sample = np.append(logodds1, logodds2, axis=0)
logodds_sample = np.append(logodds_sample, logodds3, axis=0)
# concatenate to previous
logodds = np.vstack((logodds, logodds_sample))
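# after looping over all n, logodds is a (mob_samples, n_dates) matrix of linear
# predictors, one row per posterior/mobility sample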
# create a matrix of mob_samples realisations which is an indicator of the VoC (Delta right now)
# which will be 1 up until the voc_start_date and then take values from the posterior sample
voc_multiplier_alpha = samples["voc_effect_alpha"].values
voc_multiplier_delta = samples["voc_effect_delta"].values
voc_multiplier_omicron = samples["voc_effect_omicron"].values
# number of days into omicron forecast
tt = 0
# loop over days in third wave and apply the appropriate form (i.e. decay or not)
# note that in here we apply the entire sample to the vaccination data to create a days by samples array
tmp_date = pd.to_datetime("2020-03-01")
# get the correct Omicron start date
# omicron_start_date_tmp = np.maximum(
# pd.to_datetime(omicron_start_date),
# pd.to_datetime(third_date_range[state][0]),
# )
omicron_start_date_tmp = pd.to_datetime(omicron_start_date)
"""Spatial statistical tools to estimate uncertainties related to DEMs"""
from __future__ import annotations
import math as m
import multiprocessing as mp
import os
import warnings
from functools import partial
from typing import Callable, Union, Iterable, Optional, Sequence, Any
import itertools
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from numba import njit
import numpy as np
import pandas as pd
from scipy import integrate
from scipy.optimize import curve_fit
from skimage.draw import disk
from scipy.interpolate import RegularGridInterpolator, LinearNDInterpolator, griddata
from scipy.stats import binned_statistic, binned_statistic_2d, binned_statistic_dd
from geoutils.spatial_tools import subsample_raster, get_array_and_mask
from geoutils.georaster import RasterType, Raster
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import skgstat as skg
from skgstat import models
def nmad(data: np.ndarray, nfact: float = 1.4826) -> float:
"""
Calculate the normalized median absolute deviation (NMAD) of an array.
Default scaling factor is 1.4826 to scale the median absolute deviation (MAD) to the dispersion of a normal
distribution (see https://en.wikipedia.org/wiki/Median_absolute_deviation#Relation_to_standard_deviation, and
e.g. http://dx.doi.org/10.1016/j.isprsjprs.2009.02.003)
:param data: input data
:param nfact: normalization factor for the data
:returns nmad: (normalized) median absolute deviation of data.
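:examples
# A minimal illustrative example (values chosen arbitrarily): the single outlier barely moves the NMAD,
# since the median absolute deviation of [1, 2, 3, 100] is 1.0
>>> float(nmad(np.array([1., 2., 3., 100.])))
1.4826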
"""
if isinstance(data, np.ma.masked_array):
data_arr = get_array_and_mask(data, check_shape=False)[0]
else:
data_arr = np.asarray(data)
return nfact * np.nanmedian(np.abs(data_arr - np.nanmedian(data_arr)))
def interp_nd_binning(df: pd.DataFrame, list_var_names: Union[str,list[str]], statistic : Union[str, Callable[[np.ndarray],float]] = nmad,
min_count: Optional[int] = 100) -> Callable[[tuple[np.ndarray, ...]], np.ndarray]:
"""
Estimate an interpolant function for an N-dimensional binning. Preferably based on the output of nd_binning.
For more details on the input dataframe, and associated list of variable name and statistic, see nd_binning.
If a variable's data series corresponds to intervals (as in the output of nd_binning), the middle of each interval is used.
Otherwise, uses the variable as such.
Workflow of the function:
Fills the no-data present on the regular N-D binning grid with nearest neighbour from scipy.griddata, then provides an
interpolant function that linearly interpolates/extrapolates using scipy.RegularGridInterpolator.
:param df: dataframe with statistic of binned values according to explanatory variables (preferably output of nd_binning)
:param list_var_names: explanatory variable data series to select from the dataframe (containing interval or float dtype)
:param statistic: statistic to interpolate, stored as a data series in the dataframe
:param min_count: minimum number of samples to be used as a valid statistic (replaced by nodata)
:return: N-dimensional interpolant function
:examples
# Using a dataframe created from scratch
>>> df = pd.DataFrame({"var1": [1, 2, 3, 1, 2, 3, 1, 2, 3], "var2": [1, 1, 1, 2, 2, 2, 3, 3, 3], "statistic": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# In 2 dimensions, the statistic array looks like this
# array([
# [1, 2, 3],
# [4, 5, 6],
# [7, 8, 9]
# ])
>>> fun = interp_nd_binning(df, list_var_names=["var1", "var2"], statistic="statistic", min_count=None)
# Right on point.
>>> fun((2, 2))
array(5.)
# Interpolated linearly inside the 2D frame.
>>> fun((1.5, 1.5))
array(3.)
# Extrapolated linearly outside the 2D frame.
>>> fun((-1, 1))
array(-1.)
"""
# if list of variable input is simply a string
if isinstance(list_var_names,str):
list_var_names = [list_var_names]
# check that the dataframe contains what we need
for var in list_var_names:
if var not in df.columns:
raise ValueError('Variable "'+var+'" does not exist in the provided dataframe.')
statistic_name = statistic if isinstance(statistic,str) else statistic.__name__
if statistic_name not in df.columns:
raise ValueError('Statistic "' + statistic_name + '" does not exist in the provided dataframe.')
if min_count is not None and 'count' not in df.columns:
raise ValueError('Statistic "count" is not in the provided dataframe, necessary to use the min_count argument.')
if df.empty:
raise ValueError('Dataframe is empty.')
df_sub = df.copy()
# if the dataframe is an output of nd_binning, keep only the dimension of interest
if 'nd' in df_sub.columns:
df_sub = df_sub[df_sub.nd == len(list_var_names)]
# compute the middle values instead of bin interval if the variable is a pandas interval type
for var in list_var_names:
check_any_interval = [isinstance(x, pd.Interval) for x in df_sub[var].values]
if any(check_any_interval):
df_sub[var] = pd.IntervalIndex(df_sub[var]).mid.values
# otherwise, leave as is
# check that explanatory variables have valid binning values which coincide along the dataframe
df_sub = df_sub[np.logical_and.reduce([np.isfinite(df_sub[var].values) for var in list_var_names])]
if df_sub.empty:
raise ValueError('Dataframe does not contain a nd binning with the variables corresponding to the list of variables.')
# check that the statistic data series contain valid data
if all(~np.isfinite(df_sub[statistic_name].values)):
raise ValueError('Dataframe does not contain any valid statistic values.')
# remove statistic values calculated with a sample count under the minimum count
if min_count is not None:
df_sub.loc[df_sub['count'] < min_count,statistic_name] = np.nan
values = df_sub[statistic_name].values
ind_valid = np.isfinite(values)
# re-check that the statistic data series contain valid data after filtering with min_count
if all(~ind_valid):
raise ValueError("Dataframe does not contain any valid statistic values after filtering with min_count = "+str(min_count)+".")
# get a list of middle values for the binning coordinates, to define a nd grid
list_bmid = []
shape = []
for var in list_var_names:
bmid = sorted(np.unique(df_sub[var][ind_valid]))
list_bmid.append(bmid)
shape.append(len(bmid))
# griddata first to perform nearest interpolation with NaNs (irregular grid)
# valid values
values = values[ind_valid]
# coordinates of valid values
points_valid = tuple([df_sub[var].values[ind_valid] for var in list_var_names])
# grid coordinates
bmid_grid = np.meshgrid(*list_bmid, indexing='ij')
points_grid = tuple([bmid_grid[i].flatten() for i in range(len(list_var_names))])
# fill grid no data with nearest neighbour
values_grid = griddata(points_valid, values, points_grid, method='nearest')
values_grid = values_grid.reshape(shape)
# RegularGridInterpolator to perform linear interpolation/extrapolation on the grid
# (will extrapolate only outside of boundaries not filled with the nearest of griddata as fill_value = None)
interp_fun = RegularGridInterpolator(tuple(list_bmid), values_grid, method='linear', bounds_error=False, fill_value=None)
return interp_fun
def nd_binning(values: np.ndarray, list_var: Iterable[np.ndarray], list_var_names=Iterable[str], list_var_bins: Optional[Union[int,Iterable[Iterable]]] = None,
statistics: Iterable[Union[str, Callable, None]] = ['count', np.nanmedian ,nmad], list_ranges : Optional[Iterable[Sequence]] = None) \
-> pd.DataFrame:
"""
N-dimensional binning of values according to one or several explanatory variables.
Values input is a (N,) array and variable input is a list of flattened arrays of similar dimensions (N,).
For more details on the format of input variables, see documentation of scipy.stats.binned_statistic_dd.
:param values: values array (N,)
:param list_var: list (L) of explanatory variables array (N,)
:param list_var_names: list (L) of names of the explanatory variables
:param list_var_bins: count, or list (L) of counts or custom bin edges for the explanatory variables; defaults to 10 bins
:param statistics: list (X) of statistics to be computed; defaults to count, median and nmad
:param list_ranges: list (L) of minimum and maximum ranges to bin the explanatory variables; defaults to min/max of the data
:return:
"""
# we separate 1d, 2d and nd binning, because propagating statistics between different dimensional binning is not always feasible
# using scipy because it allows for several dimensional binning, while it's not straightforward in pandas
if list_var_bins is None:
list_var_bins = (10,) * len(list_var_names)
elif isinstance(list_var_bins,int):
list_var_bins = (list_var_bins,) * len(list_var_names)
# flatten the arrays if this has not been done by the user
values = values.ravel()
list_var = [var.ravel() for var in list_var]
# remove no data values
valid_data = np.logical_and.reduce([np.isfinite(values)]+[np.isfinite(var) for var in list_var])
values = values[valid_data]
list_var = [var[valid_data] for var in list_var]
statistics_name = [f if isinstance(f,str) else f.__name__ for f in statistics]
# get binned statistics in 1d: a simple loop is sufficient
list_df_1d = []
for i, var in enumerate(list_var):
df_stats_1d = pd.DataFrame()
# get statistics
for j, statistic in enumerate(statistics):
stats_binned_1d, bedges_1d = binned_statistic(var,values,statistic=statistic,bins=list_var_bins[i],range=list_ranges)[:2]
# save in a dataframe
df_stats_1d[statistics_name[j]] = stats_binned_1d
# we need to get the middle of the bins from the edges, to get the same dimension length
df_stats_1d[list_var_names[i]] = pd.IntervalIndex.from_breaks(bedges_1d,closed='left')
# report number of dimensions used
df_stats_1d['nd'] = 1
list_df_1d.append(df_stats_1d)
# get binned statistics in 2d: all possible 2d combinations
list_df_2d = []
if len(list_var)>1:
combs = list(itertools.combinations(list_var_names, 2))
for i, comb in enumerate(combs):
var1_name, var2_name = comb
# corresponding variables indexes
i1, i2 = list_var_names.index(var1_name), list_var_names.index(var2_name)
df_stats_2d = pd.DataFrame()
for j, statistic in enumerate(statistics):
stats_binned_2d, bedges_var1, bedges_var2 = binned_statistic_2d(list_var[i1],list_var[i2],values,statistic=statistic
,bins=[list_var_bins[i1],list_var_bins[i2]]
,range=list_ranges)[:3]
# get statistics
df_stats_2d[statistics_name[j]] = stats_binned_2d.flatten()
# derive interval indexes and convert bins into 2d indexes
ii1 = pd.IntervalIndex.from_breaks(bedges_var1,closed='left')
ii2 = pd.IntervalIndex.from_breaks(bedges_var2,closed='left')
df_stats_2d[var1_name] = [i1 for i1 in ii1 for i2 in ii2]
df_stats_2d[var2_name] = [i2 for i1 in ii1 for i2 in ii2]
# report number of dimensions used
df_stats_2d['nd'] = 2
list_df_2d.append(df_stats_2d)
# get binned statistics in nd, without redoing the same stats
df_stats_nd = pd.DataFrame()
if len(list_var)>2:
for j, statistic in enumerate(statistics):
stats_binned_2d, list_bedges = binned_statistic_dd(list_var,values,statistic=statistic,bins=list_var_bins,range=list_ranges)[0:2]
df_stats_nd[statistics_name[j]] = stats_binned_2d.flatten()
list_ii = []
# loop through the bin edges and create IntervalIndexes from them (to get both
for bedges in list_bedges:
list_ii.append(pd.IntervalIndex.from_breaks(bedges,closed='left'))
# create nd indexes in nd-array and flatten for each variable
iind = np.meshgrid(*list_ii)
for i, var_name in enumerate(list_var_names):
df_stats_nd[var_name] = iind[i].flatten()
# report number of dimensions used
df_stats_nd['nd'] = len(list_var_names)
# concatenate everything
list_all_dfs = list_df_1d + list_df_2d + [df_stats_nd]
df_concat = pd.concat(list_all_dfs)
# commenting for now: pd.MultiIndex can be hard to use
# df_concat = df_concat.set_index(list_var_names)
return df_concat
def create_circular_mask(shape: Union[int, Sequence[int]], center: Optional[list[float]] = None,
radius: Optional[float] = None) -> np.ndarray:
"""
Create circular mask on a raster, defaults to the center of the array and it's half width
:param shape: shape of array
:param center: center
:param radius: radius
:return:
"""
w, h = shape
if center is None: # use the middle of the image
center = (int(w / 2), int(h / 2))
if radius is None: # use the smallest distance between the center and image walls
radius = min(center[0], center[1], w - center[0], h - center[1])
# skimage disk is not inclusive (correspond to distance_from_center < radius and not <= radius)
mask = np.zeros(shape, dtype=bool)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value encountered in true_divide")
rr, cc = disk(center=center,radius=radius,shape=shape)
mask[rr, cc] = True
# manual solution
# Y, X = np.ogrid[:h, :w]
# dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)
# mask = dist_from_center < radius
return mask
def create_ring_mask(shape: Union[int, Sequence[int]], center: Optional[list[float]] = None, in_radius: float = 0.,
out_radius: Optional[float] = None) -> np.ndarray:
"""
Create ring mask on a raster, defaults to the center of the array and a circle mask of half width of the array
:param shape: shape of array
:param center: center
:param in_radius: inside radius
:param out_radius: outside radius
:return:
"""
w, h = shape
if out_radius is None:
center = (int(w / 2), int(h / 2))
out_radius = min(center[0], center[1], w - center[0], h - center[1])
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value encountered in true_divide")
mask_inside = create_circular_mask((w,h),center=center,radius=in_radius)
mask_outside = create_circular_mask((w,h),center=center,radius=out_radius)
mask_ring = np.logical_and(~mask_inside,mask_outside)
return mask_ring
def _subsample_wrapper(values: np.ndarray, coords: np.ndarray, shape: tuple[int,int] = None, subsample: int = 10000,
subsample_method: str = 'pdist_ring', inside_radius = None, outside_radius = None,
random_state: None | np.random.RandomState | np.random.Generator | int = None) -> tuple[np.ndarray, np.ndarray]:
"""
(Not used by default)
Wrapper for subsampling pdist methods
"""
nx, ny = shape
# Define state for random subsampling (to fix results during testing)
if random_state is None:
rnd = np.random.default_rng()
elif isinstance(random_state, (np.random.RandomState, np.random.Generator)):
rnd = random_state
else:
rnd = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(random_state)))
# Subsample spatially for disk/ring methods
if subsample_method in ['pdist_disk', 'pdist_ring']:
# Select random center coordinates
center_x = rnd.choice(nx, 1)[0]
center_y = rnd.choice(ny, 1)[0]
if subsample_method == 'pdist_ring':
subindex = create_ring_mask((nx, ny), center=[center_x, center_y], in_radius=inside_radius,
out_radius=outside_radius)
else:
subindex = create_circular_mask((nx, ny), center=[center_x, center_y], radius=inside_radius)
index = subindex.flatten()
values_sp = values[index]
coords_sp = coords[index, :]
else:
values_sp = values
coords_sp = coords
index = subsample_raster(values_sp, subsample=subsample, return_indices=True, random_state=rnd)
values_sub = values_sp[index[0]]
coords_sub = coords_sp[index[0], :]
return values_sub, coords_sub
def _aggregate_pdist_empirical_variogram(values: np.ndarray, coords: np.ndarray, subsample: int, shape: tuple,
subsample_method: str, gsd: float,
pdist_multi_ranges: Optional[list[float]] = None, **kwargs) -> pd.DataFrame:
"""
(Not used by default)
Aggregating subfunction of sample_empirical_variogram for pdist methods.
The pairwise differences are calculated within each subsample.
"""
# If no multi_ranges are provided, define a logical default behaviour with the pixel size and grid size
if subsample_method in ['pdist_disk', 'pdist_ring']:
if pdist_multi_ranges is None:
# Define list of ranges as exponent 2 of the resolution until the maximum range
pdist_multi_ranges = []
# We start at 10 times the ground sampling distance
new_range = gsd * 10
while new_range < kwargs.get('maxlag') / 2:
pdist_multi_ranges.append(new_range)
new_range *= 2
pdist_multi_ranges.append(kwargs.get('maxlag'))
# Define subsampling parameters
list_inside_radius, list_outside_radius = ([] for i in range(2))
binned_ranges = [0] + pdist_multi_ranges
for i in range(len(binned_ranges) - 1):
# Radiuses need to be passed as pixel sizes, dividing by ground sampling distance
outside_radius = binned_ranges[i + 1]/gsd
if subsample_method == 'pdist_ring':
inside_radius = binned_ranges[i]/gsd
else:
inside_radius = None
list_outside_radius.append(outside_radius)
list_inside_radius.append(inside_radius)
else:
# For random point selection, no need for multi-range parameters
pdist_multi_ranges = [kwargs.get('maxlag')]
list_outside_radius = [None]
list_inside_radius = [None]
# Estimate variogram with specific subsampling at multiple ranges
list_df_range = []
for j in range(len(pdist_multi_ranges)):
values_sub, coords_sub = _subsample_wrapper(values, coords, shape = shape, subsample = subsample,
subsample_method = subsample_method,
inside_radius = list_inside_radius[j],
outside_radius = list_outside_radius[j],
random_state= kwargs.get('random_state'))
if len(values_sub) == 0:
continue
df_range = _get_pdist_empirical_variogram(values=values_sub, coords=coords_sub, **kwargs)
# Aggregate runs
list_df_range.append(df_range)
df = pd.concat(list_df_range)
return df
def _get_pdist_empirical_variogram(values: np.ndarray, coords: np.ndarray, **kwargs) -> pd.DataFrame:
"""
Get empirical variogram from skgstat.Variogram object calculating pairwise distances within the sample
:param values: values
:param coords: coordinates
:return: empirical variogram (variance, lags, counts)
"""
# Remove random_state keyword argument that is not used
kwargs.pop('random_state')
# Get arguments of Variogram class init function
vgm_args = skg.Variogram.__init__.__code__.co_varnames[:skg.Variogram.__init__.__code__.co_argcount]
# Check no other argument is left to be passed
remaining_kwargs = kwargs.copy()
for arg in vgm_args:
remaining_kwargs.pop(arg, None)
if len(remaining_kwargs) != 0:
warnings.warn('Keyword arguments: '+','.join(list(remaining_kwargs.keys()))+ ' were not used.')
# Filter corresponding arguments before passing
filtered_kwargs = {k:kwargs[k] for k in vgm_args if k in kwargs}
# Derive variogram with default MetricSpace (equivalent to scipy.pdist)
V = skg.Variogram(coordinates=coords, values=values, normalize=False, fit_method=None, **filtered_kwargs)
# Get bins, empirical variogram values, and bin count
bins, exp = V.get_empirical()
count = V.bin_count
# Write to dataframe
df = | pd.DataFrame() | pandas.DataFrame |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_testing:
method extracts 1st and maximum from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_1(self):
"""
# unit test for function sa_mamm_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.022593, 0.555799, 0.010178], dtype = 'float')
expected_results_md = pd.Series([0.019298, 0.460911, 0.00376], dtype = 'float')
expected_results_lg =pd.Series([0.010471, 0.204631, 0.002715], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_2(self):
"""
# unit test for function sa_mamm_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.46206e-3, 3.103179e-2, 1.03076e-3], dtype = 'float')
expected_results_md = pd.Series([1.304116e-3, 1.628829e-2, 4.220702e-4], dtype = 'float')
expected_results_lg =pd.Series([1.0592147e-4, 1.24391489e-3, 3.74263186e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sc_mamm(self):
"""
# unit test for function sc_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.90089, 15.87995, 8.142130], dtype = 'float')
expected_results_md = pd.Series([2.477926, 13.16889, 3.008207], dtype = 'float')
expected_results_lg =pd.Series([1.344461, 5.846592, 2.172211], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.noael_mamm = pd.Series([2.5, 3.5, 0.5], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sc_mamm("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sc_mamm("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sc_mamm("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird(self):
"""
# unit test for function ld50_rg_bird (LD50ft-2 for Row/Band/In-furrow granular birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird1(self):
"""
# unit test for function ld50_rg_bird1 (LD50ft-2 for Row/Band/In-furrow granular birds)
this is a duplicate of the 'test_ld50_rg_bird' method using a more vectorized approach to the
calculations; if desired other routines could be modified similarly
--comparing this method with 'test_ld50_rg_bird' it appears (for this test) that both run in the same time
--but I don't think this would be the case when 100's of model simulation runs are executed (and only a small
--number of the application_types apply to this method; thus I conclude we continue to use the non-vectorized
--approach -- should be revisited when we have a large run to execute
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_bird(self):
"""
# unit test for function ld50_bl_bird (LD50ft-2 for broadcast liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, 33.77777, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_bird
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_bird(self):
"""
# unit test for function ld50_bg_bird (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, np.nan, 0.4214033], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Liquid',
'Broadcast-Granular'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_bird(self):
"""
# unit test for function ld50_rl_bird (LD50ft-2 for Row/Band/In-furrow liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 2.20701, 0.0363297], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_mamm(self):
"""
unittest for function at_mamm:
adjusted_toxicity = self.ld50_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([705.5036, 529.5517, 830.6143], dtype='float')
try:
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.ld50_mamm)):
result[i] = trex_empty.at_mamm(i, trex_empty.aw_mamm_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_anoael_mamm(self):
"""
unittest for function anoael_mamm:
adjusted_toxicity = self.noael_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([5.49457, 9.62821, 2.403398], dtype='float')
try:
trex_empty.noael_mamm = pd.Series([2.5, 5.0, 1.25], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.anoael_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_mamm(self):
"""
unittest for function fi_mamm:
food_intake = (0.621 * (aw_mamm ** 0.564)) / (1 - mf_w_mamm)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([3.17807, 16.8206, 42.28516], dtype='float')
try:
trex_empty.mf_w_mamm_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_mamm(trex_empty.aw_mamm_sm, trex_empty.mf_w_mamm_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_mamm(self):
"""
# unit test for function ld50_bl_mamm (LD50ft-2 for broadcast liquid)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='',
verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_mamm(self):
"""
# unit test for function ld50_bg_mamm (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Granular',
'Broadcast-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_mamm(self):
"""
# unit test for function ld50_rl_mamm (LD50ft-2 for Row/Band/In-furrow liquid mammals)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 0.6119317, 0.0024497], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Granular',
'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid',], dtype='object')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
result = trex_empty.ld50_rl_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_mamm(self):
"""
# unit test for function ld50_rg_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([33.9737, 7.192681, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid',], dtype='object')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_diet_max(self):
"""
combined unit test for methods eec_diet_max & eec_diet_timeseries;
* this test calls eec_diet_max, which in turn calls eec_diet_timeseries (which produces
concentration timeseries), which in turn calls conc_initial and conc_timestep
* eec_diet_max processes the timeseries and extracts the maximum values
* this test tests both eec_diet_max & eec_diet_timeseries together (ok, so this violates the exact definition
* of 'unittest', get over it)
* the assertion check is that the maximum values from the timeseries match expectations
* this assumes that for the maximums to be 'as expected' then the timeseries are as well
* note: the 1st application day ('day_out') for the 2nd model simulation run is set to 0 here
* to make sure the timeseries processing works when an application occurs on 1st day of year
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([1.734, 145.3409, 0.702], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [0, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'series of app-rates and app_days do not match'
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02])
trex_empty.food_multiplier_init_sg = 15.
trex_empty.foliar_diss_hlife = pd.Series([25., 5., 45.])
result = trex_empty.eec_diet_max(trex_empty.food_multiplier_init_sg)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_dose_bird(self):
"""
unit test for function eec_dose_bird;
internal call to 'eec_diet_max' --> 'eed_diet_timeseries' --> conc_initial' and 'conc_timestep' are included;
internal call to 'fi_bird' included
unit tests of this routine include the following approach:
* this test verifies that the logic & calculations performed within the 'eec_dose_bird' are correctly implemented
* methods called inside of 'eec_dose_bird' are not retested/recalculated
* only the correct passing of variables/values is verified (calculations having been verified in previous unittests)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([7.763288, 2693.2339, 22.20837], dtype='float')
num_app_days = | pd.Series([], dtype='int') | pandas.Series |
""" This file contains a class and methods for Non-REM EEG segments
Notes:
- Analysis should output # of NaNs in the data
TO DO:
- For self.detect_spindles(), move attributes into metadata['analysis_info'] dict
- Optimize self.create_spindfs() method
- Assign NREM attributes to slots on init
- Update docstrings
- !! recalculate ISI for 2-hr blocks
- Update export for spindle_psd_i
"""
import datetime
import glob
#import joblib
import json
import os
import numpy as np
import pandas as pd
import warnings
import xlsxwriter
from mne.time_frequency import psd_array_multitaper
from scipy.signal import butter, sosfiltfilt, sosfreqz
from scipy.optimize import OptimizeWarning, curve_fit
class NREM:
""" General class for nonREM EEG segments """
def __init__(self, fname=None, fpath=None, match=None, in_num=None, epoched=False, batch=False):
""" Initialize NREM object
Parameters
----------
fname: str
filename (if loading a single dataframe)
fpath: str
absolute path to file(s) directory
match: str
string to match within the filename of all files to load (Ex: '_s2_')
in_num: str
IN number, for batch loading
epoched: bool (default: False)
whether data has been epoched (if loading a single dataframe)
        batch: bool (default: False)
whether to load all matching files from the fpath directory
"""
if batch:
self.load_batch(fpath, match, in_num)
else:
filepath = os.path.join(fpath, fname)
in_num, start_date, slpstage, cycle = fname.split('_')[:4]
self.metadata = {'file_info':{'in_num': in_num, 'fname': fname, 'path': filepath,
'sleep_stage': slpstage,'cycle': cycle} }
if epoched is True:
self.metadata['file_info']['epoch'] = fname.split('_')[4]
self.load_segment()
def load_segment(self):
""" Load eeg segment and extract sampling frequency. """
data = pd.read_csv(self.metadata['file_info']['path'], header = [0, 1], index_col = 0, parse_dates=True)
# Check cycle length against 5 minute duration minimum
cycle_len_secs = (data.index[-1] - data.index[0]).total_seconds()
self.data = data
diff = data.index.to_series().diff()[1:2]
s_freq = 1000000/diff[0].microseconds
self.metadata['file_info']['start_time'] = str(data.index[0])
self.metadata['analysis_info'] = {'s_freq': s_freq, 'cycle_len_secs': cycle_len_secs}
self.s_freq = s_freq
print('EEG successfully imported.')
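    # Illustrative sketch (not part of the class): how load_segment() recovers the sampling frequency from
    # a uniform datetime index. The 4 ms (250 Hz) index below is hypothetical, built just for the example.
    #   idx = pd.date_range('1900-01-01 00:00:00', periods=5, freq='4ms')
    #   diff = idx.to_series().diff()[1:2]
    #   s_freq = 1000000/diff.iloc[0].microseconds   # 1e6 / 4000 us -> 250.0 Hz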
def load_batch(self, fpath, match, in_num):
""" Load a batch of EEG segments & reset index from absolute to relative time
TO DO: Throw error if IN doesn't match any files in folder
"""
        if in_num is None:
            in_num = input('Please specify IN number: ')
        if match is None:
            match = input('Please specify filename string to match for batch loading (ex. \'_s2_\'): ')
# get a list of all matching files
glob_match = f'{fpath}/*{match}*'
files = glob.glob(glob_match)
# load & concatenate files into a single dataframe
data = pd.concat((pd.read_csv(file, header = [0, 1], index_col = 0, parse_dates=True, low_memory=False) for file in files)).sort_index()
# extract sampling frequency
s_freq = 1/(data.index[1] - data.index[0]).total_seconds()
# reset the index to continuous time
ind_freq = str(int(1/s_freq*1000000))+'us'
ind_start = '1900-01-01 00:00:00.000'
ind = pd.date_range(start = ind_start, periods=len(data), freq=ind_freq)
data.index = ind
# set metadata & attributes
self.metadata = {'file_info':{'in_num': in_num, 'files': files, 'dir': fpath,
'match_phrase': match},
'analysis_info':{'s_freq': s_freq} }
self.data = data
self.s_freq = s_freq
## Spindle Detection Methods ##
# make attributes
def spindle_attributes(self):
""" create attributes for spindle detection """
try:
self.channels
except AttributeError:
# create if doesn't exist
self.channels = [x[0] for x in self.data.columns]
dfs =['spfiltEEG', 'spRMS', 'spRMSmavg'] # for > speed, don't store spRMS as an attribute
[setattr(self, df, pd.DataFrame(index=self.data.index)) for df in dfs]
self.spThresholds = pd.DataFrame(index=['Mean RMS', 'Low Threshold', 'High Threshold'])
self.spindle_events = {}
self.spindle_rejects = {}
# step 1: make filter
def make_butter_sp(self, wn, order):
""" Make Butterworth bandpass filter [Parameters/Returns]"""
nyquist = self.s_freq/2
wn_arr=np.asarray(wn)
if np.any(wn_arr <=0) or np.any(wn_arr >=1):
wn_arr = wn_arr/nyquist # must remake filter for each pt bc of differences in s_freq
self.sp_sos = butter(order, wn_arr, btype='bandpass', output='sos')
print(f"Zero phase butterworth filter successfully created: order = {order}x{order} bandpass = {wn}")
# step 2: filter channels
def spfilt(self, i):
""" Apply Butterworth bandpass to signal by channel """
# separate NaN and non-NaN values to avoid NaN filter output on cleaned data
data_nan = self.data[i][self.data[i]['Raw'].isna()]
data_notnan = self.data[i][self.data[i]['Raw'].isna() == False]
# filter notNaN data & add column to notNaN df
data_notnan_filt = sosfiltfilt(self.sp_sos, data_notnan.to_numpy(), axis=0)
data_notnan['Filt'] = data_notnan_filt
# merge NaN & filtered notNaN values, sort on index
filt_chan = data_nan['Raw'].append(data_notnan['Filt']).sort_index()
# add channel to main dataframe
self.spfiltEEG[i] = filt_chan
# steps 3-4: calculate RMS & smooth
def rms_smooth(self, i, sp_mw):
""" Calculate moving RMS (rectify) & smooth the EEG """
mw = int(sp_mw*self.s_freq) # convert moving window size from seconds to samples
# convolve for rolling RMS
datsq = np.power(self.spfiltEEG[i], 2)
window = np.ones(mw)/float(mw)
# convolution mode 'valid' will remove edge effects, but also introduce a time shift
        # and downstream errors because it changes the length of the RMS data
rms = np.sqrt(np.convolve(datsq, window, 'same'))
#spinfilt_RMS = pd.DataFrame(rms, index=self.data.index) --> add this back for > speed
self.spRMS[i] = rms # for > speed, don't store spinfilt_RMS[i] as an attribute
# smooth with moving average
rms_avg = self.spRMS[i].rolling(mw, center=True).mean()
self.spRMSmavg[i] = rms_avg
# step 5: set thresholds
def set_thres(self, i):
""" set spindle detection threshold levels, in terms of multiples of RMS SD """
mean_rms = float(np.mean(self.spRMSmavg[i]))
det_lo = float(mean_rms + self.metadata['spindle_analysis']['sp_loSD']*np.std(self.spRMSmavg[i]))
det_hi = float(mean_rms + self.metadata['spindle_analysis']['sp_hiSD']*np.std(self.spRMSmavg[i]))
self.spThresholds[i] = [mean_rms, det_lo, det_hi]
# step 6: detect spindles
def get_spindles(self, i, min_sep):
# vectorize data for detection looping
lo, hi = self.spThresholds[i]['Low Threshold'], self.spThresholds[i]['High Threshold']
mavg_varr, mavg_iarr = np.asarray(self.spRMSmavg[i]), np.asarray(self.spRMSmavg[i].index)
# initialize spindle event list & set pointer to 0
#self.spindle_events[i] = []
spindle_events = []
x=0
while x < len(self.data):
# if value crosses high threshold, start a fresh spindle
if mavg_varr[x] >= hi:
spindle = []
# count backwards to find previous low threshold crossing
for h in range(x, -1, -1):
# if a nan is encountered before the previous low crossing, break
if np.isnan(mavg_varr[h]):
break
elif mavg_varr[h] >= lo:
spindle.insert(0, mavg_iarr[h]) # add value to the beginning of the spindle
else:
break
# count forwards to find next low threshold crossing
for h in range(x+1, len(self.data), 1):
# if a nan is encountered before the next low crossing, break
if np.isnan(mavg_varr[h]):
break
# if above low threshold, add to current spindle
                    elif mavg_varr[h] >= lo and h < (len(self.data)-1):
spindle.append(mavg_iarr[h])
# if above low threshold and last value OR if nan, add to current spindle and add spindle to events list
                    elif (mavg_varr[h] >= lo and h == (len(self.data)-1)) or np.isnan(mavg_varr[h]): ## untested
spindle.append(mavg_iarr[h])
spindle_events.append(spindle)
#self.spindle_events[i].append(spindle)
# otherwise finish spindle & add to spindle events list
elif mavg_varr[h] < lo:
spindle_events.append(spindle)
#self.spindle_events[i].append(spindle)
break
# advance the pointer to the end of the spindle
x = h
# if value doesn't cross high threshold, advance
else:
x += 1
# combine spindles less than min_sep
spindle_events_msep = []
x = 0
while x < len(spindle_events)-1:
# if the following spindle is less than min_sep away
if (spindle_events[x+1][0] - spindle_events[x][-1])/np.timedelta64(1, 's') < min_sep:
# combine the two, append to list, and advance pointer by two
spindle_comb = spindle_events[x] + spindle_events[x+1]
spindle_events_msep.append(spindle_comb)
x += 2
else:
# otherwise, append spindle to list, advance pointer by 1
spindle_events_msep.append(spindle_events[x])
x += 1
self.spindle_events[i] = spindle_events_msep
# step 7: apply rejection criteria
def reject_spins(self, min_chans_r, min_chans_d, duration):
""" Reject spindles that occur over fewer than 3 channels. Apply duration thresholding to
spindles that occur over fewer than X channels.
[chans < min_chans_r = reject; min_chans_r < chans < min_chans_d = apply max/min duration threshold; X < chans = apply max duration threshold]
Parameters
----------
min_chans_r: int
            minimum number of channels for spindles to occur across concurrently to bypass
automatic rejection
min_chans_d: int
minimum number of channels for spindles to occur across concurrently in order to
bypass duration criterion. performs best at 1/4 of total chans
duration: list of float
duration range (seconds) for spindle thresholding
Returns
-------
modified self.spindle_events and self.spindle_rejects attributes
"""
# convert duration from seconds to samples
sduration = [x*self.s_freq for x in duration]
# make boolean mask for spindle presence
spin_bool = pd.DataFrame(index = self.data.index)
for chan in self.spindle_events:
if chan not in ['EOG_L', 'EOG_R', 'EKG']:
spins_flat = [time for spindle in self.spindle_events[chan] for time in spindle]
spin_bool[chan] = np.isin(self.data.index.values, spins_flat)
spin_bool['chans_present'] = spin_bool.sum(axis=1)
# check individual spindles
for chan in self.spindle_events:
self.spindle_rejects[chan] = []
            # iterate over a copy, since spindles may be removed from the original list below
            for spin in self.spindle_events[chan][:]:
# reject if present over less than min_chans_r channels
if not np.any(spin_bool['chans_present'].loc[spin] >= min_chans_r):
self.spindle_rejects[chan].append(spin)
self.spindle_events[chan].remove(spin)
# Apply duration threshold if not present over more than minimum # of channels
elif not np.any(spin_bool['chans_present'].loc[spin] >= min_chans_d):
# apply duration thresholding
if not sduration[0] <= len(spin) <= sduration[1]:
self.spindle_rejects[chan].append(spin)
self.spindle_events[chan].remove(spin)
# Apply max duration threshold to all spindles left (regardless of # of chans)
else:
if len(spin) > sduration[1]:
self.spindle_rejects[chan].append(spin)
self.spindle_events[chan].remove(spin)
# set multiIndex
def spMultiIndex(self):
""" combine dataframes into a multiIndex dataframe"""
# reset column levels
self.spfiltEEG.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('Filtered'), len(self.channels))],names=['Channel','datatype'])
self.spRMS.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('RMS'), len(self.channels))],names=['Channel','datatype'])
self.spRMSmavg.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('RMSmavg'), len(self.channels))],names=['Channel','datatype'])
# list df vars for index specs
dfs =[self.spfiltEEG, self.spRMS, self.spRMSmavg] # for > speed, don't store spinfilt_RMS as an attribute
calcs = ['Filtered', 'RMS', 'RMSmavg']
lvl0 = np.repeat(self.channels, len(calcs))
lvl1 = calcs*len(self.channels)
# combine & custom sort
self.spindle_calcs = pd.concat(dfs, axis=1).reindex(columns=[lvl0, lvl1])
def detect_spindles(self, wn=[8, 16], order=4, sp_mw=0.2, loSD=0, hiSD=1.5, min_sep=0.2, duration=[0.5, 3.0], min_chans_r=3, min_chans_d=9):
""" Detect spindles by channel [Params/Returns]
Parameters
----------
        min_sep: float (default: 0.2)
minimum separation (in seconds) for spindles to be considered distinct, otherwise combine
Returns
-------
"""
self.metadata['spindle_analysis'] = {'sp_filtwindow': wn, 'sp_filtorder_half': order,
'sp_RMSmw': sp_mw, 'sp_loSD': loSD, 'sp_hiSD': hiSD, 'min_sep': min_sep, 'sp_duration': duration,
'sp_minchans_toskipautoreject': min_chans_r, 'sp_minchans_toskipduration': min_chans_d}
#self.s_freq = self.metadata['analysis_info']['s_freq']
# set attributes
self.spindle_attributes()
# Make filter
self.make_butter_sp(wn, order)
print('Detecting spindles...')
# loop through channels (all channels for plotting ease)
for i in self.channels:
# if i not in ['EOG_L', 'EOG_R', 'EKG']:
#print(f'Detecting spindles on {i}...')
# Filter
self.spfilt(i)
# Calculate RMS & smooth
self.rms_smooth(i, sp_mw)
# Set detection thresholds
self.set_thres(i)
# Detect spindles
self.get_spindles(i, min_sep)
# Apply rejection criteria
print('Pruning spindle detections...')
self.reject_spins(min_chans_r, min_chans_d, duration)
print('Spindle detection complete.')
# combine dataframes
print('Combining dataframes...')
self.spMultiIndex()
print('done.\n')
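    # A minimal usage sketch for the detection pipeline above (the filename and directory are
    # hypothetical; they assume a CSV named '<in_num>_<date>_<stage>_<cycle>.csv' as expected
    # by load_segment):
    #   n = NREM(fname='EX001_2020-01-01_s2_cycle1.csv', fpath='/path/to/segments')
    #   n.detect_spindles(wn=[8, 16], order=4, sp_mw=0.2, loSD=0, hiSD=1.5, min_sep=0.2)
    #   n.spindle_events   # dict of detected spindle timestamps, keyed by channel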
def create_spindfs(self, zmethod, trough_dtype, buff, buffer_len):
""" Create individual dataframes for individual spindles +/- a timedelta buffer
** NOTE: buffer doesn't have spinso filter incorporated
Parameters
----------
zmethod: str (default: 'trough')
method used to assign 0-center to spindles [options: 'trough', 'middle']. Trough assigns zero-center to
the deepest negative trough. Middle assigns zero center to the midpoint in time.
trough_dtype: str (default: 'spfilt')
Which data to use for picking the most negative trough for centering [options: 'Raw', 'spfilt']
buff: bool (default: False)
calculate spindle dataframes with buffer
buffer_len: int
length in seconds of buffer to calculate around 0-center of spindle
self.spindle_events: dict
dict of timestamps when spindles occur (created from self.detect_spindles())
self.data: pd.DataFrame
df containing raw EEG data
Returns
-------
self.spindles: nested dict of dfs
nested dict with spindle data by channel {channel: {spindle_num:spindle_data}}
self.spindles_wbuffer: nested dict of dfs
nested dict with spindle data w/ timedelta buffer by channel {channel: {spindle_num:spindle_data}}
"""
## create dict of dataframes for spindle analysis
print('Creating individual spindle dataframes...')
self.metadata['spindle_analysis']['zmethod'] = zmethod
self.metadata['spindle_analysis']['trough_datatype'] = trough_dtype
spindles = {}
for chan in self.spindle_events.keys():
spindles[chan] = {}
for i, spin in enumerate(self.spindle_events[chan]):
# create individual df for each spindle
spin_data = self.data[chan]['Raw'].loc[self.spindle_events[chan][i]]
spfilt_data = self.spfiltEEG[chan]['Filtered'].loc[self.spindle_events[chan][i]]
# try:
# spsofilt_data = self.spsofiltEEG[chan]['Filtered'].loc[self.spindle_events[chan][i]]
# # skip spsofilt if not yet calculated (if SO detections haven't been performed)
# except AttributeError:
# pass
# set new index so that each spindle is centered around zero
if zmethod == 'middle':
# this method could use some work
half_length = len(spin)/2
t_id = np.linspace(-half_length, half_length, int(2*half_length//1))
# convert from samples to ms
id_ms = t_id * (1/self.metadata['analysis_info']['s_freq']*1000)
elif zmethod == 'trough' and trough_dtype == 'Raw':
id_ms = (spin_data.index - spin_data.idxmin()).total_seconds()*1000
elif zmethod == 'trough' and trough_dtype == 'spfilt':
id_ms = (spfilt_data.index - spfilt_data.idxmin()).total_seconds()*1000
# create new dataframe
spindles[chan][i] = pd.DataFrame(index=id_ms)
spindles[chan][i].index = [int(x) for x in spindles[chan][i].index]
spindles[chan][i].index.name='id_ms'
spindles[chan][i]['time'] = spin_data.index
spindles[chan][i]['Raw'] = spin_data.values
spindles[chan][i]['spfilt'] = spfilt_data.values
try:
                    spindles[chan][i]['spsofilt'] = spsofilt_data.values
# skip spsofilt if not yet calculated (if SO detections haven't been performed)
except NameError:
pass
self.spindles = spindles
print('Spindle dataframes created. Spindle data stored in obj.spindles.')
if buff:
# now make buffered dataframes
print(f'Creating spindle dataframes with {buffer_len}s buffer...')
spindles_wbuffer = {}
for chan in self.spindles.keys():
spindles_wbuffer[chan] = {}
for i in self.spindles[chan].keys():
# get +/- buffer length from zero-center of spindle
start = self.spindles[chan][i]['time'].loc[0] - pd.Timedelta(seconds=buffer_len)
end = self.spindles[chan][i]['time'].loc[0] + pd.Timedelta(seconds=buffer_len)
spin_buffer_data = self.data[chan]['Raw'].loc[start:end]
# assign the delta time index
id_ms = (spin_buffer_data.index - self.spindles[chan][i]['time'].loc[0]).total_seconds()*1000
# create new dataframe
spindles_wbuffer[chan][i] = pd.DataFrame(index=id_ms)
spindles_wbuffer[chan][i].index = [int(x) for x in spindles_wbuffer[chan][i].index]
spindles_wbuffer[chan][i].index.name='id_ms'
spindles_wbuffer[chan][i]['time'] = spin_buffer_data.index
spindles_wbuffer[chan][i]['Raw'] = spin_buffer_data.values
self.spindles_wbuffer = spindles_wbuffer
print('Spindle dataframes with buffer stored in obj.spindles_wbuffer.')
def calc_spindle_means(self):
""" Calculate mean, std, and sem at each timedelta from negative spindle peak per channel
Returns
-------
self.spindle_means: nested dict
dictionary of raw and filtered spindle means by channel
format: {'Raw':{channel:pd.DataFrame}}, 'spfilt':{channel:pd.DataFrame}}
"""
print('Aligning spindles...')
        # align spindles according to timedelta & combine into single dataframe
spindle_aggregates = {}
datatypes = ['Raw', 'spfilt']
for chan in self.spindles.keys():
# only use channels that have spindles
if self.spindles[chan]:
spindle_aggregates[chan] = {}
for datatype in datatypes:
# set the base df
agg_df = pd.DataFrame(self.spindles[chan][0][datatype])
agg_df = agg_df.rename(columns={datatype:'spin_0'})
rsuffix = list(range(1, len(self.spindles[chan])))
# join on the index for each spindle
agg_df = agg_df.join([self.spindles[chan][x][datatype].rename('spin_'+str(x)) for x in rsuffix], how='outer')
spindle_aggregates[chan][datatype] = agg_df
print('Calculating spindle statistics...')
# create a new multiindex dataframe for calculations
spindle_means = {}
calcs = ['count', 'mean', 'std' ,'sem']
tuples = [(chan, calc) for chan in spindle_aggregates.keys() for calc in calcs]
columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])
for datatype in datatypes:
spindle_means[datatype] = pd.DataFrame(columns=columns)
# fill the dataframe
for chan in spindle_aggregates.keys():
spindle_means[datatype][(chan, 'count')] = spindle_aggregates[chan][datatype].notna().sum(axis=1)
spindle_means[datatype][(chan, 'mean')] = spindle_aggregates[chan][datatype].mean(axis=1)
spindle_means[datatype][(chan, 'std')] = spindle_aggregates[chan][datatype].std(axis=1)
spindle_means[datatype][(chan, 'sem')] = spindle_aggregates[chan][datatype].sem(axis=1)
self.spindle_aggregates = spindle_aggregates
self.spindle_means = spindle_means
        print('Done. Spindles aggregated by channel in obj.spindle_aggregates dict. Spindle statistics stored in obj.spindle_means dataframe.\n')
def calc_spindle_buffer_means(self):
""" Calculate mean, std, and sem at each timedelta from negative spindle peak per channel
NOTE: This needs to be updated to include datatype parameter to stay aligned with calc_spin_means
Also fix the join command for speed (see above)
"""
print('Aligning spindles...')
        # align spindles according to timedelta & combine into single dataframe
spindle_buffer_aggregates = {}
for chan in self.spindles.keys():
# only use channels that have spindles
if self.spindles_wbuffer[chan]:
# set the base df
agg_df = pd.DataFrame(self.spindles_wbuffer[chan][0]['Raw'])
rsuffix = list(range(1, len(self.spindles_wbuffer[chan])))
# join on the index for each spindle
                agg_df = agg_df.join([self.spindles_wbuffer[chan][x]['Raw'].rename('spin_'+str(x)) for x in rsuffix], how='outer')
                spindle_buffer_aggregates[chan] = agg_df
print('Calculating statistics...')
# create a new multiindex dataframe for calculations
calcs = ['mean', 'std' ,'sem']
tuples = [(chan, calc) for chan in spindle_buffer_aggregates.keys() for calc in calcs]
columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])
spindle_buffer_means = pd.DataFrame(columns=columns)
# fill the dataframe
for chan in spindle_buffer_aggregates.keys():
spindle_buffer_means[(chan, 'mean')] = spindle_buffer_aggregates[chan].mean(axis=1)
spindle_buffer_means[(chan, 'std')] = spindle_buffer_aggregates[chan].std(axis=1)
spindle_buffer_means[(chan, 'sem')] = spindle_buffer_aggregates[chan].sem(axis=1)
self.spindle_buffer_aggregates = spindle_buffer_aggregates
self.spindle_buffer_means = spindle_buffer_means
        print('Done. Spindles aggregated by channel in obj.spindle_buffer_aggregates dict. Spindle statistics stored in obj.spindle_buffer_means dataframe.')
def calc_spin_tstats(self, spin_range):
""" calculate time-domain spindle feature statistics
Parameters
----------
spin_range: list of int
spindle frequency range to be used for calculating center frequency
Returns
-------
self.spindle_tstats: pd.DataFrame
MultiIndex dataframe with calculated spindle time statistics
"""
print('Calculating spindle time-domain statistics...')
# create multi-index dataframe
# lvl1 = ['Count', 'Duration', 'Duration', 'Amplitude_raw', 'Amplitude_raw', 'Amplitude_spfilt', 'Amplitude_spfilt', 'Density', 'ISI', 'ISI', 'Power', 'Power']
# lvl2 = ['total', 'mean', 'sd', 'rms', 'sd', 'rms', 'sd', 'spin_per_min', 'mean', 'sd', 'center_freq', 'total_pwr']
lvl1 = ['Count', 'Duration', 'Duration', 'Amplitude_raw', 'Amplitude_raw', 'Amplitude_spfilt', 'Amplitude_spfilt', 'Density', 'ISI', 'ISI']
lvl2 = ['total', 'mean', 'sd', 'rms', 'sd', 'rms', 'sd', 'spin_per_min', 'mean', 'sd']
columns = pd.MultiIndex.from_arrays([lvl1, lvl2])
spindle_stats = pd.DataFrame(columns=columns)
#exclude non-EEG channels
exclude = ['EOG_L', 'EOG_R', 'EKG']
# fill dataframe
for chan in self.spindles:
if chan not in exclude:
# calculate spindle count
count = len(self.spindles[chan])
if count == 0:
                    spindle_stats.loc[chan] = [count, None, None, None, None, None, None, None, None, None]
else:
# calculate spindle duration
durations = np.array([(self.spindles[chan][spin].time.iloc[-1] - self.spindles[chan][spin].time.iloc[0]).total_seconds() for spin in self.spindles[chan]])
duration_mean = durations.mean()
duration_sd = durations.std()
# calculate amplitude
amplitudes_raw = np.concatenate([self.spindles[chan][x].Raw.values for x in self.spindles[chan]])
amp_rms_raw = np.sqrt(np.array([x**2 for x in amplitudes_raw]).mean())
amp_sd_raw = amplitudes_raw.std()
amplitudes_spfilt = np.concatenate([self.spindles[chan][x].spfilt.values for x in self.spindles[chan]])
amp_rms_spfilt = np.sqrt(np.array([x**2 for x in amplitudes_spfilt]).mean())
amp_sd_spfilt = amplitudes_spfilt.std()
# calculate density
density = count/((self.data.index[-1] - self.data.index[0]).total_seconds()/60)
# calculate inter-spindle-interval (ISI) --> NOT ACCURATE FOR 2HR BLOCKS
isi_arr = np.array([(self.spindles[chan][x+1].time.iloc[0] - self.spindles[chan][x].time.iloc[-1]).total_seconds() for x in self.spindles[chan] if x < len(self.spindles[chan])-1])
isi_mean = isi_arr.mean()
isi_sd = isi_arr.std()
# calculate center frequency & total spindle power
# spindle_power = self.spindle_psd_norm[chan]['normed_pwr'][(self.spindle_psd[chan].index >= spin_range[0]) & (self.spindle_psd[chan].index <= spin_range[1])]
# center_freq = spindle_power.idxmax()
# total_pwr = spindle_power.sum()
spindle_stats.loc[chan] = [count, duration_mean, duration_sd, amp_rms_raw, amp_sd_raw, amp_rms_spfilt, amp_sd_spfilt, density, isi_mean, isi_sd]
# spindle_stats.loc[chan] = [count, duration_mean, duration_sd, amp_rms_raw, amp_sd_raw, amp_rms_spfilt, amp_sd_spfilt, density, isi_mean, isi_sd, center_freq, total_pwr]
self.spindle_tstats = spindle_stats
print('Spindle time stats stored in obj.spindle_tstats.\n')
def calc_spindle_psd_concat(self, psd_bandwidth):
""" Calculate multitaper power spectrum of concated spindles for each channel
Params
------
bandwidth: float
frequency resolution in Hz
Returns
-------
self.spindle_psd: dict
format {channel: pd.Series} with index = frequencies and values = power (uV^2/Hz)
self.spindle_multitaper_calcs: pd.DataFrame
calculations used to calculated multitaper power spectral estimates for each channel
"""
print('Calculating power spectra (this may take a few minutes)...')
self.metadata['spindle_analysis']['psd_dtype'] = 'raw_concat'
self.metadata['spindle_analysis']['psd_method'] = 'multitaper'
self.metadata['spindle_analysis']['psd_bandwidth'] = psd_bandwidth
sf = self.metadata['analysis_info']['s_freq']
spindle_psd = {}
spindle_multitaper_calcs = pd.DataFrame(index=['data_len', 'N', 'W', 'NW', 'K'])
for chan in self.spindles:
#print(f'Calculating spectra for {chan}...')
if len(self.spindles[chan]) > 0:
# concatenate spindles
spindles = [self.spindles[chan][x].Raw.values for x in self.spindles[chan]]
data = np.concatenate(spindles)
# record PS params [K = 2NW-1]
N = len(data)/sf
W = psd_bandwidth
K = int((2*N*W)-1)
spindle_multitaper_calcs[chan] = [len(data), N, W, N*W, K]
# calculate power spectrum
pwr, freqs = psd_array_multitaper(data, sf, adaptive=True, bandwidth=psd_bandwidth, fmax=25,
normalization='full', verbose=0)
# convert to series & add to dict
psd = pd.Series(pwr, index=freqs)
spindle_psd[chan] = psd
self.spindle_multitaper_calcs = spindle_multitaper_calcs
self.spindle_psd_concat = spindle_psd
print('Done. Spectra stored in obj.spindle_psd_concat. Calculations stored in obj.spindle_multitaper_calcs.\n')
def calc_gottselig_norm(self, norm_range):
""" calculated normalized spindle power on EEG channels (from Gottselig et al., 2002). works with
calc_spindle_psd_concat.
TO DO: change p0 value if optimize warning
Parameters
----------
norm_range: list of tuple
frequency ranges for gottselig normalization
Returns
-------
self.spindle_psd_concat_norm: nested dict
format {chan: pd.Series(normalized power, index=frequency)}
"""
print('Calculating Gottselig normalization...')
def exponential_func(x, a, b, c):
return a*np.exp(-b*x)+c
self.metadata['spindle_analysis']['gottselig_range'] = norm_range
exclude = ['EOG_L', 'EOG_R', 'EKG']
spindle_psd_norm = {}
chans_norm_failed = []
for chan in self.spindle_psd:
if chan not in exclude:
spindle_psd_norm[chan] = {}
# specify data to be fit (only data in norm range)
incl_freqs = np.logical_or(((self.spindle_psd[chan].index >= norm_range[0][0]) & (self.spindle_psd[chan].index <= norm_range[0][1])),
((self.spindle_psd[chan].index >= norm_range[1][0]) & (self.spindle_psd[chan].index <= norm_range[1][1])))
pwr_fit = self.spindle_psd[chan][incl_freqs]
# set x and y values (convert y to dB)
x_pwr_fit = pwr_fit.index
y_pwr_fit = 10 * np.log10(pwr_fit.values)
# fit exponential -- try second fit line if first throws infinite covariance
with warnings.catch_warnings():
warnings.simplefilter("error", OptimizeWarning)
try:
popt, pcov = curve_fit(exponential_func, xdata=x_pwr_fit, ydata=y_pwr_fit, p0=(1, 0, 1))
except (OptimizeWarning, RuntimeError):
try:
popt, pcov = curve_fit(exponential_func, xdata=x_pwr_fit, ydata=y_pwr_fit, p0=(1, 1e-6, 1))
except (OptimizeWarning, RuntimeError):
popt = np.full(3, np.nan)
chans_norm_failed.append(chan)
print(f'scipy.optimize.curvefit encountered RuntimeError on channel {chan}. Normalization skipped for this channel.')
pass
xx = self.spindle_psd[chan].index
yy = exponential_func(xx, *popt)
# subtract the fit line
psd_norm = pd.Series(10*np.log10(self.spindle_psd[chan].values) - yy, index=self.spindle_psd[chan].index)
# save the values
spindle_psd_norm[chan]['normed_pwr'] = psd_norm
spindle_psd_norm[chan]['values_to_fit'] = pd.Series(y_pwr_fit, index=x_pwr_fit)
spindle_psd_norm[chan]['exp_fit_line'] = pd.Series(yy, index=xx)
self.spindle_psd_concat_norm = spindle_psd_norm
self.metadata['spindle_analysis']['chans_concat_norm_failed'] = chans_norm_failed
print('Gottselig normalization data stored in obj.spindle_psd_concat_norm.\n')
def calc_spindle_psd_i(self, psd_bandwidth, zpad=False, zpad_len=3):
""" Calculate multitaper power spectrum for individual spindles across all channels
Params
------
bandwidth: float
frequency resolution in Hz
zpad: bool (default: False)
whether to zeropad the data (for increased spectral resolution)
zpad_len: float
length to zero-pad the data to (in seconds)
Returns
-------
self.spindles_zpad: dict
zero-padded spindle values
self.spindle_psd_i: dict
format {channel: pd.Series} with index = frequencies and values = power (uV^2/Hz)
self.spindle_multitaper_calcs: dict of pd.DataFrame
calculations used to calculated multitaper power spectral estimates for each spindle by channel
"""
print('Calculating power spectra (this may take a few minutes)...')
self.metadata['spindle_analysis']['psd_dtype'] = 'raw_individual'
self.metadata['spindle_analysis']['psd_method'] = 'multitaper'
self.metadata['spindle_analysis']['psd_bandwidth'] = psd_bandwidth
self.metadata['spindle_analysis']['zeropad'] = zpad
self.metadata['spindle_analysis']['zeropad_len_sec'] = zpad_len
sf = self.metadata['analysis_info']['s_freq']
spindles_zpad = {}
spindle_psd = {}
spindle_multitaper_calcs = {}
for chan in self.spindles:
spindles_zpad[chan] = {}
spindle_psd[chan] = {}
# waveform resolution is dependent on length of signal, regardless of zero-padding
            spindle_multitaper_calcs[chan] = pd.DataFrame(columns=['spin_samples', 'spin_seconds', 'zpad_samples', 'zpad_seconds', 'waveform_resolution_Hz',
                                                'psd_resolution_Hz', 'N_taper_len', 'W_bandwidth', 'K_tapers'])
spindle_multitaper_calcs[chan].index.name = 'spindle_num'
if len(self.spindles[chan]) > 0:
for x in self.spindles[chan]:
# subtract mean to zero-center spindle for zero-padding
data = self.spindles[chan][x].Raw.values - np.mean(self.spindles[chan][x].Raw.values)
zpad_samples=0
zpad_seconds=0
tx=0
# option to zero-pad the spindle
if zpad:
total_len = zpad_len*sf
zpad_samples = total_len - len(data)
zpad_seconds = zpad_samples/sf
if zpad_samples > 0:
padding = np.repeat(0, zpad_samples)
data_pad = np.append(data, padding)
else:
spin_len = len(data)/sf
print(f'Spindle {chan}:{x} length {spin_len} seconds longer than pad length {zpad_len}')
data_pad = data
# or leave as-is
else:
data_pad = data
# record PS params [K = 2NW-1]
spin_samples = len(data)
spin_seconds = len(data)/sf
waveform_res = 1/spin_seconds
psd_res = 1/(len(data_pad)/sf)
N_taper_len = len(data_pad)/sf
W_bandwidth = psd_bandwidth
K_tapers = int((2*N_taper_len*W_bandwidth)-1)
spindle_multitaper_calcs[chan].loc[x] = [spin_samples, spin_seconds, zpad_samples, zpad_seconds, waveform_res, psd_res, N_taper_len, W_bandwidth, K_tapers]
# calculate power spectrum
try:
pwr, freqs = psd_array_multitaper(data_pad, sf, adaptive=True, bandwidth=psd_bandwidth, fmax=25,
normalization='full', verbose=0)
except ValueError:
print(f'Specified bandwidth too small for data length. Skipping spindle {chan}:{x}.')
continue
# convert to series & add to dict
psd = pd.Series(pwr, index=freqs)
spindle_psd[chan][x] = psd
spindles_zpad[chan][x] = data_pad
self.spindles_zpad = spindles_zpad
self.spindle_multitaper_calcs = spindle_multitaper_calcs
self.spindle_psd_i = spindle_psd
print('Done. \nSpectra stored in obj.spindle_psd_i. Calculations stored in obj.spindle_multitaper_calcs. Zero-padded spindle data in obj.spindles_zpad.\n')
def analyze_spindles(self, zmethod='trough', trough_dtype='spfilt', buff=False, buffer_len=3, psd_type='i', psd_bandwidth=1.0,
zpad=True, zpad_len=3.0, norm_range=[(4,6), (18, 25)], spin_range=[9, 16]):
"""
Starting code for spindle statistics/visualizations
Parameters
----------
zmethod: str (default: 'trough')
method used to assign 0-center to spindles [options: 'trough', 'middle']. Trough assigns zero-center to
the deepest negative trough. Middle assigns zero center to the midpoint in time.
trough_dtype: str (default: 'spfilt')
Which data to use for picking the most negative trough for centering [options: 'Raw', 'spfilt']
buff: bool (default: False)
calculate spindle data dataframes with a delta time buffer around center of spindle
buffer_len: int
length in seconds of buffer to calculate around 0-center of spindle
psd_type: str (default: 'i')
What data to use for psd calculations [Options: 'i' (individual spindles), 'concat' (spindles concatenated by channel)]
psd_bandwidth: float
frequency bandwidth for power spectra calculations (Hz)
        zpad: bool (default: True)
whether to zeropad the spindle data (for increased spectral resolution)
zpad_len: float
length to zero-pad spindles to (in seconds)
norm_range: list of tuple
frequency ranges for gottselig normalization
spin_range: list of int
spindle frequency range to be used for calculating center frequency
Returns
-------
self.spindles: nested dict of dfs
nested dict with spindle data by channel {channel: {spindle_num:spindle_data}}
self.spindles_wbuffer: nested dict of dfs
nested dict with spindle data w/ timedelta buffer by channel {channel: {spindle_num:spindle_data}}
self.spindle_psd_concat: dict
power spectra for concatenated spindles by channel (Only if psd_type == 'concat')
format {channel: pd.Series} with index = frequencies and values = power (uV^2/Hz)
self.spindle_psd_concat_norm: nested dict (Only if psd_type == 'concat')
format {chan: pd.Series(normalized power, index=frequency)}
self.spindle_psd_i: nested dict
power spectra for individual spindles by channel (Only if psd_type == 'i')
format {channel: {spindle: pd.Series}} with index = frequencies and values = power (uV^2/Hz)
self.spindle_multitaper_calcs: pd.DataFrame
calculations used to calculated multitaper power spectral estimates for each channel
self.spindle_features: pd.DataFrame
MultiIndex dataframe with calculated spindle statistics
"""
# create individual datframes for each spindle
self.create_spindfs(zmethod, trough_dtype, buff, buffer_len)
# calculate spindle & spindle buffer means
self.calc_spindle_means()
if buff:
self.calc_spindle_buffer_means()
# run time-domain spindle statistics by channel
self.calc_spin_tstats(spin_range)
# calculate power spectra
if psd_type == 'concat':
# calc psd on concated spindles
self.calc_spindle_psd_concat(psd_bandwidth)
# normalize power spectra for quantification
self.calc_gottselig_norm(norm_range)
elif psd_type == 'i':
# calc psd on individual spindles
self.calc_spindle_psd_i(psd_bandwidth, zpad, zpad_len)
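    # A sketch of the expected call order for the analysis above (assumes detect_spindles()
    # has already been run on the same object):
    #   n.detect_spindles()
    #   n.analyze_spindles(psd_type='i', psd_bandwidth=1.0, zpad=True, zpad_len=3.0)
    #   n.spindle_tstats   # time-domain spindle statistics per channel
    #   n.spindle_psd_i    # per-spindle multitaper power spectra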
def export_spindles(self, export_dir):
""" Export spindle analyses
NOTE: Update for spindle_psd_i
Parameters
----------
export_dir: str
Directory to save exported files
Returns
-------
export_dir/fname_metadata.txt: json dump file
export_dir/calcs/fname_multitaper_calcs.csv: csv file
export_dir/calcs/fname_spindle_psd.txt: json dump file
export_dir/calcs/fname_spindle_psd_norm.txt: json dump file
export_dir/fname_spindle_aggregates.csv: multi-tab excel file
export_dir/fname_spindle_means.csv: csv file
export_dir/fname_spindle_stats.csv: csv file
"""
print('Exporting spindle analyses..')
# make export directory if doesn't exit
if not os.path.exists(export_dir):
os.makedirs(export_dir)
# make subdirectory for calculations
calc_dir = os.path.join(export_dir, 'calcs')
if not os.path.exists(calc_dir):
os.makedirs(calc_dir)
# set base for savename
fname = self.metadata['file_info']['fname'].split('.')[0]
# dump metadata
filename = f'{fname}_spindle_metadata.txt'
savename = os.path.join(export_dir, filename)
with open(savename, 'w') as f:
json.dump(self.metadata, f, indent=4)
# export multitaper calcs
filename = f'{fname}_spindle_mt_calcs.csv'
savename = os.path.join(calc_dir, filename)
self.spindle_multitaper_calcs.to_csv(savename)
# export psd (concat)
if self.metadata['spindle_analysis']['psd_dtype'] == 'raw_concat':
# convert series to dicts for json dump
psd_export = {}
for name, series in self.spindle_psd.items():
psd_export[name] = series.to_dict()
filename = f'{fname}_spindle_psd_concat.txt'
savename = os.path.join(calc_dir, filename)
with open(savename, 'w') as f:
json.dump(psd_export, f, indent=4)
# export psd norm
# convert series to dicts for json dump
psd_norm_export = {}
for chan in self.spindle_psd_norm.keys():
psd_norm_export[chan]={}
for name, series in self.spindle_psd_norm[chan].items():
psd_norm_export[chan][name] = series.to_dict()
filename = f'{fname}_spindle_psd_norm.txt'
savename = os.path.join(calc_dir, filename)
with open(savename, 'w') as f:
json.dump(psd_norm_export, f, indent=4)
# export psd (individual)
### INDIVIDUAL EXPORT HERE
# export spindle aggregates
filename = f'{fname}_spindle_aggregates.xlsx'
savename = os.path.join(export_dir, filename)
writer = | pd.ExcelWriter(savename, engine='xlsxwriter') | pandas.ExcelWriter |
#!/usr/bin/python
# coding=utf-8
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import jieba
import jieba.analyse
import os
from pyecharts import options as opts
from pyecharts.charts import Map
from pyecharts.charts import Pie
from pyecharts.charts import Bar
from pyecharts.charts import TreeMap
from pyecharts.charts import Line
from pyecharts.faker import Faker
from pyecharts.render import make_snapshot
# Use snapshot-selenium to render images
from snapshot_selenium import snapshot
from snownlp import SnowNLP
def get_current_time():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
class MovieInfoAnalyse(object):
"""
    Analysis class for TOP500 movie information
"""
def __init__(self):
if not os.path.exists('analyse_data'):
os.mkdir('analyse_data')
print("所有分析结果保存在 analyse_data 文件夹下...")
def make_geo_map(self):
"""
        Generate a world map of the number of films released by each country
:return:
"""
        # print(get_current_time() + '|-------> Generating chart of film releases by country...')
        # Load the TOP500 movie data
csv_path = os.path.abspath(os.path.join(os.path.dirname("__file__"), os.path.pardir, "moviespider", "movie_info_top500.csv"))
rows = pd.read_csv(csv_path, encoding='utf-8', dtype=str)
        # Analyze and aggregate the data
col_country = rows['国别'].to_frame()
res = col_country.groupby('国别')['国别'].count().sort_values(ascending=False)
raw_data = [i for i in res.items()]
        # Load the mapping data: English name -> Chinese name
country_name = pd.read_json('countries_zh_to_en.json', orient='index')
stand_data = [i for i in country_name[0].items()]
        # Convert the data
res_code = []
for raw_country in raw_data:
for stand_country in stand_data:
if stand_country[1] in raw_country[0]:
res_code.append(stand_country[0])
code = pd.DataFrame(res_code).groupby(0)[0].count().sort_values(ascending=False)
data = []
for k, v in code.items():
data.append([k, v])
        # Build the chart
c = Map()
c.add("电影发行量", data, "world")
c.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
c.set_global_opts(title_opts=opts.TitleOpts(title="电影TOP500榜单中 - 世界各国电影发行量"),
visualmap_opts=opts.VisualMapOpts(max_=55))
htmlPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), "analyse_data", "世界各国电影发行量.html"))
pngPath = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), "analyse_data", "世界各国电影发行量.png"))
        # Render HTML
c.render(htmlPath)
        # Render PNG
# make_snapshot(snapshot, c.render(), pngPath)
        # print(get_current_time() + '|-------> Finished chart of film releases by country...')
return c
def make_pid_charts(self):
"""
        Generate a pie chart by movie genre
:return:
"""
        # print(get_current_time() + '|-------> Generating genre share chart...')
        # Load the data and initialize
csv_path = os.path.abspath(os.path.join(os.path.dirname("__file__"), os.path.pardir, "moviespider", "movie_info_top500.csv"))
rows = pd.read_csv(csv_path, encoding='utf-8', dtype=str)
to_drop = ['名称', '导演', '演员', '国别', '年份', '语言', '评分', '评分人数', '五星占比', '四星占比', '三星占比', '二星占比', '一星占比', '短评数',
'简介']
res = rows.drop(to_drop, axis=1)
        # Split the data
type_list = []
for i in res.itertuples():
for j in i[1].split(','):
type_list.append(j)
        # Aggregate the data
df = | pd.DataFrame(type_list, columns=['类型']) | pandas.DataFrame |
# %%
######################
## Import libraries ##
######################
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from PTWGuidedLatentDirichletAllocation import PTWGuidedLatentDirichletAllocation # Customised sub-class of sklearn LDA
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
import matplotlib.pyplot as plt
import numpy as np
import os
import math
import joblib
# %%
###########################
## Ingest ML papers data ##
###########################
data_fname = '../../data/papers.csv'
data = pd.read_csv(data_fname)
data.dropna(subset=['full_text'], inplace=True)
# %%
####################################################################
## Ingest and incorporate custom data science specific stop words ##
####################################################################
stopwords_fname = '../../data/ml_stopwords.csv'
add_stop_words = | pd.read_csv(stopwords_fname) | pandas.read_csv |
from flask import Flask,render_template,url_for,request,redirect
from flask_bootstrap import Bootstrap
from flask_dance.contrib.twitter import make_twitter_blueprint, twitter
import pandas as pd
import numpy as np
#ML Packges
from sklearn.feature_extraction.text import CountVectorizer
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn; the standalone joblib package provides the same API
from sklearn.preprocessing import StandardScaler
app = Flask(__name__)
app.config['SECRET_KEY'] = 'thisisasecret'
Bootstrap(app)
twitter_blueprint = make_twitter_blueprint(api_key='<KEY>',api_secret='<KEY>')
app.register_blueprint(twitter_blueprint, url_prefix='/twitter_login')
@app.route('/twitter')
def twitter_login():
if not twitter.authorized:
return redirect(url_for('twitter.login'))
account_info = twitter.get('account/settings.json')
if account_info.ok:
account_info_json = account_info.json()
return '<h1>Your twitter name is @{}'.format(account_info_json['screen_name'])
return '<h1>Request Failed</h1>'
@app.route('/')
def index():
return render_template('index.html')
@app.route('/predict',methods=['GET','POST'])
def predict():
naivebayes_model = open("models/NBClassifier.pkl","rb")
clf = joblib.load(naivebayes_model)
if request.method == 'POST':
namequery = request.form['namequery']
emailid = request.form['emailid']
country = request.form['country']
employement = request.form['employement']
education = request.form['education']
major = request.form['major']
devtype = request.form['devtype']
#COUNTRIES NUMERICAL
if country == "Argentina":
countryx = 0
elif country == "Australia":
countryx = 1
elif country == "Austria":
countryx = 2
elif country == "Belgium":
countryx = 3
elif country == "Brazil":
countryx = 4
elif country == "Canada":
countryx = 5
elif country == "Chile":
countryx = 6
elif country == "Colombia":
countryx = 7
elif country == "Croatia":
countryx = 8
elif country == "Denmark":
countryx = 9
elif country == "France":
countryx = 10
elif country == "Germany":
countryx = 11
elif country == "Greece":
countryx = 12
elif country == "India":
countryx = 13
elif country == "Indonesia":
countryx = 14
elif country == "Ireland":
countryx = 15
elif country == "Israel":
countryx = 16
elif country == "Italy":
countryx = 17
elif country == "Kenya":
countryx = 18
elif country == "Lebanon":
countryx = 19
elif country == "Netherlands":
countryx = 20
elif country == "Nigeria":
countryx = 21
elif country == "Poland":
countryx = 22
elif country == "Russian":
countryx = 23
elif country == "Slovakia":
countryx = 24
elif country == "South Africa":
countryx = 25
elif country == "Spain":
countryx = 26
elif country == "Sweden":
countryx = 27
elif country == "Thailand":
countryx = 28
elif country == "Turkey":
countryx = 29
elif country == "Ukraine":
countryx = 30
elif country == "United Kingdom":
countryx = 31
elif country == "United States":
countryx = 32
elif country == "Vietnam":
countryx = 33
#EMPLOYEMENT NUMERICAL
if employement == "Full Time":
employementx = 0
elif employement == "Part Time":
employementx = 1
#EDUCATION NUMERICAL
if education == "Associate degree":
educationx = 0
elif education == "Bachelor's degree (BA, BS, B.Eng., etc.)":
educationx = 1
elif education == "Master’s degree (MA, MS, M.Eng., MBA, etc.)":
educationx = 2
elif education == "Secondary school (e.g. American high school, German Realschule or Gymnasium, etc.)":
educationx = 3
elif education == "Some college/university study without earning a degree":
educationx = 4
#MAJOR NUMERICAL
if major == "Computer science, computer engineering, or software engineering":
majorx = 0
elif major == "Another engineering discipline (ex. civil, electrical, mechanical)":
majorx = 1
elif major == "A business discipline (ex. accounting, finance, marketing)":
majorx = 2
elif major == "A humanities discipline (ex. literature, history, philosophy)":
majorx = 3
elif major == "A natural science (ex. biology, chemistry, physics)":
majorx = 4
elif major == "A social science (ex. anthropology, psychology, political science)":
majorx = 5
elif major == "Fine arts or performing arts (ex. graphic design, music, studio art)":
majorx = 6
elif major == "Information systems, information technology, or system administration":
majorx = 7
elif major == "Mathematics or statistics":
majorx = 8
#DEVTYPE NUMERICAL
if devtype == "Python, TensorFlow, PyTorch":
devtypex = 0
elif devtype == "HTML, CSS, JavaScript, SQL":
devtypex = 1
elif devtype == "PHP, Ruby, Java":
devtypex = 2
elif devtype == "HTML, CSS, SASS":
devtypex = 3
elif devtype == "React Native, Android Studio":
devtypex = 4
elif devtype == "Art, Illustration":
devtypex = 5
elif devtype == "MySQL, Oracle":
devtypex = 6
elif devtype == "OOP, Scrum":
devtypex = 7
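        # Note: the if/elif chains above can be collapsed into dictionary lookups, e.g.
        # (a sketch, not part of the original app):
        #   country_codes = {"Argentina": 0, "Australia": 1, ..., "Vietnam": 33}
        #   countryx = country_codes[country]
        # The same pattern applies to employement, education, major and devtype.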
#FEATURE SCALING
sc = StandardScaler()
X_Test = | pd.DataFrame([[countryx,employementx,educationx,majorx,devtypex]], columns = [countryx,employementx,educationx,majorx,devtypex]) | pandas.DataFrame |
import numpy as np
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
import plotly.express as px
from dash.dependencies import Input, Output
from plotly.subplots import make_subplots
global_ndays_range = 27
# --- Start --- Reading base data for the Sunburst
industry_sentiment = pd.read_json('dashapps/sunburst/covidsm_agg_sentiment_industry.json.zip', orient='records')
industry_sentiment['published_at_date'] = pd.to_datetime(industry_sentiment['published_at_date'], unit='ms')
global_start_day = industry_sentiment['published_at_date'].max() - pd.DateOffset(days=global_ndays_range)
industries_hrchy = | pd.read_csv('dashapps/sunburst/industries-hrchy.csv') | pandas.read_csv |
#!/usr/bin/env python3
#
# Compare automatic block id using "training" set from human transcription
#
import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser
from glob import glob
from pathlib import Path
from psycopg2.extras import RealDictCursor
from time import localtime, strftime
from fuzzywuzzy import fuzz
import pandas as pd
from datetime import date
from tqdm import tqdm
import numpy as np
from multiprocessing import Pool
ver = "0.2.1"
##Import settings from settings.py file
import settings
############################################
# Logging
############################################
if not os.path.exists('logs'):
os.makedirs('logs')
current_time = strftime("%Y%m%d%H%M%S", localtime())
logfile_name = 'comparison_{}.log'.format(current_time)
logfile = 'logs/{logfile_name}'.format(logfile_name = logfile_name)
# from http://stackoverflow.com/a/9321890
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S',
filename=logfile,
filemode='a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logger1 = logging.getLogger("compare")
logging.getLogger('compare').addHandler(console)
logger1.info("compare version {}".format(ver))
############################################
#OCR Database
conn = psycopg2.connect(host = settings.ocr_host, database = settings.ocr_db, user = settings.ocr_user, password = settings.ocr_password, connect_timeout = 60)
conn.autocommit = True
db_cursor = conn.cursor(cursor_factory=RealDictCursor)
query_transcription = """
SELECT
DISTINCT collector as data,
'collector' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
UNION
SELECT
DISTINCT verbatim_date as data,
'verbatim_date' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
UNION
SELECT
DISTINCT verbatim_locality as data,
'verbatim_locality' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
UNION
SELECT
DISTINCT country as data,
'country' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
UNION
SELECT
DISTINCT state_territory as data,
'state_territory' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
UNION
SELECT
DISTINCT district_county as data,
'district_county' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
UNION
SELECT
DISTINCT precise_locality as data,
'precise_locality' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
UNION
SELECT
DISTINCT elevation as data,
'elevation' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
"""
query_test = """
SELECT
b.document_id,
replace(d.filename, '.jpg', '') as filename,
b.block::text,
string_agg(a.word_text, ' ') as block_text
FROM
ocr_blocks b,
ocr_documents d,
(
SELECT
document_id,
block,
word_line,
word,
word_text
FROM
ocr_entries
WHERE
document_id IN
(
SELECT document_id FROM ocr_documents WHERE replace(filename, '.jpg', '') IN
(
SELECT
filename
FROM
ocr_auto_sample
WHERE
ref_or_test = 'test' AND
reference_size = %(refsize)s
)
)
ORDER BY
page, block, word_line, word
) a
WHERE
d.document_id = b.document_id AND
a.document_id = b.document_id AND
a.block = b.block AND
b.confidence >= 0.85
GROUP BY
b.document_id,
b.block,
d.filename
"""
# db_cursor.execute("DELETE FROM ocr_transcription_ento_auto")
# db_cursor.execute("VACUUM ocr_transcription_ento_auto")
# #for refsize in ['0.05', '0.1', '0.2', '0.3', '0.4', '0.5']:
# for refsize in ['0.05', '0.1', '0.2']:
# print(refsize)
# db_cursor.execute(query_transcription, {'refsize': refsize})
# logger1.debug(db_cursor.query.decode("utf-8"))
# transcription_data = pd.DataFrame(db_cursor.fetchall())
# db_cursor.execute(query_test, {'refsize': refsize})
# logger1.debug(db_cursor.query.decode("utf-8"))
# test_data = pd.DataFrame(db_cursor.fetchall())
# for data_type in transcription_data['field'].unique():
# print("Processing {}...\n".format(data_type))
# for index, record in test_data.iterrows():
# #split string into all possible sequences
# logger1.debug(record['block_text'])
# block_text = record['block_text'].split(' ')
# len_block_text = len(block_text)
# text_to_test = pd.DataFrame(columns=('document_id', 'block', 'text', 'string_len'))
# for i in range(len_block_text-1):
# for j in range(i+1, len_block_text):
# #print(i, j)
# this_text = ' '.join(block_text[i:j])
# #Get alpha chars only
# alpha_block = re.sub(r'\W+ ,-/', '', this_text)
# #Add space after periods
# alpha_block = ' '.join(alpha_block.split()).replace(' .', '.').replace('.', '. ').strip()
# if len(alpha_block) > 3:
# #print(this_text)
# text_to_test = text_to_test.append([{'document_id': record['document_id'], 'block': record['block'], 'text': this_text, 'string_len': len(alpha_block)}], ignore_index=True)
# logger1.debug(this_text)
# results_df = pd.DataFrame(columns=('data', 'field', 'text', 'score1', 'score2', 'score3', 'score', 'string_len'))
# for ind, rcrd in text_to_test.iterrows():
# #tr_data = transcription_data.copy()
# tr_data = transcription_data[transcription_data.field == data_type].copy()
# tr_data['score1'] = tr_data.apply(lambda row : fuzz.partial_ratio(rcrd['text'].lower(), row['data'].lower()), axis = 1)
# tr_data['score2'] = tr_data.apply(lambda row : fuzz.ratio(rcrd['text'].lower(), row['data'].lower()), axis = 1)
# tr_data['score'] = tr_data.apply(lambda row : row['score1'] + row['score2'], axis = 1).astype(int)
# tr_data['score3'] = tr_data.apply(lambda row : fuzz.token_set_ratio(rcrd['text'].lower(), row['data'].lower()), axis = 1)
# tr_data['text'] = rcrd['text']
# tr_data['string_len'] = rcrd['string_len']
# results_df = results_df.append(tr_data)
# results_df['score'] = pd.to_numeric(results_df['score'])
# results_df['score3'] = pd.to_numeric(results_df['score3'])
# results_df['string_len'] = pd.to_numeric(results_df['string_len'])
# res = results_df.nlargest(1, ['score', 'string_len'])
# if res.shape[0] > 0:
# if res.iloc[0]['score'] > settings.insert_min:
# db_cursor.execute("INSERT INTO ocr_transcription_ento_auto (filename, {field}, reference_size) VALUES (%(document_id)s, %(text)s, %(reference_size)s) ON CONFLICT (filename, reference_size) DO UPDATE SET {field} = %(text)s".format(field = res.iloc[0]['field']), {'document_id': record['filename'], 'text': res.iloc[0]['text'], 'reference_size': refsize})
# logger1.info(db_cursor.query.decode("utf-8"))
# else:
# #Check for token_set_ratio
# max_score = results_df['score3'].max()
# res_top = results_df[results_df.score3 == max_score]
# #Choose string with the least number of words that has the max score
# res = results_df.nsmallest(1, 'string_len')
# if res.shape[0] > 0:
# if res.iloc[0]['score3'] > settings.token_set_ratio_min:
# db_cursor.execute("INSERT INTO ocr_transcription_ento_auto (filename, {field}, reference_size) VALUES (%(document_id)s, %(text)s, %(reference_size)s) ON CONFLICT (filename, reference_size) DO UPDATE SET {field} = %(text)s".format(field = res.iloc[0]['field']), {'document_id': record['filename'], 'text': res.iloc[0]['text'], 'reference_size': refsize})
# logger1.info(db_cursor.query.decode("utf-8"))
# #Cleanup
# for refsize in ['0.05', '0.1', '0.2']:
# db_cursor.execute(query_transcription, {'refsize': refsize})
# transcription_data = pd.DataFrame(db_cursor.fetchall())
# for data_type in transcription_data['field'].unique():
# db_cursor.execute("UPDATE ocr_transcription_ento_auto SET {field} = REPLACE({field}, '. , ', '., ')".format(field = data_type))
# logger1.info(db_cursor.query.decode("utf-8"))
##################
#GIS database
conn2 = psycopg2.connect(host = settings.gis_host, database = settings.gis_db, user = settings.gis_user, password = settings.gis_password, connect_timeout = 60)
db_cursor2 = conn2.cursor(cursor_factory=RealDictCursor)
# #Get state/provinces from GIS database
# db_cursor2.execute("SELECT name_1 as name, name_0 as country, 'locality:state' as name_type, uid FROM gadm1")
# states = pd.DataFrame(db_cursor2.fetchall())
# logger1.debug(db_cursor2.query.decode("utf-8"))
# #Get countries from GIS database
db_cursor2.execute("SELECT name_0 as name, 'locality:country' as name_type, uid FROM gadm0")
countries = pd.DataFrame(db_cursor2.fetchall())
logger1.debug(db_cursor2.query.decode("utf-8"))
# #Get counties, state
# db_cursor2.execute("SELECT name_2 || ' Co., ' || name_1 as name, 'locality:county' as name_type, uid FROM gadm2 WHERE name_0 = 'United States' AND type_2 = 'County'")
# counties = pd.DataFrame(db_cursor2.fetchall())
# logger1.debug(db_cursor2.query.decode("utf-8"))
# counties_list = pd.DataFrame(counties)
# db_cursor2.execute("SELECT name_2 || ' ' || type_2 || ', ' || name_1 as name, 'locality:county' as name_type, uid FROM gadm2 WHERE name_0 = 'United States'")
# counties = pd.DataFrame(db_cursor2.fetchall())
# logger1.debug(db_cursor2.query.decode("utf-8"))
# counties_list = counties_list.append(counties, ignore_index=True)
# db_cursor2.execute("SELECT DISTINCT g.name_2 || ', ' || s.abbreviation as name, 'locality:county' as name_type, g.uid FROM gadm2 g, us_state_abbreviations s WHERE g.name_1 = s.state AND g.name_0 = 'United States'")
# counties = pd.DataFrame(db_cursor2.fetchall())
# logger1.debug(db_cursor2.query.decode("utf-8"))
# counties_list = counties_list.append(counties, ignore_index=True)
# db_cursor2.execute("SELECT DISTINCT g.name_2 || ' Co., ' || s.abbreviation as name, 'locality:county' as name_type, g.name_1 AS state, g.name_0 as country, g.uid FROM gadm2 g, us_state_abbreviations s WHERE g.name_1 = s.state AND g.name_0 = 'United States'")
# counties = pd.DataFrame(db_cursor2.fetchall())
# logger1.debug(db_cursor2.query.decode("utf-8"))
# counties_list = counties_list.append(counties, ignore_index=True)
# #Close GIS database connection
db_cursor2.close()
conn2.close()
# ##################
# db_cursor.execute("DROP TABLE ocr_transcription_ento_auto_geo")
# db_cursor.execute("CREATE TABLE ocr_transcription_ento_auto_geo AS SELECT * FROM ocr_transcription_ento_auto")
# db_cursor.execute("ALTER TABLE ocr_transcription_ento_auto_geo ADD CONSTRAINT ocr_tra_ento_auto_geo_c UNIQUE (filename, reference_size)")
# #country
query_country = """
SELECT
b.document_id,
replace(d.filename, '.jpg', '') as filename,
b.block::text,
string_agg(a.word_text, ' ') as block_text
FROM
ocr_blocks b,
ocr_documents d,
(
SELECT
document_id,
block,
word_line,
word,
word_text
FROM
ocr_entries
WHERE
document_id IN
(
SELECT document_id FROM ocr_documents WHERE replace(filename, '.jpg', '') IN
(
SELECT
filename
FROM
ocr_auto_sample
WHERE
ref_or_test = 'test' AND
reference_size = %(refsize)s
)
)
ORDER BY
page, block, word_line, word
) a
WHERE
d.document_id = b.document_id AND
a.document_id = b.document_id AND
a.block = b.block AND
b.confidence >= 0.85
GROUP BY
b.document_id,
b.block,
d.filename
"""
# query_state = """
# SELECT
# b.document_id,
# replace(d.filename, '.jpg', '') as filename,
# b.block::text,
# string_agg(a.word_text, ' ') as block_text
# FROM
# ocr_blocks b,
# ocr_documents d,
# (
# SELECT
# document_id,
# block,
# word_line,
# word,
# word_text
# FROM
# ocr_entries
# WHERE
# document_id IN
# (
# SELECT document_id FROM ocr_documents WHERE replace(filename, '.jpg', '') IN
# (
# SELECT
# filename
# FROM
# ocr_auto_sample
# WHERE
# ref_or_test = 'test' AND
# reference_size = %(refsize)s
# )
# )
# ORDER BY
# page, block, word_line, word
# ) a
# WHERE
# d.document_id = b.document_id AND
# a.document_id = b.document_id AND
# a.block = b.block AND
# b.confidence >= 0.85
# GROUP BY
# b.document_id,
# b.block,
# d.filename
# """
# query_county = """
# SELECT
# b.document_id,
# replace(d.filename, '.jpg', '') as filename,
# b.block::text,
# string_agg(a.word_text, ' ') as block_text
# FROM
# ocr_blocks b,
# ocr_documents d,
# (
# SELECT
# document_id,
# block,
# word_line,
# word,
# word_text
# FROM
# ocr_entries
# WHERE
# document_id IN
# (
# SELECT document_id FROM ocr_documents WHERE replace(filename, '.jpg', '') IN
# (
# SELECT
# filename
# FROM
# ocr_auto_sample
# WHERE
# ref_or_test = 'test' AND
# reference_size = %(refsize)s
# )
# )
# ORDER BY
# page, block, word_line, word
# ) a
# WHERE
# d.document_id = b.document_id AND
# a.document_id = b.document_id AND
# a.block = b.block AND
# b.confidence >= 0.85
# GROUP BY
# b.document_id,
# b.block,
# d.filename
# """
def match_country(this_record):
try:
record = this_record.iloc[0]
except:
return
logger1.debug(record['block_text'])
block_text = record['block_text'].split(' ')
len_block_text = len(block_text)
text_to_test = pd.DataFrame(columns=('document_id', 'block', 'text', 'string_len'))
for i in range(len_block_text-1):
for j in range(i+1, len_block_text):
#print(i, j)
#this_text = ' '.join(block_text[i:j])
this_text = ' '.join(map(str, block_text[i:j]))
alpha_block = re.sub(r'\W+ ,-/', '', this_text)
#Add space after periods
alpha_block = ' '.join(alpha_block.split()).replace(' .', '.').replace('.', '. ').strip()
if len(alpha_block) > 3:
#print(this_text)
text_to_test = text_to_test.append([{'document_id': record['document_id'], 'block': record['block'], 'text': this_text, 'string_len': len(alpha_block)}], ignore_index=True)
logger1.debug(this_text)
    results_df = pd.DataFrame(columns=('text', 'score1', 'score2', 'score3', 'score', 'string_len'))
"""
Get Word2vec embeddings of Reddit text
RESOURCES:
- https://radimrehurek.com/gensim/auto_examples/core/run_core_concepts.html#sphx-glr-auto-examples-core-run-core-concepts-py
- https://radimrehurek.com/gensim/auto_examples/tutorials/run_word2vec.html#sphx-glr-auto-examples-tutorials-run-word2vec-py
- https://www.geeksforgeeks.org/removing-stop-words-nltk-python/
- https://stackoverflow.com/questions/15547409/how-to-get-rid-of-punctuation-using-nltk-tokenizer
"""
# Load dependencies
import os
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
import gensim.downloader as api
# Set file path
my_path = os.getcwd()
# Import data
df_truth = pd.read_csv(my_path + '/data/cleaned/features_temp/df_truth_dass.csv')
df_primary = pd.read_csv(my_path + '/data/cleaned/features_temp/df_primary_dass.csv')
#region GROUND TRUTH DATASET
#region PREPROCESS TEXT
# Create empty list
corpus_truth = []
# Set the stop words from NLTK
stop_words = set(stopwords.words('english'))
# Create a custom tokenizer to remove punctuation
tokenizer = RegexpTokenizer(r'\w+')
# Create corpus
for string in df_truth['text'].tolist():
# Remove strange characters
string = string.replace('\r', '')
string = string.replace('*', '')
# Get tokens (i.e., individual words)
tokens = tokenizer.tokenize(string)
# Set a list holder
filtered_sentence = []
# For each token, remove the stop words
for w in tokens:
if w not in stop_words:
filtered_sentence.append(w)
# Save list of tokens (i.e., sentences) to preprocessed corpus
corpus_truth.append(filtered_sentence)
#endregion
#region WORD2VEC MODEL
# Load the Word2vec model
wv = api.load('word2vec-google-news-300')
# List embeddings for each post
post_embeddings = []
# For every word in every sentence within the corpus
for sentence in corpus_truth:
# List of word embeddings
w2v_embeddings = []
# Get the word embeddings for each word
for word in sentence:
# See if there is a pretrained word embedding
try:
vector_representation = wv[word]
w2v_embeddings.append(vector_representation)
# If there is no pretrained word embedding
except KeyError:
vector_representation = np.repeat(0, 300)
w2v_embeddings.append(vector_representation)
# Save the word embeddings at the post level
post_embeddings.append(w2v_embeddings)
# Set a holder variable
avg_post_embeddings = []
# Aggregate word embeddings
for post in post_embeddings:
# Transform embedding into data frame where each row is a word and each column is the embedding dimension
df = pd.DataFrame(post)
# Square each element in the data frame to remove negatives
df = df.apply(np.square)
# Get the mean of each embedding dimension
df = df.apply(np.mean, axis=0)
# The average word embedding for the entire Reddit post
avg_embedding = df.tolist()
# Append to list
avg_post_embeddings.append(avg_embedding)
# Create a dataframe with the average word embeddings of each post
embedding_df = pd.DataFrame(avg_post_embeddings)
# Rename the columns
embedding_df = embedding_df.add_prefix('w2v_')
#endregion
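# Illustrative sketch of the aggregation above (toy numbers only, not Reddit data):
# squaring each dimension before averaging makes the post-level vector insensitive to
# sign, so words with opposite embeddings do not cancel out.
def _toy_embedding_average():
    toy_post = [[1.0, -2.0, 0.5],
                [-1.0, 2.0, 0.5]]  # 2 words x 3 embedding dimensions
    df_toy = pd.DataFrame(toy_post)
    averaged = df_toy.apply(np.square).apply(np.mean, axis=0)
    # squared mean -> [1.0, 4.0, 0.25]; a plain mean would give [0.0, 0.0, 0.5]
    return averaged.tolist()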
# Add average word embeddings to the ground truth data set
df_truth1 = pd.concat([df_truth, embedding_df], axis=1)
# Save to file
df_truth1.to_csv(my_path + '/data/cleaned/with_features/df_truth.csv')
#endregion
#region PRIMARY DATASET
#region PREPROCESS TEXT
# Create empty list
corpus_primary = []
# Set the stop words from NLTK
stop_words = set(stopwords.words('english'))
# Create a custom tokenizer to remove punctuation
tokenizer = RegexpTokenizer(r'\w+')
# Create corpus
for string in df_primary['text'].tolist():
# Remove strange characters
string = string.replace('\r', '')
string = string.replace('*', '')
# Get tokens (i.e., individual words)
tokens = tokenizer.tokenize(string)
# Set a list holder
filtered_sentence = []
# For each token, remove the stop words
for w in tokens:
if w not in stop_words:
filtered_sentence.append(w)
# Save list of tokens (i.e., sentences) to preprocessed corpus
corpus_primary.append(filtered_sentence)
#endregion
#region WORD2VEC MODEL
# Load the Word2vec model
wv = api.load('word2vec-google-news-300')
# List embeddings for each post
post_embeddings = []
# For every word in every sentence within the corpus
for sentence in corpus_primary:
# List of word embeddings
w2v_embeddings = []
# Get the word embeddings for each word
for word in sentence:
# See if there is a pretrained word embedding
try:
vector_representation = wv[word]
w2v_embeddings.append(vector_representation)
# If there is no pretrained word embedding
except KeyError:
vector_representation = np.repeat(0, 300)
w2v_embeddings.append(vector_representation)
# Save the word embeddings at the post level
post_embeddings.append(w2v_embeddings)
# Set a holder variable
avg_post_embeddings = []
# Aggregate word embeddings
for post in post_embeddings:
# Transform embedding into data frame where each row is a word and each column is the embedding dimension
    df = pd.DataFrame(post)
import os
import pandas as pd
import sys
import time
import argparse
sys.path.append("../")
from core.scraping import extract_img_selenium
# was tested originally on windows
driver_path = "downloads/chromedriver_win32//chromedriver"
save_dir = "../data/bilingual_save"
manga_files_path = "../data/manga_list.txt"
max_pages = 3
parser = argparse.ArgumentParser(description='running extraction code')
parser.add_argument("-d",
"--driver_path",
help="path to chrome driver",
default=driver_path)
parser.add_argument("-s",
"--save_dir",
help="path to save files and images",
default=save_dir)
parser.add_argument(
"-m",
"--manga_list",
help=
"path to list of urls with each line referencing a link to one page per different manga",
default=manga_files_path)
parser.add_argument("-n",
"--number_per_manga",
help="max number of pages to attempt to save per page",
default=int(max_pages),
type=int)
# python scraping/main_extraction.py -m "../data/manga_list.txt"
# --number_per_manga 3
def main(driver_path: str,
save_dir: str,
manga_list_pth: str,
max_pages_per_manga=1000):
"""
    used to extract manga metadata and images for the entries in the manga list; each line of the list refers to a page in a different manga
Args:
driver_path: path to selenium driver
save_dir: directory to save all the mangas images
manga_list_pth: path to a text file where each line is a different manga link
max_pages_per_manga: cap how many pages you extract per manga
Returns:
None
"""
driver = extract_img_selenium.create_web_driver(driver_path)
f = open(manga_list_pth, "r")
manga_list = f.readlines()
for manga_initial_page in manga_list:
current_link = manga_initial_page.replace("\n", "")
manga_name = manga_initial_page.replace("https://", "").split("/")[2]
current = 0
all_frames = pd.DataFrame()
while current < max_pages_per_manga: # max cap on links
pd_results = None
driver.get(current_link)
try: # not all pages are extractable, so just skip them
results = extract_img_selenium.save_meta_data_eng_jp_pairs(
driver, current_link)
page_height = driver.find_elements_by_class_name(
"image-container")[0].size["height"]
page_width = driver.find_elements_by_class_name(
"image-container")[0].size["width"]
                pd_results = pd.DataFrame(results)
"""
Tests for the blaze interface to the pipeline api.
"""
from __future__ import division
from collections import OrderedDict
from datetime import timedelta
from unittest import TestCase
import warnings
import blaze as bz
from datashape import dshape, var, Record
from nose_parameterized import parameterized
import numpy as np
from numpy.testing.utils import assert_array_almost_equal
import pandas as pd
from pandas.util.testing import assert_frame_equal
from toolz import keymap, valmap, concatv
from toolz.curried import operator as op
from zipline.pipeline import Pipeline, CustomFactor
from zipline.pipeline.data import DataSet, BoundColumn
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline.loaders.blaze import (
from_blaze,
BlazeLoader,
NoDeltasWarning,
NonNumpyField,
NonPipelineField,
)
from zipline.utils.numpy_utils import repeat_last_axis
from zipline.utils.test_utils import tmp_asset_finder, make_simple_asset_info
nameof = op.attrgetter('name')
dtypeof = op.attrgetter('dtype')
asset_infos = (
(make_simple_asset_info(
tuple(map(ord, 'ABC')),
pd.Timestamp(0),
pd.Timestamp('2015'),
),),
(make_simple_asset_info(
tuple(map(ord, 'ABCD')),
pd.Timestamp(0),
pd.Timestamp('2015'),
),),
)
with_extra_sid = parameterized.expand(asset_infos)
class BlazeToPipelineTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.dates = dates = pd.date_range('2014-01-01', '2014-01-03')
dates = cls.dates.repeat(3)
cls.sids = sids = ord('A'), ord('B'), ord('C')
cls.df = df = pd.DataFrame({
'sid': sids * 3,
'value': (0, 1, 2, 1, 2, 3, 2, 3, 4),
'asof_date': dates,
'timestamp': dates,
})
cls.dshape = dshape("""
var * {
sid: ?int64,
value: ?float64,
asof_date: datetime,
timestamp: datetime
}
""")
cls.macro_df = df[df.sid == 65].drop('sid', axis=1)
dshape_ = OrderedDict(cls.dshape.measure.fields)
del dshape_['sid']
cls.macro_dshape = var * Record(dshape_)
cls.garbage_loader = BlazeLoader()
def test_tabular(self):
name = 'expr'
expr = bz.Data(self.df, name=name, dshape=self.dshape)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
self.assertEqual(ds.__name__, name)
self.assertTrue(issubclass(ds, DataSet))
self.assertEqual(
{c.name: c.dtype for c in ds._columns},
{'sid': np.int64, 'value': np.float64},
)
for field in ('timestamp', 'asof_date'):
with self.assertRaises(AttributeError) as e:
getattr(ds, field)
self.assertIn("'%s'" % field, str(e.exception))
self.assertIn("'datetime'", str(e.exception))
# test memoization
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
),
ds,
)
def test_column(self):
exprname = 'expr'
expr = bz.Data(self.df, name=exprname, dshape=self.dshape)
value = from_blaze(
expr.value,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
self.assertEqual(value.name, 'value')
self.assertIsInstance(value, BoundColumn)
self.assertEqual(value.dtype, np.float64)
# test memoization
self.assertIs(
from_blaze(
expr.value,
loader=self.garbage_loader,
no_deltas_rule='ignore',
),
value,
)
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
).value,
value,
)
# test the walk back up the tree
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
),
value.dataset,
)
self.assertEqual(value.dataset.__name__, exprname)
def test_missing_asof(self):
expr = bz.Data(
self.df.loc[:, ['sid', 'value', 'timestamp']],
name='expr',
dshape="""
var * {
sid: ?int64,
value: float64,
timestamp: datetime,
}""",
)
with self.assertRaises(TypeError) as e:
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
self.assertIn("'asof_date'", str(e.exception))
self.assertIn(repr(str(expr.dshape.measure)), str(e.exception))
def test_auto_deltas(self):
expr = bz.Data(
{'ds': self.df,
'ds_deltas': pd.DataFrame(columns=self.df.columns)},
dshape=var * Record((
('ds', self.dshape.measure),
('ds_deltas', self.dshape.measure),
)),
)
loader = BlazeLoader()
ds = from_blaze(expr.ds, loader=loader)
self.assertEqual(len(loader), 1)
exprdata = loader[ds]
self.assertTrue(exprdata.expr.isidentical(expr.ds))
self.assertTrue(exprdata.deltas.isidentical(expr.ds_deltas))
def test_auto_deltas_fail_warn(self):
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
loader = BlazeLoader()
expr = bz.Data(self.df, dshape=self.dshape)
from_blaze(
expr,
loader=loader,
no_deltas_rule='warn',
)
self.assertEqual(len(ws), 1)
w = ws[0].message
self.assertIsInstance(w, NoDeltasWarning)
self.assertIn(str(expr), str(w))
def test_auto_deltas_fail_raise(self):
loader = BlazeLoader()
expr = bz.Data(self.df, dshape=self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
loader=loader,
no_deltas_rule='raise',
)
self.assertIn(str(expr), str(e.exception))
def test_non_numpy_field(self):
expr = bz.Data(
[],
dshape="""
var * {
a: datetime,
asof_date: datetime,
timestamp: datetime,
}""",
)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
with self.assertRaises(AttributeError):
ds.a
self.assertIsInstance(object.__getattribute__(ds, 'a'), NonNumpyField)
def test_non_pipeline_field(self):
# NOTE: This test will fail if we ever allow string types in
# the Pipeline API. If this happens, change the dtype of the `a` field
# of expr to another type we don't allow.
expr = bz.Data(
[],
dshape="""
var * {
a: string,
asof_date: datetime,
timestamp: datetime,
}""",
)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
with self.assertRaises(AttributeError):
ds.a
self.assertIsInstance(
object.__getattribute__(ds, 'a'),
NonPipelineField,
)
def test_complex_expr(self):
expr = bz.Data(self.df, dshape=self.dshape)
# put an Add in the table
expr_with_add = bz.transform(expr, value=expr.value + 1)
# Test that we can have complex expressions with no deltas
from_blaze(
expr_with_add,
deltas=None,
loader=self.garbage_loader,
)
with self.assertRaises(TypeError):
from_blaze(
expr.value + 1, # put an Add in the column
deltas=None,
loader=self.garbage_loader,
)
deltas = bz.Data(
pd.DataFrame(columns=self.df.columns),
dshape=self.dshape,
)
with self.assertRaises(TypeError):
from_blaze(
expr_with_add,
deltas=deltas,
loader=self.garbage_loader,
)
with self.assertRaises(TypeError):
from_blaze(
expr.value + 1,
deltas=deltas,
loader=self.garbage_loader,
)
def test_id(self):
expr = bz.Data(self.df, name='expr', dshape=self.dshape)
loader = BlazeLoader()
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
)
p = Pipeline()
p.add(ds.value.latest, 'value')
dates = self.dates
with tmp_asset_finder() as finder:
result = SimplePipelineEngine(
loader,
dates,
finder,
).run_pipeline(p, dates[0], dates[-1])
expected = self.df.drop('asof_date', axis=1).set_index(
['timestamp', 'sid'],
)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
finder.retrieve_all(expected.index.levels[1]),
))
assert_frame_equal(result, expected, check_dtype=False)
def test_id_macro_dataset(self):
expr = bz.Data(self.macro_df, name='expr', dshape=self.macro_dshape)
loader = BlazeLoader()
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
)
p = Pipeline()
p.add(ds.value.latest, 'value')
dates = self.dates
asset_info = asset_infos[0][0]
with tmp_asset_finder(asset_info) as finder:
result = SimplePipelineEngine(
loader,
dates,
finder,
).run_pipeline(p, dates[0], dates[-1])
nassets = len(asset_info)
expected = pd.DataFrame(
list(concatv([0] * nassets, [1] * nassets, [2] * nassets)),
index=pd.MultiIndex.from_product((
self.macro_df.timestamp,
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
assert_frame_equal(result, expected, check_dtype=False)
def _run_pipeline(self,
expr,
deltas,
expected_views,
expected_output,
finder,
calendar,
start,
end,
window_length,
compute_fn):
loader = BlazeLoader()
ds = from_blaze(
expr,
deltas,
loader=loader,
no_deltas_rule='raise',
)
p = Pipeline()
# prevent unbound locals issue in the inner class
window_length_ = window_length
class TestFactor(CustomFactor):
inputs = ds.value,
window_length = window_length_
def compute(self, today, assets, out, data):
assert_array_almost_equal(data, expected_views[today])
out[:] = compute_fn(data)
p.add(TestFactor(), 'value')
result = SimplePipelineEngine(
loader,
calendar,
finder,
).run_pipeline(p, start, end)
assert_frame_equal(
result,
expected_output,
check_dtype=False,
)
@with_extra_sid
def test_deltas(self, asset_info):
expr = bz.Data(self.df, name='expr', dshape=self.dshape)
deltas = bz.Data(self.df, name='deltas', dshape=self.dshape)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
expected_views = keymap(pd.Timestamp, {
'2014-01-02': np.array([[10.0, 11.0, 12.0],
[1.0, 2.0, 3.0]]),
'2014-01-03': np.array([[11.0, 12.0, 13.0],
[2.0, 3.0, 4.0]]),
'2014-01-04': np.array([[12.0, 13.0, 14.0],
[12.0, 13.0, 14.0]]),
})
nassets = len(asset_info)
if nassets == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan]],
expected_views,
)
with tmp_asset_finder(asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([12] * nassets, [13] * nassets, [14] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
dates = self.dates
dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
self._run_pipeline(
expr,
deltas,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
def test_deltas_macro(self):
asset_info = asset_infos[0][0]
expr = bz.Data(self.macro_df, name='expr', dshape=self.macro_dshape)
deltas = bz.Data(
self.macro_df.iloc[:-1],
name='deltas',
dshape=self.macro_dshape,
)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
nassets = len(asset_info)
expected_views = keymap(pd.Timestamp, {
'2014-01-02': repeat_last_axis(np.array([10.0, 1.0]), nassets),
'2014-01-03': repeat_last_axis(np.array([11.0, 2.0]), nassets),
})
with tmp_asset_finder(asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([10] * nassets, [11] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
dates = self.dates
self._run_pipeline(
expr,
deltas,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
@with_extra_sid
def test_novel_deltas(self, asset_info):
base_dates = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-04')
])
repeated_dates = base_dates.repeat(3)
baseline = pd.DataFrame({
'sid': self.sids * 2,
'value': (0, 1, 2, 1, 2, 3),
'asof_date': repeated_dates,
'timestamp': repeated_dates,
})
expr = bz.Data(baseline, name='expr', dshape=self.dshape)
deltas = bz.Data(baseline, name='deltas', dshape=self.dshape)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
expected_views = keymap(pd.Timestamp, {
'2014-01-03': np.array([[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0]]),
'2014-01-06': np.array([[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0],
[11.0, 12.0, 13.0]]),
})
if len(asset_info) == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan, np.nan]],
expected_views,
)
expected_output_buffer = [10, 11, 12, np.nan, 11, 12, 13, np.nan]
else:
expected_output_buffer = [10, 11, 12, 11, 12, 13]
cal = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
            pd.Timestamp('2014-01-02'),
import pandas as pd
import datetime
import os
def prepare_data(time0, time9, symbol, fgCov=False, prep_new=True, mode='test'):
path = '/home/ubuntu/backtrader-binance-futures/Data/binance/futures/'
df9path = f'../data/{symbol}_1m_{mode}.csv'
if prep_new:
time0 = pd.to_datetime(time0)
time0 = datetime.datetime.date(time0)
time9 = pd.to_datetime(time9)
time9 = datetime.datetime.date(time9)
time5 = time0
df9 = pd.DataFrame()
while time5 <= time9:
try:
file = path + str(time5) + '/' + str(time5) + '_' + symbol + '_1m.csv'
print("====>>>",file)
df0 = pd.read_csv(file)
df0['datetime'] = [x[:19] for x in df0['candle_begin_time']]
df0.set_index('datetime', drop=True, inplace=True)
df0.index = pd.to_datetime(df0.index, format='%Y-%m-%d %H:%M:%S')
df0.sort_index(ascending=True, inplace=True)
except:
time5 = time5 + datetime.timedelta(days=1)
file = path + str(time5) + '/' + str(time5) + '_' + symbol + '_1m.csv'
df0 = pd.read_csv(file)
df0['datetime'] = [x[:19] for x in df0['candle_begin_time']]
df0.set_index('datetime', drop=True, inplace=True)
                df0.index = pd.to_datetime(df0.index, format='%Y-%m-%d %H:%M:%S')
from data_preparation import df_raw
import config as cfg
import pandas as pd
import pickle
from keras.models import load_model
import numpy as np
# convert time series into inputs
def series_to_input_production(data, n_in=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = [], []
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i-1))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
    agg = pd.concat(cols, axis=1)
import glob
import os
import shutil
import subprocess
import time
import numpy as np
import pandas as pd
import yaml
def run_single(executable, cfg_path, out_dir, NB_TRIALS=5):
print('Running single experiment with:')
print(' executable:', executable)
print(' cfg_path:', cfg_path)
print(' out_dir:', out_dir)
print(' NB_TRIALS:', NB_TRIALS)
exec_dir = os.path.dirname(executable)
os.chdir(exec_dir)
pred_fname = os.path.join(exec_dir, 'preds.out')
# Delete cached files
model_paths = glob.glob(os.path.join(exec_dir, '*batch*'))
for path in model_paths:
os.remove(path)
if os.path.exists(pred_fname):
os.remove(pred_fname)
# Set up output dir, other stuff
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
os.makedirs(out_dir)
cfg = yaml.safe_load(open(cfg_path, 'r').read())
print('Set up output dir')
# Run one for generating the model and saving the outputs
stdout_fname = os.path.join(out_dir, 'setup.stdout')
stderr_fname = os.path.join(out_dir, 'setup.stderr')
with open(stdout_fname, 'w') as stdout, open(stderr_fname, 'w') as stderr:
ret_code = subprocess.call([executable, cfg_path], stdout=stdout, stderr=stderr)
if ret_code != 0:
print('Uh oh, something went wrong')
time.sleep(1)
print('Generated model file and outputs')
shutil.copy(pred_fname, os.path.join(out_dir, 'preds.out'))
all_times = []
for i in range(NB_TRIALS):
print('Starting trial {}'.format(i))
stdout_fname = os.path.join(out_dir, '{}.stdout'.format(i))
stderr_fname = os.path.join(out_dir, '{}.stderr'.format(i))
with open(stdout_fname, 'w') as stdout, open(stderr_fname, 'w') as stderr:
ret_code = subprocess.call([executable, cfg_path], stdout=stdout, stderr=stderr)
if ret_code != 0:
print('Uh oh, something went wrong', ret_code)
continue
with open(stderr_fname, 'r') as f:
lines = f.readlines()
lines = filter(lambda x: 'Runtime' in x, lines)
lines = map(lambda x: x.split(' ')[-1], lines)
times = map(lambda x: float(x), lines)
times = list(times)
total_time = sum(times)
print('Took', total_time, 'seconds')
all_times.append(total_time)
print('Finished run')
print(all_times)
data = [all_times]
col_names = [str(i) for i in range(len(all_times))]
print(data)
print(col_names)
    df = pd.DataFrame(data, columns=col_names)
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
    target.now = pd.to_datetime('2010-01-02')
import sys
from bs4 import BeautifulSoup
import urllib
import re
import time
import calendar
import datetime as dt
import pandas as pd
import numpy as np
import mysql.connector as sql
import progressbar
import logging
from nba_stats.scraping.base_functions import get_soup, get_bref_soup, get_bref_tables, get_table
from nba_stats.scraping.functions import split_first_last, get_split, convert_feet, combine_columns, is_starter, to_int, convert_mp, include_comments, column_time
from nba_stats.read_write.db_insert import SqlDataframes
from nba_stats.read_write.functions import export_txt, create_schema_str
CURRENT_YEAR = dt.datetime.now().year
CURRENT_SEASON = CURRENT_YEAR + 1 if dt.datetime.now().month > 7 else CURRENT_YEAR
BREF_HTML = 'https://www.basketball-reference.com'
CRAWL_DELAY = 3
SEASON_TEAMS = {1977: 22,
1981: 23,
1989: 25,
1990: 27,
1996: 29,
2005: 30}
PLAYOFF_TEAMS = {1954: 6,
1967: 8,
1975: 10,
1977: 12,
1984: 16}
stats_db = SqlDataframes()
logger_build = logging.getLogger(__name__)
# handler = logging.StreamHandler()
# file_handler = logging.FileHandler("logging\\%s.log" % dt.datetime.today().strftime('%Y%m%d'))
# formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-10s %(message)s')
# for a_handler in [handler, file_handler]:
# a_handler.setFormatter(formatter)
# logger_build.addHandler(handler)
# logger_build.addHandler(file_handler)
# logger_build.setLevel(logging.INFO)
def get_players_urls(players_url=None):
'''Returns soup objects of bref player pages (a-z)
Keyword arguments:
players_url - The url used to scrape the soups (default None)
'''
players_soups = []
if players_url == None:
players_url = BREF_HTML + '/players/'
letters = [chr(n) for n in range(97,123)]
success_count, http_error_count = 0, 0
start_time = time.time()
for letter in letters:
players_soup = get_soup(players_url + letter)
if players_soup != None:
players_soups.append(players_soup)
success_count += 1
else:
http_error_count += 1
end_time = time.time()
logger_build.info('Per run: {}, Successes: {}, Failures: {}'.format(
(end_time - start_time)/(success_count+http_error_count),
success_count,
http_error_count)
)
return players_soups
def get_all_players(players_soups):
'''Takes soups of bref players and returns a df containing info of all players
    Keyword arguments:
players_soups - A list of all the soups to be processed.
'''
players_dfs = []
for p_soup in players_soups:
players_df = get_bref_tables(p_soup, ['all_players'])
players_dfs.append(players_df['all_players'])
    players = pd.concat(players_dfs)
"""
@author: The KnowEnG dev team
"""
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import normalize
import knpackage.toolbox as kn
import knpackage.distributed_computing_utils as dstutil
import knpackage.data_cleanup_toolbox as datacln
EPSILON_0 = 1e-7
def run_correlation(run_parameters):
""" perform feature prioritization
Args:
run_parameters: parameter set dictionary.
"""
max_cpu = run_parameters["max_cpu"]
run_parameters["results_tmp_directory"] = kn.create_dir(run_parameters["results_directory"], 'tmp')
phenotype_df = kn.get_spreadsheet_df(run_parameters["phenotype_name_full_path"])
spreadsheet_df = kn.get_spreadsheet_df(run_parameters["spreadsheet_name_full_path"])
phenotype_df = phenotype_df.T
len_phenotype = len(phenotype_df.index)
array_of_jobs = range(0, len_phenotype)
for i in range(0, len_phenotype, max_cpu):
jobs_id = array_of_jobs[i:i + max_cpu]
number_of_jobs = len(jobs_id)
zipped_arguments = dstutil.zip_parameters(run_parameters, spreadsheet_df, phenotype_df, jobs_id)
dstutil.parallelize_processes_locally(run_correlation_worker, zipped_arguments, number_of_jobs)
write_phenotype_data_all(run_parameters)
kn.remove_dir(run_parameters["results_tmp_directory"])
def run_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, job_id):
""" core function for parallel run_correlation
Args:
run_parameters: dict of parameters
spreadsheet_df: spreadsheet data frame
phenotype_df: phenotype data frame
job_id: parallel iteration number
"""
# selects the ith row in phenotype_df
np.random.seed(job_id)
phenotype_df = phenotype_df.iloc[[job_id], :]
spreadsheet_df, phenotype_df, msg = datacln.check_input_value_for_gene_prioritazion(spreadsheet_df, phenotype_df)
pc_array = get_correlation(spreadsheet_df.values, phenotype_df.values[0], run_parameters)
feature_name_list = spreadsheet_df.index
phenotype_name = phenotype_df.index.values[0]
generate_correlation_output(pc_array, phenotype_name, feature_name_list, run_parameters)
def generate_correlation_output(pc_array, phenotype_name, feature_name_list, run_parameters):
""" Save final output of correlation
Args:
pc_array: pearson correlation coefficient array
phenotype_name: name of the phenotype
        feature_name_list: list of the features correlated (same size as pc_array)
run_parameters: dictionary of run parameters with key 'results_directory'
"""
phenotype_name_list = np.repeat(phenotype_name, len(feature_name_list))
baseline_score = pc_array
pc_array = abs(pc_array)
viz_score = (pc_array - min(pc_array)) / (max(pc_array) - min(pc_array))
pc_array = np.round(pc_array, 8)
viz_score = np.round(viz_score, 8)
baseline_score = np.round(baseline_score, 8)
output_val = np.column_stack((phenotype_name_list, feature_name_list, pc_array, viz_score, baseline_score))
df_header = ['Response', 'Feature_ID', 'quantitative_sorting_score', 'visualization_score', 'baseline_score']
result_df = pd.DataFrame(columns=df_header)
result_df['Response'] = phenotype_name_list
result_df['Feature_ID'] = feature_name_list
result_df['quantitative_sorting_score'] = pc_array
result_df['visualization_score'] = viz_score
result_df['baseline_score'] = baseline_score
result_df = result_df.sort_values("visualization_score", ascending=0)
result_df.index = range(result_df.shape[0])
write_one_phenotype(result_df, phenotype_name, feature_name_list, run_parameters)
def run_bootstrap_correlation(run_parameters):
""" perform feature prioritization using bootstrap sampling
Args:
run_parameters: parameter set dictionary.
"""
max_cpu = run_parameters["max_cpu"]
run_parameters["results_tmp_directory"] = kn.create_dir(run_parameters["results_directory"], 'tmp')
phenotype_df = kn.get_spreadsheet_df(run_parameters["phenotype_name_full_path"])
spreadsheet_df = kn.get_spreadsheet_df(run_parameters["spreadsheet_name_full_path"])
phenotype_df = phenotype_df.T
n_bootstraps = run_parameters["number_of_bootstraps"]
len_phenotype = len(phenotype_df.index)
array_of_jobs = range(0, len_phenotype)
for i in range(0, len_phenotype, max_cpu):
jobs_id = array_of_jobs[i:i + max_cpu]
number_of_jobs = len(jobs_id)
zipped_arguments = dstutil.zip_parameters(run_parameters, spreadsheet_df, phenotype_df, n_bootstraps, jobs_id)
dstutil.parallelize_processes_locally(run_bootstrap_correlation_worker, zipped_arguments, number_of_jobs)
write_phenotype_data_all(run_parameters)
kn.remove_dir(run_parameters["results_tmp_directory"])
def run_bootstrap_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, n_bootstraps, job_id):
""" core function for parallel run_bootstrap_correlation
Args:
run_parameters: dict of parameters
spreadsheet_df: spreadsheet data frame
phenotype_df: phenotype data frame
n_bootstraps: number of bootstrap samples to use
job_id: parallel iteration number
"""
np.random.seed(job_id)
phenotype_df = phenotype_df.iloc[[job_id], :]
spreadsheet_df, phenotype_df, msg = datacln.check_input_value_for_gene_prioritazion(spreadsheet_df, phenotype_df)
pearson_array = get_correlation(spreadsheet_df.values, phenotype_df.values[0], run_parameters)
borda_count = np.zeros(spreadsheet_df.shape[0])
gm_accumulator = np.ones(spreadsheet_df.shape[0])
for bootstrap_number in range(0, n_bootstraps):
sample_random, sample_permutation = sample_a_matrix_pearson(spreadsheet_df.values, 1.0, run_parameters["cols_sampling_fraction"])
phenotype_response = phenotype_df.values[0, None]
phenotype_response = phenotype_response[0, sample_permutation]
pc_array = get_correlation(sample_random, phenotype_response, run_parameters)
borda_count = sum_array_ranking_to_borda_count(borda_count, np.abs(pc_array))
gm_accumulator = (np.abs(pc_array) + EPSILON_0) * gm_accumulator
pcc_gm_array = gm_accumulator ** (1 / n_bootstraps)
borda_count = borda_count / n_bootstraps
phenotype_name = phenotype_df.index.values[0]
feature_name_list = spreadsheet_df.index
viz_score = (borda_count - min(borda_count)) / (max(borda_count) - min(borda_count))
generate_bootstrap_correlation_output(borda_count, viz_score, pearson_array,
phenotype_name, feature_name_list, run_parameters)
def generate_bootstrap_correlation_output(borda_count, viz_score, pearson_array,
phenotype_name, feature_name_list, run_parameters):
""" Save final output of correlation
Args:
pearson_array: pearson correlation coefficient array
phenotype_name: name of the phenotype
        feature_name_list: list of the features correlated (same size as pearson_array)
run_parameters: dictionary of run parameters with key 'results_directory'
"""
phenotype_name_list = np.repeat(phenotype_name, len(feature_name_list))
viz_score = np.round(viz_score, 8)
borda_count = np.round(borda_count, 8)
pearson_array = np.round(pearson_array, 8)
output_val = np.column_stack((phenotype_name_list, feature_name_list, borda_count, viz_score, pearson_array))
df_header = ['Response', 'Feature_ID', 'quantitative_sorting_score', 'visualization_score', 'baseline_score']
result_df = pd.DataFrame(columns=df_header)
result_df['Response'] = phenotype_name_list
result_df['Feature_ID'] = feature_name_list
result_df['quantitative_sorting_score'] = borda_count
result_df['visualization_score'] = viz_score
result_df['baseline_score'] = pearson_array
result_df = result_df.sort_values("visualization_score", ascending=0)
result_df.index = range(result_df.shape[0])
write_one_phenotype(result_df, phenotype_name, feature_name_list, run_parameters)
def get_correlation(spreadsheet_mat, phenotype_response, run_parameters):
""" correlation function definition for all run methods
Args:
spreadsheet_mat: features x samples
phenotype_response: one x samples
run_parameters: with key 'correlation_measure'
Returns:
correlation_array: features x one
"""
correlation_array = np.zeros(spreadsheet_mat.shape[0])
if 'correlation_measure' in run_parameters:
if run_parameters['correlation_measure'] == 'pearson':
spreadsheet_mat = spreadsheet_mat - spreadsheet_mat.mean(axis=1).reshape((-1, 1))
phenotype_response = phenotype_response - phenotype_response.mean()
spreadsheet_mat_var = np.std(spreadsheet_mat, axis=1)
phenotype_response_var = np.std(phenotype_response)
numerator = spreadsheet_mat.dot(phenotype_response)
denominator = spreadsheet_mat_var * phenotype_response_var * spreadsheet_mat.shape[1]
with np.errstate(divide='ignore', invalid='ignore'):
correlation_array = np.true_divide(numerator, denominator)
correlation_array[denominator==0] = 0
return correlation_array
if run_parameters['correlation_measure'] == 't_test':
a = spreadsheet_mat[:, phenotype_response!=0]
b = spreadsheet_mat[:, phenotype_response==0]
d = np.mean(a, axis=1) - np.mean(b, axis=1)
denom = np.sqrt(np.var(a, axis=1, ddof=1)/a.shape[1] + np.var(b, axis=1, ddof=1)/b.shape[1])
with np.errstate(divide='ignore', invalid='ignore'):
correlation_array = np.divide(d, denom)
correlation_array[np.isnan(denom)] = 0
return correlation_array
return correlation_array
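# Minimal sanity sketch for the 'pearson' branch above (toy numbers only, not part of the
# pipeline): the row-wise correlations should agree with np.corrcoef computed one feature
# at a time, and zero-variance rows are mapped to 0 by the denominator guard.
def _check_get_correlation_pearson():
    toy_spreadsheet = np.array([[1.0, 2.0, 3.0, 4.0],
                                [4.0, 3.0, 2.0, 1.0],
                                [1.0, 1.0, 1.0, 1.0]])
    toy_phenotype = np.array([1.0, 2.0, 3.0, 5.0])
    pc = get_correlation(toy_spreadsheet, toy_phenotype, {'correlation_measure': 'pearson'})
    expected = [np.corrcoef(row, toy_phenotype)[0, 1] for row in toy_spreadsheet[:2]]
    assert np.allclose(pc[:2], expected)
    assert pc[2] == 0
    return pc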
def sum_array_ranking_to_borda_count(borda_count, corr_array):
""" sum to borda count with a contigous array added to borda count
Args:
borda_count: the current borda count - same size as correlation array
corr_array: the correlation array to rank and add to the count
Returns:
borda_count: the ranking of the correlation array added to the input borda count
"""
num_elem = borda_count.size
# either assign (no duplicate case) or enumerate the correlation array
if num_elem == (np.unique(corr_array)).size:
borda_count[np.argsort(corr_array)] += np.int_(sorted(np.arange(0, corr_array.size) + 1))
return borda_count
# enumerate the borda vote
borda_add = np.zeros(num_elem)
enum_value = 1
sort_order = np.argsort(corr_array)
current_value = corr_array[sort_order[0]]
for k in range(0, num_elem):
if corr_array[sort_order[k]] != current_value:
enum_value += 1
current_value = corr_array[sort_order[k]]
borda_add[sort_order[k]] = enum_value
    # scale to the number of elements in the array -- philosophical choice here --
borda_add = borda_add + (num_elem - enum_value)
return borda_count + borda_add
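# Small illustration of the Borda accumulation above (toy values only). With no ties the
# ranks 1..n are added in sort order; with ties the shared value gets the same enumerated
# rank, shifted up so the top rank equals the array length.
def _example_borda_count():
    counts = np.zeros(4)
    counts = sum_array_ranking_to_borda_count(counts, np.array([0.1, 0.4, 0.3, 0.2]))
    # no ties: adds ranks [1, 4, 3, 2]
    counts = sum_array_ranking_to_borda_count(counts, np.array([0.5, 0.5, 0.2, 0.1]))
    # tie on 0.5: enumerated ranks [3, 3, 2, 1] are shifted up by 1 -> adds [4, 4, 3, 2]
    return counts  # -> array([5., 8., 6., 4.])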
def sample_a_matrix_pearson(spreadsheet_mat, rows_fraction, cols_fraction):
""" percent_sample x percent_sample random sample, from spreadsheet_mat.
Args:
spreadsheet_mat: feature x sample spread sheet as matrix.
percent_sample: decimal fraction (slang-percent) - [0 : 1].
Returns:
sample_random: A specified precentage sample of the spread sheet.
sample_permutation: the array that correponds to columns sample.
"""
features_size = int(np.round(spreadsheet_mat.shape[0] * (1 - rows_fraction)))
features_permutation = np.random.permutation(spreadsheet_mat.shape[0])
features_permutation = features_permutation[0:features_size].T
patients_size = int(np.round(spreadsheet_mat.shape[1] * cols_fraction))
sample_permutation = np.random.permutation(spreadsheet_mat.shape[1])
sample_permutation = sample_permutation[0:patients_size]
sample_random = spreadsheet_mat[:, sample_permutation]
sample_random[features_permutation[:, None], :] = 0
return sample_random, sample_permutation
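# Shape sketch for the bootstrap sampler above (toy matrix only): every row is returned but
# a random (1 - rows_fraction) share of them is zeroed out, while only cols_fraction of the
# columns are kept, in permuted order.
def _example_bootstrap_sample():
    toy_mat = np.arange(20, dtype=float).reshape(4, 5)  # 4 features x 5 samples
    sample, cols = sample_a_matrix_pearson(toy_mat, rows_fraction=1.0, cols_fraction=0.8)
    # rows_fraction=1.0 -> no rows zeroed; round(0.8 * 5) = 4 columns kept
    assert sample.shape == (4, 4) and cols.shape == (4,)
    return sample, cols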
def zscore_dataframe(features_by_sample_df):
""" zscore by rows for features x samples dataframe
Args:
        features_by_sample_df: features x samples dataframe to z-score along rows
    Returns:
        zscore_df: each row centered to zero mean and scaled by its standard deviation
"""
zscore_df = (features_by_sample_df.sub(features_by_sample_df.mean(axis=1), axis=0)).truediv(
np.maximum(features_by_sample_df.std(axis=1), 1e-12), axis=0)
return zscore_df
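# Quick illustration of the row-wise z-scoring above (toy frame only): each row is centered
# to zero mean and scaled by its own standard deviation, and the 1e-12 floor keeps constant
# rows finite instead of dividing by zero.
def _example_zscore_rows():
    toy_df = pd.DataFrame([[1.0, 2.0, 3.0],
                           [10.0, 10.0, 10.0]],
                          index=['feature_a', 'feature_b'])
    return zscore_dataframe(toy_df)  # feature_a -> [-1, 0, 1]; feature_b -> [0, 0, 0]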
def write_one_phenotype(result_df, phenotype_name, feature_name_list, run_parameters):
""" write the phenotype output file to the results directory and the temporary directory files
Args:
result_df:
phenotype_name:
feature_name_list:
run_parameters:
Output:
{phenotype}_{method}_{correlation_measure}_{timestamp}_viz.tsv
"""
top_gamma_of_sort = run_parameters['top_gamma_of_sort']
result_df.to_csv(get_output_file_name(run_parameters, 'results_directory', phenotype_name, 'viz'), header=True, index=False, sep='\t', float_format="%g")
    download_result_df = pd.DataFrame(data=None, index=None, columns=[phenotype_name])
# Copyright 2014 Open Data Science Initiative and other authors. See AUTHORS.txt
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import csv
import copy
import numpy as np
import scipy.io
import datetime
import json
import yaml
import re
import tarfile
import logging
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s %(levelname)s %(message)s",
filename="/tmp/sods.log",
filemode="w",
)
from functools import reduce
import pandas as pd
from .config import *
from . import access
from . import util
DATAPATH = os.path.expanduser(os.path.expandvars(config.get("datasets", "dir")))
PYTRENDS_AVAILABLE = True
try:
from pytrends.request import TrendReq
except ImportError:
PYTRENDS_AVAILABLE = False
GPY_AVAILABLE = True
try:
import GPy
except ImportError:
GPY_AVAILABLE = False
NETPBMFILE_AVAILABLE = True
try:
import netpbmfile
except ImportError:
NETPBMFILE_AVAILABLE = False
GEOPANDAS_AVAILABLE = True
try:
import geopandas
except ImportError:
GEOPANDAS_AVAILABLE = False
if sys.version_info >= (3, 0):
from urllib.parse import quote
from urllib.request import urlopen
else:
from urllib2 import quote
from urllib2 import urlopen
# Global variables
default_seed = 10000
def bmi_steps(data_set="bmi_steps"):
if not access.data_available(data_set):
access.download_data(data_set)
data = pd.read_csv(os.path.join(access.DATAPATH, data_set, "steps-bmi-data.csv"))
X = np.hstack(
(data["steps"].values[:, np.newaxis], data["bmi"].values[:, np.newaxis])
)
Y = data["gender"].values[:, None]
return access.data_details_return(
{"X": X, "Y": Y, "covariates": ["steps", "bmi"], "response": ["gender"]},
data_set,
)
# The data sets
def boston_housing(data_set="boston_housing"):
if not access.data_available(data_set):
access.download_data(data_set)
all_data = np.genfromtxt(os.path.join(access.DATAPATH, data_set, "housing.data"))
X = all_data[:, 0:13]
Y = all_data[:, 13:14]
return access.data_details_return({"X": X, "Y": Y}, data_set)
def boxjenkins_airline(data_set="boxjenkins_airline", num_train=96):
path = os.path.join(access.DATAPATH, data_set)
if not access.data_available(data_set):
access.download_data(data_set)
data = np.loadtxt(
os.path.join(access.DATAPATH, data_set, "boxjenkins_airline.csv"), delimiter=","
)
Y = data[:num_train, 1:2]
X = data[:num_train, 0:1]
Xtest = data[num_train:, 0:1]
Ytest = data[num_train:, 1:2]
return access.data_details_return(
{
"X": X,
"Y": Y,
"Xtest": Xtest,
"Ytest": Ytest,
"covariates": [util.decimalyear("year")],
"response": ["AirPassengers"],
"info": "Monthly airline passenger data from Box & Jenkins 1976.",
},
data_set,
)
def brendan_faces(data_set="brendan_faces"):
if not access.data_available(data_set):
access.download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(access.DATAPATH, data_set, "frey_rawface.mat"))
Y = mat_data["ff"].T
return access.data_details_return({"Y": Y}, data_set)
def della_gatta_TRP63_gene_expression(data_set="della_gatta", gene_number=None):
if not access.data_available(data_set):
access.download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(access.DATAPATH, data_set, "DellaGattadata.mat"))
X = np.double(mat_data["timepoints"])
if gene_number == None:
Y = mat_data["exprs_tp53_RMA"]
else:
Y = mat_data["exprs_tp53_RMA"][:, gene_number]
if len(Y.shape) == 1:
Y = Y[:, None]
return access.data_details_return({"X": X, "Y": Y, "gene_number": gene_number}, data_set)
def epomeo_gpx(data_set="epomeo_gpx", sample_every=4):
"""Data set of three GPS traces of the same movement on Mt Epomeo in Ischia. Requires gpxpy to run."""
try:
import gpxpy
import gpxpy.gpx
except ImportError:
print("Need to install gpxpy to process the empomeo_gpx dataset.")
return
if not access.data_available(data_set):
access.download_data(data_set)
files = [
"endomondo_1",
"endomondo_2",
"garmin_watch_via_endomondo",
"viewranger_phone",
"viewranger_tablet",
]
X = []
for file in files:
gpx_file = open(os.path.join(access.DATAPATH, "epomeo_gpx", file + ".gpx"), "r")
gpx = gpxpy.parse(gpx_file)
segment = gpx.tracks[0].segments[0]
points = [
point
for track in gpx.tracks
for segment in track.segments
for point in segment.points
]
data = [
[
(
point.time
- datetime.datetime(2013, 8, 21, tzinfo=datetime.timezone.utc)
).total_seconds(),
point.latitude,
point.longitude,
point.elevation,
]
for point in points
]
X.append(np.asarray(data)[::sample_every, :])
gpx_file.close()
X = pd.DataFrame(
X[0], columns=["seconds", "latitude", "longitude", "elevation"]
)
X.set_index(keys="seconds", inplace=True)
return access.data_details_return(
{
"X": X,
"info": "Data is an array containing time in seconds, latitude, longitude and elevation in that order.",
},
data_set,
)
if GEOPANDAS_AVAILABLE:
def nigerian_administrative_zones(
data_set="nigerian_administrative_zones", refresh_data=False
):
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
from zipfile import ZipFile
with ZipFile(
os.path.join(access.DATAPATH, data_set, "nga_admbnda_osgof_eha_itos.gdb.zip"), "r"
) as zip_ref:
zip_ref.extractall(
os.path.join(access.DATAPATH, data_set, "nga_admbnda_osgof_eha_itos.gdb")
)
states_file = "nga_admbnda_osgof_eha_itos.gdb/nga_admbnda_osgof_eha_itos.gdb/nga_admbnda_osgof_eha_itos.gdb/nga_admbnda_osgof_eha_itos.gdb/"
from geopandas import read_file
Y = read_file(os.path.join(access.DATAPATH, data_set, states_file), layer=1)
Y.crs = "EPSG:4326"
        Y = Y.set_index("admin1Name_en")
return access.data_details_return({"Y": Y}, data_set)
def nigerian_covid(data_set="nigerian_covid", refresh_data=False):
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "line-list-nigeria.csv")
Y = pd.read_csv(
filename,
parse_dates=[
"date",
"date_confirmation",
"date_onset_symptoms",
"date_admission_hospital",
"death_date",
],
)
return access.data_details_return({"Y": Y}, data_set)
def nigeria_nmis(data_set="nigeria_nmis", refresh_data=False):
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "healthmopupandbaselinenmisfacility.csv")
Y = pd.read_csv(filename)
return access.data_details_return({"Y": Y}, data_set)
def nigerian_population(data_set="nigerian_population", refresh_data=False):
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "nga_admpop_adm1_2020.csv")
Y = pd.read_csv(filename)
Y.dropna(axis=1, how='all', inplace=True)
Y.dropna(axis=0, how='any', inplace=True)
Y.rename(columns = {"ADM0_NAME":"admin0Name_en",
"ADM0_PCODE" : "admin0Pcode",
"ADM1_NAME" : "admin1Name_en",
"ADM1_PCODE" : "admin1Pcode",
"T_TL" :"population"},
inplace=True)
Y["admin0Name_en"] = Y["admin0Name_en"].str.capitalize()
Y["admin1Name_en"] = Y["admin1Name_en"].str.capitalize()
Y = Y.set_index("admin1Name_en")
return access.data_details_return({"Y": Y}, data_set)
def pmlr(volumes="all", data_set="pmlr", refresh_data=False):
"""Abstracts from the Proceedings of Machine Learning Research"""
if not access.data_available(data_set) and not refresh_data:
access.download_data(data_set)
proceedings = access.pmlr_proceedings_list(data_set)
# Create a new resources entry for downloading contents of proceedings.
data_name_full = "pmlr"
access.data_resources[data_set]["dirs"] = [['.']]
for entry in proceedings:
if volumes == "all" or entry["volume"] in volumes:
file = entry["yaml"].split("/")[-1]
proto, url = entry["yaml"].split("//")
file = os.path.basename(url)
dirname = os.path.dirname("/".join(url.split("/")[1:]))
urln = proto + "//" + url.split("/")[0]
access.data_resources[data_name_full]["files"].append([file])
access.data_resources[data_name_full]["dirs"].append([dirname])
access.data_resources[data_name_full]["urls"].append(urln)
Y = []
# Download the volume data
if not access.data_available(data_name_full):
access.download_data(data_name_full)
for entry in reversed(proceedings):
volume = entry["volume"]
# data_name_full = data_name_full_stub + "v" + str(volume)
if volumes == "all" or volume in volumes:
file = entry["yaml"].split("/")[-1]
proto, url = entry["yaml"].split("//")
file = os.path.basename(url)
dirname = os.path.dirname("/".join(url.split("/")[1:]))
urln = proto + "//" + url.split("/")[0]
volume_file = open(
os.path.join(access.DATAPATH, data_name_full, dirname, file), "r"
)
Y += yaml.load(volume_file, Loader=yaml.FullLoader)
Y = pd.DataFrame(Y)
Y["published"] = pd.to_datetime(Y["published"])
# Y.columns.values[4] = util.json_object('authors')
# Y.columns.values[7] = util.json_object('editors')
try:
Y["issued"] = Y["issued"].apply(
lambda x: np.datetime64(datetime.datetime(*x["date-parts"]))
)
    except TypeError as e:
        raise TypeError("Type error for entry\n" + str(Y["issued"])) from e
def full_name(person):
order = ["given", "prefix", "family", "suffix"]
names = [str(person[key]) for key in order if key in person and person[key] is not None]
return " ".join(names)
Y["author"] = Y["author"].apply(
lambda x: ', '.join([full_name(author) for author in x])
)
Y["editor"] = Y["editor"].apply(
lambda x: ', '.join([full_name(editor) for editor in x])
)
columns = list(Y.columns)
columns[14] = util.datetime64_("published")
columns[11] = util.datetime64_("issued")
Y.columns = columns
return access.data_details_return(
{
"Y": Y,
"info": "Data is a pandas data frame containing each paper, its abstract, authors, volumes and venue.",
},
data_set,
)
def football_data(season="1617", data_set="football_data"):
"""Football data from English games since 1993. This downloads data from football-data.co.uk for the given season. """
league_dict = {"E0": 0, "E1": 1, "E2": 2, "E3": 3, "EC": 4}
def league2num(string):
if isinstance(string, bytes):
string = string.decode("utf-8")
return league_dict[string]
def football2num(string):
if isinstance(string, bytes):
string = string.decode("utf-8")
if string in access.football_dict:
return access.football_dict[string]
else:
access.football_dict[string] = len(access.football_dict) + 1
return len(access.football_dict) + 1
def datestr2num(s):
return util.date2num(datetime.datetime.strptime(s.decode("utf-8"), "%d/%m/%y"))
data_set_season = data_set + "_" + season
access.data_resources[data_set_season] = copy.deepcopy(access.data_resources[data_set])
access.data_resources[data_set_season]["urls"][0] += season + "/"
start_year = int(season[0:2])
end_year = int(season[2:4])
files = ["E0.csv", "E1.csv", "E2.csv", "E3.csv"]
if start_year > 4 and start_year < 93:
files += ["EC.csv"]
access.data_resources[data_set_season]["files"] = [files]
if not access.data_available(data_set_season):
access.download_data(data_set_season)
start = True
for file in reversed(files):
filename = os.path.join(access.DATAPATH, data_set_season, file)
# rewrite files removing blank rows.
writename = os.path.join(access.DATAPATH, data_set_season, "temp.csv")
input = open(filename, encoding="ISO-8859-1")
output = open(writename, "w")
writer = csv.writer(output)
for row in csv.reader(input):
if any(field.strip() for field in row):
writer.writerow(row)
input.close()
output.close()
table = np.loadtxt(
writename,
skiprows=1,
usecols=(0, 1, 2, 3, 4, 5),
converters={
0: league2num,
1: datestr2num,
2: football2num,
3: football2num,
},
delimiter=",",
)
if start:
X = table[:, :4]
Y = table[:, 4:]
start = False
else:
X = np.append(X, table[:, :4], axis=0)
Y = np.append(Y, table[:, 4:], axis=0)
return access.data_details_return(
{
"X": X,
"Y": Y,
"covariates": [
util.discrete(league_dict, "league"),
util.datenum("match_day"),
util.discrete(access.football_dict, "home team"),
util.discrete(access.football_dict, "away team"),
],
"response": [util.integer("home score"), util.integer("away score")],
},
data_set,
)
def sod1_mouse(data_set="sod1_mouse"):
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "sod1_C57_129_exprs.csv")
Y = pd.read_csv(filename, header=0, index_col=0)
num_repeats = 4
num_time = 4
num_cond = 4
return access.data_details_return({"Y": Y}, data_set)
def spellman_yeast(data_set="spellman_yeast"):
"""This is the classic Spellman et al 1998 Yeast Cell Cycle gene expression data that is widely used as a benchmark."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "combined.txt")
Y = pd.read_csv(filename, header=0, index_col=0, sep="\t")
return access.data_details_return({"Y": Y}, data_set)
def spellman_yeast_cdc15(data_set="spellman_yeast"):
"""These are the gene expression levels from the CDC-15 experiment of Spellman et al (1998)."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "combined.txt")
Y = pd.read_csv(filename, header=0, index_col=0, sep="\t")
t = np.asarray(
[
10,
30,
50,
70,
80,
90,
100,
110,
120,
130,
140,
150,
170,
180,
190,
200,
210,
220,
230,
240,
250,
270,
290,
]
)
times = ["cdc15_" + str(time) for time in t]
Y = Y[times].T
t = t[:, None]
return access.data_details_return(
{
"Y": Y,
"t": t,
"info": "Time series of synchronized yeast cells from the CDC-15 experiment of Spellman et al (1998).",
},
data_set,
)
def lee_yeast_ChIP(data_set="lee_yeast_ChIP"):
"""Yeast ChIP data from Lee et al."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "binding_by_gene.tsv")
S = pd.read_csv(filename, header=1, index_col=0, sep="\t")
transcription_factors = [col for col in S.columns if col[:7] != "Unnamed"]
annotations = S[["Unnamed: 1", "Unnamed: 2", "Unnamed: 3"]]
S = S[transcription_factors]
return access.data_details_return(
{
"annotations": annotations,
"Y": S,
"transcription_factors": transcription_factors,
},
data_set,
)
def fruitfly_tomancak(data_set="fruitfly_tomancak", gene_number=None):
"""Fruitfly gene expression data from Tomancak et al."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "tomancak_exprs.csv")
Y = pd.read_csv(filename, header=0, index_col=0).T
num_repeats = 3
num_time = 12
xt = np.linspace(0, num_time - 1, num_time)
xr = np.linspace(0, num_repeats - 1, num_repeats)
xtime, xrepeat = np.meshgrid(xt, xr)
X = np.vstack((xtime.flatten(), xrepeat.flatten())).T
return access.data_details_return({"X": X, "Y": Y, "gene_number": gene_number}, data_set)
def drosophila_protein(data_set="drosophila_protein"):
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "becker_et_al.csv")
Y = pd.read_csv(filename, header=0)
return access.data_details_return({"Y": Y}, data_set)
def drosophila_knirps(data_set="drosophila_protein"):
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "becker_et_al.csv")
# in the csv file we have facts_kni and ext_kni. We treat facts_kni as protein and ext_kni as mRNA
df = pd.read_csv(filename, header=0)
t = df["t"].to_numpy()[:, np.newaxis]
x = df["x"].to_numpy()[:, np.newaxis]
g = df["expression1"].to_numpy()[:, np.newaxis]
p = df["expression2"].to_numpy()[:, np.newaxis]
leng = x.shape[0]
T = np.vstack([t, t])
S = np.vstack([x, x])
inx = np.zeros(leng * 2)[:, None]
inx[leng * 2 // 2 : leng * 2] = 1
X = np.hstack([T, S, inx])
Y = np.vstack([g, p])
return access.data_details_return({"Y": Y, "X": X}, data_set)
if PYTRENDS_AVAILABLE:
def google_trends(
query_terms=["big data", "machine learning", "data science"],
data_set="google_trends",
refresh_data=False,
):
"""
Data downloaded from Google trends for the given query terms. Warning:
if you use this function multiple times in a row you may get blocked
due to terms of service violations.
The function will cache the result of any query in an attempt to
avoid this. If you wish to refresh an old query set refresh_data
to True. The original function is inspired by this notebook:
http://nbviewer.ipython.org/github/sahuguet/notebooks/blob/master/GoogleTrends%20meet%20Notebook.ipynb
But the update makes use of `pytrends`
"""
query_terms = sorted(query_terms)  # avoid mutating the (mutable) default argument in place
from pytrends.request import TrendReq
pytrends = TrendReq(hl="en-US", tz=360)
# Create directory name for data
dir_path = os.path.join(access.DATAPATH, "google_trends")
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
dir_name = "-".join(query_terms)
dir_name = dir_name.replace(" ", "_")
dir_path = os.path.join(dir_path, dir_name)
file = "data.csv"
file_name = os.path.join(dir_path, file)
if not os.path.exists(file_name) or refresh_data:
print(
"Accessing Google trends to acquire the data. Note that repeated accesses will result in a block due to a google terms of service violation. Failure at this point may be due to such blocks."
)
# quote the query terms.
quoted_terms = []
for term in query_terms:
quoted_terms.append(quote(term))
print("Query terms: ", ", ".join(query_terms))
print("Fetching query:")
pytrends = TrendReq(hl="en-US", tz=0)
pytrends.build_payload(query_terms, cat=0, timeframe="all", geo="", gprop="")
df = pytrends.interest_over_time()
print("Done.")
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
df["Date"] = df.index
df = df.set_index(np.array(range(len(df.index))))
df = df.rename(columns={"date": "Date"})
df.to_csv(file_name)
loaddf = False
else:
print(
"Reading cached data for google trends. To refresh the cache set 'refresh_data=True' when calling this function."
)
print("Query terms: ", ", ".join(query_terms))
df = pd.read_csv(file_name, parse_dates=[0])
loaddf = True
columns = df.columns
terms = len(query_terms)
if loaddf:
X = np.asarray(
[
(
util.date2num(
datetime.datetime.strptime(df.iloc[row]["Date"], "%Y-%m-%d")
),
i,
)
for i in range(terms)
for row in df.index
]
)
else:
X = np.asarray(
[
(util.date2num(df.iloc[row]["Date"]), i)
for i in range(terms)
for row in df.index
]
)
Y = np.asarray(
[[df.iloc[row][query_terms[i]]] for i in range(terms) for row in df.index]
)
output_info = columns[1:]
cats = {}
for i in range(terms):
cats[query_terms[i]] = i
return access.data_details_return(
{
"data frame": df,
"X": X,
"Y": Y,
"query_terms": query_terms,
"info": "Data downloaded from google trends with query terms: "
+ ", ".join(query_terms)
+ ".",
"covariates": [util.datenum("date"), util.discrete(cats, "query_terms")],
"response": ["normalized interest"],
},
data_set,
)
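# Example usage (added sketch; repeated live queries can be rate limited by
# Google, so the cached CSV is reused unless refresh_data=True):
#   data = google_trends(query_terms=["machine learning", "data science"])
#   df = data["data frame"]        # normalised interest per query term over time
#   X, Y = data["X"], data["Y"]    # stacked (date, term-index) inputs and interest values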
def oil(data_set="three_phase_oil_flow"):
"""The three phase oil data from Bishop and James (1993)."""
if not access.data_available(data_set):
access.download_data(data_set)
oil_train_file = os.path.join(access.DATAPATH, data_set, "DataTrn.txt")
oil_trainlbls_file = os.path.join(access.DATAPATH, data_set, "DataTrnLbls.txt")
oil_test_file = os.path.join(access.DATAPATH, data_set, "DataTst.txt")
oil_testlbls_file = os.path.join(access.DATAPATH, data_set, "DataTstLbls.txt")
oil_valid_file = os.path.join(access.DATAPATH, data_set, "DataVdn.txt")
oil_validlbls_file = os.path.join(access.DATAPATH, data_set, "DataVdnLbls.txt")
fid = open(oil_train_file)
X = np.fromfile(fid, sep="\t").reshape((-1, 12))
fid.close()
fid = open(oil_test_file)
Xtest = np.fromfile(fid, sep="\t").reshape((-1, 12))
fid.close()
fid = open(oil_valid_file)
Xvalid = np.fromfile(fid, sep="\t").reshape((-1, 12))
fid.close()
fid = open(oil_trainlbls_file)
Y = np.fromfile(fid, sep="\t").reshape((-1, 3)) * 2.0 - 1.0
fid.close()
fid = open(oil_testlbls_file)
Ytest = np.fromfile(fid, sep="\t").reshape((-1, 3)) * 2.0 - 1.0
fid.close()
fid = open(oil_validlbls_file)
Yvalid = np.fromfile(fid, sep="\t").reshape((-1, 3)) * 2.0 - 1.0
fid.close()
return access.data_details_return(
    {
        "X": X,
        "Y": Y,
        "Xtest": Xtest,
        "Ytest": Ytest,
        "Xvalid": Xvalid,
        "Yvalid": Yvalid,
    },
    data_set,
)
# else:
# throw an error
def leukemia(data_set="leukemia"):
if not access.data_available(data_set):
access.download_data(data_set)
all_data = np.genfromtxt(os.path.join(access.DATAPATH, data_set, "leuk.dat"))
X = all_data[1:, 1:]
censoring = all_data[1:, 1]
Y = all_data[1:, 0]
return access.data_details_return({"X": X, "censoring": censoring, "Y": Y}, data_set)
def oil_100(seed=default_seed, data_set="three_phase_oil_flow"):
np.random.seed(seed=seed)
data = oil()
indices = util.permute(1000)
indices = indices[0:100]
X = data["X"][indices, :]
Y = data["Y"][indices, :]
return access.data_details_return(
{
"X": X,
"Y": Y,
"info": "Subsample of the full oil data extracting 100 values randomly without replacement, here seed was "
+ str(seed),
},
data_set,
)
def pumadyn(seed=default_seed, data_set="pumadyn-32nm"):
"""Data from a simulation of the Puma robotic arm generated by <NAME>."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
tar = tarfile.open(os.path.join(dir_path, "pumadyn-32nm.tar.gz"))
print("Extracting file.")
tar.extractall(path=dir_path)
tar.close()
# Data is variance 1, no need to normalize.
data = np.loadtxt(
os.path.join(access.DATAPATH, data_set, "pumadyn-32nm", "Dataset.data.gz")
)
indices = util.permute(data.shape[0])
indicesTrain = indices[0:7168]
indicesTest = indices[7168:]  # use all remaining points for the test set
indicesTrain.sort(axis=0)
indicesTest.sort(axis=0)
X = data[indicesTrain, 0:-2]
Y = data[indicesTrain, -1][:, None]
Xtest = data[indicesTest, 0:-2]
Ytest = data[indicesTest, -1][:, None]
return access.data_details_return(
{"X": X, "Y": Y, "Xtest": Xtest, "Ytest": Ytest, "seed": seed}, data_set
)
def robot_wireless(data_set="robot_wireless"):
# WiFi access point strengths on a tour around UW Paul Allen building.
if not access.data_available(data_set):
access.download_data(data_set)
file_name = os.path.join(access.DATAPATH, data_set, "uw-floor.txt")
all_time = np.genfromtxt(file_name, usecols=(0))
macaddress = np.genfromtxt(file_name, usecols=(1), dtype=str)
x = np.genfromtxt(file_name, usecols=(2))
y = np.genfromtxt(file_name, usecols=(3))
strength = np.genfromtxt(file_name, usecols=(4))
addresses = np.unique(macaddress)
times = np.unique(all_time)
addresses.sort()
times.sort()
allY = np.zeros((len(times), len(addresses)))
allX = np.zeros((len(times), 3))
allY[:] = -92.0
strengths = {}
for address, j in zip(addresses, list(range(len(addresses)))):
ind = np.nonzero(address == macaddress)
temp_strengths = strength[ind]
temp_x = x[ind]
temp_y = y[ind]
temp_times = all_time[ind]
for time in temp_times:
vals = time == temp_times
if any(vals):
ind2 = np.nonzero(vals)
i = np.nonzero(time == times)
allY[i, j] = temp_strengths[ind2]
allX[i, 0] = time
allX[i, 1] = temp_x[ind2]
allX[i, 2] = temp_y[ind2]
allY = (allY + 85.0) / 15.0
X = allX[0:215, :]
Y = allY[0:215, :]
Xtest = allX[215:, :]
Ytest = allY[215:, :]
return access.data_details_return(
{
"X": X,
"Y": Y,
"Xtest": Xtest,
"Ytest": Ytest,
"addresses": addresses,
"times": times,
"covariates": [util.timestamp("time", "%H:%M:%S.%f"), "X", "Y"],
"response": addresses,
},
data_set,
)
def silhouette(data_set="ankur_pose_data"):
    """<NAME> and <NAME>'s silhouette data."""
if not access.data_available(data_set):
access.download_data(data_set)
mat_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "ankurDataPoseSilhouette.mat")
)
inMean = np.mean(mat_data["Y"])
inScales = np.sqrt(np.var(mat_data["Y"]))
X = mat_data["Y"] - inMean
X = X / inScales
Xtest = mat_data["Y_test"] - inMean
Xtest = Xtest / inScales
Y = mat_data["Z"]
Ytest = mat_data["Z_test"]
return access.data_details_return(
{"X": X, "Y": Y, "Xtest": Xtest, "Ytest": Ytest}, data_set
)
def decampos_digits(
data_set="decampos_characters", which_digits=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
):
"""Digits data set from Teo de Campos"""
if not access.data_available(data_set):
access.download_data(data_set)
path = os.path.join(access.DATAPATH, data_set)
digits = np.load(os.path.join(path, "digits.npy"))
digits = digits[which_digits, :, :, :]
num_classes, num_samples, height, width = digits.shape
Y = digits.reshape(
(digits.shape[0] * digits.shape[1], digits.shape[2] * digits.shape[3])
)
lbls = np.array([[l] * num_samples for l in which_digits]).reshape(Y.shape[0], 1)
str_lbls = np.array([[str(l)] * num_samples for l in which_digits])
return access.data_details_return(
{
"Y": Y,
"lbls": lbls,
"str_lbls": str_lbls,
"info": "Digits data set from the de Campos characters data",
},
data_set,
)
def ripley_synth(data_set="ripley_prnn_data"):
"""Synthetic classification data set generated by <NAME> for his Neural Networks book."""
if not access.data_available(data_set):
access.download_data(data_set)
train = np.genfromtxt(os.path.join(access.DATAPATH, data_set, "synth.tr"), skip_header=1)
X = train[:, 0:2]
y = train[:, 2:3]
test = np.genfromtxt(os.path.join(access.DATAPATH, data_set, "synth.te"), skip_header=1)
Xtest = test[:, 0:2]
ytest = test[:, 2:3]
return access.data_details_return(
{
"X": X,
"Y": y,
"Xtest": Xtest,
"Ytest": ytest,
"info": "Synthetic data generated by Ripley for a two class classification problem.",
},
data_set,
)
"""def global_average_temperature(data_set='global_temperature', num_train=1000, refresh_data=False):
path = os.path.join(access.DATAPATH, data_set)
if access.data_available(data_set) and not refresh_data:
print('Using cached version of the data set, to use latest version set refresh_data to True')
else:
access.download_data(data_set)
data = np.loadtxt(os.path.join(access.DATAPATH, data_set, 'GLBTS.long.data'))
print('Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0])
allX = data[data[:, 3]!=-99.99, 2:3]
allY = data[data[:, 3]!=-99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return access.data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'info': "Global average temperature data with " + str(num_train) + " values used as training points."}, data_set)
"""
def mauna_loa(data_set="mauna_loa", num_train=545, refresh_data=False):
"""CO2 concentrations from the Mauna Loa observatory."""
path = os.path.join(access.DATAPATH, data_set)
if access.data_available(data_set) and not refresh_data:
print(
"Using cached version of the data set, to use latest version set refresh_data to True"
)
else:
access.download_data(data_set)
data = np.loadtxt(os.path.join(access.DATAPATH, data_set, "co2_mm_mlo.txt"))
print(
"Most recent data observation from month ",
data[-1, 1],
" in year ",
data[-1, 0],
)
allX = data[data[:, 3] != -99.99, 2:3]
allY = data[data[:, 3] != -99.99, 3:4]
X = allX[:num_train, 0:1]
Xtest = allX[num_train:, 0:1]
Y = allY[:num_train, 0:1]
Ytest = allY[num_train:, 0:1]
return access.data_details_return(
{
"X": X,
"Y": Y,
"Xtest": Xtest,
"Ytest": Ytest,
"covariates": [util.decimalyear("year", "%Y-%m")],
"response": ["CO2/ppm"],
"info": "Mauna Loa data with "
+ str(num_train)
+ " values used as training points.",
},
data_set,
)
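# Example usage (added sketch): the first num_train observations form the
# training set and the rest the test set, with decimal-year inputs and CO2/ppm
# responses.
#   data = mauna_loa(num_train=545)
#   X, Y, Xtest, Ytest = data["X"], data["Y"], data["Xtest"], data["Ytest"]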
def osu_run1(data_set="osu_run1", sample_every=4):
"""Ohio State University's Run1 motion capture data set."""
path = os.path.join(access.DATAPATH, data_set)
if not access.data_available(data_set):
import zipfile
access.download_data(data_set)
zip = zipfile.ZipFile(os.path.join(access.DATAPATH, data_set, "run1TXT.ZIP"), "r")
for name in zip.namelist():
zip.extract(name, path)
from . import mocap
Y, connect = mocap.load_text_data("Aug210106", path)
Y = Y[0:-1:sample_every, :]
return access.data_details_return({"Y": Y, "connect": connect}, data_set)
def swiss_roll_generated(num_samples=1000, sigma=0.0):
with open(
    os.path.join(os.path.dirname(__file__), "datasets", "swiss_roll.pickle"), "rb"
) as f:
if sys.version_info >= (3, 0):
import pickle
else:
import cPickle as pickle
data = pickle.load(f)
Na = data["Y"].shape[0]
perm = np.random.permutation(np.r_[:Na])[:num_samples]
Y = data["Y"][perm, :]
t = data["t"][perm]
c = data["colors"][perm, :]
so = np.argsort(t)
Y = Y[so, :]
t = t[so]
c = c[so, :]
return {"Y": Y, "t": t, "colors": c}
def singlecell(data_set="guo_qpcr_2010"):
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "guo_qpcr.csv")
Y = pd.read_csv(filename, header=0, index_col=0)
genes = Y.columns
labels = Y.index
# data = np.loadtxt(os.path.join(dir_path, 'singlecell.csv'), delimiter=",", dtype=str)
return access.data_details_return(
{
"Y": Y,
"info": "qPCR singlecell experiment in Mouse, measuring 48 gene expressions in 1-64 cell states. The labels have been created as in Guo et al. [2010]",
"genes": genes,
"labels": labels,
},
data_set,
)
def swiss_roll_1000():
return swiss_roll(num_samples=1000)
def swiss_roll(num_samples=3000, data_set="swiss_roll"):
if not access.data_available(data_set):
access.download_data(data_set)
mat_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "swiss_roll_data.mat")
)
Y = mat_data["X_data"][:, 0:num_samples].transpose()
return access.data_details_return(
{
"Y": Y,
"Full": mat_data["X_data"],
"info": "The first "
+ str(num_samples)
+ " points from the swiss roll data of Tennenbaum, de Silva and Langford (2001).",
},
data_set,
)
def isomap_faces(num_samples=698, data_set="isomap_face_data"):
if not access.data_available(data_set):
access.download_data(data_set)
mat_data = scipy.io.loadmat(os.path.join(access.DATAPATH, data_set, "face_data.mat"))
Y = mat_data["images"][:, 0:num_samples].transpose()
return access.data_details_return(
{
"Y": Y,
"poses": mat_data["poses"],
"lights": mat_data["lights"],
"info": "The first "
+ str(num_samples)
+ " points from the face data of Tennenbaum, de Silva and Langford (2001).",
},
data_set,
)
if GPY_AVAILABLE:
def toy_rbf_1d(seed=default_seed, num_samples=500):
"""
Samples values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1.
:param seed: seed to use for random sampling.
:type seed: int
:param num_samples: number of samples to sample in the function (default 500).
:type num_samples: int
"""
np.random.seed(seed=seed)
num_in = 1
X = np.random.uniform(low=-1.0, high=1.0, size=(num_samples, num_in))
X.sort(axis=0)
rbf = GPy.kern.RBF(num_in, variance=1.0, lengthscale=np.array((0.25,)))
white = GPy.kern.White(num_in, variance=1e-2)
kernel = rbf + white
K = kernel.K(X)
y = np.reshape(
np.random.multivariate_normal(np.zeros(num_samples), K), (num_samples, 1)
)
return {
"X": X,
"Y": y,
"info": "Sampled "
+ str(num_samples)
+ " values of a function from an RBF covariance with very small noise for inputs uniformly distributed between -1 and 1.",
}
def toy_rbf_1d_50(seed=default_seed):
np.random.seed(seed=seed)
data = toy_rbf_1d()
indices = util.permute(data["X"].shape[0])
indices = indices[0:50]
indices.sort(axis=0)
X = data["X"][indices, :]
Y = data["Y"][indices, :]
return {
"X": X,
"Y": Y,
"info": "Subsamples the toy_rbf_sample with 50 values randomly taken from the original sample.",
"seed": seed,
}
def toy_linear_1d_classification(seed=default_seed):
"""Simple classification data in one dimension for illustrating models."""
def sample_class(f):
p = 1.0 / (1.0 + np.exp(-f))
c = np.random.binomial(1, p)
c = np.where(c, 1, -1)
return c
np.random.seed(seed=seed)
x1 = np.random.normal(-3, 5, 20)
x2 = np.random.normal(3, 5, 20)
X = (np.r_[x1, x2])[:, None]
return {
"X": X,
"Y": sample_class(2.0 * X),
"F": 2.0 * X,
"covariates": ["X"],
"response": [util.discrete({"positive": 1, "negative": -1})],
"seed": seed,
}
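# Added note (not in the original source): sample_class squashes the latent
# function through a logistic, p = 1 / (1 + exp(-f)), and draws a Bernoulli
# label mapped to {-1, +1}. For example f = 0 gives p = 0.5 while f = 2 gives
# p ~= 0.88, so strongly positive f values are very likely labelled +1.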
def airline_delay(
data_set="airline_delay", num_train=700000, num_test=100000, seed=default_seed
):
"""Airline delay data used in Gaussian Processes for Big Data by Hensman, Fusi and Lawrence"""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "airline_delay.hdf")
# 1. Load the dataset
data = pd.read_hdf(filename)
# WARNING: removing year
data.pop("Year")
# Get data matrices
Yall = data.pop("ArrDelay").values[:, None]
Xall = data.values
# Subset the data (memory!!)
all_data = num_train + num_test
Xall = Xall[:all_data]
Yall = Yall[:all_data]
# Get testing points
np.random.seed(seed=seed)
N_shuffled = util.permute(Yall.shape[0])
train, test = N_shuffled[num_test:], N_shuffled[:num_test]
X, Y = Xall[train], Yall[train]
Xtest, Ytest = Xall[test], Yall[test]
covariates = [
"month",
"day of month",
"day of week",
"departure time",
"arrival time",
"air time",
"distance to travel",
"age of aircraft / years",
]
response = ["delay"]
return access.data_details_return(
{
"X": X,
"Y": Y,
"Xtest": Xtest,
"Ytest": Ytest,
"seed": seed,
"info": "Airline delay data used for demonstrating Gaussian processes for big data.",
"covariates": covariates,
"response": response,
},
data_set,
)
if NETPBMFILE_AVAILABLE:
def olivetti_faces(data_set="olivetti_faces"):
path = os.path.join(access.DATAPATH, data_set)
if not access.data_available(data_set):
import zipfile
access.download_data(data_set)
zip = zipfile.ZipFile(os.path.join(path, "att_faces.zip"), "r")
for name in zip.namelist():
zip.extract(name, path)
Y = []
lbls = []
for subject in range(40):
for image in range(10):
image_path = os.path.join(
path, "orl_faces", "s" + str(subject + 1), str(image + 1) + ".pgm"
)
Y.append(netpbmfile.imread(image_path).flatten())
lbls.append(subject)
Y = np.asarray(Y)
lbls = np.asarray(lbls)[:, None]
return access.data_details_return(
{"Y": Y, "lbls": lbls, "info": "ORL Faces processed to 64x64 images."}, data_set
)
def xw_pen(data_set="xw_pen"):
if not access.data_available(data_set):
access.download_data(data_set)
Y = np.loadtxt(os.path.join(access.DATAPATH, data_set, "xw_pen_15.csv"), delimiter=",")
X = np.arange(485)[:, None]
return access.data_details_return(
{
"Y": Y,
"X": X,
"info": "Tilt data from a personalized digital assistant pen. Plot in original paper showed regression between time steps 175 and 275.",
},
data_set,
)
def download_rogers_girolami_data(data_set="rogers_girolami_data"):
if not access.data_available("rogers_girolami_data"):
import tarfile
access.download_data(data_set)
path = os.path.join(access.DATAPATH, data_set)
tar_file = os.path.join(path, "firstcoursemldata.tar.gz")
tar = tarfile.open(tar_file)
print("Extracting file.")
tar.extractall(path=path)
tar.close()
def olympic_100m_men(data_set="rogers_girolami_data"):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "data", "olympics.mat")
)["male100"]
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return access.data_details_return(
{
"X": X,
"Y": Y,
"covariates": [util.decimalyear("year", "%Y")],
"response": ["time"],
"info": "Olympic sprint times for 100 m men from 1896 until 2008. Example is from Rogers and Girolami's First Course in Machine Learning.",
},
data_set,
)
def olympic_100m_women(data_set="rogers_girolami_data"):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "data", "olympics.mat")
)["female100"]
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return access.data_details_return(
{
"X": X,
"Y": Y,
"covariates": [util.decimalyear("year", "%Y")],
"response": ["time"],
"info": "Olympic sprint times for 100 m women from 1896 until 2008. Example is from Rogers and Girolami's First Course in Machine Learning.",
},
data_set,
)
def olympic_200m_women(data_set="rogers_girolami_data"):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "data", "olympics.mat")
)["female200"]
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return access.data_details_return(
{
"X": X,
"Y": Y,
"info": "Olympic 200 m winning times for women from 1896 until 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
},
data_set,
)
def olympic_200m_men(data_set="rogers_girolami_data"):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "data", "olympics.mat")
)["male200"]
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return access.data_details_return(
    {
        "X": X,
        "Y": Y,
        "covariates": [util.decimalyear("year", "%Y")],
        "response": ["time"],
        "info": "Olympic 200 m winning times for men from 1896 until 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
    },
    data_set,
)
def olympic_400m_women(data_set="rogers_girolami_data"):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "data", "olympics.mat")
)["female400"]
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return access.data_details_return(
{
"X": X,
"Y": Y,
"covariates": [util.decimalyear("year", "%Y")],
"response": ["time"],
"info": "Olympic 400 m winning times for women until 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
},
data_set,
)
def olympic_400m_men(data_set="rogers_girolami_data"):
download_rogers_girolami_data()
olympic_data = scipy.io.loadmat(
os.path.join(access.DATAPATH, data_set, "data", "olympics.mat")
)["male400"]
X = olympic_data[:, 0][:, None]
Y = olympic_data[:, 1][:, None]
return access.data_details_return(
    {
        "X": X,
        "Y": Y,
        "covariates": [util.decimalyear("year", "%Y")],
        "response": ["time"],
        "info": "Olympic 400 m winning times for men until 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
    },
    data_set,
)
def olympic_marathon_men(data_set="olympic_marathon_men"):
if not access.data_available(data_set):
access.download_data(data_set)
olympics = np.genfromtxt(
os.path.join(access.DATAPATH, data_set, "olympicMarathonTimes.csv"), delimiter=","
)
X = olympics[:, 0:1]
Y = olympics[:, 1:2]
return access.data_details_return(
{
"X": X,
"Y": Y,
"covariates": [util.decimalyear("year", "%Y")],
"response": ["time"],
},
data_set,
)
def olympic_sprints(data_set="rogers_girolami_data"):
"""All olympics sprint winning times for multiple output prediction."""
X = np.zeros((0, 2))
Y = np.zeros((0, 1))
cats = {}
for i, dataset in enumerate(
[
olympic_100m_men,
olympic_100m_women,
olympic_200m_men,
olympic_200m_women,
olympic_400m_men,
olympic_400m_women,
]
):
data = dataset()
year = data["X"]
time = data["Y"]
X = np.vstack((X, np.hstack((year, np.ones_like(year) * i))))
Y = np.vstack((Y, time))
cats[dataset.__name__] = i
data["X"] = X
data["Y"] = Y
data["info"] = (
    "Olympic sprint event winning times for men and women to 2008. "
    "Data is from Rogers and Girolami's First Course in Machine Learning."
)
return access.data_details_return(
{
    "X": X,
    "Y": Y,
    "covariates": [util.decimalyear("year", "%Y"), util.discrete(cats, "event")],
    "response": ["time"],
    "info": "Olympic sprint event winning times for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.",
"output_info": {
0: "100m Men",
1: "100m Women",
2: "200m Men",
3: "200m Women",
4: "400m Men",
5: "400m Women",
},
},
data_set,
)
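# Example usage (added sketch): the combined design matrix stacks all six
# events, with the second column holding the event index listed in "output_info".
#   data = olympic_sprints()
#   X, Y = data["X"], data["Y"]   # X[:, 0] is the year, X[:, 1] is the event index 0..5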
def movie_body_count(data_set="movie_body_count"):
"""Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by <NAME> and <NAME> for exploring differences between Python and R."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "film-death-counts-Python.csv")
Y = pd.read_csv(filename)
Y["Actors"] = Y["Actors"].apply(lambda x: x.split("|"))
Y["Genre"] = Y["Genre"].apply(lambda x: x.split("|"))
Y["Director"] = Y["Director"].apply(lambda x: x.split("|"))
return access.data_details_return(
{
"Y": Y,
"info": "Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by <NAME> and <NAME> for exploring differences between Python and R.",
},
data_set,
)
def movie_body_count_r_classify(data_set="movie_body_count"):
"""Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by <NAME> and <NAME> for exploring differences between Python and R."""
data = movie_body_count()["Y"]
X = data[["Year", "Body_Count"]]
Y = data["MPAA_Rating"] == "R" # set label to be positive for R rated films.
# Create series of movie genres with the relevant index
s = data["Genre"].apply(pd.Series).stack()  # "Genre" is already split into a list of genres per film in movie_body_count
s.index = s.index.droplevel(-1) # to line up with df's index
# Extract from the series the unique list of genres.
genres = s.unique()
# For each genre extract the indices where it is present and add a column to X
for genre in genres:
index = s[s == genre].index.tolist()
values = pd.Series(np.zeros(X.shape[0]), index=X.index)
values[index] = 1
X[genre] = values
return access.data_details_return(
{
"X": X,
"Y": Y,
"info": "Data set of movies and body count for movies scraped from www.MovieBodyCounts.com created by <NAME> and <NAME> for exploring differences between Python and R. In this variant we aim to classify whether the film is rated R or not depending on the genre, the years and the body count.",
},
data_set,
)
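# Added note (not in the original source): the loop above builds a one-hot
# genre encoding. A hypothetical film tagged "Action|Comedy" ends up with
# X["Action"] = 1, X["Comedy"] = 1 and 0 in every other genre column, alongside
# the Year and Body_Count covariates.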
def movielens100k(data_set="movielens100k"):
"""Data set of movie ratings collected by the University of Minnesota and 'cleaned up' for use."""
if not access.data_available(data_set):
import zipfile
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
zip = zipfile.ZipFile(os.path.join(dir_path, "ml-100k.zip"), "r")
for name in zip.namelist():
zip.extract(name, dir_path)
encoding = "latin-1"
movie_path = os.path.join(access.DATAPATH, "movielens100k", "ml-100k")
items = pd.read_csv(
os.path.join(movie_path, "u.item"),
index_col="index",
header=None,
sep="|",
names=[
    "index",
    "title",
    "date",
    "empty",
    "imdb_url",
    "unknown",
    "Action",
    "Adventure",
    "Animation",
    "Children's",
"Comedy",
"Crime",
"Documentary",
"Drama",
"Fantasy",
"Film-Noir",
"Horror",
"Musical",
"Mystery",
"Romance",
"Sci-Fi",
"Thriller",
"War",
"Western",
],
encoding=encoding,
)
users = pd.read_csv(
os.path.join(movie_path, "u.user"),
index_col="index",
header=None,
sep="|",
names=["index", "age", "sex", "job", "id"],
encoding=encoding,
)
parts = [
"u1.base",
"u1.test",
"u2.base",
"u2.test",
"u3.base",
"u3.test",
"u4.base",
"u4.test",
"u5.base",
"u5.test",
"ua.base",
"ua.test",
"ub.base",
"ub.test",
]
ratings = []
for part in parts:
rate_part = pd.read_csv(
os.path.join(movie_path, part),
index_col="index",
header=None,
sep="\t",
names=["user", "item", "rating", "index"],
encoding=encoding,
)
rate_part["split"] = part
ratings.append(rate_part)
Y = pd.concat(ratings)
return access.data_details_return(
{
"Y": Y,
"film_info": items,
"user_info": users,
"info": "The Movielens 100k data",
},
data_set,
)
def nigeria_nmis_facility_database(data_set="nigeria_nmis_facility_database"):
    """A rigorous, geo-referenced baseline facility inventory across Nigeria, created between 2009 and 2011 with an additional survey effort to increase coverage in 2014, building Nigeria's first nation-wide inventory of health facilities. The database includes information on 34,139 health facilities in Nigeria."""
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "healthmopupandbaselinenmisfacility.csv")
Y = pd.read_csv(filename)
return access.data_details_return(
{
"Y": Y,
"info": "Geo-referenced baseline facility inventory across Nigeria giving Nigeria's first nation-wide inventory of health facilities.",
},
data_set,
)
def crescent_data(num_data=200, seed=default_seed):
"""
Data set formed from a mixture of four Gaussians. In each class two of the Gaussians are elongated at right angles to each other and offset to form an approximation to the crescent data that is popular in semi-supervised learning as a toy problem.
:param num_data: number of data points to be sampled (default is 200).
:type num_data: int
:param seed: random seed to be used for data generation.
:type seed: int
"""
np.random.seed(seed=seed)
sqrt2 = np.sqrt(2)
# Rotation matrix
R = np.array([[sqrt2 / 2, -sqrt2 / 2], [sqrt2 / 2, sqrt2 / 2]])
# Scaling matrices
scales = []
scales.append(np.array([[3, 0], [0, 1]]))
scales.append(np.array([[3, 0], [0, 1]]))
scales.append(np.array([[1, 0], [0, 3]]))
scales.append(np.array([[1, 0], [0, 3]]))
means = []
means.append(np.array([4, 4]))
means.append(np.array([0, 4]))
means.append(np.array([-4, -4]))
means.append(np.array([0, -4]))
Xparts = []
num_data_part = []
num_data_total = 0
for i in range(0, 4):
num_data_part.append(round(((i + 1) * num_data) / 4.0))
num_data_part[i] -= num_data_total
part = np.random.normal(size=(num_data_part[i], 2))
part = np.dot(np.dot(part, scales[i]), R) + means[i]
Xparts.append(part)
num_data_total += num_data_part[i]
X = np.vstack((Xparts[0], Xparts[1], Xparts[2], Xparts[3]))
Y = np.vstack(
(
np.ones((num_data_part[0] + num_data_part[1], 1)),
-np.ones((num_data_part[2] + num_data_part[3], 1)),
)
)
cats = {"negative": -1, "positive": 1}
return {
"X": X,
"Y": Y,
"info": "Two separate classes of data formed approximately in the shape of two crescents.",
"response": [util.discrete(cats, "class")],
}
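# Added note (not in the original source): each class is a pair of axis-aligned
# Gaussians stretched by the scale matrices, rotated 45 degrees by R and shifted
# by the means, so each class traces out a rough crescent. Example usage (sketch):
#   data = crescent_data(num_data=200, seed=default_seed)
#   X, Y = data["X"], data["Y"]   # Y is +1 for the first two components, -1 for the other two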
def creep_data(data_set="creep_rupture"):
"""Brun and Yoshida's metal creep rupture data."""
if not access.data_available(data_set):
import tarfile
access.download_data(data_set)
path = os.path.join(access.DATAPATH, data_set)
tar_file = os.path.join(path, "creeprupt.tar")
tar = tarfile.open(tar_file)
tar.extractall(path=path)
tar.close()
all_data = np.loadtxt(os.path.join(access.DATAPATH, data_set, "taka"))
y = all_data[:, 1:2].copy()
features = [0]
features.extend(list(range(2, 31)))
X = all_data[:, features].copy()
cats = {"furnace cooling": 0, "air cooling": 1, "oil cooling": 2, "water quench": 3}
attributes = [
"Lifetime / hours",
"Temperature / Kelvin",
"Carbon / wt%",
"Silicon / wt%",
"Manganese / wt%",
"Phosphorus / wt%",
"Sulphur / wt%",
"Chromium / wt%",
"Molybdenum / wt%",
"Tungsten / wt%",
"Nickel / wt%",
"Copper / wt%",
"Vanadium / wt%",
"Niobium / wt%",
"Nitrogen / wt%",
"Aluminium / wt%",
"Boron / wt%",
"Cobalt / wt%",
"Tantalum / wt%",
"Oxygen / wt%",
"Normalising temperature / Kelvin",
"Normalising time / hours",
util.discrete(cats, "Cooling rate of normalisation"),
"Tempering temperature / Kelvin",
"Tempering time / hours",
util.discrete(cats, "Cooling rate of tempering"),
"Annealing temperature / Kelvin",
"Annealing time / hours",
util.discrete(cats, "Cooling rate of annealing"),
"Rhenium / wt%",
]
return access.data_details_return(
{
"X": X,
"Y": y,
"covariates": attributes,
"response": ["Rupture stress / MPa"],
},
data_set,
)
def ceres(data_set="ceres"):
    """Twenty-two observations of the dwarf planet Ceres as observed by <NAME> and published in the September edition of Monatlicher Correspondenz in 1801. These were the measurements used by Gauss to fit a model of the planet's orbit, through which the planet was recovered three months later."""
if not access.data_available(data_set):
access.download_data(data_set)
data = pd.read_csv(
os.path.join(access.DATAPATH, data_set, "ceresData.txt"),
index_col="Tag",
header=None,
sep="\t",
names=[
"Tag",
"Mittlere Sonnenzeit",
"Gerade Aufstig in Zeit",
"Gerade Aufstiegung in Graden",
"Nordlich Abweich",
"Geocentrische Laenger",
"Geocentrische Breite",
'Ort der Sonne + 20" Aberration',
"Logar. d. Distanz",
],
parse_dates=True,
dayfirst=False,
)
return access.data_details_return({"data": data}, data_set)
def kepler_lightcurves(data_set="kepler_telescope"):
"""Load Kepler light curves from <NAME> & <NAME>'s NeurIPS 2020 Tutorial as shown in this colab https://colab.research.google.com/drive/1TimsiQhhcK6qX_lD951H-WJDHd92my61?usp=sharing"""
datasets = {'2009350155506':
['001720554',
'002696955',
'002987660',
'003246460',
'003429637',
'003441157',
'003836439',
'004040917',
'004044238',
'004150611',
'004155395',
'004242575',
'004567097',
'004660665',
'004671313',
'004857678',
'004931363',
'004989900',
'005108214',
'005113557',
'005164767',
'005177450',
'005458880',
'005683912',
'005724440',
'005737655',
'005802562',
'005939450',
'005952403',
'005954370',
'006065699',
'006101376',
'006106415',
'006150124',
'006225718',
'006342566',
'006352430',
'006382808',
'006450107',
'006469154',
'006670812',
'006675338',
'007201012',
'007286856',
'007345479',
'007366121',
'007510397',
'007669848',
'007798339',
'007820638',
'007827131',
'007909976',
'007939145',
'007940546',
'007940959',
'007944142',
'007950369',
'007970740',
'008006161',
'008077489',
'008085683',
'008153795',
'008313018',
'008324268']}
data = kepler_telescope(datasets)
data["datasets"] = datasets
data["citation"] = "Data from Kepler space mission used by <NAME> and <NAME> for their NeurIPS tutorial https://dwh.gg/NeurIPSastro1"
data["info"] = """The following wget lines were obtained by doing a simple search at this web form: http://archive.stsci.edu/kepler/data_search/search.php
where we put "< 8" into the field "KEP_Mag" and "Quarter" into the field "User-specified field 1" and "3" into the "Field descriptions" box associated with that."""
return access.data_details_return(data, data_set)
def kepler_telescope(datasets, data_set="kepler_telescope"):
"""Load a given kepler_id's datasets."""
scan_dir = os.path.join(access.DATAPATH, data_set)
# Make sure the data is downloaded.
resource = access.kepler_telescope_urls_files(datasets)
access.data_resources[data_set] = access.data_resources["kepler_telescope_base"].copy()
access.data_resources[data_set]["files"] = resource["files"]
access.data_resources[data_set]["urls"] = resource["urls"]
if resource["urls"]:
access.download_data(data_set)
dataset_dir = os.path.join(access.DATAPATH, "kepler_telescope")
filenames = []
for dataset in datasets:
for kepler_id in datasets[dataset]:
filenames.append("kplr" + kepler_id + "-" + dataset + "_llc.fits")
from astropy.table import Table
Y = pd.DataFrame({dataset: {kepler_id: Table.read(os.path.join(dataset_dir, "kplr" + kepler_id + "-" + dataset + "_llc.fits"), format='fits').to_pandas() for kepler_id in datasets[dataset]} for dataset in datasets})
return access.data_details_return(
{
"Y": Y,
},
data_set,
)
def cmu_mocap_49_balance(data_set="cmu_mocap"):
"""Load CMU subject 49's one legged balancing motion that was used by Alvarez, Luengo and Lawrence at AISTATS 2009."""
train_motions = ["18", "19"]
test_motions = ["20"]
data = cmu_mocap(
"49", train_motions, test_motions, sample_every=4, data_set=data_set
)
data["info"] = (
"One legged balancing motions from CMU data base subject 49. As used in Alvarez, Luengo and Lawrence at AISTATS 2009. It consists of "
+ data["info"]
)
return data
def cmu_mocap_35_walk_jog(data_set="cmu_mocap"):
    """Load CMU subject 35's walking and jogging motions, the same data that was used by Taylor, Roweis and Hinton at NIPS 2007, but without their preprocessing. Also used by Lawrence at AISTATS 2007."""
train_motions = [
"01",
"02",
"03",
"04",
"05",
"06",
"07",
"08",
"09",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"17",
"19",
"20",
"21",
"22",
"23",
"24",
"25",
"26",
"28",
"30",
"31",
"32",
"33",
"34",
]
test_motions = ["18", "29"]
data = cmu_mocap(
"35", train_motions, test_motions, sample_every=4, data_set=data_set
)
data["info"] = (
    "Walk and jog data from CMU data base subject 35. As used in Taylor, Roweis and Hinton at NIPS 2007, but without their pre-processing (i.e. as used by Lawrence at AISTATS 2007). It consists of "
+ data["info"]
)
return data
def cmu_mocap_high_five(data_set="cmu_mocap"):
"""Load the CMU Motion capture for the high 5 between subjects 20 and 21 in the motion capture data. The data was used by Lawrence and Moore ICML 2007. Later the work was recreated by Damianou and Lawrence at AISTATS 2013."""
data = cmu_mocap("20", ["11"], [], sample_every=4, data_set=data_set)
data2 = cmu_mocap("21", ["11"], [], sample_every=4, data_set=data_set)
data["Y1"] = data.pop("Y")
data["skel1"] = data.pop("skel")
data["Y2"] = data2["Y"]
data["skel2"] = data2["skel"]
data["info"] = (
"High Five motion capture of two subjects walking towards each other and 'high fiving' as used by Lawrence and Moore at ICML. Data taken from subjects 20 and 21. It consists of "
+ data["info"]
+ " and "
+ data2["info"]
)
return data
def cmu_mocap(
subject, train_motions, test_motions=[], sample_every=4, data_set="cmu_mocap"
):
"""Load a given subject's training and test motions from the CMU motion capture data."""
# Load in subject skeleton.
from . import mocap
subject_dir = os.path.join(access.DATAPATH, data_set)
# Make sure the data is downloaded.
all_motions = train_motions + test_motions
resource = access.cmu_urls_files(([subject], [all_motions]))
access.data_resources[data_set] = access.data_resources["cmu_mocap_full"].copy()
access.data_resources[data_set]["files"] = resource["files"]
access.data_resources[data_set]["urls"] = resource["urls"]
if resource["urls"]:
access.download_data(data_set)
skel = mocap.acclaim_skeleton(os.path.join(subject_dir, subject + ".asf"))
# Set up labels for each sequence
exlbls = np.eye(len(train_motions))
# Load sequences
tot_length = 0
temp_Y = []
temp_lbls = []
for i in range(len(train_motions)):
temp_chan = skel.load_channels(
os.path.join(subject_dir, subject + "_" + train_motions[i] + ".amc")
)
temp_Y.append(temp_chan[::sample_every, :])
temp_lbls.append(np.tile(exlbls[i, :], (temp_Y[i].shape[0], 1)))
tot_length += temp_Y[i].shape[0]
Y = np.zeros((tot_length, temp_Y[0].shape[1]))
lbls = np.zeros((tot_length, temp_lbls[0].shape[1]))
end_ind = 0
for i in range(len(temp_Y)):
start_ind = end_ind
end_ind += temp_Y[i].shape[0]
Y[start_ind:end_ind, :] = temp_Y[i]
lbls[start_ind:end_ind, :] = temp_lbls[i]
if len(test_motions) > 0:
temp_Ytest = []
temp_lblstest = []
testexlbls = np.eye(len(test_motions))
tot_test_length = 0
for i in range(len(test_motions)):
temp_chan = skel.load_channels(
os.path.join(subject_dir, subject + "_" + test_motions[i] + ".amc")
)
temp_Ytest.append(temp_chan[::sample_every, :])
temp_lblstest.append(np.tile(testexlbls[i, :], (temp_Ytest[i].shape[0], 1)))
tot_test_length += temp_Ytest[i].shape[0]
# Load test data
Ytest = np.zeros((tot_test_length, temp_Ytest[0].shape[1]))
lblstest = np.zeros((tot_test_length, temp_lblstest[0].shape[1]))
end_ind = 0
for i in range(len(temp_Ytest)):
start_ind = end_ind
end_ind += temp_Ytest[i].shape[0]
Ytest[start_ind:end_ind, :] = temp_Ytest[i]
lblstest[start_ind:end_ind, :] = temp_lblstest[i]
else:
Ytest = None
lblstest = None
info = "Subject: " + subject + ". Training motions: "
for motion in train_motions:
info += motion + ", "
info = info[:-2]
if len(test_motions) > 0:
info += ". Test motions: "
for motion in test_motions:
info += motion + ", "
info = info[:-2] + "."
else:
info += "."
if sample_every != 1:
info += " Data is sub-sampled to every " + str(sample_every) + " frames."
return access.data_details_return(
{
"Y": Y,
"lbls": lbls,
"Ytest": Ytest,
"lblstest": lblstest,
"info": info,
"skel": skel,
},
data_set,
)
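# Example usage (added sketch, assuming the CMU files for the subject download
# correctly; motion numbers are strings as used above):
#   data = cmu_mocap("35", train_motions=["01", "02"], test_motions=["03"], sample_every=4)
#   Y, lbls = data["Y"], data["lbls"]   # sub-sampled channels and one-hot motion labels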
def mcycle(data_set="mcycle", seed=default_seed):
if not access.data_available(data_set):
access.download_data(data_set)
np.random.seed(seed=seed)
data = pd.read_csv(os.path.join(access.DATAPATH, data_set, "motor.csv"))
data = data.reindex(util.permute(data.shape[0])) # Randomize so test isn't at the end
X = data["times"].values[:, None]
Y = data["accel"].values[:, None]
return access.data_details_return(
{"X": X, "Y": Y, "covariates": ["times"], "response": ["acceleration"]},
data_set,
)
def elevators(data_set="elevators", seed=default_seed):
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
tar = tarfile.open(name=os.path.join(dir_path, "elevators.tgz"))
tar.extractall(dir_path)
tar.close()
elevator_path = os.path.join(access.DATAPATH, "elevators", "Elevators")
elevator_train_path = os.path.join(elevator_path, "elevators.data")
elevator_test_path = os.path.join(elevator_path, "elevators.test")
train_data = pd.read_csv(elevator_train_path, header=None)
test_data = pd.read_csv(elevator_test_path, header=None)
data = pd.concat([train_data, test_data])
np.random.seed(seed=seed)
# Want to choose test and training data sizes, so just concatenate them together and mix them up
data = data.reset_index()
data = data.reindex(util.permute(data.shape[0])) # Randomize so test isn't at the end
X = data.iloc[:, :-1].values
Y = data.iloc[:, -1].values[:, None]
return access.data_details_return({"X": X, "Y": Y}, data_set)
def hospitalized_covid(data_set="hospitalized_covid"):
if not access.data_available(data_set):
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
X = pd.read_excel(os.path.join(dir_path, "Israeli_data_August_15_2021 original.xlsx"), skiprows=2)
return access.data_details_return({"X": X}, data_set)
if False:
def hapmap3(data_set="hapmap3"):
"""
The HapMap phase three SNP dataset - 1184 samples out of 11 populations.
SNP_matrix (A) encoding [see Paschou et al. 2007 (PCA-Correlated SNPs...)]:
Let (B1,B2) be the alphabetically sorted bases, which occur in the j-th SNP, then
/ 1, iff SNPij==(B1,B1)
Aij = | 0, iff SNPij==(B1,B2)
\ -1, iff SNPij==(B2,B2)
The SNP data and the meta information (such as iid, sex and phenotype) are
stored in the dataframe datadf, index is the Individual ID,
with following columns for metainfo:
* family_id -> Family ID
* paternal_id -> Paternal ID
* maternal_id -> Maternal ID
* sex -> Sex (1=male; 2=female; other=unknown)
* phenotype -> Phenotype (-9, or 0 for unknown)
* population -> Population string (e.g. 'ASW' - 'YRI')
* rest are SNP rs (ids)
More information is given in infodf:
* Chromosome:
- autosomal chromosemes -> 1-22
- X X chromosome -> 23
- Y Y chromosome -> 24
- XY Pseudo-autosomal region of X -> 25
- MT Mitochondrial -> 26
* Relative Positon (to Chromosome) [base pairs]
"""
try:
from sys import stdout
import bz2
if sys.version_info >= (3, 0):
import pickle
else:
import cPickle as pickle
except ImportError as i:
raise i(
"Need pandas for hapmap dataset, make sure to install pandas (http://pandas.pydata.org/) before loading the hapmap dataset"
)
dir_path = os.path.join(access.DATAPATH, "hapmap3")
hapmap_file_name = "hapmap3_r2_b36_fwd.consensus.qc.poly"
unpacked_files = [
os.path.join(dir_path, hapmap_file_name + ending)
for ending in [".ped", ".map"]
]
unpacked_files_exist = reduce(
lambda a, b: a and b, list(map(os.path.exists, unpacked_files))
)
if not unpacked_files_exist and not access.data_available(data_set):
access.download_data(data_set)
preprocessed_datapaths = [
os.path.join(dir_path, hapmap_file_name + file_name)
for file_name in [".snps.pickle", ".info.pickle", ".nan.pickle"]
]
if not reduce(
lambda a, b: a and b, list(map(os.path.exists, preprocessed_datapaths))
):
if not access.overide_manual_authorize and not access.prompt_stdin(
"Preprocessing requires ~25GB "
"of memory and can take a (very) long time, continue? [Y/n]"
):
print("Preprocessing required for further usage.")
return
status = "Preprocessing data, please be patient..."
print(status)
def write_status(message, progress, status):
stdout.write(" " * len(status))
stdout.write("\r")
stdout.flush()
status = r"[{perc: <{ll}}] {message: <13s}".format(
message=message, ll=20, perc="=" * int(20.0 * progress / 100.0)
)
stdout.write(status)
stdout.flush()
return status
if not unpacked_files_exist:
status = write_status("unpacking...", 0, "")
curr = 0
for newfilepath in unpacked_files:
if not os.path.exists(newfilepath):
filepath = newfilepath + ".bz2"
file_size = os.path.getsize(filepath)
with open(newfilepath, "wb") as new_file, open(
filepath, "rb"
) as f:
decomp = bz2.BZ2Decompressor()
file_processed = 0
buffsize = 100 * 1024
for data in iter(lambda: f.read(buffsize), b""):
new_file.write(decomp.decompress(data))
file_processed += len(data)
status = write_status(
"unpacking...",
curr + 12.0 * file_processed / (file_size),
status,
)
curr += 12
status = write_status("unpacking...", curr, status)
os.remove(filepath)
status = write_status("reading .ped...", 25, status)
# Preprocess data:
snpstrnp = np.loadtxt(unpacked_files[0], dtype=str)
status = write_status("reading .map...", 33, status)
mapnp = np.loadtxt(unpacked_files[1], dtype=str)
status = write_status("reading relationships.txt...", 42, status)
# and metainfo:
infodf = pd.read_csv(
    os.path.join(dir_path, "./relationships_w_pops_121708.txt"),
    header=0,
    sep="\t",
)
infodf.set_index("IID", inplace=True)
status = write_status("filtering nan...", 45, status)
snpstr = snpstrnp[:, 6:].astype("S1").reshape(snpstrnp.shape[0], -1, 2)
inan = snpstr[:, :, 0] == "0"
status = write_status("filtering reference alleles...", 55, status)
ref = np.array([np.unique(x)[-2:] for x in snpstr.swapaxes(0, 1)[:, :, :]])
status = write_status("encoding snps...", 70, status)
# Encode the information for each gene in {-1,0,1}:
status = write_status("encoding snps...", 73, status)
snps = snpstr == ref[None, :, :]
status = write_status("encoding snps...", 76, status)
snps = snps * np.array([1, -1])[None, None, :]
status = write_status("encoding snps...", 78, status)
snps = snps.sum(-1)
status = write_status("encoding snps...", 81, status)
snps = snps.astype("i8")
status = write_status("marking nan values...", 88, status)
# put in nan values (masked as -128):
snps[inan] = -128
status = write_status("setting up meta...", 94, status)
# get meta information:
metaheader = np.r_[
["family_id", "iid", "paternal_id", "maternal_id", "sex", "phenotype"]
]
metadf = pd.DataFrame(columns=metaheader, data=snpstrnp[:, :6])
metadf.set_index("iid", inplace=True)
metadf = metadf.join(infodf.population)
metadf.to_pickle(preprocessed_datapaths[1])
# put everything together:
status = write_status("setting up snps...", 96, status)
snpsdf = pd.DataFrame(index=metadf.index, data=snps, columns=mapnp[:, 1])
with open(preprocessed_datapaths[0], "wb") as f:
pickle.dump(snpsdf, f, protocol=-1)
status = write_status("setting up snps...", 98, status)
inandf = pd.DataFrame(index=metadf.index, data=inan, columns=mapnp[:, 1])
inandf.to_pickle(preprocessed_datapaths[2])
status = write_status("done :)", 100, status)
print("")
else:
print("loading snps...")
snpsdf = pd.read_pickle(preprocessed_datapaths[0])
print("loading metainfo...")
metadf = pd.read_pickle(preprocessed_datapaths[1])
print("loading nan entries...")
inandf = pd.read_pickle(preprocessed_datapaths[2])
snps = snpsdf.values
populations = metadf.population.values.astype("S3")
hapmap = dict(
name=data_set,
description="The HapMap phase three SNP dataset - "
"1184 samples out of 11 populations. inan is a "
"boolean array, containing whether or not the "
"given entry is nan (nans are masked as "
"-128 in snps).",
snpsdf=snpsdf,
metadf=metadf,
snps=snps,
inan=inandf.values,
inandf=inandf,
populations=populations,
)
return hapmap
def olivetti_glasses(
data_set="olivetti_glasses", num_training=200, seed=default_seed
):
path = os.path.join(access.DATAPATH, data_set)
if not access.data_available(data_set):
access.download_data(data_set)
y = np.load(os.path.join(path, "has_glasses.np"))
y = np.where(y == "y", 1, 0).reshape(-1, 1)
faces = scipy.io.loadmat(os.path.join(path, "olivettifaces.mat"))["faces"].T
np.random.seed(seed=seed)
index = util.permute(faces.shape[0])
X = faces[index[:num_training], :]
Xtest = faces[index[num_training:], :]
Y = y[index[:num_training], :]
Ytest = y[index[num_training:]]
return access.data_details_return(
{
    "X": X,
    "Y": Y,
    "Xtest": Xtest,
    "Ytest": Ytest,
    "seed": seed,
    "info": "ORL Faces with labels identifying who is wearing glasses and who isn't. Data is randomly partitioned according to given seed. Presence or absence of glasses was labelled by <NAME>.",
},
"olivetti_faces",
)
def simulation_BGPLVM(data_set="bgplvm_simulation"):
mat_data = scipy.io.loadmat(os.path.join(access.DATAPATH, "BGPLVMSimulation.mat"))
Y = np.array(mat_data["Y"], dtype=float)
S = np.array(mat_data["initS"], dtype=float)
mu = np.array(mat_data["initMu"], dtype=float)
# return access.data_details_return({'S': S, 'Y': Y, 'mu': mu}, data_set)
return {
"Y": Y,
"S": S,
"mu": mu,
"info": "Simulated test dataset generated in MATLAB to compare BGPLVM between python and MATLAB",
}
def politics_twitter(data_set="politics_twitter"):
# Bailout before downloading!
import tweepy
import time
import progressbar as pb
import sys
if not access.data_available(data_set):
access.download_data(data_set)
# FIXME: Try catch here
CONSUMER_KEY = config.get("twitter", "CONSUMER_KEY")
CONSUMER_SECRET = config.get("twitter", "CONSUMER_SECRET")
OAUTH_TOKEN = config.get("twitter", "OAUTH_TOKEN")
OAUTH_TOKEN_SECRET = config.get("twitter", "OAUTH_TOKEN_SECRET")
# Authenticate
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
# Make the tweepy API object, and be careful not to abuse the API!
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
requests_per_minute = int(180.0 / 15.0)
save_freq = 5
data_dict = {}
for party in ["ukip", "labour", "conservative", "greens"]:
# Load in the twitter data we want to join
parsed_file_path = os.path.join(
access.DATAPATH, data_set, "{}_twitter_parsed.csv".format(party)
)
file_already_parsed = False
if os.path.isfile(parsed_file_path):
print(
"Data already scraped, loading saved scraped data for {} party".format(
party
)
)
# Read the data
data = pd.read_csv(parsed_file_path)
# Check it has been fully parsed (no NATs in time)
if data["time"].isnull().sum() == 0:
file_already_parsed = True
if not file_already_parsed:
print(
"Scraping tweet data from ids for the {} party data".format(party)
)
sys.stdout.write(
"Scraping tweet data from ids for the {} party data".format(party)
)
raw_file_path = os.path.join(
access.DATAPATH, data_set, "{}_raw_ids.csv".format(party)
)
# data = pd.read_csv('./data_download/{}_raw_ids.csv'.format(party))
data = pd.read_csv(raw_file_path)
# Iterate in blocks
full_block_size = 100
num_blocks = data.shape[0] // full_block_size + 1  # integer division so range() below gets an int
last_block_size = data.shape[0] % full_block_size
# Progress bar to give some indication of how long we now need to wait!
pbar = pb.ProgressBar(
widgets=[" [", pb.Timer(), "] ", pb.Bar(), " (", pb.ETA(), ") ",],
fd=sys.stdout,
)
for block_num in pbar(range(num_blocks)):
sys.stdout.flush()
# Get a single block of tweets
start_ind = block_num * full_block_size
if block_num == num_blocks - 1:
# end_ind = start_ind + last_block_size
tweet_block = data.iloc[start_ind:]
else:
end_ind = start_ind + full_block_size
tweet_block = data.iloc[start_ind:end_ind]
# Gather the actual data, filling in the missing times
tweet_block_ids = tweet_block["id_str"].tolist()
success = False
while not success:
try:
tweet_block_results = api.statuses_lookup(
tweet_block_ids, trim_user=True
)
success = True
except Exception:
# Something went wrong with our pulling of result. Wait
# for a minute and try again
time.sleep(60.0)
for tweet in tweet_block_results:
data.loc[
    data["id_str"] == int(tweet.id_str), "time"
] = tweet.created_at
# Wait so as to stay below the rate limit
# Stay on the safe side, presume that collection is instantanious
time.sleep(60.0 / requests_per_minute + 0.1)
if block_num % save_freq == 0:
data.to_csv(parsed_file_path)
# Now convert times to pandas datetimes
data["time"] = pd.to_datetime(data["time"])
# Get rid of non-parsed dates
data = data.loc[data["time"].notnull(), :]
data.to_csv(parsed_file_path)
data_dict[party] = data
return access.data_details_return(data_dict, data_set)
def cifar10_patches(data_set="cifar-10"):
    """The Canadian Institute for Advanced Research 10 image data set. Code for loading in this data is adapted from Boris Babenko's blog post, original code available here: http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of-code"""
if sys.version_info >= (3, 0):
import pickle
else:
import cPickle as pickle
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "cifar-10-python.tar.gz")
if not access.data_available(data_set):
import tarfile
access.download_data(data_set)
# This code is from <NAME>'s blog post.
# http://bbabenko.tumblr.com/post/86756017649/learning-low-level-vision-feautres-in-10-lines-of-code
tfile = tarfile.open(filename, "r:gz")
tfile.extractall(dir_path)
with open(
os.path.join(dir_path, "cifar-10-batches-py", "data_batch_1"), "rb"
) as f:
data = pickle.load(f)
images = data["data"].reshape((-1, 3, 32, 32)).astype("float32") / 255
images = np.rollaxis(images, 1, 4)
patches = np.zeros((0, 5, 5, 3))
for x in range(0, 32 - 5, 5):
for y in range(0, 32 - 5, 5):
patches = np.concatenate(
(patches, images[:, x : x + 5, y : y + 5, :]), axis=0
)
patches = patches.reshape((patches.shape[0], -1))
return access.data_details_return(
{
    "Y": patches,
    "info": "5x5 pixel patches extracted from the 32x32 CIFAR-10 images by <NAME> to demonstrate k-means features.",
},
data_set,
)
def movie_collaborative_filter(
data_set="movie_collaborative_filter", date="2014-10-06"
):
"""Data set of movie ratings as generated live in class by students."""
access.download_data(data_set)
dir_path = os.path.join(access.DATAPATH, data_set)
filename = os.path.join(dir_path, "film-death-counts-Python.csv")
Y = | pd.read_csv(filename) | pandas.read_csv |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import pandas as pd
import os
from data_preparation import *
from collections import Counter
import numpy as np
import pickle
from metrics import accuracy, build_confusion_matrices
def n_gram_train(filepath, grammage, folder, register_change, start_end_symbols, weighed, tf_idf_coefficient, length, double):
tf_idf_coefficient = float(tf_idf_coefficient)
dataset = pd.DataFrame(columns=['WORD', 'TAG'])
raw_data = open(filepath, encoding='utf8').readlines()
counter = 0
for instance in raw_data:
if (instance[0] != "#" and instance.strip()):
cols = instance.split('\t')
if (int(register_change) == 0):
dataset.loc[counter] = [cols[1], cols[3]]
else:
dataset.loc[counter] = [cols[1].lower(), cols[3]]
counter = counter + 1
names = dataset['TAG'].unique().tolist()
final_dictionary = {}
start_end_symbols = int(start_end_symbols)
if (length == 1):
by_length_dictionary = {}
if (weighed == 1):
corpus_length = dataset.shape[0]
ngram_in_word_dictionary = {}
words_with_ngram_dictionary = {}
for name in names:
clone = dataset[dataset['TAG'] == name]
n_grams = []
if (length == 1):
words_lengths = []
for word in clone['WORD']:
if (start_end_symbols == 1):
word = "#" + word + "#"
if (length == 1):
words_lengths.append(len(word))
if (weighed == 1):
ngrams_of_word_dictionary = {}
for gram in test_split(word, "", 0, int(grammage), double):
n_grams.extend(gram)
if (weighed == 1):
if gram[0] in words_with_ngram_dictionary.keys():
words_with_ngram_dictionary[gram[0]].append(word)
else:
words_with_ngram_dictionary[gram[0]] = []
words_with_ngram_dictionary[gram[0]].append(word)
if gram[0] in ngrams_of_word_dictionary.keys():
ngrams_of_word_dictionary[gram[0]] += 1
else:
ngrams_of_word_dictionary[gram[0]] = 1
if (weighed == 1):
for gram in ngrams_of_word_dictionary.keys():
if gram in ngram_in_word_dictionary.keys():
if (ngrams_of_word_dictionary[gram] > ngram_in_word_dictionary[gram]):
ngram_in_word_dictionary[gram] = ngrams_of_word_dictionary[gram]
else:
ngram_in_word_dictionary[gram] = ngrams_of_word_dictionary[gram]
if (length == 1):
by_length_dictionary[name] = round(np.mean(words_lengths))
cnt = Counter(n_grams)
grams = []
if (weighed == 0):
for gram in cnt.most_common(2):
grams.append(gram[0])
else:
weighed_grams = {}
for gram in cnt.most_common():
weighed_grams[gram[0]] = tf_idf_n_gram(tf_idf_coefficient, gram[1], ngram_in_word_dictionary[gram[0]], corpus_length, len(words_with_ngram_dictionary[gram[0]]))
weighed_grams = dict(reversed(sorted(weighed_grams.items(), key=lambda item: item[1])))
for key in list(weighed_grams.keys())[0:2]:
grams.append(key)
final_dictionary[name] = grams
with open(folder + "\\" + grammage + 'grams.pkl', 'wb+') as f:
pickle.dump(final_dictionary, f, pickle.HIGHEST_PROTOCOL)
if (length == 1):
with open(folder + "\\length_" + grammage + 'grams.pkl', 'wb+') as f:
pickle.dump(by_length_dictionary, f, pickle.HIGHEST_PROTOCOL)
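# ---------------------------------------------------------------------------
# Hedged sketch of the expected input format (an assumption, not stated in the
# original script): both n_gram_train and n_gram_test read a CoNLL-U style
# file in which comment lines start with "#" and token lines are tab-separated,
# with the surface form in column 2 (cols[1]) and the POS tag in column 4
# (cols[3]). A token line such as
#
#   1<TAB>Houses<TAB>house<TAB>NOUN<TAB>...
#
# would be stored as WORD="Houses" (or "houses" when register_change is set)
# and TAG="NOUN".
# ---------------------------------------------------------------------------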
def n_gram_test(data, folder, grammage, register_change, start_end_symbols, length):
test_dataset = pd.DataFrame(columns=['WORD', 'TAG'])
raw_data = open(data, encoding='utf8').readlines()
counter = 0
start_end_symbols = int(start_end_symbols)
for instance in raw_data:
if (instance[0] != "#" and instance.strip()):
cols = instance.split('\t')
if (int(register_change) == 0):
test_dataset.loc[counter] = [cols[1], cols[3]]
else:
if (start_end_symbols == 0):
test_dataset.loc[counter] = [cols[1].lower(), cols[3]]
else:
test_dataset.loc[counter] = ["#" + cols[1].lower() + "#", cols[3]]
counter = counter + 1
with open(folder + "\\" + grammage + "grams.pkl", 'rb') as f:
final_dictionary = pickle.load(f)
if (length == 1):
with open(folder + "\\length_" + grammage + 'grams.pkl', 'rb') as f:
by_length_dictionary = pickle.load(f)
correct = 0
total = 0
correct_by_part = []
total_by_part = []
true_pred_dataset = | pd.DataFrame(columns=['tok', 'true', 'pred']) | pandas.DataFrame |
#!/usr/bin/env python
"""
:Author: <NAME>/Stanford Genome Technology Center
:Contact: <EMAIL>
:Creation date: 24.11.2016
:Description:
This script annotates the structural variant output of longranger (10X Genomics)
This script requires:
- all of the python packages listed (imported) below
Revisions:
None to date
CURRENT VERSION: 1.0
"""
cur_version = 1.0
### LOAD THE NECESSARY PACKAGES ###
import sys
import os
import __main__ as main
import argparse
import pandas as pd
import pybedtools
from pybedtools import BedTool
from collections import defaultdict
pd.options.mode.chained_assignment = None
#################################################################
################ ################
################ PARSE THE ARGUMENTS ################
################ ################
#################################################################
### ARGPARSE PARSING ###
"""def usage():
print "Usage examples:"
print os.path.basename(main.__file__) + " --help"
print os.path.basename(main.__file__) + " -v longranger_svs_tumor.bedpe -n longranger_svs_normal.bedpe -l lumpy_svs.vcf -b bicseq_cnvs.txt -g /path/to/file/of/genes.txt -p 5000 -q 1000000 -out output_prefix"
sys.exit(0)"""
"""def parse_args():
parser = argparse.ArgumentParser(description = "A Python script for annotating the SVs called by longranger")
parser.add_argument("--usage", help="usage example", dest="usage", action='store_true')
parser.add_argument("-v", help="BEDPE file of tumor SVs called by longranger (REQUIRED)", dest="lr_tum_in")
parser.add_argument("-n", help="BEDPE file of normal SVs called by longranger (REQUIRED)", dest="lr_norm_in")
parser.add_argument("-l", help="VCF file of SVs called by lumpy (REQUIRED)", dest="lmpy_in")
parser.add_argument("-b", help="BED file of CNVs called by BICseq (REQUIRED)", dest="bic_in")
parser.add_argument("-g", help="BED file of genes of interest (REQUIRED)", dest="gene_in")
parser.add_argument("-p", help="base pairs of padding around regions for intersection", dest="padding", default=0)
parser.add_argument("-q", help="base pairs of padding around regions for gene intersection", dest="g_padding", default=0)
parser.add_argument("-out", help="prefix for output files (REQUIRED)", dest="out_in")
parser.add_argument("--version", action='version', version='%(prog)s ' + str(cur_version))
return parser.parse_args()"""
"""if __name__ == '__main__':
args = parse_args()
if(args.usage):
usage()
if(not args.lr_tum_in or not args.lr_norm_in or not args.lmpy_in or not args.bic_in or not args.out_in):
print os.path.basename(main.__file__) + " missing a required input file\n"
usage()
sys.exit(1)"""
### SET THE ARGUMENTS ###
"""tum_sv = args.lr_tum_in #-v
norm_sv = args.lr_norm_in #-n
lumpy_vcf = args.lmpy_in #-l
bic_file = args.bic_in #-b
gene_file = args.gene_in #-g
pad_bp = args.padding #-p not req
gpad_bp = args.g_padding #-q not req
out_prefix = str(args.out_in) #-out"""
#################################################################
################ ################
################ DEFINE FUNCTIONS ################
################ ################
#################################################################
## Function to parse data from SV bedpe files -- create df for all breakpoint 1's and df for all breakpoint 2's, then stack df's and pad coordinates
def bedpe_parse(d, pad):
df_1 = d[['#chrom1','start1','stop1','name']]
df_1.columns = ['#chrom','start','stop','name']
df_1['bkpt'] = '1'
df_2 = d[['chrom2','start2','stop2','name']]
df_2.columns = ['#chrom','start','stop','name']
df_2['bkpt'] = '2'
df_both = pd.concat([df_1, df_2], ignore_index=True)
df_both['start_pad'] = df_both['start'].apply(lambda x: 0 if x-int(pad)<0 else x-int(pad))
df_both['stop_pad'] = df_both['stop'] + int(pad)
cols = df_both.columns.tolist()
cols = cols[:1] + cols[-2:] + cols[1:-2]
df_both = df_both[cols]
return df_both
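## Hedged usage sketch for bedpe_parse (toy values, not from the real pipeline):
## a single BEDPE record is split into its two breakpoints and each coordinate
## pair is padded, with the padded start clamped at zero.
##
## >>> toy = pd.DataFrame({'#chrom1': ['chr1'], 'start1': [100], 'stop1': [200],
## ...                     'chrom2': ['chr2'], 'start2': [5000], 'stop2': [5100],
## ...                     'name': ['call1']})
## >>> out = bedpe_parse(toy, pad=50)
## >>> list(out['start_pad']), list(out['stop_pad'])
## ([50, 4950], [250, 5150])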
### Function to turn INFO field of vcf file into a dictionary
def makeDictInfo(r):
d = defaultdict(dict)
info_list=r.split(';')
for i in range(len(info_list)):
if '=' in info_list[i]:
key, value = info_list[i].split("=")
d[key]=value
return d
### Function to turn FORMAT/SAMPLE fields of vcf file into a dictionary
def makeDictSample(r):
format_list=r['format'].split(':')
sample_list=r['sample'].split(':')
d = dict(zip(format_list,sample_list))
return d
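## Hedged examples for the two dictionary helpers above (field values invented):
## >>> info = makeDictInfo('SVTYPE=DEL;END=12345;IMPRECISE')
## >>> info['SVTYPE'], info['END']
## ('DEL', '12345')
## >>> makeDictSample({'format': 'GT:SU', 'sample': '0/1:12'})
## {'GT': '0/1', 'SU': '12'}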
### Function to generate end position
def getEnd(r):
cur_dict = r['info_dict']
if "END" in cur_dict:
return cur_dict['END']
else:
end_pos = 'na'
return end_pos
#################################################################
################ ################
################ DETERMINE SOMATIC SVs ################
################ ################
#################################################################
### PARSE NORMAL SV FILE (i.e. put each breakpoint on its own line + pad the coordinates)
#-v longranger_svs_tumor.bedpe,
#-n longranger_svs_normal.bedpe,
#-l lumpy_svs.vcf,
#-b bicseq_cnvs.txt,
#-g /path/to/file/of/genes.txt ,
#-out output_prefix,
#5000, 1000000
def filter_svs(tum_sv, norm_sv, lumpy_vcf, bic_file, gene_file, out_prefix, pad_bp, gpad_bp):
df_norm = pd.read_table(norm_sv, sep="\t", skiprows=1)
df_norm_both = bedpe_parse(df_norm, pad_bp)
header = list(df_norm_both.columns)
df_norm_both.columns = [h+"_norm" for h in header]
### PARSE TUMOR SV FILE
df_tum = pd.read_table(tum_sv, sep="\t", skiprows=1)
df_tum_both = bedpe_parse(df_tum, pad_bp)
### PERFORM INTERSECTION OF NORMAL + TUMOR FILES
df_norm_both_str = df_norm_both.to_string(index=False) ## Convert df to string (seems to be only way to coerce pandas df into bedtool object)
df_norm_bed = BedTool(df_norm_both_str, from_string=True) ## Convert df as str to bedtool object
df_tum_both_str = df_tum_both.to_string(index=False) ## Convert df to string (seems to be only way to coerce pandas df into bedtool object)
df_tum_bed = BedTool(df_tum_both_str, from_string=True) ## Convert df as str to bedtool object
som_header = list(df_tum_both.columns) + list(df_norm_both.columns) ## Create header
som_header = [s.strip('#') for s in som_header] ## Remove comment from header
som_isect = df_tum_bed.intersect(df_norm_bed, wa=True, wb=True) ## Intersect files
som_isect = som_isect.to_dataframe(names=som_header) ## Convert result to pandas data frame
### ADD SOMATIC INFO TO ORIGINAL TUMOR SV FILE
## Determine which tumor breakpoints are germline and add info to original file + add a column to indicate if somatic
som_isect = som_isect[['name','bkpt','name_norm','chrom_norm','start_norm','stop_norm']] ## Extract + reorder columns
som_isect[['name_norm','chrom_norm','start_norm','stop_norm']] = som_isect[['name_norm','chrom_norm','start_norm','stop_norm']].astype(str) ## Change cols to strings in prep for join
som_isect['info_lr_norm(name, chrom, start, end)'] = som_isect[['name_norm','chrom_norm','start_norm','stop_norm']].apply(lambda x: ','.join(x), axis=1) ## Merge columns of normal info
som_isect = som_isect[['name','bkpt','info_lr_norm(name, chrom, start, end)']] ## Extract columns to retain in final table
som_isect[['bkpt']] = som_isect[['bkpt']].astype(str) ## Change col to string in prep for join
som_isect_summ = som_isect.groupby('name').agg({'bkpt': lambda x: ', '.join(x), 'info_lr_norm(name, chrom, start, end)': lambda x: '; '.join(x)}).reset_index() ## Group info for final table ## Rename columns
som_merge = | pd.merge(df_tum, som_isect_summ, how='left', on='name') | pandas.merge |
from flask import Flask,request
import pandas as pd
import numpy as np
import json
import pickle
import os
app=Flask(__name__)
model_file_path=os.path.join(os.path.pardir,os.path.pardir,'models','lr_model.pkl')
scaler_file_path=os.path.join(os.path.pardir,os.path.pardir,'models','lr_scaler.pk1')
with open(model_file_path, 'rb') as f:
model_loaded= pickle.load(f)
with open(scaler_file_path, 'rb') as f:
scaler_loaded=pickle.load(f)
@app.route('/api',methods=['POST'])
def make_prediction():
data =json.dumps(request.get_json(force=True))
df= | pd.read_json(data) | pandas.read_json |
#!/Users/rohan/opt/anaconda3/bin/python
import numpy as np
import pandas as pd
import pickle
from pathlib import Path
import sys
import os
import datetime
# handeling deprecated warnings
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
def init_report():
'''
    This will check if report.csv exists in the given directory and create it if it doesn't.
    The absolute path of that report is then stored in a txt file in the application dir;
    the write mode will be a+ or w+. Every time this pipeline is used for a new project
    update, the txt will be updated with the abs_path of the report from that project directory.
'''
pass
def state_method():
stages = '''
preliminary -
* create - /Plots and save all the epoch-cycle plots
* create Data Overview as using the following
Step-1 -
import os
path = '../samples/'
overview_path = '../samples/overview.txt'
eval_path = '../samples/evaluate.txt'
if os.path.exists(path):
print('samples dir, exists..checking for dictionaries existence..')
if os.path.exists(overview_path) and os.path.exists(eval_path):
print('Data exists. no need of overwritting.')
else:
print("overview and eval doesn't exist, proceed to step-2")
else:
print("samples/ dir is non-existent, Establishing one..")
os.mkdir(path) # samples directory
Step-2
# dictionary init
overview_dict = {}
eval_dict = {}
# fill the following -
# for overview
#string
kind = 'Image Data'
#tuple
dimensions = x_train.shape
#labels : str(list of unique target values)
targets = list(np.unique(y_train))
#nd.array
data = x_train[0:3]
#nd.array or class_names
labels = y_train[0:3]
vars0 = ['kind','dimensions', 'targets', 'data', 'labels']
# filling overview_dict
for x in vars0:
try:
overview_dict[x] = eval(x)
except:
overview_dict[x] = x
# evaluate_dict
eval_dict = {'test_cases' : x_test[0:50], 'true': y_test[0:50], 'class_names': None ,'model':'/{model-name}.h5'}
# dump 1
with open(overview_path,'wb') as f:
pickle.dump(overview_dict,f)
# dump 2
with open(eval_path,'wb') as f:
pickle.dump(eval_dict,f)
MAIN-
    step - 1, use ../../pipeline -varl with 1 to get the var-dict to show the variables to be filled with
step - 2 - use the following snippet after fetching ../../pipeline -varl with 2
code:
var = ['desc','project_name', 'framework','prediction_type','network_type',
'architecture','layers','hidden_units','activations','epochs',
'metrics','loss','optimiser','learning_rate','batch_size','train_performance','test_performance','classification_report','elapsed','summary'
,'ipynb','plots']
param = {}
for val in var:
try:
param[val] = eval(val)
except:
param[val] = val
# check if anything is missing
step - 3 - pickle dump
import pickle
file = open("artefacts.txt", "wb")
dictionary = param
pickle.dump(dictionary, file)
file.close()
step - 4 - ./pipeline.py -np
* relative path to the project dir ex - ./MNIST
* choose framework
* done!
'''
print(stages)
def sample_collection():
pass
def docs():
help ='''
Arguments - Description
-np [path-to-project]: New project creation
    -nptr [path-to-project]: transfer learning projects (./Project-name)
-varl or --var-list [optional] : prints variable/artifact list or dictionary with datatypes
* [optional]
        1. --update : update the dictionary and var-list with new artifacts
2. --pop : pop an unwanted artifact
--status [optional] : prints out catalog with all the projects done until then
* [optional]
1. keras : keras portion of catalog
2. pytorch : pytorch portion of catalog
--state : prints the actual procedure required to establish a report.csv
    -ar [optional] : prints artifact for a provided framework & project
* [optional]
1. -d : (default) displays artefact based on input
        2. --update [optional]: updates an existing artefact with the new info provided
* [optional]
            1. -f | -file : for attributes like summary and descriptions input is taken from a file.
-db [optional] : deploys emails to the people who joined the odyssey
* [optional]
1. --status : prints the db of users
        2. --deploy : deploys a preset email and writes a log
        3. --del : Truncates the existing db while also backing up the records under assets/bckup
and writes a logfile.
4. --backup : simple backup of the database.
'''
print(help)
def meta_collect(project_path,project_name,fw,nntype):
date = datetime.datetime.now().strftime('%d-%A (%Y)')
'''
    This function collects all the data required to create a report.csv file.
    The control flow is if-else anchored on the framework name, since the two frameworks
    generate two distinct reports.
if pytorch : {code} else: {keras-code}
'''
print('Meta Data Collection and Organisation')
meta = os.path.join(project_path,'artefacts.txt')
if os.path.exists(meta):
print('artefacts.txt exists..')
with open(meta, 'rb') as f:
data = f.read()
d = pickle.loads(data)
# report = pd.DataFrame(pd.Series(d)).T
# dev = '../catalog1.csv'
main= '../assets/catalog.csv'
print('Updating Catalog .. ')
if not os.path.exists(main):
print('No catalog file in existence, creating one, writing content..')
with open(main,'w') as f:
f.write('{},{},{},{},{}\n'.format('date','project','framework','type','path'))
# write content
with open(main,'a+') as f:
f.write('{},{},{},{},{}\n'.format(date,project_name,fw,nntype,'./Projects/'+project_path[2:]+'artefacts.txt'))
print('Done!')
else:
with open(main,'a+') as f:
f.write('{},{},{},{},{}\n'.format(date,project_name,fw,nntype,'./Projects/'+project_path[2:]+'artefacts.txt'))
print('Done!')
# report.csv creation
# generate_report(project_path, project_name,fw, report)
else:
print('artefacts.txt is non-existent, create network-metadata before triggering pipeline')
'''
    After the inputs are taken, they will be packaged and parameterized into another subroutine
    called validate_meta(), which comprehensively checks all the data with the user.
    Then comes generate_report(), which creates a pandas dataframe from the collected data
    and generates two csv files.
'''
def path_creation(project_path):
while(True):
fw = input('Select Framework : 1. Pytorch, 2. Keras >> Enter: ')
if fw == '1':
fw = 'Pytorch/'
break
elif fw == '2':
fw = 'Keras/'
break
else:
print('Invalid!')
continue
path_url = os.path.join(project_path,fw)
print('Project Path ~ ', path_url)
return path_url,fw
def pickle_handle(file_path,method,d=None):
if method == 'dump':
print('Dumping records!')
with open(file_path,'wb') as f:
pickle.dump(d,f)
print(f'byte-code dump created at {file_path}')
elif method == 'load':
print('Loading Byte-code text!')
with open(file_path, 'rb') as f:
data = f.read()
d = pickle.loads(data)
return d
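# Hedged usage sketch for pickle_handle (the file name is the one used in main,
# the artefact values are illustrative):
#
#   d = pickle_handle('./var-dict.txt', 'load')      # read the var-dict back
#   d['batch_size'] = 'int'                          # add/adjust an artefact dtype
#   pickle_handle('./var-dict.txt', 'dump', d=d)     # persist the update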
def artefact_edit(flag='-d'):
'''
catalog read and select path pivoting on framwework and project name
'''
catalog_path = '../assets/catalog.csv'
catalog = pd.read_csv(catalog_path)
while True:
fw = input('select framework 1) Keras 2) Pytorch >> Enter: ')
fw = 'Keras/' if fw == '1' else 'Pytorch/' if fw == '2' else 'Invalid'
if fw in list(catalog['framework']):
catalog = catalog[catalog['framework'] == fw]
print(f'{fw} Catalog ')
print(catalog)
break
else:
print('invalid framework name')
continue
print()
print('Project-List : ',list(catalog['project']))
while True:
prj = input('Enter project name : ')
if prj in list(catalog['project']):
catalog = catalog[catalog['project'] == prj]
print(f'{prj} Catalog')
print(catalog)
break
else:
print('Invalid Project Name (spell-check) : ')
continue
path = catalog['path'].values[0].split('/')[2:]
path = os.path.join('./',*path)
print(f'\nAccessing Artefact archive of {fw}-{prj}')
artefact = pickle_handle(path,'load')
if flag == '-d':
print(list(artefact.keys()))
while True:
            art_name = input('Enter Artifact to retrieve info : ')
if art_name in list(artefact.keys()):
print('ARTEFACT - INFO')
print(f"{art_name} >> ",artefact[art_name])
break
else:
print()
print(f'Artefact - {art_name} is non-existent!')
continue
elif flag[2] == '--update':
print(list(artefact.keys()))
while True:
            art_name = input('Enter Artifact to retrieve info : ')
if art_name in list(artefact.keys()):
print('ARTEFACT - INFO ( current ) ')
print(f"{art_name} >> ",artefact[art_name])
break
else:
print()
print(f'Artefact - {art_name} is non-existent!')
continue
print(f'Updating Artefact Info for >> {art_name}')
if flag[3] == '-f' or flag[3] == '-file':
print()
print('File Input')
print()
while True:
filename = input('Enter FILE Relative-PATH : ')
if os.path.exists(filename):
break
else:
print('invalid filepath!')
continue
with open(filename,'r') as f:
content = f.readlines()
text = ''
for line in content:
text += line
print('PREVIEW\n')
print(text)
print('\nVariable Added to the Dictionary! -- saving state .. ')
artefact[art_name] = text
pickle_handle(file_path=path,method='dump',d=artefact)
print('ARTEFACT - INFO ( Updated ) ')
print(f"{art_name} >> ",artefact[art_name])
else:
info = input('New info : ')
artefact[art_name] = info
print('Variable Added to the Dictionary! -- saving state .. ')
pickle_handle(file_path=path,method='dump',d=artefact)
print('ARTEFACT - INFO ( Updated ) ')
print(f"{art_name} >> ",artefact[art_name])
def main():
if sys.argv[1] == '-np':
try:
if len(sys.argv) == 3:
abs_path = sys.argv[2]
project_name = sys.argv[2].split('/')[1]
print('Project Directory - {}'.format(project_name))
project_path,fw = path_creation(abs_path)
nntype = input('Enter Approach Type >> ')
meta_collect(project_path,project_name,fw,nntype) # Data collection subroutine
except:
print("There's no Absolute Path given in the arguments! ")
while(True):
abs_path = input('Enter the Name of Directory : ')
nntype = input('Enter Approach Type >> ')
if os.path.isdir(abs_path):
try:
project_name = abs_path.split('/')[1]
print('Project Directory - ./{}'.format(project_name))
except:
project_name = abs_path
abs_path = './'+abs_path
print('Project Directory - {}'.format(abs_path))
break
else:
print('Invalid Path!')
print('Here are the list of neighbouring dirs : {}'.format(list(os.listdir('./'))))
continue
# Data collection subroutine
project_path,fw = path_creation(abs_path)
meta_collect(project_path,project_name,fw,nntype)
elif sys.argv[1] == '-nptr':
print('Transfer-Learning')
if len(sys.argv) == 3:
abs_path = sys.argv[2]
project_name = sys.argv[2].split('/')[1]
print('Project Directory - {}'.format(project_name))
prj_path = os.path.join('./Transfer-Learning',project_name+'/')
if os.path.exists(prj_path):
print(f'Relative Path {prj_path}')
nntype = input('Enter Approach Type >> ')
fw = input('select framework 1) Keras 2) Pytorch >> Enter: ')
fw = 'Keras/' if fw == '1' else 'Pytorch/' if fw == '2' else 'Invalid'
else:
print("project directory doesn't exist! [check-dir-name]")
exit(1)
meta_collect(prj_path,project_name,fw,nntype)
elif sys.argv[1] == '-varl' or sys.argv[1] == '--var-list':
# read here
d = pickle_handle(file_path='./var-dict.txt',method='load')
if len(sys.argv) == 2:
print('Default Variable List : ')
inp = input('1. For Entire Dict, 2. For Var-List >> Enter : ')
if inp == '1':
for var,dtype in zip(d.keys(), d.values()):
print('{:}-----{:}'.format(var,dtype))
elif inp == '2':
print(list(d.keys()))
else:
print('Invalid arg')
# display it
else:
if sys.argv[2] == '--update':
# Load the pickled prompt_list, display it
# update with new prompts required
print('Adding new variable and dtype to var-dict')
variable = input('Enter variable to add into dictionary: ')
d[variable] = input('Enter Dtype of that variable: ')
print('Variable Added to the Dictionary! -- saving state .. ')
pickle_handle(file_path='./var-dict.txt',method='dump',d=d)
elif sys.argv[2] == '--pop':
# popping variable
print('Popping Existing variable and dtype from var-dict')
                variable = input('Enter variable to pop out of the dictionary: ')
try:
d.pop(variable)
print(f'{variable} deleted from the records! -- saving state..')
pickle_handle(file_path='./var-dict.txt',method='dump',d=d)
except:
print('Invalid var-name')
exit(1)
else:
                print('Invalid follow-up argument detected, -varl is followed by --update')
exit(1)
elif sys.argv[1] == '--status':
catalog = | pd.read_csv('../assets/catalog.csv') | pandas.read_csv |
import re
import time
from pathlib import Path
from tarfile import TarFile
from timeit import default_timer as timer
from typing import *
from zipfile import ZipFile
import pandas as pd
from joblib import Parallel, delayed
from tqdm import tqdm
from smseventlog import delta, dt
from smseventlog import errors as er
from smseventlog import eventfolders as efl
from smseventlog import functions as f
from smseventlog import getlog
from smseventlog.data.internal import faults as flt
from smseventlog.data.internal import plm
from smseventlog.data.internal import utils as utl
from smseventlog.database import db
from smseventlog.utils import fileops as fl
log = getlog(__name__)
ahs_files = ['data', 'dnevent', 'sfevent']
def import_dls(p: Path, mw=None) -> dict:
"""Upload downloads folder from local computer to p-drive
p : Path
filepath to process
mw : gui.gui.MainWindow
mw object to update statusbar with progress
Returns
-------
dict
dict of result times
Import csvs to database:
faults
plm
Zip:
dsc folder (ge files)
Attempt to get unit from:
- file name
- dsc stats file
- fault csv
- plm csv
- TODO check selected dir contains some correct files (eg not accidental selection)
"""
start = time.time()
now = lambda x: time.time() - x
# check if unit given in file name
unit = utl.unit_from_str(s=p.name)
d = f.date_from_str(s=p.name)
d_lower = dt.now() + delta(days=-365 * 2)
m_result = {k: dict(num=0, time=0) for k in ('ge_zip', 'fault', 'plm')}
# list of dates created as backup if no dsc
lst_dates = [fl.date_created(p) for p in p.iterdir()]
# callback to update statusbar
if mw is None:
from smseventlog.gui._global import update_statusbar as us
else:
us = mw.update_statusbar
# find dsc files to use for stat file first
lst_dsc = utl.FolderSearch('dsc', d_lower=d_lower).search(p)
if lst_dsc:
lst_dates = [] # use dsc for date, clear backup dates
# try to get unit from first dsc serial file first
try:
p_stat = stats_from_dsc(p=lst_dsc[0])
if unit is None:
print('p_stat', p_stat)
unit = unit_from_stat(p_stat)
except Exception as e:
# print(e)
log.warning('Failed to get unit from stats file.')
# save files to import after unit check
m_import = {}
unit_func = dict(
fault=flt.unit_from_fault,
plm=plm.unit_from_haulcycle)
# check unit from fault/plm
for ftype in unit_func.keys():
try:
lst_csv = utl.FolderSearch(ftype, d_lower=d_lower).search(p)
if lst_csv:
m_import[ftype] = lst_csv
# try to get unit if doesn't exist yet
if unit is None:
unit = unit_func.get(ftype)(p=lst_csv[0], raise_errors=False)
except Exception as e:
# print(e)
us(msg=f'Failed to read {ftype} file(s).', warn=True, log_=True)
# get dates from ge dsc
for p_dsc in lst_dsc:
lst_dates.append(date_from_dsc(p_dsc))
# check for AHS files in first level of dls folder
ahs_folders = utl.FolderSearch('ahs', max_depth=0).search(p)
if not ahs_folders:
suffix = 'DLS'
else:
suffix = 'FRDLS'
if unit is None:
unit = val_from_ahs_files(ahs_folders, 'unit')
# get date from ahs files
if d is None:
lst_dates.append(val_from_ahs_files(ahs_folders, 'date'))
# final check, fail if unit doesn't exist yet
if unit is None:
raise er.NoUnitError()
# sort dates and set date if not given in folder name
if d is None and lst_dates:
lst_dates = sorted(lst_dates, reverse=False)
d = lst_dates[0]
if d is None:
raise er.NoDateError()
name = f'{unit} - {d:%Y-%m-%d}'
title = f'{name} - {suffix}'
m_result['name'] = name
from smseventlog.eventfolders import UnitFolder
uf = UnitFolder(unit=unit)
p_dst = uf.p_dls / f'{d.year}/{title}'
# make sure we don't overwrite folder
log.info(f'p_dst: {p_dst}')
if p_dst.exists():
raise er.FolderExistsError(p=p_dst)
# import fault/plm
for ftype, lst_csv in m_import.items():
time_prev = time.time()
# log.info(f'importing: {ftype}')
try:
rowsadded = utl.combine_import_csvs(lst_csv=lst_csv, ftype=ftype, unit=unit, n_jobs=-4)
m_result[ftype] = dict(num=rowsadded or 0, time=now(time_prev))
except Exception as e:
# NOTE could maybe raise a custom exception here?
us(msg=f'Failed to import {ftype} files.', warn=True, log_=True)
# zip GE dsc files
if lst_dsc:
time_prev = time.time()
for p_dsc in lst_dsc:
# log.info(f'zipping: {p_dsc}')
fl.zip_folder_threadsafe(p_src=p_dsc, p_dst=p_dst / p_dsc.name, delete=True)
m_result['ge_zip'] = dict(num=len(lst_dsc), time=now(time_prev))
# zip dnevent/sfevent folders in place
if ahs_folders:
time_prev = time.time()
# copy 6 newest files > 3mb to PREVIEW dir
make_ahs_data_preview(ahs_folders)
for p_ahs in ahs_folders:
# if any(item in p_ahs.name.lower() for item in ('dnevent', 'sfevent')):
fl.zip_folder_threadsafe(p_src=p_ahs, p_dst=p_dst / p_ahs.name, delete=True)
m_result['ahs_zip'] = dict(num=len(ahs_folders), time=now(time_prev))
# upload all to p-drive
us(f'Uploading files to: {p_dst}')
fl.move_folder(p_src=p, p_dst=p_dst)
m_result['time_total'] = now(start)
return m_result
def make_ahs_data_preview(
ahs_folders: List[Path],
p_dst: Path = None,
n_newest: int = 6) -> None:
"""Extract x newest data files > 3mb, copy to separate DATA_PREVIEW dir"""
p_data = [p for p in ahs_folders if p.name.lower() == 'data']
if not p_data:
return
p_data = p_data[0]
min_size = 3e6 # 3mb
lst = []
if p_dst is None:
p_dst = p_data.parent
p_dst = p_dst / 'DATA_PREVIEW'
# loop newest files, collect those > 3mb
for p in sorted(p_data.glob('*.gz*'), reverse=True):
if p.stat().st_size > min_size:
lst.append(p)
if len(lst) >= n_newest:
break
# move files to DATA_PREVIEW dir
for p in lst:
fl.copy_file(p_src=p, p_dst=p_dst / p.name)
def val_from_ahs_files(ahs_folders: List[Path], type_: str) -> Union[str, None]:
"""Get unit number/date from list of ahs FR folders
Parameters
----------
ahs_folders : List[Path]
[data, dnevent, sfevent]
type_ : str
unit | date
Returns
-------
Union[str, None]
unit/date or None
"""
val = None
expr = r'gz$|txt$'
if not type_ in ('unit', 'date'):
raise ValueError(f'type_ must be unit|date, not "{type_}"')
for p_ahs in ahs_folders:
if val is None:
for p2 in sorted(list(p_ahs.iterdir()), reverse=True):
if re.search(expr, p2.name.lower()):
if type_ == 'unit':
temp = p2.name.split('_')[0]
if db.unit_exists(temp):
val = temp
elif type_ == 'date':
# get date as 6 digit date YYMMDD
val = re.search(r'\d{6}', p2.name)[0]
val = dt.strptime(val, '%y%m%d')
break
return val
def is_year(name: str) -> bool:
"""Check if passed in string is a 4 digit year, eg '2020'
Parameters
----------
name : str
String to check
Returns
-------
bool
"""
exp = re.compile('^[2][0-9]{3}$')
ans = re.search(exp, name)
return not ans is None
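# Hedged examples (not part of the original module): is_year only accepts a
# four digit year beginning with "2", mirroring the regex above.
#
# >>> is_year('2020')
# True
# >>> is_year('202')
# False
# >>> is_year('1999')
# False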
@er.errlog(msg='Couldn\'t find recent dls folder.', err=False)
def get_recent_dls_unit(unit: str) -> Path:
"""Get most recent dls folder for single unit
Parameters
----------
unit : str
Returns
-------
Path
Path to most recent dls folder
"""
p_unit = efl.UnitFolder(unit=unit).p_unit
p_dls = p_unit / 'Downloads'
if not p_dls.exists():
log.warning(f'Download folder doesn\'t exist: {p_dls}')
return
# get all downloads/year folders
lst_year = [p for p in p_dls.iterdir() if p.is_dir() and is_year(p.name)]
if not lst_year:
log.warning('No download year folders found.')
return
# sort year folders by name, newest first, select first
lst_year_sorted = sorted(lst_year, key=lambda p: p.name, reverse=True) # sort by year
p_year = lst_year_sorted[0]
# sort all dls folders on date from folder title
lst_dls = [p for p in p_year.iterdir() if p.is_dir()]
lst_dls_sorted = sorted(filter(lambda p: f.date_from_str(p.name) is not None, lst_dls),
key=lambda p: f.date_from_str(p.name), reverse=True)
return lst_dls_sorted[0]
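# Hedged usage sketch ('F301' is an illustrative unit id, not from this module):
#
# p = get_recent_dls_unit('F301')
# # -> e.g. <unit folder>/Downloads/2021/F301 - 2021-06-01 - DLS, or None when
# # the Downloads tree is missing or contains no date-parseable dls folders.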
def zip_recent_dls_unit(unit: str, _zip=True) -> Path:
"""Func for gui to find (optional zip) most recent dls folder by parsing date in folder title"""
from ...gui import _global as gbl
from ...gui.dialogs import msg_simple, msgbox
p_dls = get_recent_dls_unit(unit=unit)
if not p_dls is None:
msg = f'Found DLS folder: {p_dls.name}, calculating size...'
gbl.update_statusbar(msg)
gbl.get_mainwindow().app.processEvents()
size = fl.calc_size(p_dls)
msg = f'Found DLS folder:\n\n{p_dls.name}\n{size}\n\nZip now?'
if not msgbox(msg=msg, yesno=True):
return
else:
msg = 'Couldn\'t find recent DLS folder, check folder structure for issues.'
msg_simple(msg=msg, icon='warning')
return
if _zip:
p_zip = fl.zip_folder_threadsafe(p_src=p_dls, delete=False)
return p_zip
else:
return p_dls
def fix_dsc(p: Path) -> None:
"""Process/fix single dsc/dls folder"""
# log.info(f'fix_dsc: {p}')
start = timer()
unit = utl.unit_from_path(p)
uf = efl.UnitFolder(unit=unit)
p_parent = p.parent
d = date_from_dsc(p=p)
# rename dls folder: UUU - YYYY-MM-DD - DLS
newname = f'{unit} - {d:%Y-%m-%d} - DLS'
p_new = uf.p_dls / f'{d.year}/{newname}'
# need to make sure there is only one _dsc_ folder in path
# make sure dsc isn't within 2 levels of 'Downloads' fodler
dsccount = sum(1 for _ in p_parent.glob('*dsc*'))
if dsccount > 1 or check_parents(p=p, depth=2, names=['downloads']):
# just move dsc folder, not parent and contents
p_src = p
p_dst = p_new / p.name
else:
p_src = p_parent # folder above _dsc_
p_dst = p_new
# zip and move dsc folder, then move anything else remaining in the parent dir
is_zip = p.suffix in ('.zip', '.tar')
is_same_folder = p_src == p_dst
if not is_same_folder or not is_zip:
msg = ''
for n, _p in dict(orig=p, src=p_src, dst=p_dst).items():
msg += f'\n\t\t{n:<4}: {_p}'
log.info(f'fix_dsc:{msg}')
try:
if not is_zip:
p_zip = fl.zip_folder_threadsafe(
p_src=p,
p_dst=p_new / p.name,
delete=True)
if not is_same_folder:
fl.move_folder(p_src=p_src, p_dst=p_dst)
except Exception as e:
log.warning(f'Error fixing dsc folder: {str(p_src)}')
raise e
log.info(f'Elapsed time: {f.deltasec(start, timer())}s')
def fix_dls_all_units(d_lower: dt = None) -> None:
if d_lower is None:
d_lower = dt.now() + delta(days=-30)
units = utl.all_units()
# collect dsc files from all units in parallel
result = Parallel(n_jobs=-1, verbose=11)(delayed(utl.process_files)(
ftype='dsc',
units=unit,
d_lower=d_lower,
parallel=False) for unit in units)
# fix them
def date_from_dsc(p: Path) -> dt:
"""Parse date from dsc folder name, eg 328_dsc_20180526-072028
- if no dsc, use date created"""
try:
sdate = p.name.split('_dsc_')[-1].split('-')[0]
d = dt.strptime(sdate, '%Y%m%d')
except Exception:
d = fl.date_created(p)
return d
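# Hedged example (folder name is illustrative): a dsc folder named
# "328_dsc_20180526-072028" yields dt(2018, 5, 26); any name that does not
# follow the "<unit>_dsc_<YYYYMMDD>-<HHMMSS>" pattern falls back to the
# file-created date of the path.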
def get_recent_dsc_single(
unit: str,
d_lower: dt = dt(2020, 1, 1),
year: str = None,
all_files: bool = False,
ftype: str = 'dsc',
max_depth: int = 3):
"""Return list of most recent dsc folder from each unit
- OR most recent fault... could extend this for any filetype
Parameters
----------
d_lower : datetime, optional,
limit search by date, default dt(2020,1,1)
unit : str, optional
all_files: bool
return dict of unit: list of all sorted files
Returns
-------
list | dict
"""
lst = []
uf = efl.UnitFolder(unit=unit)
p_dls = uf.p_dls
if not year is None:
p_year = p_dls / year
if p_year.exists():
p_dls = p_year
lst_unit = utl.FolderSearch(ftype, d_lower=d_lower, max_depth=max_depth).search(p_dls)
if lst_unit:
lst_unit.sort(key=lambda p: date_from_dsc(p), reverse=True)
if not all_files:
lst.append(lst_unit[0])
else:
lst.extend(lst_unit)
return lst
def get_recent_dsc_all(minesite='FortHills', model='980E', all_files=True, **kw):
"""Return list of most recent dsc folders for all units"""
lst = []
# keep all files to try and import next most recent if file fails
if all_files:
lst = {}
units = db.unique_units(minesite=minesite, model=model)
for unit in tqdm(units):
recent_dsc = get_recent_dsc_single(unit=unit, all_files=all_files, **kw)
if not recent_dsc:
print(f'\n\nNo recent dsc for: {unit}')
if not all_files:
lst.extend(recent_dsc)
else:
lst[unit] = recent_dsc
return lst
def move_tr3(p):
unit = utl.unit_from_path(p) # assuming in unit folder
p_dst_base = Path('/Users/Jayme/OneDrive/SMS Equipment/Share/tr3 export')
p_dst = p_dst_base / f'{unit}/{p.name}'
fl.copy_file(p_src=p, p_dst=p_dst)
def check_parents(p: Path, depth: int, names: list) -> bool:
"""Check path to make sure parents aren't top level folders
Parameters
----------
p : Path
Path to check\n
depth : int
From start of folder path to this folder level\n
names : list
Names to check
Returns
-------
bool
If path checked is top level folder
"""
names = [n.lower() for n in names]
for parent in list(p.parents)[:depth]:
if parent.name.lower() in names:
return True
return False
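# Hedged example (paths are illustrative): with
# p = Path('Downloads/2021/328_dsc_20180526-072028'),
# list(p.parents)[:2] is [Path('Downloads/2021'), Path('Downloads')], so
# check_parents(p, depth=2, names=['Downloads']) returns True, while depth=1
# only inspects Path('Downloads/2021') and returns False.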
def zip_recent_dls(units, d_lower=dt(2020, 1, 1)):
# get most recent dsc from list of units and zip parent folder for attaching to TSI
if not isinstance(units, list):
units = [units]
lst = []
for unit in units:
lst.extend(get_recent_dsc_single(unit=unit, d_lower=d_lower))
lst_zip = [fl.zip_folder_threadsafe(p_src=p.parent, delete=False) for p in lst]
return lst_zip
# STATS csv
def stats_from_dsc(p):
"""Get stats file path from dsc path"""
if p.is_dir():
        try:
            return list((p / 'stats').glob('SERIAL*csv'))[0]
        except Exception:
            print(f'Couldn\'t read stats: {p}')
            return None
elif p.suffix == '.zip':
return ZipFile(p)
elif p.suffix == '.tar':
return TarFile(p)
def import_stats(lst=None, d_lower=dt(2021, 1, 1)):
"""Use list of most recent dsc and combine into dataframe"""
if lst is None:
lst = get_recent_dsc_all(d_lower=d_lower)
if isinstance(lst, dict):
dfs = []
for unit, lst_csv in tqdm(lst.items()):
# try to find/load csv, or move to next if fail
for p in lst_csv:
try:
p_csv = stats_from_dsc(p)
df_single = get_stats(p=p_csv)
dfs.append(df_single)
break
except Exception as e:
log.warning(f'Failed to load csv: {p}, \n{str(e)}')
df = pd.concat(dfs)
else:
df = pd.concat([get_stats(stats_from_dsc(p)) for p in lst])
return df
def get_list_stats(unit):
"""Return list of STATS csvs for specific unit"""
from ...eventfolders import UnitFolder
uf = UnitFolder(unit=unit)
p_dls = uf.p_dls
return p_dls.glob('SERIAL*csv')
def smr_from_stats(lst):
return pd.concat([get_stats(p) for p in lst])
def unit_from_stat(p: Path) -> Union[str, None]:
"""Try to get unit from stats file
Parameters
----------
p : Path
Returns
-------
Union[str, None]
unit if exists else None
"""
df = get_stats(p=p)
unit = df.index[0]
if not unit == 'TEMP':
return unit
def get_stats(p, all_cols=False):
"""
Read stats csv and convert to single row df of timestamp, psc/tsc versions + inv SNs, to be combined
Can read zip or tarfiles"""
# dsc folder could be zipped, just read zipped csv, easy!
# super not dry
# print(p)
if isinstance(p, ZipFile):
zf = p
p = Path(zf.filename)
csv = [str(file.filename) for file in zf.filelist if re.search(
r'serial.*csv', str(file), flags=re.IGNORECASE)][0]
with zf.open(csv) as reader:
df = | pd.read_csv(reader, index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import logging
from datetime import datetime
from enum import Enum
from io import StringIO
from typing import Dict, Generator, Optional, Tuple, Union
from urllib.parse import urljoin
import numpy as np
import pandas as pd
import requests
from requests import HTTPError
from wetterdienst.core.scalar.request import ScalarRequestCore
from wetterdienst.core.scalar.result import StationsResult, ValuesResult
from wetterdienst.core.scalar.values import ScalarValuesCore
from wetterdienst.exceptions import InvalidParameter
from wetterdienst.metadata.columns import Columns
from wetterdienst.metadata.datarange import DataRange
from wetterdienst.metadata.kind import Kind
from wetterdienst.metadata.period import Period, PeriodType
from wetterdienst.metadata.provider import Provider
from wetterdienst.metadata.resolution import Resolution, ResolutionType
from wetterdienst.metadata.timezone import Timezone
from wetterdienst.provider.dwd.forecast.access import KMLReader
from wetterdienst.provider.dwd.forecast.metadata import (
DwdForecastDate,
DwdMosmixParameter,
DwdMosmixType,
)
from wetterdienst.provider.dwd.forecast.metadata.field_types import INTEGER_PARAMETERS
from wetterdienst.provider.dwd.forecast.metadata.unit import DwdMosmixUnit
from wetterdienst.provider.dwd.metadata.column_names import DwdColumns
from wetterdienst.provider.dwd.metadata.constants import (
DWD_MOSMIX_L_SINGLE_PATH,
DWD_MOSMIX_S_PATH,
DWD_SERVER,
)
from wetterdienst.provider.dwd.metadata.datetime import DatetimeFormat
from wetterdienst.util.cache import metaindex_cache
from wetterdienst.util.enumeration import parse_enumeration_from_template
from wetterdienst.util.geo import convert_dm_to_dd
from wetterdienst.util.network import list_remote_files_fsspec
log = logging.getLogger(__name__)
class DwdMosmixDataset(Enum):
SMALL = "small"
LARGE = "large"
class DwdMosmixValues(ScalarValuesCore):
"""
Fetch weather forecast data (KML/MOSMIX_S dataset).
Parameters
----------
station_id : List
- If None, data for all stations is returned.
- If not None, station_ids are a list of station ids for which data is desired.
parameter: List
- If None, data for all parameters is returned.
- If not None, list of parameters, per MOSMIX definition, see
https://www.dwd.de/DE/leistungen/opendata/help/schluessel_datenformate/kml/mosmix_elemente_pdf.pdf?__blob=publicationFile&v=2 # noqa:E501,B950
"""
_tz = Timezone.GERMANY
_data_tz = Timezone.UTC
_has_quality = False
_irregular_parameters = ()
_integer_parameters = INTEGER_PARAMETERS
_string_parameters = ()
def _create_humanized_parameters_mapping(self) -> Dict[str, str]:
"""Method for creation of parameter name mappings based on
self._parameter_base"""
hcnm = {
parameter.value: parameter.name.lower()
for parameter in self.stations.stations._parameter_base[
self.stations.stations.mosmix_type.name
]
}
return hcnm
def __init__(self, stations: StationsResult) -> None:
""""""
super(DwdMosmixValues, self).__init__(stations=stations)
parameter_base = self.stations.stations._parameter_base
dataset_accessor = self.stations.stations._dataset_accessor
parameter_ = []
for parameter, dataset in self.stations.parameter:
if parameter == dataset:
parameter = [par.value for par in parameter_base[dataset_accessor]]
parameter_.extend(parameter)
else:
parameter_.append(parameter.value)
self.kml = KMLReader(
station_ids=self.stations.station_id.tolist(),
parameters=parameter_,
)
# TODO: add __eq__ and __str__
@property
def metadata(self) -> pd.DataFrame:
"""
Wrapper for forecast metadata
:return:
"""
return self.stations.df
def query(self) -> Generator[ValuesResult, None, None]:
"""
Replace collect data method as all information is read once from kml file
:return:
"""
for df in self._collect_station_parameter():
df = self._coerce_parameter_types(df)
if self.stations.stations.tidy:
df = self.tidy_up_df(df, self.stations.stations.mosmix_type)
# df = self._tidy_up_df(df)
# df[
# Columns.DATASET.value
# ] = self.stations.stations.mosmix_type.value.lower()
# df[Columns.VALUE.value] = pd.to_numeric(
# df[Columns.VALUE.value], errors="coerce"
# ).astype(float)
df = self._coerce_meta_fields(df)
if self.stations.humanize:
df = self._humanize(df)
df = self._organize_df_columns(df)
result = ValuesResult(stations=self.stations, df=df)
yield result
def _collect_station_parameter(self) -> Generator[pd.DataFrame, None, None]:
"""
Wrapper of read_mosmix to collect forecast data (either latest or for
defined dates)
:return:
"""
if self.stations.start_issue == DwdForecastDate.LATEST:
for df in self.read_mosmix(self.stations.stations.start_issue):
yield df
else:
for date in pd.date_range(
self.stations.stations.start_issue,
self.stations.stations.end_issue,
freq=self.stations.frequency.value,
):
try:
for df in self.read_mosmix(date):
yield df
except IndexError as e:
log.warning(e)
continue
def _tidy_up_df(self, df: pd.DataFrame, dataset) -> pd.DataFrame:
"""
:param df:
:return:
"""
df_tidy = df.melt(
id_vars=[
Columns.STATION_ID.value,
Columns.DATE.value,
],
var_name=DwdColumns.PARAMETER.value,
value_name=DwdColumns.VALUE.value,
)
        df_tidy[Columns.QUALITY.value] = np.nan
        df_tidy[Columns.QUALITY.value] = df_tidy[Columns.QUALITY.value].astype(float)
return df_tidy
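    # Hedged sketch of what the melt above produces (column names follow the
    # Columns/DwdColumns enums, values are illustrative MOSMIX elements):
    #
    #   station_id  date              TTT    PPPP
    #   01001       2021-08-01 12:00  290.4  101300.0
    #
    # becomes one tidy row per parameter:
    #
    #   station_id  date              parameter  value
    #   01001       2021-08-01 12:00  TTT        290.4
    #   01001       2021-08-01 12:00  PPPP       101300.0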
def read_mosmix(
self, date: Union[datetime, DwdForecastDate]
) -> Generator[pd.DataFrame, None, None]:
"""
Manage data acquisition for a given date that is used to filter the found files
on the MOSMIX path of the DWD server.
:param date: datetime or enumeration for latest MOSMIX forecast
:return: DWDMosmixResult with gathered information
"""
for df_forecast in self._read_mosmix(date):
df_forecast = df_forecast.rename(
columns={
"station_id": DwdColumns.STATION_ID.value,
"datetime": DwdColumns.DATE.value,
}
)
yield df_forecast
def _read_mosmix(
self, date: Union[DwdForecastDate, datetime]
) -> Generator[pd.DataFrame, None, None]:
"""
Wrapper that either calls read_mosmix_s or read_mosmix_l depending on
defined period type
:param date:
:return:
"""
if self.stations.stations.mosmix_type == DwdMosmixType.SMALL:
yield from self.read_mosmix_small(date)
else:
yield from self.read_mosmix_large(date)
def read_mosmix_small(
self, date: Union[DwdForecastDate, datetime]
) -> Generator[Tuple[pd.DataFrame, pd.DataFrame], None, None]:
"""
Reads single MOSMIX-S file with all stations and returns every forecast that
matches with one of the defined station ids.
:param date:
:return:
"""
url = urljoin(DWD_SERVER, DWD_MOSMIX_S_PATH)
file_url = self.get_url_for_date(url, date)
self.kml.read(file_url)
for forecast in self.kml.get_forecasts():
yield forecast
def read_mosmix_large(
self, date: Union[DwdForecastDate, datetime]
) -> Generator[Tuple[pd.DataFrame, pd.DataFrame], None, None]:
"""
Reads multiple MOSMIX-L files with one per each station and returns a
forecast per file.
:param date:
:return:
"""
url = urljoin(DWD_SERVER, DWD_MOSMIX_L_SINGLE_PATH)
for station_id in self.stations.station_id:
station_url = f"{url}{station_id}/kml"
try:
file_url = self.get_url_for_date(station_url, date)
except HTTPError:
log.warning(f"Files for {station_id} do not exist on the server")
continue
self.kml.read(file_url)
yield next(self.kml.get_forecasts())
@staticmethod
def get_url_for_date(url: str, date: Union[datetime, DwdForecastDate]) -> str:
"""
Method to get a file url based on the MOSMIX-S/MOSMIX-L url and the date that is
used for filtering.
:param url: MOSMIX-S/MOSMIX-L path on the dwd server
:param date: date used for filtering of the available files
:return: file url based on the filtering
"""
urls = list_remote_files_fsspec(url, recursive=False)
if date == DwdForecastDate.LATEST:
try:
url = list(filter(lambda url_: "LATEST" in url_.upper(), urls))[0]
return url
except IndexError as e:
raise IndexError(f"Unable to find LATEST file within {url}") from e
df_urls = pd.DataFrame({"URL": urls})
df_urls[DwdColumns.DATE.value] = df_urls["URL"].apply(
lambda url_: url_.split("/")[-1].split("_")[2].replace(".kmz", "")
)
df_urls = df_urls[df_urls[DwdColumns.DATE.value] != "LATEST"]
df_urls[DwdColumns.DATE.value] = pd.to_datetime(
df_urls[DwdColumns.DATE.value], format=DatetimeFormat.YMDH.value
)
df_urls = df_urls.loc[df_urls[DwdColumns.DATE.value] == date]
if df_urls.empty:
raise IndexError(f"Unable to find {date} file within {url}")
return df_urls["URL"].item()
class DwdMosmixRequest(ScalarRequestCore):
""" Implementation of sites for MOSMIX forecast sites """
provider = Provider.DWD
kind = Kind.FORECAST
_url = (
"https://www.dwd.de/DE/leistungen/met_verfahren_mosmix/"
"mosmix_stationskatalog.cfg?view=nasPublication"
)
_colspecs = [
(0, 5),
(6, 11),
(12, 17),
(18, 22),
(23, 44),
(45, 51),
(52, 58),
(59, 64),
(65, 71),
(72, 76),
]
_columns = [
Columns.STATION_ID.value,
Columns.ICAO_ID.value,
Columns.FROM_DATE.value,
Columns.TO_DATE.value,
Columns.HEIGHT.value,
Columns.LATITUDE.value,
Columns.LONGITUDE.value,
Columns.NAME.value,
Columns.STATE.value,
]
_tz = Timezone.GERMANY
_parameter_base = DwdMosmixParameter
_values = DwdMosmixValues
_resolution_type = ResolutionType.FIXED
_resolution_base = Resolution # use general Resolution for fixed Resolution
_period_type = PeriodType.FIXED
_period_base = None
_data_range = DataRange.FIXED
_has_datasets = True
_dataset_tree = DwdMosmixParameter
_unique_dataset = True
_dataset_base = DwdMosmixDataset
_unit_tree = DwdMosmixUnit
@property
def _dataset_accessor(self) -> str:
"""
:return:
"""
return self.mosmix_type.name
@classmethod
def _setup_discover_filter(cls, filter_):
"""
Use SMALL and LARGE instead of resolution, which is fixed for Mosmix
:param filter_:
:return:
"""
filter_ = pd.Series(filter_).apply(
parse_enumeration_from_template, args=(cls._dataset_base,)
).tolist() or [*cls._dataset_base]
return filter_
_base_columns = [
Columns.STATION_ID.value,
Columns.ICAO_ID.value,
Columns.FROM_DATE.value,
Columns.TO_DATE.value,
Columns.HEIGHT.value,
Columns.LATITUDE.value,
Columns.LONGITUDE.value,
Columns.NAME.value,
Columns.STATE.value,
]
@staticmethod
def adjust_datetime(datetime_: datetime) -> datetime:
"""
Adjust datetime to MOSMIX release frequency, which is required for MOSMIX-L
        that is only released every 6 hours (3, 9, 15, 21). The datetime is floored
        to the closest release time, e.g. if the hour is 14, it will be rounded down to 9
:param datetime_: datetime that is adjusted
:return: adjusted datetime with floored hour
"""
regular_date = datetime_ + pd.offsets.DateOffset(hour=3)
if regular_date > datetime_:
regular_date -= pd.Timedelta(hours=6)
delta_hours = (datetime_.hour - regular_date.hour) % 6
datetime_adjusted = datetime_ - pd.Timedelta(hours=delta_hours)
return datetime_adjusted
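    # Hedged example for adjust_datetime (timestamps are illustrative): an issue
    # time of 2021-08-01 14:00 lies between the 09:00 and 15:00 runs, so
    # delta_hours = (14 - 3) % 6 = 5 and the result is 2021-08-01 09:00; an
    # input of exactly 15:00 is returned unchanged.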
def __init__(
self,
parameter: Optional[Tuple[Union[str, DwdMosmixParameter], ...]],
mosmix_type: Union[str, DwdMosmixType],
start_issue: Optional[
Union[str, datetime, DwdForecastDate]
] = DwdForecastDate.LATEST,
end_issue: Optional[Union[str, datetime]] = None,
start_date: Optional[Union[str, datetime]] = None,
end_date: Optional[Union[str, datetime]] = None,
humanize: bool = True,
tidy: bool = True,
si_units: bool = True,
) -> None:
"""
:param parameter: parameter(s) to be collected
:param mosmix_type: mosmix type, either small or large
:param start_issue: start of issue of mosmix which should be caught
(Mosmix run at time XX:YY)
:param end_issue: end of issue
:param start_date: start date for filtering returned dataframe
:param end_date: end date
:param humanize: humanize parameter names
:param tidy: tidy data to be row-wise
:param si_units: convert to si units
"""
self.mosmix_type = parse_enumeration_from_template(mosmix_type, DwdMosmixType)
super().__init__(
parameter=parameter,
start_date=start_date,
end_date=end_date,
resolution=Resolution.HOURLY,
period=Period.FUTURE,
si_units=si_units,
)
if not start_issue:
start_issue = DwdForecastDate.LATEST
try:
start_issue = parse_enumeration_from_template(start_issue, DwdForecastDate)
except InvalidParameter:
pass
# Parse issue date if not set to fixed "latest" string
if start_issue is DwdForecastDate.LATEST and end_issue:
log.info(
"end_issue will be ignored as 'latest' was selected for issue date"
)
if start_issue is not DwdForecastDate.LATEST:
if not start_issue and not end_issue:
start_issue = DwdForecastDate.LATEST
elif not end_issue:
end_issue = start_issue
elif not start_issue:
start_issue = end_issue
start_issue = | pd.to_datetime(start_issue, infer_datetime_format=True) | pandas.to_datetime |
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = pd.DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = pd.DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = pd.DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = pd.Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
result = df == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# non-float case should not raise on comparison
df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
result = df2 == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_zero_len_frame_with_series_corner_cases():
# GH#28600
df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
ser = pd.Series([1, 2], index=["A", "B"])
result = df + ser
expected = df
tm.assert_frame_equal(result, expected)
def test_frame_single_columns_object_sum_axis_1():
# GH 13758
data = {
"One": pd.Series(["A", 1.2, np.nan]),
}
df = pd.DataFrame(data)
result = df.sum(axis=1)
expected = pd.Series(["A", 1.2, 0])
tm.assert_series_equal(result, expected)
# -------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestFrameArithmeticUnsorted:
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])
df_moscow = df.tz_convert("Europe/Moscow")
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_align_frame(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = pd.DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
half = ts[::2]
result = ts + half.take(np.random.permutation(len(half)))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op", [operator.add, operator.sub, operator.mul, operator.truediv]
)
def test_operators_none_as_na(self, op):
df = DataFrame(
{"col1": [2, 5.0, 123, None], "col2": [1, 2, 3, 4]}, dtype=object
)
# since filling converts dtypes from object, changed expected to be
# object
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isna(expected)] = None
tm.assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
tm.assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("op,res", [("__eq__", False), ("__ne__", True)])
# TODO: not sure what's correct here.
@pytest.mark.filterwarnings("ignore:elementwise:FutureWarning")
def test_logical_typeerror_with_non_valid(self, op, res, float_frame):
# we are comparing floats vs a string
result = getattr(float_frame, op)("foo")
assert bool(result.all().all()) is res
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product(
[list("abc"), ["one", "two", "three"], [1, 2, 3]],
names=["first", "second", "third"],
)
df = DataFrame(
np.arange(27 * 3).reshape(27, 3),
index=index,
columns=["value1", "value2", "value3"],
).sort_index()
idx = pd.IndexSlice
for op in ["add", "sub", "mul", "div", "truediv"]:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level="third", axis=0)
expected = pd.concat(
[opa(df.loc[idx[:, :, i], :], v) for i, v in x.items()]
).sort_index()
tm.assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ["two", "three"])
result = getattr(df, op)(x, level="second", axis=0)
expected = (
pd.concat([opa(df.loc[idx[:, i], :], v) for i, v in x.items()])
.reindex_like(df)
.sort_index()
)
tm.assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([["A", "B"], ["a", "b"]])
df = DataFrame(np.ones((2, 4), dtype="int64"), columns=midx)
s = pd.Series({"a": 1, "b": 2})
df2 = df.copy()
df2.columns.names = ["lvl0", "lvl1"]
s2 = s.copy()
s2.index.name = "lvl1"
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level="lvl1")
res6 = df2.mul(s2, axis=1, level="lvl1")
exp = DataFrame(
np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype="int64"), columns=midx
)
for res in [res1, res2]:
tm.assert_frame_equal(res, exp)
exp.columns.names = ["lvl0", "lvl1"]
for res in [res3, res4, res5, res6]:
tm.assert_frame_equal(res, exp)
def test_add_with_dti_mismatched_tzs(self):
base = pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC")
idx1 = base.tz_convert("Asia/Tokyo")[:2]
idx2 = base.tz_convert("US/Eastern")[1:]
df1 = DataFrame({"A": [1, 2]}, index=idx1)
df2 = DataFrame({"A": [1, 1]}, index=idx2)
exp = DataFrame({"A": [np.nan, 3, np.nan]}, index=base)
tm.assert_frame_equal(df1 + df2, exp)
def test_combineFrame(self, float_frame, mixed_float_frame, mixed_int_frame):
frame_copy = float_frame.reindex(float_frame.index[::2])
del frame_copy["D"]
frame_copy["C"][:5] = np.nan
added = float_frame + frame_copy
indexer = added["A"].dropna().index
exp = (float_frame["A"] * 2).copy()
tm.assert_series_equal(added["A"].dropna(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added["A"], exp.loc[added["A"].index])
assert np.isnan(added["C"].reindex(frame_copy.index)[:5]).all()
# assert(False)
assert np.isnan(added["D"]).all()
self_added = float_frame + float_frame
tm.assert_index_equal(self_added.index, float_frame.index)
added_rev = frame_copy + float_frame
assert np.isnan(added["D"]).all()
assert np.isnan(added_rev["D"]).all()
# corner cases
# empty
plus_empty = float_frame + DataFrame()
assert np.isnan(plus_empty.values).all()
empty_plus = DataFrame() + float_frame
assert np.isnan(empty_plus.values).all()
empty_empty = DataFrame() + DataFrame()
assert empty_empty.empty
# out of order
reverse = float_frame.reindex(columns=float_frame.columns[::-1])
tm.assert_frame_equal(reverse + float_frame, float_frame * 2)
# mix vs float64, upcast
added = float_frame + mixed_float_frame
_check_mixed_float(added, dtype="float64")
added = mixed_float_frame + float_frame
_check_mixed_float(added, dtype="float64")
# mix vs mix
added = mixed_float_frame + mixed_float_frame
_check_mixed_float(added, dtype=dict(C=None))
# with int
added = float_frame + mixed_int_frame
_check_mixed_float(added, dtype="float64")
def test_combine_series(
self, float_frame, mixed_float_frame, mixed_int_frame, datetime_frame
):
# Series
series = float_frame.xs(float_frame.index[0])
added = float_frame + series
for key, s in added.items():
tm.assert_series_equal(s, float_frame[key] + series[key])
larger_series = series.to_dict()
larger_series["E"] = 1
larger_series = Series(larger_series)
larger_added = float_frame + larger_series
for key, s in float_frame.items():
tm.assert_series_equal(larger_added[key], s + series[key])
assert "E" in larger_added
assert np.isnan(larger_added["E"]).all()
# no upcast needed
added = mixed_float_frame + series
assert np.all(added.dtypes == series.dtype)
# vs mix (upcast) as needed
added = mixed_float_frame + series.astype("float32")
_check_mixed_float(added, dtype=dict(C=None))
added = mixed_float_frame + series.astype("float16")
_check_mixed_float(added, dtype=dict(C=None))
# FIXME: don't leave commented-out
# these raise with numexpr.....as we are adding an int64 to an
# uint64....weird vs int
# added = mixed_int_frame + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
# added = mixed_int_frame + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
ts = datetime_frame["A"]
# 10890
# we no longer allow auto timeseries broadcasting
# and require explicit broadcasting
added = datetime_frame.add(ts, axis="index")
for key, col in datetime_frame.items():
result = col + ts
tm.assert_series_equal(added[key], result, check_names=False)
assert added[key].name == key
if col.name == ts.name:
assert result.name == "A"
else:
assert result.name is None
smaller_frame = datetime_frame[:-5]
smaller_added = smaller_frame.add(ts, axis="index")
tm.assert_index_equal(smaller_added.index, datetime_frame.index)
smaller_ts = ts[:-5]
smaller_added2 = datetime_frame.add(smaller_ts, axis="index")
tm.assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = datetime_frame.add(ts[:0], axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# Frame is all-nan
result = datetime_frame[:0].add(ts, axis="index")
expected = DataFrame(
np.nan, index=datetime_frame.index, columns=datetime_frame.columns
)
tm.assert_frame_equal(result, expected)
# empty but with non-empty index
frame = datetime_frame[:1].reindex(columns=[])
result = frame.mul(ts, axis="index")
assert len(result) == len(ts)
def test_combineFunc(self, float_frame, mixed_float_frame):
result = float_frame * 2
tm.assert_numpy_array_equal(result.values, float_frame.values * 2)
# vs mix
result = mixed_float_frame * 2
for c, s in result.items():
tm.assert_numpy_array_equal(s.values, mixed_float_frame[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
result = DataFrame() * 2
assert result.index.equals(DataFrame().index)
assert len(result.columns) == 0
def test_comparisons(self, simple_frame, float_frame):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = simple_frame.xs("a")
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
tm.assert_numpy_array_equal(result.values, func(df1.values, df2.values))
msg = (
"Unable to coerce to Series/DataFrame, "
"dimension must be <= 2: (30, 4, 1, 1, 1)"
)
with pytest.raises(ValueError, match=re.escape(msg)):
func(df1, ndim_5)
result2 = func(simple_frame, row)
tm.assert_numpy_array_equal(
result2.values, func(simple_frame.values, row.values)
)
result3 = func(float_frame, 0)
tm.assert_numpy_array_equal(result3.values, func(float_frame.values, 0))
msg = "Can only compare identically-labeled DataFrame"
with pytest.raises(ValueError, match=msg):
func(simple_frame, simple_frame[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_strings_to_numbers_comparisons_raises(self, compare_operators_no_eq_ne):
# GH 11565
df = DataFrame(
{x: {"x": "foo", "y": "bar", "z": "baz"} for x in ["a", "b", "c"]}
)
f = getattr(operator, compare_operators_no_eq_ne)
msg = "'[<>]=?' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
f(df, 0)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
missing_df.iloc[0]["A"] = np.nan
with np.errstate(invalid="ignore"):
expected = missing_df.values < 0
with np.errstate(invalid="raise"):
result = (missing_df < 0).values
tm.assert_numpy_array_equal(result, expected)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
lst = [2, 2, 2]
tup = tuple(lst)
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
tm.assert_frame_equal(result, expected)
result = df.values > b
tm.assert_numpy_array_equal(result, expected.values)
msg1d = "Unable to coerce to Series, length must be 2: given 3"
msg2d = "Unable to coerce to DataFrame, shape must be"
msg2db = "operands could not be broadcast together with shapes"
with pytest.raises(ValueError, match=msg1d):
# wrong shape
df > lst
with pytest.raises(ValueError, match=msg1d):
# wrong shape
result = df > tup
# broadcasts like ndarray (GH#23000)
result = df > b_r
tm.assert_frame_equal(result, expected)
result = df.values > b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df > b_c
with pytest.raises(ValueError, match=msg2db):
df.values > b_c
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
tm.assert_frame_equal(result, expected)
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
# broadcasts like ndarray (GH#23000)
result = df == b_r
tm.assert_frame_equal(result, expected)
result = df.values == b_r
tm.assert_numpy_array_equal(result, expected.values)
with pytest.raises(ValueError, match=msg2d):
df == b_c
assert df.values.shape != b_c.shape
# with alignment
df = DataFrame(
np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc")
)
expected.index = df.index
expected.columns = df.columns
with pytest.raises(ValueError, match=msg1d):
result = df == lst
with pytest.raises(ValueError, match=msg1d):
result = df == tup
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list("abcdefg")
X_orig = DataFrame(
np.arange(10 * len(columns)).reshape(-1, len(columns)),
columns=columns,
index=range(10),
)
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list("bedcf")
subs = list("bcdef")
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result1, result3)
tm.assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1, s)
assert s is s2
assert s._mgr is s2._mgr
df = df_orig.copy()
df2 = df
df += 1
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1, df)
assert df is df2
assert df._mgr is df2._mgr
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
tm.assert_series_equal(s, s2)
tm.assert_series_equal(s_orig + 1.5, s)
df = df_orig.copy()
df2 = df
df += 1.5
tm.assert_frame_equal(df, df2)
tm.assert_frame_equal(df_orig + 1.5, df)
assert df is df2
assert df._mgr is df2._mgr
# mixed dtype
arr = np.random.randint(0, 10, size=5)
df_orig = DataFrame({"A": arr.copy(), "B": "foo"})
df = df_orig.copy()
df2 = df
df["A"] += 1
expected = DataFrame({"A": arr.copy() + 1, "B": "foo"})
| tm.assert_frame_equal(df, expected) | pandas._testing.assert_frame_equal |
#!/usr/bin/env python
# coding: utf-8
# # Coding Exercises (Part 1)
# ## Full Data Workflow A-Z: Merging, Joining, Concatenating
# ### Exercise 12: Merging, joining, aligning and concatenating Data
# Now, you will have the opportunity to analyze your own dataset. <br>
# __Follow the instructions__ and insert your code! You are either requested to
# - Complete the Code and __Fill in the gaps__. Gaps are marked with "__---__" and are __placeholders__ for your code fragment.
# - Write Code completely __on your own__
# In some exercises, you will find questions that can only be answered if your code is correct and returns the right output! The correct answer is provided below your coding cell, so you can check whether your code is correct.
# If you need a hint, check the __Hints Section__ at the end of this Notebook. Exercises and Hints are numbered accordingly.
# If you need some further help or if you want to check your code, you can also check the __solutions notebook__.
# ### Have Fun!
# --------------------------------------------------------------------------------------------------------------
# ## Option 1: Self-guided
# ### Concatenating DataFrames vertically
# __Import__ the cars dataset (with cars from usa and europe) from the csv-file __cars_clean.csv__. <br>
# Also __import__ the csv-file __cars_jap.csv__ (with cars from japan) and __concatenate__ both DataFrames __vertically__! <br>
# __Save__ the __concatenated DataFrame__ in the variable __cars_all__! <br>
# Finally, __sort__ cars_all by the model_year from __low to high__!
# ### Left Join
# __Import__ the csv-files __summer.csv__ (as summer) and __dictionary.csv__ (as dic) which contains the __full country name__ for the olympic country codes as well as __population__ and __gdp__ statistics for some countries.<br>
#
# __"Copy and paste"__ the __full country name__, __population__ and __gdp__ from the dic DataFrame __into the summer DataFrame__ with a __Left Join__!<br>
# __Save__ the new merged DataFrame in the variable __summer_new__!<br>
#
# __Inspect__ summer_new and determine the __olympic country codes__ for which the dic DataFrame does __not provide__ any information!
# ### Arithmetic operations between DataFrames / Alignment
# __Import__ the csv-files __ath_2008.csv__ and __ath_2012.csv__ with all medals winners in the Sport __Athletics__ in the Editions __2008__ and __2012__.
# For __all Athletes__ in the two DataFrames, __aggregate/add__ the total number of __Gold__, __Silver__ and __Bronze__ Medals over both editions! __Save__ the aggregated DataFrame in the variable __add__. (Hint: add should contain an index with the Athlete names and three columns, Gold, Silver, Bronze)
# __Sort__ add by Gold, Silver, Bronze from __high to low__! Change datatype to __integer__, if necessary! The first Athlete in your DataFrame should be ... no surprise ... Usain Bolt with 6 Gold and 0 Silver and Bronze Medals.
# -------------------------------------
# ## Option 2: Guided and Instructed
# # STOP HERE, IF YOU WANT TO DO THE EXERCISE ON YOUR OWN!
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# In[ ]:
#run the cell
import pandas as pd
# ### Concatenating DataFrames vertically
# In[ ]:
#run the cell
cars = pd.read_csv("cars_clean.csv")
# __Inspect__ the __cars__ DataFrame!
# In[ ]:
#run the cell
cars.head()
# In[ ]:
#run the cell
cars.tail()
# In[ ]:
#run the cell
cars.info()
# __Inspect__ the cars_jap DataFrame!
# In[ ]:
#run the cell
cars_jap = pd.read_csv("cars_jap.csv")
# In[ ]:
#run the cell
cars_jap.head()
# Before we can concatenate both DataFrames, we need to __align__ them!<br>
# 108. __Insert__ the column __origin__ to __cars_jap__ at the most appropriate position! __Fill in the gaps!__
# In[ ]:
cars_jap.insert(7, "origin", "japan")
# Also the column labels should match. <br>
# 109. __Overwrite__ the column labels in __cars_jap__ and use the same column labels that we have in cars!
# In[ ]:
cars_jap.columns = cars.columns
# __Inspect__!
# In[ ]:
#run the cell
cars_jap.head()
# 110. __Concatenate__ both DataFrames __vertically__ and create a __new RangeIndex__! __Save__ the new DataFrame in the variable __cars_all__!
# In[ ]:
cars_all = pd.concat([cars, cars_jap], ignore_index= True)
# __Inspect__!
# In[ ]:
#run the cell
cars_all.head()
# In[ ]:
#run the cell!
cars_all.tail()
# 111. __Sort cars_all__ by the __model_year__ from __low to high__! Create a __new RangeIndex__ (drop the old)! __Fill in the gaps__!
# In[ ]:
cars_all = cars_all.sort_values("model_year").reset_index(drop = True)
# __Inspect__!
# In[ ]:
#run the cell
cars_all.head()
# In[ ]:
#run the cell
cars_all.tail()
# In[ ]:
#run the cell
cars_all.info()
# ----------------------------------------------------------------------
# ### Left Join
# In[ ]:
# run the cell!
summer = pd.read_csv("summer.csv")
# __Inspect__ the __summer__ DataFrame!
# In[ ]:
# run the cell!
summer.head()
# In[ ]:
# run the cell!
dic = pd.read_csv("dictionary.csv")
# __Inspect__ dic!
# In[ ]:
# run the cell!
dic.head()
# __dic__ contains the Olympic Games __Country Codes__ ("Code") with the corresponding __full country names__ ("Country") as well as recent __Population__ and __GDP__ statistics.<br>
# 112. __Create__ the columns __Country__, __Population__ and __GDP per Capita__ in the __summer__ DataFrame by using a __Left Join__ with __pd.merge()__. <br>
# __Save__ the merged Dataframe in the variable __summer_new__! __Fill in the gaps__!
# In[ ]:
summer_new = pd.merge(summer, dic, how = "left", left_on= "Country", right_on = "Code")
# __Inspect__ summer_new!
# In[ ]:
# run the cell!
summer_new.head()
# In[ ]:
# run the cell!
summer_new.info()
# Apparently, __dic__ does __not contain__ additional information for __all olympic country codes__ that are in the __summer__ Dataframe.
# 113. __Filter__ summer_new for the elements in the column __Country_x__, where the __corresponding value__ in the column __Code__ is __missing__! <br>
# __Count__ the frequency! __Fill in the gaps__!
# In[ ]:
summer_new.loc[summer_new.Code.isnull(), "Country_x"].value_counts()
# For these country codes, we need to find __other sources__ for additional information on the __full country name__, __population__ and __gdp__ (most of these countries do not exist any more.) -> BONUS EXERCISE ;-)
# --------------------------
# ### Arithmetic operations between DataFrames / Alignment
# In[ ]:
#run the cell
ath_2008 = | pd.read_csv("ath_2008.csv") | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
import streamlit as st
import streamlit.components.v1 as components
import matplotlib.pyplot as plt
import kayak
from PIL import Image
import numpy as np
filename_airport = './assets/airports.csv'
filename_aircraft = './assets/aircraft.csv'
output = './assets/output.xlsx'
blank = Image.open('./assets/blank.jpeg')
greenest = Image.open('./assets/planet-earth.png')
cheapest = Image.open('./assets/decrease.png')
shortest = Image.open('./assets/chronometer.png')
bg = Image.open('./assets/background.jpg')
plane = Image.open('./assets/plane.png')
import pandas as pd
df_airport = pd.read_csv(filename_airport)
df_airport.head()
df_aircraft = pd.read_csv(filename_aircraft)
df_aircraft.head()
departure_airport_code = "LAX"
arrival_airport_code = "SFO"
aircraft='Airbus A320'
num_of_pax = 1
st.title('ZeroCarbonFly')
st.subheader('ZeroCarbonFly is a supporting tool for sustainable travel. Our website guides you to a green flight and visualizes your effort toward Zero Carbon action.')
st.info('Climate change has become a crucial issue in contemporary society. The US has pledged to achieve carbon neutrality by 2050, with a 2030 emissions target to be announced shortly. To meet the 2015 Paris Agreement, global greenhouse gas emissions need to be cut by 25–50% over the next decade. According to the U.S. Greenhouse Gas Emissions and Sinks report by the EPA, the primary source of greenhouse gas emissions in the United States is transportation, which accounted for 29 percent of 2019 greenhouse gas emissions. Among all travel modes, air travel is the fastest-growing source of carbon emissions and emits the most greenhouse gases.')
st.image(bg)
airport_code = df_airport['iata_code'].tolist()
airport_code = [x for x in airport_code if pd.notnull(x)]
class_list = ['Economy', 'Business', 'Premium', 'First']
st.sidebar.title('Find flights:')
departure_airport_code = st.sidebar.selectbox('Departure Airport', airport_code)
arrival_airport_code = st.sidebar.selectbox('Arrival Airport', airport_code)
date = st.sidebar.date_input('Flight Date')
class_type = st.sidebar.selectbox('Class Type', class_list)
num_of_pax = st.sidebar.slider('Number of Passengers', min_value=1, max_value=10)
carry_on_bag_number = st.sidebar.selectbox('Carry-on Bags', [0,1])
checked_bag_number = st.sidebar.selectbox('Checked Bags', [0,1,2])
date = | pd.to_datetime(date) | pandas.to_datetime |
import os
import sys
import inspect
from copy import deepcopy
import numpy as np
import pandas as pd
from ucimlr.helpers import (download_file, download_unzip, one_hot_encode_df_, xy_split,
normalize_df_, split_normalize_sequence, split_df, get_split, split_df_on_column)
from ucimlr.dataset import Dataset
from ucimlr.constants import TRAIN
from ucimlr.constants import REGRESSION
def all_datasets():
"""
Returns a list of all RegressionDataset classes.
"""
return [cls for _, cls in inspect.getmembers(sys.modules[__name__])
if inspect.isclass(cls)
and issubclass(cls, RegressionDataset)
and cls != RegressionDataset]
class RegressionDataset(Dataset):
type_ = REGRESSION # Is this necessary?
@property
def num_targets(self):
return self.y.shape[1]
class Abalone(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Abalone).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path, header=None)
y_columns = df.columns[-1:]
one_hot_encode_df_(df)
df_test, df_train, df_valid = split_df(df, [0.2, 0.8 - 0.8 * validation_size, 0.8 * validation_size])
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
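# Usage sketch: the loaders above expose x and y (and num_targets) after construction.
# Illustrative only; assumes a writable local directory and an internet connection for
# the first download.
def _example_abalone_usage(root='datasets'):
    """Load the Abalone training split and print the feature/target shapes."""
    ds = Abalone(root, split=TRAIN)
    print(ds.x.shape, ds.y.shape)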
class AirFoil(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'airfoil_self_noise.dat'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
        df = pd.read_csv(file_path, sep='\t', names=["Frequency(Hz)", "Angle of attacks(Deg)", "Chord length(m)", "Free-stream velocity(m/s)", "Suction side displacement thickness(m)", "Scaled sound pressure level(Db)"])
        y_columns = ['Scaled sound pressure level(Db)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class AirQuality(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'AirQualityUCI.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00360/AirQualityUCI.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', parse_dates=[0, 1])
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
df.Date = (df.Date - df.Date.min()).astype('timedelta64[D]') # Days as int
df.Time = df.Time.apply(lambda x: int(x.split('.')[0])) # Hours as int
df['C6H6(GT)'] = df['C6H6(GT)'].apply(lambda x: float(x.replace(',', '.'))) # Target as float
# Some floats are given with ',' instead of '.'
        df = df.applymap(lambda x: float(x.replace(',', '.')) if type(x) is str else x)  # Convert comma-decimal strings to float
df = df[df['C6H6(GT)'] != -200] # Drop all rows with missing target values
df.loc[df['CO(GT)'] == -200, 'CO(GT)'] = -10 # -200 means missing value, shifting this to be closer to
# the other values for this column
y_columns = ['C6H6(GT)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class Appliances_energy_prediction(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'energydata_complete.csv'
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00374/energydata_complete.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, parse_dates=[0, 1])
df.date = (df.date - df.date.min()).astype('timedelta64[D]')
y_columns = ['Appliances']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class AutoMPG(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'auto-mpg.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\s+', names =["mpg", "cylinders", "displacements", "horsepower", "weight", "acceleration", "model year", "origin", "car name"])
y_columns = ['mpg']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
        self.problem_type = REGRESSION
class Automobile(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'imports-85.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
        df = pd.read_csv(file_path, names=["symboling", "normalized-losses", "make", "fuel-type", "aspiration", "num-of-doors", "body-style", "drive-wheels", "engine-location", "wheel-base", "length", "width", "height", "curb-weight", "engine-type", "num-of-cylinders", "engine-size", "fuel-system", "bore", "stroke", "compression-ratio", "horsepower", "peak-rpm", "city-mpg", "highway-mpg", "price"])
        # 'price' is the usual regression target for this dataset (the original left y_columns empty)
        y_columns = ['price']
        self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class BeijingAirQuality(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+Multi-Site+Air-Quality+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00501/PRSA2017_Data_20130301-20170228.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if 'PRSA_Data' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class BeijingPM(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'PRSA_data_2010.1.1-2014.12.31.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00381/PRSA_data_2010.1.1-2014.12.31.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
        y_columns = ['pm2.5']
        self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class BiasCorrection(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bias+correction+of+numerical+prediction+model+temperature+forecast).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Bias_correction_ucl.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00514/Bias_correction_ucl.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, index_col = 'Date', parse_dates= True)
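        # Sketch of a possible completion for this loader: assumes the next-day temperature
        # columns 'Next_Tmax' and 'Next_Tmin' are the regression targets and drops rows with
        # missing values before splitting/normalizing, following the loaders above.
        df = df.dropna()
        y_columns = ['Next_Tmax', 'Next_Tmin']
        self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)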
class BikeSharing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class CarbonNanotubes(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Carbon+Nanotubes).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'carbon_nanotubes.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00448/carbon_nanotubes.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ChallengerShuttleORing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Challenger+USA+Space+Shuttle+O-Ring).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'o-ring-erosion-only.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/space-shuttle/o-ring-erosion-only.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class BlogFeedback(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/BlogFeedback).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
file_name = 'blogData_train.csv'
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00304/BlogFeedback.zip'
download_unzip(url, dataset_path)
# Iterate all test csv and concatenate to one DataFrame
test_dfs = []
for fn in os.listdir(dataset_path):
if 'blogData_test' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
test_dfs.append(pd.read_csv(file_path, header=None))
df_test = pd.concat(test_dfs)
file_path = os.path.join(dataset_path, file_name)
df_train_valid = pd.read_csv(file_path, header=None)
y_columns = [280]
df_train_valid[y_columns[0]] = np.log(df_train_valid[y_columns[0]] + 0.01)
df_test[y_columns[0]] = np.log(df_test[y_columns[0]] + 0.01)
page_columns = list(range(50))
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class CommunitiesCrime(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'communities.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,header=None)
class ConcreteSlumpTest(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Slump+Test).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'slump_test.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/slump/slump_test.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=r'\s+')
class PropulsionPlants (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Condition+Based+Maintenance+of+Naval+Propulsion+Plants).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00316/UCI CBM Dataset.zip'
download_unzip(url, dataset_path)
filename = 'data.txt'
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=r'\s+', header=None)  # data.txt is whitespace-separated with no header; 'dteday' belongs to the bike-sharing data and is not present here
class ConcreteCompressiveStrength (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Compressive+Strength).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Concrete_Data.xls'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path)
class ComputerHardware (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Computer+Hardware).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'machine.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/cpu-performance/machine.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names=["vendor name", "Model Name", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "PRP", "ERP"])
class CommunitiesCrimeUnnormalized (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'CommViolPredUnnormalizedData.txt'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00211/CommViolPredUnnormalizedData.txt'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, keep_default_na=False, header=None)
class CTSlices(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00206/slice_localization_data.zip'
download_unzip(url, dataset_path)
file_name = 'slice_localization_data.csv'
file_path = os.path.join(dataset_path, file_name)
df = pd.read_csv(file_path)
# No patient should be in both train and test set
df_train_valid = deepcopy(df.loc[df.patientId < 80, :]) # Pandas complains if it is a view
df_test = deepcopy(df.loc[df.patientId >= 80, :]) # - " -
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'patientId')
y_columns = ['reference']
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
df_res = df_res.drop(columns='patientId')
self.x, self.y = xy_split(df_res, y_columns)
class ForecastingOrders(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Daily+Demand+Forecasting+Orders).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Daily_Demand_Forecasting_Orders.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00409/Daily_Demand_Forecasting_Orders.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ForecastingStoreData(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Demand+Forecasting+for+a+store).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Daily_Demand_Forecasting_Orders.csv'
# Note: this class appears to duplicate ForecastingOrders; the URL below points at the
# same UCI file so that download_file receives a full file URL, and the separator is
# ';' to match that file. The store-specific dataset may need a different source URL.
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00409/Daily_Demand_Forecasting_Orders.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';')
class FacebookComments(RegressionDataset):
"""
Predict the number of likes on posts from a collection of Facebook pages.
Every page has multiple posts, making the number of pages less than the samples
in the dataset (each sample is one post).
# Note
The provided test split has a relatively large discrepancy in terms
of distributions of the features and targets. Training and validation splits are
also made to ensure that the same page is not in both splits. This makes the distributions
of features in training and validation splits vary to a relatively large extent, possible
because the number of pages are not that many, while the features are many.
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Facebook+Comment+Volume+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00363/Dataset.zip'
download_unzip(url, dataset_path)
dataset_path = os.path.join(dataset_path, 'Dataset')
# The 5th variant has the most data
train_path = os.path.join(dataset_path, 'Training', 'Features_Variant_5.csv')
test_path = os.path.join(dataset_path, 'Testing', 'Features_TestSet.csv')
df_train_valid = pd.read_csv(train_path, header=None)
df_test = pd.read_csv(test_path, header=None)
y_columns = df_train_valid.columns[-1:]
# Page ID is not included, but can be derived. Page IDs can not be
# in both training and validation sets
page_columns = list(range(29))
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class Facebookmetrics (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Facebook+metrics).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00368/Facebook_metrics.zip'
download_unzip(url, dataset_path)
filename = 'dataset_Facebook.csv'
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';')
class ForestFires(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Forest+Fires).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'forestfires.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/forest-fires/forestfires.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class GNFUV(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/GNFUV+Unmanned+Surface+Vehicles+Sensor+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00452/GNFUV USV Dataset.zip'
download_unzip(url, dataset_path)
dfs = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
dfs.append(pd.read_csv(file_path, header=None))
class GNFUV_2(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/GNFUV+Unmanned+Surface+Vehicles+Sensor+Data+Set+2).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00466/CNFUV_Datasets.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, header=None))
class Greenhouse_Gas_Observing_Network (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Greenhouse+Gas+Observing+Network).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00328/ghg_data.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, header=None, sep=r'\s+'))
class Hungarian_Chickenpox_Cases (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Hungarian+Chickenpox+Cases).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00580/hungary_chickenpox.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, index_col='Date', parse_dates=True))
class IIWA14_R820_Gazebo_Dataset_10Trajectories(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/IIWA14-R820-Gazebo-Dataset-10Trajectories).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'IIWA14-R820-Gazebo-Dataset-10Trayectorias.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00574/IIWA14-R820-Gazebo-Dataset-10Trayectorias.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, header=None)
class Metro_Interstate_Traffic_Volume(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Metro+Interstate+Traffic+Volume).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Metro_Interstate_Traffic_Volume.csv.gz'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00492/Metro_Interstate_Traffic_Volume.csv.gz'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Economy(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Microsoft(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Palestine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Economy(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Microsoft(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Palestine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Obama(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Obama.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Obama.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Economy(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Microsoft(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Obama(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Obama.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Obama.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Palestine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_News_Final(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'News_Final.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/News_Final.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class Online_Video_Characteristics_and_Transcoding_Time(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Online+Video+Characteristics+and+Transcoding+Time+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00335/online_video_dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == 'README.txt':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, sep='\t'))
class OnlineNews(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00332/OnlineNewsPopularity.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, 'OnlineNewsPopularity', 'OnlineNewsPopularity.csv')
df = pd.read_csv(file_path)
df.drop(columns=['url', ' timedelta'], inplace=True)
y_columns = [' shares']
df[y_columns[0]] = np.log(df[y_columns[0]])
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class Parkinson(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/parkinsons).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path: str = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/' \
'parkinsons/telemonitoring/parkinsons_updrs.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path)
y_columns = ['motor_UPDRS', 'total_UPDRS']
df_train_valid = df[df['subject#'] <= 30]
df_test = deepcopy(df[df['subject#'] > 30])
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'subject#')
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
df_res.drop(columns='subject#', inplace=True)
self.x, self.y = xy_split(df_res, y_columns)
class Physicochemical_Properties_of_Protein_Tertiary_Structure(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Physicochemical+Properties+of+Protein+Tertiary+Structure).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'CASP.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00265/CASP.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class PPG_DaLiA_Data_Set(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/PPG-DaLiA).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00495/data.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, sep='\t'))
class QSAR_aquatic_toxicity(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/QSAR+aquatic+toxicity).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'qsar_aquatic_toxicity.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00505/qsar_aquatic_toxicity.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', names=["TPSA(Tot)", "SAacc", "H-050", "MLOGP", "RDCHI", " GATS1p", "nN", "C-040", "quantitative response, LC50 [-LOG(mol/L)]"])
class QSAR_fish_bioconcentration_factor(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/QSAR+fish+bioconcentration+factor+%28BCF%29).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00511/QSAR_fish_BCF.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn =='ECFP_1024_m0-2_b2_c.txt':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, sep='\t'))
class QSAR(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/QSAR+fish+toxicity).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'qsar_fish_toxicity.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00504/qsar_fish_toxicity.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', names=[" CIC0", "SM1_Dz(Z)", " GATS1i", "NdsCH", " NdssC", "MLOGP", "quantitative response, LC50 [-LOG(mol/L)]"])
class PowerPlant(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00294/CCPP.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, 'CCPP', 'Folds5x2_pp.xlsx')
df = pd.read_excel(file_path)
y_columns = ['PE']  # PE (net hourly electrical energy output) is the documented target of the combined-cycle power plant data
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class ResidentialBuilding(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Residential+Building+Data+Set).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Residential-Building-Data-Set.xlsx'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00437/Residential-Building-Data-Set.xlsx'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path)
y_columns = ['Y house price of unit area']  # NOTE: this column name comes from the Real estate valuation dataset; the residential-building spreadsheet uses different output columns (sales price / construction cost), so this likely needs updating
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class RealEstate(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Real+estate+valuation+data+set).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Real estate valuation data set.xlsx'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00477/Real%20estate%20valuation%20data%20set.xlsx'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path, index_col='No')
class Real_time_Election_Results (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Real-time+Election+Results%3A+Portugal+2019).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00513/ElectionData2019.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if '.csv' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class Seoul_Bike_Sharing_Demand(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Seoul+Bike+Sharing+Demand).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'SeoulBikeData.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00560/SeoulBikeData.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class Servo(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Servo).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'servo.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/servo/servo.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names=["motor", "screw", " pgain", "vgain", "class"])
class SGEMM_GPU_kernel_performance (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/SGEMM+GPU+kernel+performance).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00440/sgemm_product_dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == 'Readme.txt':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class Simulated_data_for_survival_modelling (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Simulated+data+for+survival+modelling).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00581/MLtoSurvival-Data.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == '.gitkeep':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class SkillCraft1(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/SkillCraft1+Master+Table+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'SkillCraft1_Dataset.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00272/SkillCraft1_Dataset.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class SML2010 (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/SML2010).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00274/NEW-DATA.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == '.gitkeep':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, sep=r'\s+'))
class Solar_Flare(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Solar+Flare).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'flare.data1'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/solar-flare/flare.data1'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df1 = pd.read_csv(file_path, header=None, skiprows=[0], sep=r'\s+')
filename = 'flare.data2'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/solar-flare/flare.data2'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df2 = pd.read_csv(file_path, header=None, skiprows=[0], sep=r'\s+')
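# Completion sketch for the class above (assumptions: both flare files share one column
# layout, and the last three columns -- C-, M- and X-class flare counts -- are the targets):
# df = pd.concat([df1, df2], ignore_index=True)
# y_columns = df.columns[-3:]
# self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)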
import pandas as pd
import numpy as np
import os
import csv
data_path='/Users/paulsharp/Documents/Dissertation_studies/data/QC_Applied'
output_path='/Users/paulsharp/Documents/Dissertation_studies/data'
self_report_path='/Users/paulsharp/Documents/Dissertation_studies/data'
os.chdir(self_report_path)
self_report_data=pd.read_csv('Self_report_full_data_all_timepoints.csv')
os.chdir(data_path)
subs_wave_2=[x for x in os.listdir(os.curdir) if x[17]=='2']
subs_wave_1=[x for x in os.listdir(os.curdir) if x[17]=='1']
sub_order_out=[['Subject_Order']]
os.chdir(output_path)
sub_order_df=pd.read_csv('Subject_Order_GFC_Feature_Matrix_amygdala_only.csv')
subjects=sub_order_df.Subject_Order
sub_order=sub_order_df.Subject_Order.tolist()
print(sub_order)
region_names=['dmpfc_left',
'dmpfc_right',
'vmpfc_left',
'vmpfc_right',
'vlpfc_left',
'vlpfc_right']
mast_csv_w1_Leftamyg=[['Subject','dmpfc_left1','dmpfc_left2','dmpfc_left3',
'dmpfc_right1','dmpfc_right2','vmpfc_left1','vmpfc_left2',
'vmpfc_right1','vmpfc_right2','vmpfc_right3','vmpfc_right4',
'vlpfc_left1','vlpfc_left2','vlpfc_left3','vlpfc_left4','vlpfc_left5',
'vlpfc_right1','vlpfc_right2','vlpfc_right3','vlpfc_right4',
'vlpfc_right5','vlpfc_right6','vlpfc_right7','vlpfc_right8']]
mast_csv_w1_Rightamyg=[['Subject','dmpfc_left1','dmpfc_left2','dmpfc_left3',
'dmpfc_right1','dmpfc_right2','vmpfc_left1','vmpfc_left2',
'vmpfc_right1','vmpfc_right2','vmpfc_right3','vmpfc_right4',
'vlpfc_left1','vlpfc_left2','vlpfc_left3','vlpfc_left4','vlpfc_left5',
'vlpfc_right1','vlpfc_right2','vlpfc_right3','vlpfc_right4',
'vlpfc_right5','vlpfc_right6','vlpfc_right7','vlpfc_right8']]
mast_csv_w2_Leftamyg=[['Subject','dmpfc_left1','dmpfc_left2','dmpfc_left3',
'dmpfc_right1','dmpfc_right2','vmpfc_left1','vmpfc_left2',
'vmpfc_right1','vmpfc_right2','vmpfc_right3','vmpfc_right4',
'vlpfc_left1','vlpfc_left2','vlpfc_left3','vlpfc_left4','vlpfc_left5',
'vlpfc_right1','vlpfc_right2','vlpfc_right3','vlpfc_right4',
'vlpfc_right5','vlpfc_right6','vlpfc_right7','vlpfc_right8']]
mast_csv_w2_Rightamyg=[['Subject','dmpfc_left1','dmpfc_left2','dmpfc_left3',
'dmpfc_right1','dmpfc_right2','vmpfc_left1','vmpfc_left2',
'vmpfc_right1','vmpfc_right2','vmpfc_right3','vmpfc_right4',
'vlpfc_left1','vlpfc_left2','vlpfc_left3','vlpfc_left4','vlpfc_left5',
'vlpfc_right1','vlpfc_right2','vlpfc_right3','vlpfc_right4',
'vlpfc_right5','vlpfc_right6','vlpfc_right7','vlpfc_right8']]
mast_csv_diff_left=[['Subject','dmpfc_left1','dmpfc_left2','dmpfc_left3',
'dmpfc_right1','dmpfc_right2','vmpfc_left1','vmpfc_left2',
'vmpfc_right1','vmpfc_right2','vmpfc_right3','vmpfc_right4',
'vlpfc_left1','vlpfc_left2','vlpfc_left3','vlpfc_left4','vlpfc_left5',
'vlpfc_right1','vlpfc_right2','vlpfc_right3','vlpfc_right4',
'vlpfc_right5','vlpfc_right6','vlpfc_right7','vlpfc_right8']]
mast_csv_diff_right=[['Subject','dmpfc_left1','dmpfc_left2','dmpfc_left3',
'dmpfc_right1','dmpfc_right2','vmpfc_left1','vmpfc_left2',
'vmpfc_right1','vmpfc_right2','vmpfc_right3','vmpfc_right4',
'vlpfc_left1','vlpfc_left2','vlpfc_left3','vlpfc_left4','vlpfc_left5',
'vlpfc_right1','vlpfc_right2','vlpfc_right3','vlpfc_right4',
'vlpfc_right5','vlpfc_right6','vlpfc_right7','vlpfc_right8']]
region_nums=[[96, 97, 104],[107, 116],[99, 102],[2, 110, 111, 112],
[82, 176, 177, 215, 240],[10, 123, 181, 184, 189, 209, 217, 241]]
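# For traceability, the ROI number lists can be tied back to their region names
# (assumption: region_nums is ordered to match region_names above):
# region_rois = dict(zip(region_names, region_nums))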
os.chdir(data_path)
sub_count=0
for sub in sub_order:
sub_wave1=sub
print(sub_wave1)
current_sub=[sub]
sub_order_out.append(current_sub)
sub_wave2='NT2'+sub[-3:]
current_line1_left=[]
current_line1_left.append(sub_wave1)
current_line2_left=[]
current_line2_left.append(sub_wave2)
current_line1_right=[]
current_line1_right.append(sub_wave1)
current_line2_right=[]
current_line2_right.append(sub_wave2)
diff_left=[]
diff_left.append(sub_wave1)
diff_right=[]
diff_right.append(sub_wave1)
for region in region_nums:
for reg in region:
#Define amygdala connectomes
#wave2
wave1_gfc=pd.read_csv('GFC_connectome_{}_QCapplied.csv'.format(sub_wave1))
#determine which ROW each ROI in list region_num is in current dataframe
counter=0
for i in wave1_gfc.columns:
if i == '{}.0'.format(reg):
index_reg=counter
counter+=1
wave2_gfc=pd.read_csv('GFC_connectome_{}_QCapplied.csv'.format(sub_wave2))
amygdala_left_w2=wave2_gfc['243.0'][index_reg]
current_line2_left.append(amygdala_left_w2)
amygdala_right_w2=wave2_gfc['244.0'][index_reg]
current_line2_right.append(amygdala_right_w2)
#wave1
amygdala_left_w1=wave1_gfc['243.0'][index_reg]
current_line1_left.append(amygdala_left_w1)
amygdala_right_w1=wave1_gfc['244.0'][index_reg]
current_line1_right.append(amygdala_right_w1)  # fixed: previously appended the wave-2 value
#Wave2 - Wave 1 (longitudinal)
diff_amygdalae_left=amygdala_left_w2-amygdala_left_w1
diff_left.append(diff_amygdalae_left)
diff_amygdalae_right=amygdala_right_w2-amygdala_right_w1  # fixed: right-amygdala difference previously subtracted the left wave-1 value
diff_right.append(diff_amygdalae_right)
mast_csv_w1_Leftamyg.append(current_line1_left)
mast_csv_w1_Rightamyg.append(current_line1_right)
mast_csv_w2_Leftamyg.append(current_line2_left)
mast_csv_w2_Rightamyg.append(current_line2_right)
mast_csv_diff_left.append(diff_left)
mast_csv_diff_right.append(diff_right)
os.chdir(output_path)
#run correlations between self-report data and ROIs
mast_csv_w1_Leftamyg = pd.DataFrame(mast_csv_w1_Leftamyg[1:], columns=mast_csv_w1_Leftamyg[0])
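# Sketch of the presumed remaining steps: the other master lists are converted the same
# way and written out for the correlation step (file names below are placeholders):
# mast_csv_w1_Rightamyg = pd.DataFrame(mast_csv_w1_Rightamyg[1:], columns=mast_csv_w1_Rightamyg[0])
# mast_csv_w2_Leftamyg = pd.DataFrame(mast_csv_w2_Leftamyg[1:], columns=mast_csv_w2_Leftamyg[0])
# mast_csv_w2_Rightamyg = pd.DataFrame(mast_csv_w2_Rightamyg[1:], columns=mast_csv_w2_Rightamyg[0])
# mast_csv_diff_left = pd.DataFrame(mast_csv_diff_left[1:], columns=mast_csv_diff_left[0])
# mast_csv_diff_right = pd.DataFrame(mast_csv_diff_right[1:], columns=mast_csv_diff_right[0])
# mast_csv_w1_Leftamyg.to_csv('amygdala_left_wave1_GFC.csv', index=False)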
# coding=utf-8
# Copyright 2016-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "23/03/18"
import os
import json
import tarfile
import pandas as pd
import numpy as np
import copy
import multiprocessing
from ai4materials.utils.utils_mp import dispatch_jobs
from ai4materials.utils.utils_mp import collect_desc_folders
from ai4materials.utils.utils_data_retrieval import clean_folder
from ai4materials.utils.utils_data_retrieval import write_desc_info_file
from ai4materials.utils.utils_config import overwrite_configs
# from ai4materials.utils.utils_config import read_nomad_metainfo
from ai4materials.utils.utils_crystals import modify_crystal
#from ai4materials.models.l1_l0 import combine_features, l1_l0_minimization
#from ai4materials.models.sis import SIS
from ai4materials.utils.utils_data_retrieval import extract_files
from ai4materials.utils.utils_config import get_metadata_info
from ai4materials.utils.utils_data_retrieval import write_ase_db_file
from ai4materials.utils.utils_data_retrieval import write_target_values
from ai4materials.utils.utils_data_retrieval import write_summary_file
from ai4materials.utils.utils_mp import parallel_process
from concurrent.futures import ProcessPoolExecutor
from functools import partial
import logging
logger = logging.getLogger('ai4materials')
logger.setLevel(logging.CRITICAL)
def calc_descriptor_in_memory(descriptor, configs, desc_file, ase_atoms_list, tmp_folder=None, desc_folder=None,
desc_info_file=None, target_list=None, operations_on_structure=None, nb_jobs=-1, **kwargs):
""" Calculates the descriptor for a list of atomic structures.
Starting from a list of ASE structures, calculates for each file the descriptor
specified by ``descriptor``, and stores the results in the compressed archive
desc_file in the directory `desc_folder`.
Parameters:
descriptor: :py:mod:`ai4materials.descriptors.base_descriptor.Descriptor` object
Descriptor to calculate.
configs: dict
Contains configuration information such as folders for input and output (e.g. desc_folder, tmp_folder),
logging level, and metadata location. See also :py:mod:`ai4materials.utils.utils_config.set_configs`.
ase_atoms_list: list of ``ase.Atoms`` objects
Atomic structures.
desc_file: string
Name of the compressed archive where the file containing the descriptors are written.
desc_folder: string, optional (default=`None`)
Folder where the desc_file is written. If not specified, the desc_folder in read from
``configs['io']['desc_folder']``.
tmp_folder: string, optional (default=`None`)
Folder where the desc_file is written. If not specified, the desc_folder in read from
``configs['io']['tmp_folder']``.
desc_info_file: string, optional (default=`None`)
File where information about the descriptor are written to disk.
target_list: list, optional (default=`None`)
List of target values. These values are saved to disk when the descriptor is calculated, and they can loaded
for subsequent analysis.
operations_on_structure: list of objects
List of operations to be applied to the atomic structures before calculating the descriptor.
nb_jobs: int, optional (default=-1)
Number of processors to use in the calculation of the descriptor.
If set to -1, all available processors will be used.
.. codeauthor:: <NAME> <<EMAIL>>
"""
if desc_info_file is None:
desc_info_file = os.path.abspath(os.path.normpath(os.path.join(desc_folder, 'desc_info.json.info')))
desc_file = os.path.abspath(os.path.normpath(os.path.join(desc_folder, desc_file)))
# make the log file empty (do not erase it because otherwise
# we have problems with permission on the Docker image)
outfile_path = os.path.join(tmp_folder, 'output.log')
open(outfile_path, 'w+')
# remove control file from a previous run
old_control_files = [f for f in os.listdir(tmp_folder) if f.endswith('control.json')]
for old_control_file in old_control_files:
file_path = os.path.join(desc_folder, old_control_file)
try:
if os.path.isfile(file_path):
os.remove(file_path)
except Exception as e:
logger.error(e)
tar = tarfile.open(desc_file, 'w:gz')
if nb_jobs == -1:
nb_jobs = min(len(ase_atoms_list), multiprocessing.cpu_count())
# overwrite configs (priority is given to the folders defined in the function)
# if desc_folder and tmp_folder are None, then configs are not overwritten
configs = overwrite_configs(configs=configs, desc_folder=desc_folder, tmp_folder=tmp_folder)
# define desc_folder and tmp_folder for convenience
desc_folder = configs['io']['desc_folder']
tmp_folder = configs['io']['tmp_folder']
with ProcessPoolExecutor(max_workers=nb_jobs) as executor:
ase_atoms_list_with_op_nested = executor.map(worker_apply_operations,
((ase_atoms, operations_on_structure) for ase_atoms in
ase_atoms_list))
ase_atoms_list_with_op = [item for sublist in ase_atoms_list_with_op_nested for item in sublist]
# check if all elements in the ase list have labels (needed for traceability later)
label_present = [True if 'label' in ase_atoms.info.keys() else False for ase_atoms in ase_atoms_list_with_op]
if not np.all(label_present):
logger.info("Some structures in the list do not have labels. Adding or substituting labels.")
logger.info("Default labels given by the order in the list (1st structure: label=struct-1)")
logger.info("To avoid this add a label to each ASE structure using ase_atoms.info['label']='your_label'")
# substitute and add default labels
for idx, ase_atoms in enumerate(ase_atoms_list_with_op):
ase_atoms.info['label'] = str('struct-' + str(idx))
logger.info('Using {} processors'.format(nb_jobs))
# load descriptor metadata
desc_metainfo = get_metadata_info()
allowed_descriptors = desc_metainfo['descriptors']
# add target to structures in the list
if target_list is not None:
for idx_atoms, ase_atoms in enumerate(ase_atoms_list_with_op):
ase_atoms.info['target'] = target_list[idx_atoms]
if descriptor.name in allowed_descriptors:
logger.info("Calculating descriptor: {0}".format(descriptor.name))
worker_calc_descriptor = partial(calc_descriptor_one_structure, descriptor=descriptor,
allowed_descriptors=allowed_descriptors, configs=configs, idx_slice=0,
desc_file=desc_file, desc_folder=desc_folder, desc_info_file=desc_info_file,
tmp_folder=tmp_folder, target_list=target_list, **kwargs)
ase_atoms_results = parallel_process(ase_atoms_list_with_op, worker_calc_descriptor, nb_jobs=nb_jobs)
else:
raise ValueError("Please provided a valid descriptor. Valid descriptors are {}".format(allowed_descriptors))
logger.info("Calculation done.")
logger.info('Writing descriptor information to file.')
for idx_atoms, ase_atoms in enumerate(ase_atoms_results):
descriptor.write(ase_atoms, tar=tar, op_id=0)
write_ase_db_file(ase_atoms, configs, tar=tar, op_nb=0)
# we assume that the target value does not change with the application of the operations
write_target_values(ase_atoms, configs, op_nb=0, tar=tar)
# write descriptor info to file for future reference
write_desc_info_file(descriptor, desc_info_file, tar, ase_atoms_results)
tar.close()
desc_file_master = write_summary_file(descriptor, desc_file, tmp_folder,
desc_file_master=desc_file + '.tar.gz', clean_tmp=False)
clean_folder(tmp_folder)
clean_folder(desc_folder,
endings_to_delete=(".png", ".npy", "_target.json", "_aims.in", "_info.pkl", "_coord.in",
"_ase_atoms.json"))
logger.info('Descriptor file: {}'.format(desc_file_master))
return desc_file_master
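# Illustrative call (sketch): 'my_descriptor', 'configs' and 'my_structures' are
# placeholders that must be prepared with the ai4materials/ASE APIs beforehand.
# desc_file_master = calc_descriptor_in_memory(descriptor=my_descriptor, configs=configs,
#                                              desc_file='descriptors.tar.gz',
#                                              ase_atoms_list=my_structures,
#                                              desc_folder=configs['io']['desc_folder'],
#                                              tmp_folder=configs['io']['tmp_folder'],
#                                              nb_jobs=-1)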
def calc_descriptor_one_structure(ase_atoms, descriptor, **kwargs):
return descriptor.calculate(ase_atoms, **kwargs)
def _calc_descriptor(ase_atoms_list, descriptor, configs, idx_slice=0, desc_file=None, desc_folder=None,
desc_info_file=None, tmp_folder=None, target_list=None, cell_type=None, **kwargs):
if desc_file is None:
desc_file = 'descriptor.tar.gz'
if desc_info_file is None:
desc_info_file = os.path.abspath(os.path.normpath(os.path.join(desc_folder, 'desc_info.json.info')))
if target_list is None:
target_list = [None] * len(ase_atoms_list)
desc_file = os.path.abspath(os.path.normpath(os.path.join(desc_folder, desc_file)))
# make the log file empty (do not erase it because otherwise
# we have problems with permission on the Docker image)
outfile_path = os.path.join(tmp_folder, 'output.log')
open(outfile_path, 'w+')
# remove control file from a previous run
old_control_files = [f for f in os.listdir(tmp_folder) if f.endswith('control.json')]
for old_control_file in old_control_files:
file_path = os.path.join(desc_folder, old_control_file)
try:
if os.path.isfile(file_path):
os.remove(file_path)
except Exception as e:
logger.error(e)
tar = tarfile.open(desc_file, 'w:gz')
# load descriptor metadata
desc_metainfo = get_metadata_info()
allowed_descriptors = desc_metainfo['descriptors']
logger.info("Calculating descriptor: {0}".format(descriptor.name))
logger.debug("Using {0} cell".format(cell_type))
ase_atoms_result = []
for idx_atoms, ase_atoms in enumerate(ase_atoms_list):
if idx_atoms % (int(len(ase_atoms_list) / 10) + 1) == 0:
logger.info("Calculating descriptor (process # {0}): {1}/{2}".format(idx_slice, idx_atoms + 1,
len(ase_atoms_list)))
# add target list to structure
ase_atoms.info['target'] = target_list[idx_atoms]
if descriptor.name in allowed_descriptors:
structure_result = descriptor.calculate(ase_atoms, **kwargs)
descriptor.write(ase_atoms, tar=tar, op_id=0)
write_ase_db_file(ase_atoms, configs, tar=tar, op_nb=0)
# we assume that the target value does not change with the application of the operations
write_target_values(ase_atoms, configs, op_nb=0, tar=tar)
ase_atoms_result.append(structure_result)
else:
raise ValueError("Please provided a valid descriptor. Valid descriptors are {}".format(allowed_descriptors))
logger.debug('Writing descriptor information to file.')
# write descriptor info to file for future reference
descriptor.write_desc_info(desc_info_file, ase_atoms_result)
tar.close()
# open the Archive and write summary file
# here we substitute the full path with the basename to be put in the tar archive
# TO DO: do it before, when the files are added to the tar
write_summary_file(descriptor, desc_file, tmp_folder)
logger.info('Descriptor calculation (process #{0}): done.'.format(idx_slice))
filelist = []
for file_ in filelist:
file_path = os.path.join(desc_folder, file_)
try:
if os.path.isfile(file_path):
os.remove(file_path)
except Exception as e:
logger.error(e)
return desc_file
def load_descriptor(desc_files, configs):
"""Load a descriptor file"""
if not isinstance(desc_files, list):
desc_files = [desc_files]
target_list = []
structure_list = []
for idx, desc_file in enumerate(desc_files):
if idx % (int(len(desc_files) / 10) + 1) == 0:
logger.info("Extracting file {}/{}: {}".format(idx + 1, len(desc_files), desc_file))
target = extract_files(desc_file, file_type='target_files', tmp_folder=configs['io']['tmp_folder'])
structure = extract_files(desc_file, file_type='ase_db_files', tmp_folder=configs['io']['tmp_folder'])
target_list.extend(target)
structure_list.extend(structure)
# This is removed otherwise the files created by the Viewer will be erased
# clean_folder(configs['io']['tmp_folder'], delete_all=True)
return target_list, structure_list
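# Round-trip sketch: the archive produced by calc_descriptor_in_memory can be reloaded with
# target_list, structure_list = load_descriptor(desc_files=desc_file_master, configs=configs)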
def calc_model(method, tmp_folder, results_folder, combine_features_with_ops=True, cols_to_drop=None, df_features=None,
target=None, energy_unit=None, length_unit=None, allowed_operations=None, derived_features=None,
max_dim=None, sis_control=None, control_file=None, lookup_file=None):
""" Calculates model.
"""
if control_file is None:
control_file = os.path.abspath(os.path.normpath(os.path.join(tmp_folder, 'control.json')))
if max_dim is None:
max_dim = 3
if method == 'l1_l0' or method == 'SIS':
raise NotImplementedError("Not supported currently.")
# # if there are nan, drop entire row
# if df.isnull().values.any():
# #df.dropna(axis=0, how='any', inplace=True)
# #df.reset_index(inplace=True)
# logger.info('Dropping samples for which the selected features are not available.')
if method == 'l1_l0':
if cols_to_drop is not None:
df_not_features = df_features[cols_to_drop]
df_features.drop(cols_to_drop, axis=1, inplace=True)
else:
df_not_features = None
# convert numerical columns in float
for col in df_features.columns.tolist():
df_features[str(col)] = df_features[str(col)].astype(float)
# make dict with metadata name: shortname
features = df_features.columns.tolist()
features = [feature.split('(', 1)[0] for feature in features]
shortname = []
metadata_info = read_nomad_metainfo()
for feature in features:
try:
shortname.append(metadata_info[str(feature)]['shortname'])
except KeyError:
shortname.append(feature)
features_shortnames = dict(zip(features, shortname))
if method == 'l1_l0':
# combine features
if combine_features_with_ops:
df_combined = combine_features(df=df_features, energy_unit=energy_unit, length_unit=length_unit,
metadata_info=metadata_info, allowed_operations=allowed_operations,
derived_features=derived_features)
else:
df_combined = df_features
feature_list = df_combined.columns.tolist()
# replace metadata info name with shortname
# using the {'metadata_name': 'metadata_shortname'}
for fullname, shortname in features_shortnames.items():
feature_list = [item.replace(fullname.lower(), shortname) for item in feature_list]
# it is a list of panda dataframe:
# 1st el: 1D, 2nd: 2d, 3rd 3D
try:
# feature selection using l1-l0
df_desc, y_pred, target = l1_l0_minimization(target, df_combined.values, feature_list,
energy_unit=energy_unit, max_dim=max_dim, print_lasso=True,
lassonumber=25, lambda_grid_points=100)
except ValueError as err:
logger.error("Please select a different set of features and/or operations. ")
logger.error("Hint: try to remove Z_val, r_sigma, r_pi and/or the x+y / |x+y| operation.")
logger.error("and/or use [energy]=eV and [length]=angstrom.")
raise Exception("{}".format(err))
# write results to file(s)
if not os.path.exists(results_folder):
os.makedirs(results_folder)
for idx_dim in range(max_dim):
# define paths for file writing
path_to_csv = os.path.join(results_folder, str(method) + '_dim' + str(idx_dim) + '.csv')
path_to_csv_viewer = os.path.join(results_folder,
str(method) + '_dim' + str(idx_dim) + '_for_viewer.csv')
df_dim = df_desc[idx_dim]
# add y_pred y_true (i.e. target) to dataframe
y_pred_true = [('y_pred', y_pred[idx_dim]), ('y_true', target)]
df_true_pred = pd.DataFrame.from_items(y_pred_true)
df_result = pd.concat([df_dim, df_true_pred], axis=1, join='inner')
# extract only the coordinates for the viewer and rename them
coord_cols = range(idx_dim + 1)
df_result_viewer = df_dim[df_dim.columns[coord_cols]]
df_result_viewer.columns = ['coord_' + str(i) for i in coord_cols]
df_result_viewer = | pd.concat([df_result_viewer, df_true_pred], axis=1, join='inner') | pandas.concat |
import os
from flask import jsonify, request
from server import app
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from aif360.sklearn.metrics import disparate_impact_ratio, base_rate, consistency_score
def bias_table(Y, prot_attr=None, instance_type=None):
groups = Y.index.unique(prot_attr)
with np.errstate(divide='ignore', invalid='ignore'):
pct = [Y.xs(g, level=prot_attr).shape[0]/Y.shape[0] for g in groups]
data = [[np.divide(1, disparate_impact_ratio(Y[stage].dropna() == outcome, prot_attr=prot_attr, priv_group=g))
for stage in Y.columns for outcome in Y[stage].unique() if not pd.isna(outcome)]
for g in groups]
pct_name = 'proportion at first stage' if instance_type is None else f'proportion of {instance_type}'
num_stages = len(data[0])
col = pd.MultiIndex.from_tuples([(pct_name, '')]
+ list(zip(['disparate impact']*num_stages, [f'{stage} -> {outcome}' for stage in Y.columns for outcome in Y[stage].unique() if not pd.isna(outcome)])))
table = pd.DataFrame(np.c_[pct, data], columns=col, index=groups).sort_index()
table = filter_bias(table)
def colorize(v):
if v < 0.8:
return 'color: red'
elif v > 1.25:
return 'color: blue'
return ''
return table.style.format('{:.3f}').format({(pct_name, ''): '{:.1%}'}
).bar(subset=pct_name, align='left', vmin=0, vmax=1, color='#5fba7d'
).applymap(colorize, subset='disparate impact')
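# A minimal usage sketch for bias_table(), assuming the rest of this module
# (including filter_bias) is importable. The protected attribute, stage name and
# outcomes below are hypothetical placeholders, not real screening data.
def _example_bias_table():
    idx = pd.MultiIndex.from_tuples(
        [('m', i) for i in range(4)] + [('f', i) for i in range(4)],
        names=['sex', 'case'])
    Y = pd.DataFrame({'screening': ['pass', 'pass', 'fail', 'pass',
                                    'pass', 'fail', 'fail', 'pass']}, index=idx)
    # returns a styled table with one 'disparate impact' column per stage/outcome pair
    return bias_table(Y, prot_attr='sex')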
def consistency_table(X, Y):
data = [consistency_score(X.loc[Y[stage].notna()], Y[stage].dropna() == outcome)
for stage in Y.columns for outcome in Y[stage].unique() if not | pd.isna(outcome) | pandas.isna |
"""
Client entrypoint.
"""
import argparse
import logging
import os
import time
from pprint import pprint
import pandas as pd
import requests
# Local storage definitions
DATA_DIR = "wk_items"
CSV_SLUG = "{levels}_{items}.csv"
# Request headers
HTTP_HEADERS = {
"Wanikani-Revision": "20170710", # can change?
"Authorization": "Bearer {api_token}", # User API token is loaded here
"content-type": "application/json" # optional
}
# Endpoints
API_MAIN = "https://api.wanikani.com/v2/"
API_RESETS = API_MAIN + "resets"
API_REVIEWS = API_MAIN + "reviews"
API_STUDY_MATERIALS = API_MAIN + "study_materials"
API_SUBJECTS = API_MAIN + "subjects"
if __name__ == "__main__":
# Set up command-line args
parser = argparse.ArgumentParser(description='Python client to WaniKani API.')
parser.add_argument("api_token", type=str, help="User API token.")
parser.add_argument("--levels", type=str, default="", help="Comma-separated (no spaces!) list of WaniKani levels.")
parser.add_argument(
"--items",
type=str,
default="",
help="Comma-separated (no spaces!) list of WaniKani item types (kanji, vocabulary, radical)."
)
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG)
# Set up local storage
os.makedirs(DATA_DIR, exist_ok=True)
filepath: str = os.path.join(
DATA_DIR,
CSV_SLUG.format(
levels=args.levels if args.levels else "all",
items=args.items if args.items else "all"
)
)
if os.path.isfile(filepath):
logging.warning(f"CSV datafile already exists. Will overwrite: {filepath}")
# Load API token into headers
HTTP_HEADERS["Authorization"] = HTTP_HEADERS["Authorization"].format(api_token=args.api_token)
# Assemble query
url = API_SUBJECTS
if args.levels or args.items:
url += "?"
if args.items:
url += f"types={args.items}"
if "types" in url and args.levels:
url += "&"
if args.levels:
url += f"levels={args.levels}"
logging.info(f"Full WaniKani URL: {url}")
# Execute requests
r = requests.get(url, headers=HTTP_HEADERS)
time.sleep(0.5)
r_json: dict = r.json()
# Extract main "data" field from JSON response
if "data" not in r_json:
logging.critical(f"Failed to get items for URL: {url}")
logging.critical(r.text)
exit(1)
data: list = r_json["data"]
# pprint(data[:2])
# Transform data to expected format
filtered_data = []
for el in data: # type: dict
filtered_data.append(
{
"WK Level": el["data"]["level"],
"Spelling": el["data"]["characters"], # kanji
"Reading": el["data"]["readings"][0]["reading"], # kana (one of the readings)
"Meaning": el["data"]["meanings"][0]["meaning"] # one of the meanings
# TODO: Multiple meanings needed at all...?
# **{f"Reading {i}": reading["reading"] for i, reading in enumerate(el["data"]["readings"])},
# **{f"Meaning {i}": meaning["meaning"] for i, meaning in enumerate(el["data"]["meanings"])}
}
)
pprint(filtered_data[:5])
# Convert to pandas.DataFrame
df = | pd.DataFrame(filtered_data) | pandas.DataFrame |
"""Convenience methods for common tasks, such as retrieiving
subsets of events, converting timestamps, getting a term, etc.
To use:
`import utils`
"""
import re
import csv
import copy
import logging
import datetime
from urllib import parse
import numpy as np
import pandas as pd
logging.basicConfig(filename='sensordata-utils.log', filemode='w', level=logging.WARN)
def get_term(timestamp: float) -> str:
"""Returns a term id based on a timestamp in seconds. If the provided
timestamp is in milliseconds this method will truncate the timestamp to seconds.
"""
inmillis = len(str(abs(timestamp))) >= 13
if inmillis:
timestamp = int(timestamp / 1000)
eventtime = datetime.datetime.fromtimestamp(timestamp)
year = eventtime.year
month = eventtime.month
if month >= 8:
return 'fall%d' % year
if month >= 7: # TODO: Deal with summer terms?
return 'summer-1-%d' % year
if month > 5:
return 'summer-2-%d' % year
if month >= 1:
return 'spring%d' % year
return None
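# A small sketch of the timestamp-to-term mapping, including the millisecond
# truncation branch; the sample date below is hypothetical.
def _example_get_term():
    ts = int(datetime.datetime(2019, 9, 15).timestamp())  # mid-September 2019
    assert get_term(ts) == 'fall2019'
    assert get_term(ts * 1000) == 'fall2019'  # millisecond input is truncated to seconds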
#: The typical fieldnames included in sensordata events.
DEFAULT_FIELDNAMES = [
'email',
'CASSIGNMENTNAME',
'time',
'Class-Name',
'Unit-Type',
'Unit-Name',
'From-Unit-Name',
'To-Unit-Name',
'Type',
'Subtype',
'Subsubtype',
'onTestCase',
'Current-Statements',
'Current-Methods',
'Current-Size',
'Current-Test-Assertions',
'ConsoleOutput'
]
def raw_to_csv(inpath: str, outpath: str, fieldnames=None) -> None:
"""
Given a file of newline separated URLs, writes the URL query params as
rows in CSV format to the specified output file.
If your URLs are DevEventTracker posted events, then you probably want
the :attr:`DEFAULT_FIELDNAMES`. These fieldnames can be imported and
modified as needed.
"""
with open(inpath, 'r') as infile, open(outpath, 'w') as outfile:
if not fieldnames:
fieldnames = DEFAULT_FIELDNAMES
writer = csv.DictWriter(outfile, delimiter=',', fieldnames=fieldnames)
writer.writeheader()
for line in infile:
event = processline(line, fieldnames)
if event is not None:
if isinstance(event, list):
for item in event:
writer.writerow(item)
else:
writer.writerow(event)
def processline(url, fieldnames=None, filtertype=None):
"""
Given a URL, returns a dict object containing the key-value
pairs from its query params. Filters for a specific Type if specified.
Args:
fieldnames (list, default=None): The list of fieldnames to capture. If `None`,
uses `DEFAULT_FIELDNAMES`.
        filtertype (str): Only return a dict if the query param for Type == filtertype
Returns:
        (dict) containing the key-value pairs from the url's query params.
"""
if not fieldnames:
fieldnames = DEFAULT_FIELDNAMES
if 'http' in url:
url = url.split(':', 1)[-1]
items = parse.parse_qs(parse.urlparse(url).query)
kvpairs = {}
for key, value in items.items():
if _shouldwritekey(key, fieldnames):
kvpairs[key] = value[0].rstrip('\n\r')
elif key.startswith('name'): # some items are in the form name0=somekey, value0=somevalue
k = value[0] # e.g., "name0=k"
num = re.search(r'(\d+)$', key).group(0)
val = items.get('value{}'.format(num), [''])[0] # e.g., "value0=v", "value0="
if _shouldwritekey(k, fieldnames):
kvpairs[k] = val.rstrip('\n\r')
time = int(float(kvpairs.get('time', 0))) # time is not guaranteed to be present
kvpairs['time'] = time if time != 0 else ''
if filtertype and kvpairs['Type'] != filtertype:
return None
if kvpairs.get('Class-Name', '').endswith('Test') and \
kvpairs.get('Current-Test-Assertions', 0) != 0:
kvpairs['onTestCase'] = 1
return kvpairs
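# A small sketch with a hypothetical DevEventTracker URL, showing which query
# params processline() keeps and how the onTestCase flag is derived; the host and
# parameter values are made up.
def _example_processline():
    url = ('https://example.org/postevent?email=student1'
           '&time=1568505600000&Type=Edit&Class-Name=FooTest'
           '&Current-Test-Assertions=3')
    event = processline(url)
    # only keys listed in DEFAULT_FIELDNAMES are kept, plus the derived onTestCase flag
    assert event['Type'] == 'Edit'
    assert event['onTestCase'] == 1
    return event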
def split_termination_events(df):
"""Typically, Termination events contain results of several test methods being run at
once. This method takes a DataFrame containing such Termination events and returns it
with each one split into its own event (with the same timestamp).
"""
flattened = [
event
for sublist in df.apply(__split_one_termination, axis=1)
for event in sublist
]
return pd.DataFrame(flattened)
def __split_one_termination(t):
try:
t = t.to_dict()
if t['Type'] != 'Termination' or t['Subtype'] != 'Test':
return [t]
try:
tests = t['Unit-Name'].strip('|').split('|')
outcomes = t['Subsubtype'].strip('|').split('|')
expandedevents = []
for test, outcome in zip(tests, outcomes):
newevent = copy.deepcopy(t)
newevent['Unit-Name'] = test
newevent['Subsubtype'] = outcome
newevent['Unit-Type'] = 'Method'
expandedevents.append(newevent)
except AttributeError:
return [t]
except KeyError:
logging.error('Missing some required keys to split termination event. Need \
Type, Subtype, and Subsubtype. Doing nothing.')
return [t]
return expandedevents
def test_outcomes(te):
"""Parse outcomes for the specified test termination."""
outcomes = te['Subsubtype'].strip('|').split('|')
failures = outcomes.count('Failure')
successes = outcomes.count('Success')
errors = len(outcomes) - (failures + successes)
return pd.Series({
'successes': successes,
'failures': failures,
'errors': errors
})
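# A small sketch of how test_outcomes() tallies a pipe-delimited Subsubtype
# string; the outcome sequence below is made up.
def _example_test_outcomes():
    te = {'Subsubtype': '|Success|Failure|Success|Error|'}
    counts = test_outcomes(te)
    assert counts['successes'] == 2 and counts['failures'] == 1 and counts['errors'] == 1
    return counts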
def _shouldwritekey(key, fieldnames):
if not fieldnames:
return True
if key in fieldnames:
return True
return False
def maptouuids(sensordata=None, sdpath=None, uuids=None, uuidpath=None, crnfilter=None,
crncol='crn', usercol='email', assignmentcol='CASSIGNMENTNAME', due_dates=None):
"""Map sensordata to users and assignments based on studentProjectUuids.
Args:
sensordata (pd.DataFrame): A DataFrame containing sensordata
sdpath (str): Path to raw sensordata (CSV). Either this or `sensordata`
must be provided.
        uuids (pd.DataFrame): A DataFrame containing uuids
uuidpath (str): Path to UUID file. The file is expected to contain columns
['studentProjectUuid', {crncol}, {usercol}, {assignmentcol}]
at least. Either this or uuids must be provided.
crnfilter (str): A CRN to filter UUIDs on
crncol (str): Name of the column containing course CRNs
assignmentcol (str): Name of the column containing assignment names. Defaults to
'CASSIGNMENTNAME'. This will get renamed to 'assignment'.
usercol (str): Name of the column containing user information. This will get
renamed to userName, and domains will be removed from emails.
due_dates (list): A list of `pd.Timestamp` values indicating due dates of assignments.
Use these timestamps as a heuristic identifier of which assignment
the events are being generated for. If omitted, the resulting
dataframe will have no **assignment** column.
Returns:
A `pd.DataFrame` containing the result of a left join on sensordata and uuids.
"""
# check required params
if sensordata is None and sdpath is None:
raise ValueError('Either sensordata or sdpath must be provided. Got None for both.')
if uuids is None and uuidpath is None:
raise ValueError('Either uuids or uuidpath must be provided. Got None for both.')
# read sensordata
if sensordata is None:
sensordata = pd.read_csv(sdpath, low_memory=False)
# read uuids
cols = ['userUuid', 'studentProjectUuid', assignmentcol, usercol]
if crnfilter:
cols.append(crncol)
if uuids is None:
uuids = pd.read_csv(uuidpath, usecols=cols)
uuids = (
uuids.rename(columns={usercol: 'userName', assignmentcol: 'assignment'})
.sort_values(by=['userName', 'assignment'], ascending=[1, 1])
)
umap = lambda u: u.split('@')[0] if str(u) != 'nan' and u != '' else u
uuids['userName'] = uuids['userName'].apply(umap)
# filter uuids by crn if provided
if crnfilter:
uuids = uuids[(uuids[crncol].notnull()) & (uuids[crncol].str.contains(crnfilter))]
uuids = uuids.drop(columns=[crncol])
# create user oracle
users = (
uuids.loc[:, ['userUuid', 'userName']]
.drop_duplicates(subset=['userUuid', 'userName'])
.set_index('userUuid')
)
# join
merged = sensordata.join(users, on='userUuid')
merged = merged.query('userName.notnull()')
del sensordata
# create assignment oracle
assignments = (
uuids.loc[:, ['studentProjectUuid', 'assignment']]
.drop_duplicates(subset=['studentProjectUuid', 'assignment'])
.set_index('studentProjectUuid')
)
conflicts = uuids.groupby('studentProjectUuid').apply(lambda g: len(g['assignment'].unique()) > 1)
conflicts = list(conflicts[conflicts].index)
with_conflicts = merged.loc[merged['studentProjectUuid'].isin(conflicts)]
without_conflicts = merged.loc[~merged['studentProjectUuid'].isin(conflicts)]
without_conflicts = without_conflicts.join(assignments, on='studentProjectUuid')
# for uuids with conflicting assignments, map based on timestamps
if due_dates:
with_conflicts.loc[:, 'assignment'] = (
with_conflicts.apply(__assignment_from_timestamp, due_dates=due_dates, axis=1)
)
merged = pd.concat([with_conflicts, without_conflicts], ignore_index=True, sort=False) \
.sort_values(by=['userName', 'assignment', 'time'])
return merged
def __assignment_from_timestamp(event, due_dates, offset=None):
offset = pd.Timedelta(1, 'w') if offset is None else offset
try:
t = | pd.to_datetime(event['time'], unit='ms') | pandas.to_datetime |
"""
The following functions are specific to the analysis of data saved
with the BELLA control system
"""
import pandas as pd
import matplotlib.pyplot as plt
import os
import glob
import re
from numpy import unravel_index
import numpy as np
from scipy import stats
import json
from functions.data_analysis import df_outlier2none
def get_data(dir_date, nscan=None, para=None, trim_std=None):
'''Get DataFrame
dir_date: directory of a date where scan data is stored (str)
nscan: list of scan number(int)
    para: list of parameters (str). No need to write the full name.
    trim_std: if set, outliers beyond this many standard deviations are replaced via df_outlier2none.
    '''
path = get_scan_path(dir_date, nscan)
df = get_data_from_path(path, para)
#parameters to consider getting rid of outliers...(don't consider scan)
para_vals = list(df.columns)
if 'scan' in para_vals:
para_vals.remove('scan')
if 'DateTime Timestamp' in para_vals:
para_vals.remove('DateTime Timestamp')
if 'Shotnumber' in para_vals:
para_vals.remove('Shotnumber')
#get rid of outliers
if trim_std:
        df = df_outlier2none(df, std=trim_std, columns=para_vals)
    return df
def get_files_list(dirpath,f_format):
"""
    Get the paths of all files with f_format in the directory
    dirpath: directory path
f_format: ex) txt
"""
return sorted(glob.glob(dirpath+'/*.'+f_format))
def get_notebook_name():
"""
Return the full path of the jupyter notebook.
"""
import ipykernel
import requests
from requests.compat import urljoin
from notebook.notebookapp import list_running_servers
kernel_id = re.search('kernel-(.*).json',
ipykernel.connect.get_connection_file()).group(1)
servers = list_running_servers()
for ss in servers:
response = requests.get(urljoin(ss['url'], 'api/sessions'),
params={'token': ss.get('token', '')})
for nn in json.loads(response.text):
if nn['kernel']['id'] == kernel_id:
relative_path = nn['notebook']['path']
return os.path.join(ss['notebook_dir'], relative_path)
def save_dataframe(df, name, ipynb = None):
    '''Save dataframe under data_ipynb/"current ipynb name"/'''
#get the file name of ipynb
if ipynb == None:
ipynb_fullpath = get_notebook_name()
ipynb = os.path.splitext(os.path.basename(ipynb_fullpath))[0]
#Open the data folder if doesnt exist
if not os.path.exists('data_ipynb'):
os.makedirs('data_ipynb')
if not os.path.exists('data_ipynb/'+ipynb):
os.makedirs('data_ipynb/'+ipynb)
#Save data
df.to_pickle('data_ipynb/'+ipynb+'/'+name+'.pkl')
print(name+' saved')
return None
def load_dataframe(name, ipynb = None):
"""load dataframe which was saved using the function save_dataframe
    name: corresponds to the name of the dataframe you specified with save_dataframe
    ipynb: the ipynb name you are running. If None, it will be acquired automatically (not always reliable).
"""
#get the file name of ipynb
if ipynb == None:
ipynb_fullpath = get_notebook_name()
ipynb = os.path.splitext(os.path.basename(ipynb_fullpath))[0]
load_path = 'data_ipynb/'+ipynb+'/'+name+'.pkl'
df = pd.read_pickle(load_path)
print(name+' loaded')
return df
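# A small round-trip sketch for save_dataframe()/load_dataframe(); the notebook
# name 'example_nb' is a hypothetical placeholder so the automatic notebook
# lookup is skipped, and the data are dummy values.
def _example_save_load():
    df = pd.DataFrame({'shot': [1, 2, 3], 'energy': [0.8, 1.1, 0.9]})
    save_dataframe(df, 'demo', ipynb='example_nb')
    df_back = load_dataframe('demo', ipynb='example_nb')
    assert df_back.equals(df)
    return df_back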
def get_data_from_path(path_list, para_list = None):
'''Get DataFrame from the file.
    path_list: list of file paths. Data from all files are appended into one dataframe.
para_list: list of parameters (column names) you want to select from dataframe
output: dataframe
'''
data_list = []
for i in range(len(path_list)):
data_i = pd.read_csv(path_list[i], sep='\t')
if para_list:
#get full name of the parameters
para_list_full = []
for j in para_list:
para_full = par_full(path_list[i], j)
if para_full:
para_list_full = para_list_full+[para_full]
#If you can get all parameters, append the data of the scan
if len(para_list_full) == len(para_list):
data_i = data_i[para_list_full]
data_list.append(data_i)
else:
print('Skip saving data from', os.path.basename(path_list[i]))
else:
#if there is no para_list, get all the parameters that are saved
data_list.append(data_i)
data = pd.concat(data_list, sort=False)
#rename column names to alias if exists
for col in data.columns:
if 'Alias:' in col:
alias = col.split('Alias:', 1)[1]
data = data.rename(columns={col:alias})
return data
def get_nscan_last(dir_date):
'''Get the last scan number which is already done'''
path = dir_date + '\\analysis'
if not os.path.isdir(path):
return 0
else:
# get last scan info file name
files = glob.glob(path + '\\s*info.txt')
file_last = os.path.basename(files[-1])
# regexp. find number in the file name
n_scans = int(re.findall(r"\d+", file_last)[0])
return n_scans
def get_scan_path(dir_date, nscan=None):
'''
    Get the paths of the scan files s**.txt in the analysis folder
    nscan: list or int of scan numbers. If None, create a list of all scan text paths
'''
#if nscan_list=None, make a list of all scan #s
if not nscan:
nscan_last = get_nscan_last(dir_date)
nscan_list = range(1, nscan_last+1)
elif isinstance(nscan, int):
nscan_list = [nscan]
else:
nscan_list = nscan
path_list = []
#make a list of all scan file paths
for i in nscan_list:
path = dir_date + '\\analysis\\s' + str(i) + '.txt'
path_list = path_list + [path]
return path_list
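# A small sketch of the scan-file paths built for a hypothetical date folder and
# an explicit list of scan numbers.
def _example_get_scan_path():
    paths = get_scan_path(r'C:\data\20200601', nscan=[2, 5])
    assert paths[0].endswith('analysis\\s2.txt')
    assert paths[1].endswith('analysis\\s5.txt')
    return paths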
def par_full(file, par):
'''get a full name of the parameter'''
data = pd.read_csv(file, sep='\t')
indices = [k for k, s in enumerate(list(data)) if par in s]
if not indices or data.empty:
print(par, 'not found in', os.path.basename(file))
return None
elif len(indices) > 1:
for j in indices:
if list(data)[j]==par:
return list(data)[j]
raise NameError('Please Specify the Name. Several parameters match for ',par,list( list(data)[i] for i in indices ) )
return None
else:
return list(data)[indices[0]]
def show_time_xaxis():
    '''Convert the x axis from timestamps to California local time when plotting a graph with timestamps on the x axis.
'''
from datetime import datetime
summer20_start = datetime.timestamp(datetime(2020,3,8,3,0))
summer20_end = datetime.timestamp(datetime(2020,11,1,2,0))
# get current axis
ax = plt.gca()
# get current xtick labels
xticks = ax.get_xticks()
if xticks[0] > summer20_start and xticks[0] <summer20_end:
xticks = xticks - 7*60*60
else:
xticks = xticks - 8*60*60
# convert all xtick labels to selected format from ms timestamp
ax.set_xticklabels([pd.to_datetime(tm, unit='s').strftime('%Y-%m-%d\n %H:%M:%S') for tm in xticks],
rotation=50)
return None
def get_calib(Dict, device, axis = None):
    '''Get parameters from the calibration dictionary
device: device name except axis (drop off the X or Y in the device name)'''
if device in Dict:
#get target position
try: target = Dict[device]['target'][axis]
except: target = 0
#get sign (positive or negative)
try: sign = Dict[device]['sign'][axis]
except: sign = 1
#get calibration
try: calib = Dict[device]['calib'][axis]
except:
try: calib = Dict[device]['calib']
except: calib = 1
#get unit
try: unit = ' ('+Dict[device]['unit']+')'
except: unit = ''
return target, sign, calib, unit
#if 2ndmomW0, get calibration data from centroid data
    elif device.split()[1] in ('2ndmomW0', '2ndmom', '2mdmom', 'FWHM'):
device_centroid = device.split()[0]+' centroid'
return get_calib(Dict, device_centroid)
else:
print('can not find a calibration for ', device)
return None, None, None, None
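# A small sketch of the calibration-dictionary shape get_calib() reads
# (target/sign/calib/unit); the device name and numbers are hypothetical.
def _example_get_calib():
    calib_dict = {'cam1 centroid': {'target': {'x': 120.0},
                                    'sign': {'x': -1},
                                    'calib': {'x': 0.05},
                                    'unit': 'mm'}}
    target, sign, calib, unit = get_calib(calib_dict, 'cam1 centroid', axis='x')
    assert (target, sign, calib, unit) == (120.0, -1, 0.05, ' (mm)')
    return target, sign, calib, unit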
def PT_time(lvtimestamp):
'''
    Convert a LabVIEW timestamp to Pacific time.
    lvtimestamp: LabVIEW timestamp (float). Should be a 10-digit value (36...).
    '''
    from datetime import datetime
    from dateutil.relativedelta import relativedelta
    from pytz import timezone
    lv_dt = datetime.fromtimestamp(lvtimestamp) #labview time
    utc_dt = lv_dt - relativedelta(years=66, days=1) #UTC time
#convert to Pacific time
ca_tz = timezone('America/Los_Angeles')
ca_date = utc_dt.astimezone(ca_tz)
return ca_date
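# A minimal usage sketch for PT_time(); the 10-digit LabVIEW timestamp below is a
# hypothetical value.
def _example_PT_time():
    ca_date = PT_time(3.7e9)
    return ca_date.strftime('%Y-%m-%d %H:%M:%S %Z')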
def df_calib(df, Dict):
'''Return a new dataframe with calibrated values. Only those with calibration saved in Dict are stored in the
output dataframe.
df: DataFrame
Dict: calibration dictionary. It's in a separate file
'''
#parameters of the DataFrame
para = df.columns
#define a new dataframe
df_new = | pd.DataFrame() | pandas.DataFrame |
import os, sys
import click
from sklearn import metrics, inspection
import pandas as pd
import geopandas as gpd
import numpy as np
def init_out_dict():
"""Initiates the main model evaluatoin dictionary for a range of model metric scores.
The scores should match the scores used in the dictioary created in 'evaluation.evaluate_prediction()'.
Returns:
dict: empty dictionary with metrics as keys.
"""
scores = ['Accuracy', 'Precision', 'Recall', 'F1 score', 'Cohen-Kappa score', 'Brier loss score', 'ROC AUC score', 'AP score']
# initialize empty dictionary with one emtpy list per score
out_dict = {}
for score in scores:
out_dict[score] = list()
return out_dict
def evaluate_prediction(y_test, y_pred, y_prob, X_test, clf, config):
"""Computes a range of model evaluation metrics and appends the resulting scores to a dictionary.
This is done for each model execution separately.
Output will be stored to stderr if possible.
Args:
y_test (list): list containing test-sample conflict data.
y_pred (list): list containing predictions.
y_prob (array): array resulting probabilties of predictions.
X_test (array): array containing test-sample variable values.
clf (classifier): sklearn-classifier used in the simulation.
config (ConfigParser-object): object containing the parsed configuration-settings of the model.
Returns:
dict: dictionary with scores for each simulation
"""
if config.getboolean('general', 'verbose'):
click.echo("... Accuracy: {0:0.3f}".format(metrics.accuracy_score(y_test, y_pred)), err=True)
click.echo("... Precision: {0:0.3f}".format(metrics.precision_score(y_test, y_pred)), err=True)
click.echo("... Recall: {0:0.3f}".format(metrics.recall_score(y_test, y_pred)), err=True)
click.echo('... F1 score: {0:0.3f}'.format(metrics.f1_score(y_test, y_pred)), err=True)
click.echo('... Brier loss score: {0:0.3f}'.format(metrics.brier_score_loss(y_test, y_prob[:, 1])), err=True)
click.echo('... Cohen-Kappa score: {0:0.3f}'.format(metrics.cohen_kappa_score(y_test, y_pred)), err=True)
click.echo('... ROC AUC score {0:0.3f}'.format(metrics.roc_auc_score(y_test, y_prob[:, 1])), err=True)
click.echo('... AP score {0:0.3f}'.format(metrics.average_precision_score(y_test, y_prob[:, 1])), err=True)
# compute value per evaluation metric and assign to list
eval_dict = {'Accuracy': metrics.accuracy_score(y_test, y_pred),
'Precision': metrics.precision_score(y_test, y_pred),
'Recall': metrics.recall_score(y_test, y_pred),
'F1 score': metrics.f1_score(y_test, y_pred),
'Cohen-Kappa score': metrics.cohen_kappa_score(y_test, y_pred),
'Brier loss score': metrics.brier_score_loss(y_test, y_prob[:, 1]),
'ROC AUC score': metrics.roc_auc_score(y_test, y_prob[:, 1]),
'AP score': metrics.average_precision_score(y_test, y_prob[:, 1]),
}
return eval_dict
def fill_out_dict(out_dict, eval_dict):
"""Appends the computed metric score per run to the main output dictionary.
All metrics are initialized in init_out_dict().
Args:
out_dict (dict): main output dictionary.
eval_dict (dict): dictionary containing scores per simulation.
Returns:
dict: dictionary with collected scores for each simulation
"""
for key in out_dict:
out_dict[key].append(eval_dict[key])
return out_dict
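# A small sketch of how a per-run score dict flows into the main output
# dictionary; the 0.5 scores are dummy values, not real model results.
def _example_fill_out_dict():
    out_dict = init_out_dict()
    eval_dict = {k: 0.5 for k in out_dict}  # one dummy score per metric
    out_dict = fill_out_dict(out_dict, eval_dict)
    assert all(len(scores) == 1 for scores in out_dict.values())
    return out_dict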
def init_out_df():
"""Initiates and empty main output dataframe.
Returns:
dataframe: empty dataframe.
"""
return pd.DataFrame()
def fill_out_df(out_df, y_df):
"""Appends output dataframe of each simulation to main output dataframe.
Args:
out_df (dataframe): main output dataframe.
y_df (dataframe): output dataframe of each simulation.
Returns:
dataframe: main output dataframe containing results of all simulations.
"""
out_df = out_df.append(y_df, ignore_index=True)
return out_df
def polygon_model_accuracy(df, global_df, make_proj=False):
"""Determines a range of model accuracy values for each polygon.
Reduces dataframe with results from each simulation to values per unique polygon identifier.
Determines the total number of predictions made per polygon as well as fraction of correct predictions made for overall and conflict-only data.
Args:
df (dataframe): output dataframe containing results of all simulations.
global_df (dataframe): global look-up dataframe to associate unique identifier with geometry.
make_proj (bool, optional): whether or not this function is used to make a projection. If True, a couple of calculations are skipped as no observed data is available for projections. Defaults to 'False'.
Returns:
(geo-)dataframe: dataframe and geo-dataframe with data per polygon.
"""
    #- create a dataframe containing the number of occurrences per ID
ID_count = df.ID.value_counts().to_frame().rename(columns={'ID':'nr_predictions'})
#- add column containing the IDs
ID_count['ID'] = ID_count.index.values
#- set index with index named ID now
ID_count.set_index(ID_count.ID, inplace=True)
#- remove column ID
ID_count = ID_count.drop('ID', axis=1)
df_count = pd.DataFrame()
#- per polygon ID, compute sum of overall correct predictions and rename column name
if not make_proj: df_count['nr_correct_predictions'] = df.correct_pred.groupby(df.ID).sum()
#- per polygon ID, compute sum of all conflict data points and add to dataframe
if not make_proj: df_count['nr_observed_conflicts'] = df.y_test.groupby(df.ID).sum()
    #- per polygon ID, compute sum of all predicted conflicts and add to dataframe
df_count['nr_predicted_conflicts'] = df.y_pred.groupby(df.ID).sum()
#- per polygon ID, compute average probability that conflict occurs
df_count['min_prob_1'] = | pd.to_numeric(df.y_prob_1) | pandas.to_numeric |
##############################################
## Author: <NAME> ##
## Date of update: 2018/05/15 ##
## Description: Data Mining Final Project ##
## - Data Preprocessing ##
## - Remove NaN ##
## - Add opponent label ##
## - Binary encode W/L and Home/Away ##
## - Pair teams and opponents ##
## - Check games' validity ##
## - Rename and concatenate df ##
##############################################
import numpy as np
import pandas as pd
import time
#-----------------------#
# Main Function #
#-----------------------#
# @param: None
# @return: None
def main():
startTime = time.time()
# Load .csv
season = pd.read_csv('./team_season_all.csv')
playoff = pd.read_csv('./team_playoff_all.csv')
    # Merge season and playoff
df_all = | pd.concat([season, playoff], ignore_index=True) | pandas.concat |
from optparse import OptionParser
import os
import numpy as np
import pandas as pd
import get_site_features
import utils
np.set_printoptions(threshold=np.inf, linewidth=200)
pd.options.mode.chained_assignment = None
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("--transcripts", dest="TRANSCRIPTS", help="transcript sequence information")
parser.add_option("--mir", dest="MIR", help="miRNA to get features for")
parser.add_option("--mirseqs", dest="MIR_SEQS", help="tsv with miRNAs and their sequences")
parser.add_option("--kds", dest="KDS", help="kd data in tsv format, this family must be included", default=None)
parser.add_option("--sa_bg", dest="SA_BG", help="SA background for 12mers")
parser.add_option("--rnaplfold_dir", dest="RNAPLFOLD_DIR", help="folder with RNAplfold info for transcripts")
parser.add_option("--pct_file", dest="PCT_FILE", default=None, help="file with PCT information")
parser.add_option("--kd_cutoff", dest="KD_CUTOFF", type=float, default=np.inf)
parser.add_option("--outfile", dest="OUTFILE", help="location to write outputs")
parser.add_option("--overlap_dist", dest="OVERLAP_DIST", help="minimum distance between neighboring sites", type=int)
parser.add_option("--upstream_limit", dest="UPSTREAM_LIMIT", help="how far upstream to look for 3p pairing", type=int)
parser.add_option("--only_canon", dest="ONLY_CANON", help="only use canonical sites", default=False, action='store_true')
(options, args) = parser.parse_args()
TRANSCRIPTS = pd.read_csv(options.TRANSCRIPTS, sep='\t', index_col='transcript')
mirseqs = | pd.read_csv(options.MIR_SEQS, sep='\t', index_col='mir') | pandas.read_csv |
from bs4 import BeautifulSoup
import chardet
from datetime import datetime
import json
import lxml
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from serpapi import GoogleSearch
import statistics
import re
import requests
import time
from a0001_admin import clean_dataframe
from a0001_admin import retrieve_path
from a0001_admin import write_paths
from a0001_admin import work_completed
from a0001_admin import work_to_do
from query_pubs import query_pubs
from find_lat_lon import findLatLong
def aggregate_info(dataset):
"""
Save a .csv
"""
# write paths
write_paths()
# acquire information
if 'nsf' in dataset: df = acquire_nsf(dataset)
elif 'nih' in dataset: df = acquire_nih(dataset)
elif 'clinical' in dataset:
df = acquire_clinical(dataset)
list_clinical_trials(dataset)
elif 'patent' in dataset: df = acquire_patent(dataset)
elif 'pub' in dataset: df = acquire_pub(dataset)
# format and co-register fields of datasets
df = coregister(dataset)
# geolocate
df = geolocate(dataset)
# summarize
df = summarize(dataset)
# list unique
df = list_unique(dataset)
def acquire_clinical(dataset):
"""
from downloaded clinical data, aggregate
"""
name = 'acquire_clinical'
if work_to_do(name):
work_completed(name, 0)
df = acquire_downloaded(dataset)
# remove out of status trials and resave over acquired file
status_drop = ['Withdrawn', 'Terminated', 'Suspended']
status_drop.append('Temporarily not available')
status_drop.append('Unknown status')
for status in status_drop:
df = df[(df['Status'] != status)]
df = clean_dataframe(df)
path_term = str(dataset + '_src_query')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df.to_csv(file_dst)
work_completed(name, 1)
else:
path_term = str(dataset + '_src_query')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df = pd.read_csv(file_dst)
print('Clinical df = ')
print(df)
return(df)
def acquire_downloaded(dataset):
"""
    aggregate all files downloaded and saved in the user-provided directory
"""
df = pd.DataFrame()
path_term = dataset + '_downloaded'
path_src = os.path.join(retrieve_path(path_term))
for file in os.listdir(path_src):
file_src = os.path.join(path_src, file)
print('file_src = ' + str(file_src))
try:
df_src = pd.read_csv(file_src)
except:
with open(file_src, 'rb') as file:
print(chardet.detect(file.read()))
encodings = ['ISO-8859-1', 'unicode_escape', 'utf-8']
for encoding in encodings:
df_src = pd.read_csv(file_src, encoding=encoding)
break
df = df.append(df_src)
df = df.drop_duplicates()
path_term = str(dataset + '_src_query')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
print('file_dst = ' + file_dst )
df.to_csv(file_dst)
return(df)
def acquire_nsf(dataset):
"""
    aggregate all files in the user-provided directory into a single csv
"""
name = 'acquire_nsf'
if work_to_do(name):
work_completed(name, 0)
df = acquire_downloaded(dataset)
work_completed(name, 1)
else:
path_term = str(dataset + '_src_query')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df = pd.read_csv(file_dst)
print('NSF df = ')
print(df)
return(df)
def acquire_nih(dataset):
"""
from downloaded nih data, aggregate
"""
name = 'acquire_nih'
if work_to_do(name):
work_completed(name, 0)
df = acquire_downloaded(dataset)
work_completed(name, 1)
else:
path_term = str(dataset + '_src_query')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df = pd.read_csv(file_dst)
print('NIH df = ')
print(df)
return(df)
def acquire_patent(dataset):
"""
"""
df = pd.DataFrame()
return(df)
def acquire_pub(dataset):
"""
"""
df = pd.DataFrame()
query_pubs(dataset)
return(df)
def coregister(dataset):
"""
    add reference columns (ref_year, ref_values) for year and value
"""
try:
path_term = str(dataset + '_src_query')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df = pd.read_csv(file_dst)
df = clean_dataframe(df)
except:
df = pd.DataFrame()
return(df)
    if 'nsf' in dataset: df = coregister_nsf(dataset, df)
    elif 'nih' in dataset: df = coregister_nih(dataset, df)
    elif 'clinical' in dataset: df = coregister_clinical(dataset, df)
    else: return(df)
return(df)
def coregister_clinical(dataset, df):
"""
    add ref_year and ref_values (enrollment) columns
"""
print('df = ')
print(df)
name = 'coregister_clinical'
if work_to_do(name):
work_completed(name, 0)
years = []
for date in list(df['Start Date']):
print('date = ')
print(date)
try:
date = date.replace('"', '')
date_split = date.split(' ')
year = date_split[-1]
except:
year = 0
years.append(year)
values = []
for item in list(df['Enrollment']):
item = float(item)
values.append(item)
df['ref_year'] = years
df['ref_values'] = values
path_term = str(dataset + '_coregistered')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df.to_csv(file_dst)
work_completed(name, 1)
return(df)
def coregister_nih(dataset, df):
"""
"""
print('df = ')
print(df)
name = 'coregister_nih'
if work_to_do(name):
work_completed(name, 0)
years = []
for date in list(df['Fiscal Year']):
year = date
years.append(year)
values = []
for item in list(df['Direct Cost IC']):
item = float(item)
values.append(item)
df['ref_year'] = years
df['ref_values'] = values
path_term = str(dataset + '_coregistered')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df.to_csv(file_dst)
work_completed(name, 1)
return(df)
def coregister_nsf(dataset, df):
"""
"""
print('df = ')
print(df)
name = 'coregister_nsf'
if work_to_do(name):
work_completed(name, 0)
years = []
for date in list(df['StartDate']):
date_split = date.split('/')
year = date_split[-1]
years.append(year)
values = []
for item in list(df['AwardedAmountToDate']):
item = item.replace('$', '')
item = item.replace('"', '')
item = item.replace(',', '')
item = float(item)
values.append(item)
df['ref_year'] = years
df['ref_values'] = values
path_term = str(dataset + '_coregistered')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df.to_csv(file_dst)
work_completed(name, 1)
return(df)
def geolocate(dataset):
"""
"""
path_term = str(dataset + '_coregistered')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df = pd.read_csv(file_dst)
df = clean_dataframe(df)
if 'nsf' in dataset:
name = 'geolocate_nsf'
if work_to_do(name):
work_completed(name, 0)
df = geolocate_nsf(dataset, df)
work_completed(name, 1)
elif 'nih' in dataset:
name = 'geolocate_nih'
if work_to_do(name):
work_completed(name, 0)
df = geolocate_nih(dataset, df)
work_completed(name, 1)
elif 'clinical' in dataset:
name = 'geolocate_clinical'
if work_to_do(name):
work_completed(name, 0)
df = geolocate_clinical(dataset, df)
work_completed(name, 1)
else:
df = pd.DataFrame()
return(df)
path_term = str(dataset + '_geolocated')
print('path_term = ' + str(path_term))
path_dst = os.path.join(retrieve_path(path_term))
print('path_dst = ' + str(path_dst))
file_dst = os.path.join(path_dst, dataset + '.csv')
print('file_dst = ' + str(file_dst))
df = clean_dataframe(df)
df.to_csv(file_dst)
return(df)
def geolocate_clinical(dataset, df):
"""
    look up lat and lon for clinical trial sponsors and locations
"""
#print('df.columns = ')
#print(df.columns)
address_found, lat_found, lon_found = [], [], []
for i in range(len(list(df['Sponsor/Collaborators']))):
percent_complete = round(i/len(list(df['Sponsor/Collaborators']))*100,2)
left_i = len(list(df['Sponsor/Collaborators'])) - i
print('percent_complete = ' + str(percent_complete) + ' i = ' + str(i) + ' left: ' + str(left_i))
name = df.loc[i, 'Sponsor/Collaborators']
name = name.replace('"', '')
names = name.split('|')
location = df.loc[i, 'Locations']
        location = location.replace('"', '')
if '|' in location:
locations = location.split('|')
else:
locations = [location]
addresses = []
for name in names: addresses.append(name)
for location in locations: addresses.append(location)
print('addresses = ')
print(addresses)
address, lat, lon = findLatLong(addresses)
address_found.append(address)
lat_found.append(lat)
lon_found.append(lon)
df['address_found'] = address_found
df['lat_found'] = lat_found
df['lon_found'] = lon_found
return(df)
def geolocate_nih(dataset, df):
"""
    look up lat and lon for NIH organizations (taken from the Latitude/Longitude columns)
"""
"""
for i in range(len(list(df['Organization City']))):
name = df.loc[i, 'Organization Name']
name = name.replace('"', '')
city = df.loc[i, 'Organization City']
state = df.loc[i, 'Organization State']
country = df.loc[i, 'Organization Country']
zip = df.loc[i, 'Organization Zip']
addresses = []
addresses.append(name + ' , ' + city + ' , ' country)
addresses.append(name + ' , ' + city + ' , ' + state + ' , ' + zip)
addresses.append(city + ' , ' + state + ' , ' country)
address_found, lat_found, lon_found = [], [], []
for address in addresses:
lat, lon = findLatLong(address)
if lat != None:
address_found.append(address)
lat_found.append(lat)
lon_found.append(lon)
"""
df['address_found'] = list(df['Organization Name'])
df['lat_found'] = list(df['Latitude'])
df['lon_found'] = list(df['Longitude'])
return(df)
def geolocate_nsf(dataset, df):
"""
look up lat and lon for nsf award address
"""
address_found, lat_found, lon_found = [], [], []
for i in range(len(list(df['OrganizationStreet']))):
progress = round(i/len(list(df['OrganizationStreet']))*100,2)
left = len(list(df['OrganizationStreet'])) - i
print('Progress: ' + str(progress) + ' % ' + str(left) + ' left')
name = str(df.loc[i, 'Organization'])
name = name.replace('.', '')
name = name.replace('"', '')
name = name.replace('/', '')
street = str(df.loc[i, 'OrganizationStreet'])
city = str(df.loc[i, 'OrganizationCity'])
state = str(df.loc[i, 'OrganizationState'])
zip = str(df.loc[i, 'OrganizationZip'])
print('name = ' + name)
print('street = ' + street)
print('city = ' + city)
print('state = ' + state)
print('zip = ' + zip)
addresses = []
addresses.append(name)
addresses.append(street + ' , ' + city + ' , ' + state)
addresses.append(street + ' , ' + city + ' , ' + state + ' , ' + str(zip))
addresses.append(street + ' , ' + city + ' , ' + state)
addresses.append(city + ' , ' + state)
addresses.append(zip)
#print('addresses = ')
#print(addresses)
address, lat, lon = findLatLong(addresses)
address_found.append(address)
lat_found.append(lat)
lon_found.append(lon)
df['address_found'] = address_found
df['lat_found'] = lat_found
df['lon_found'] = lon_found
return(df)
def list_clinical_trials(dataset):
"""
"""
path_term = str(dataset + '_src_query')
path_dst = os.path.join(retrieve_path(path_term))
file_dst = os.path.join(path_dst, dataset + '.csv')
df = pd.read_csv(file_dst)
df = clean_dataframe(df)
print('df.columns = ')
print(df.columns)
organizations = []
target_col_names = ['Sponsor/Collaborators', 'Locations']
for col_name in target_col_names:
for i in range(len(df[col_name])):
org = df.loc[i, col_name]
org = str(org)
if '"' in org: org = org.replace('"', '')
#print('org = ' + str(org))
if '|' in org:
orgs = org.split('|')
else:
orgs = [org]
for org in orgs:
if org not in organizations:
organizations.append(org)
counts = []
urls = []
df_found_all = pd.DataFrame()
for org in organizations:
org_urls = []
df_count = pd.DataFrame()
for col_name in target_col_names:
for item in list(df[col_name]):
try:
item = float(item)
#print('item = ' + str(item))
continue
except:
item = item
#print('item = ' + str(item))
if str(org) not in str(item): continue
df_temp = df[(df[col_name] == item)]
url_temps = list(df_temp['URL'])
for url in url_temps:
if url not in org_urls:
#print('url = ')
#print(url)
org_urls.append(url)
#df_count_single = webscrape_clinical(url)
df_count = df_count.append(webscrape_clinical(url))
df_found = pd.DataFrame()
for col in df_count.columns:
df_found[col] = [sum(list(df_count[col]))]
print('df_found = ')
print(df_found)
df_found_all = df_found_all.append(df_found)
df_found_all = df_found_all.reset_index()
del df_found_all['index']
#df_found_all = clean_dataframe(df_found_all)
print('df_found_all = ')
print(df_found_all)
counts.append(len(org_urls))
str_org_urls=" , ".join(str(elem) for elem in org_urls)
#print('org = ' + str(org))
#assert len(str_org_urls) > 0
extra_commas = 50 - len(org_urls)
for i in range(extra_commas):
str_org_urls = str_org_urls + ' , '
urls.append(str_org_urls)
df_result = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import requests
import xmltodict
import json
import time
import math
def gwas_import_aid():
"""
Transform downloaded csv file into pandas dataframe for further analysis.
:return: a dataframe containing all important information related.
"""
# read in file
df = | pd.read_csv("./data/gwas_catalog_v1.0.1-associations_e91_r2018-02-13.tsv",sep='\t',low_memory=False) | pandas.read_csv |
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
def get_avg_original_accuracy(raw_folder):
original_files = list( filter(lambda x: x.startswith('permutation_result_'), os.listdir(raw_folder)))
result = pd.DataFrame(columns=['contrast', 'class', 'Model', 'Avg original accuracy'])
for file in original_files:
df = pd.read_csv(raw_folder+file)
res = pd.DataFrame({"Avg original accuracy":
df.groupby(['contrast','class','Model'])['original_accuracy'].mean()}).reset_index()
result = | pd.concat([res,result], ignore_index=True) | pandas.concat |
from flask import Flask, request, jsonify, g, render_template
from flask_json import FlaskJSON, JsonError, json_response, as_json
import plotly.graph_objects as go
from datetime import datetime
from datetime import timedelta
import glob
import requests
from app import db
from app.models import *
from app.plots import bp
import pandas as pd
import io
from app.api import vis
from sqlalchemy import sql
import numpy as np
from app.tools.curvefit.core.model import CurveModel
from app.tools.curvefit.core.functions import gaussian_cdf, gaussian_pdf
PHU = {'the_district_of_algoma':'The District of Algoma Health Unit',
'brant_county':'Brant County Health Unit',
'durham_regional':'Durham Regional Health Unit',
'grey_bruce':'Grey Bruce Health Unit',
'haldimand_norfolk':'Haldimand-Norfolk Health Unit',
'haliburton_kawartha_pine_ridge_district':'Haliburton, Kawartha, Pine Ridge District Health Unit',
'halton_regional':'Halton Regional Health Unit',
'city_of_hamilton':'City of Hamilton Health Unit',
'hastings_and_prince_edward_counties':'Hastings and Prince Edward Counties Health Unit',
'huron_county':'Huron County Health Unit',
'chatham_kent':'Chatham-Kent Health Unit',
'kingston_frontenac_and_lennox_and_addington':'Kingston, Frontenac, and Lennox and Addington Health Unit',
'lambton':'Lambton Health Unit',
'leeds_grenville_and_lanark_district':'Leeds, Grenville and Lanark District Health Unit',
'middlesex_london':'Middlesex-London Health Unit',
'niagara_regional_area':'Niagara Regional Area Health Unit',
'north_bay_parry_sound_district':'North Bay Parry Sound District Health Unit',
'northwestern':'Northwestern Health Unit',
'city_of_ottawa':'City of Ottawa Health Unit',
'peel_regional':'Peel Regional Health Unit',
'perth_district':'Perth District Health Unit',
'peterborough_county_city':'Peterborough County–City Health Unit',
'porcupine':'Porcupine Health Unit',
'renfrew_county_and_district':'Renfrew County and District Health Unit',
'the_eastern_ontario':'The Eastern Ontario Health Unit',
'simcoe_muskoka_district':'Simcoe Muskoka District Health Unit',
'sudbury_and_district':'Sudbury and District Health Unit',
'thunder_bay_district':'Thunder Bay District Health Unit',
'timiskaming':'Timiskaming Health Unit',
'waterloo':'Waterloo Health Unit',
'wellington_dufferin_guelph':'Wellington-Dufferin-Guelph Health Unit',
'windsor_essex_county':'Windsor-Essex County Health Unit',
'york_regional':'York Regional Health Unit',
'southwestern':'Southwestern Public Health Unit',
'city_of_toronto':'City of Toronto Health Unit',
'huron_perth_county':'Huron Perth Public Health Unit'}
def get_dir(data, today=datetime.today().strftime('%Y-%m-%d')):
source_dir = 'data/' + data['classification'] + '/' + data['stage'] + '/'
load_dir = source_dir + data['source_name'] + '/' + data['table_name']
file_name = data['table_name'] + '_' + today + '.' + data['type']
file_path = load_dir + '/' + file_name
return load_dir, file_path
def get_file(data):
load_dir, file_path = get_dir(data)
files = glob.glob(load_dir + "/*." + data['type'])
files = [file.split('_')[-1] for file in files]
files = [file.split('.csv')[0] for file in files]
dates = [datetime.strptime(file, '%Y-%m-%d') for file in files]
max_date = max(dates).strftime('%Y-%m-%d')
load_dir, file_path = get_dir(data, max_date)
return file_path
## Tests
def new_tests_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['New tests'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['New tests'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['New tests'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=temp['New tests'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['New tests'].iloc[-2],
'increasing': {'color':'green'},
'decreasing': {'color':'red'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"New Tests<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="new tests").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def total_tests_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Total tested'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Total tested'].tail(1).values[0],
number = {'font': {'size': 60}},
))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['Total tested'],line=dict(color='#5E5AA1',dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=df['Total tested'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Total tested'].iloc[-2],
'increasing': {'color':'green'},
'decreasing': {'color':'red'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True,'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Total Tested<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="tests").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def tested_positve_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['New Positive pct'].notna()]
temp = df.loc[df['New Positive pct'] > 0]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['New Positive pct'].tail(1).values[0]*100,
number = {'font': {'size': 60}}
))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['New Positive pct'],line=dict(color='#FFF', dash='dot'),visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=temp['New Positive pct'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['New Positive pct'].iloc[-2]*100,
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text': f"Percent Positivity<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="tested positive").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def under_investigation_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Total tested'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Under Investigation'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['Under Investigation'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=temp['Under Investigation'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Under Investigation'].iloc[-2],
'increasing': {'color':'grey'},
'decreasing': {'color':'grey'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Under Investigation<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="under investigation").first()
p.html = div
db.session.add(p)
db.session.commit()
return
## Hospital
def in_hospital_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Hospitalized'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['Hospitalized'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['Hospitalized'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['ICU'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Hospitalized'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"COVID-19 Patients In Hospital<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="in hospital", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def in_icu_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['ICU'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['ICU'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['ICU'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['ICU'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['ICU'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"COVID-19 Patients In ICU<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
else:
df = vis.get_icu_capacity_phu()
df = df.loc[df.PHU == PHU[region]]
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="in icu", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
temp = df.loc[df['confirmed_positive'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['confirmed_positive'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'title' : {"text": f"COVID-19 Patients In ICU<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>"},
'mode' : "number+delta+gauge",
                'delta' : {'reference': temp['confirmed_positive'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':"",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="in icu", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def on_ventilator_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Ventilator'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['Ventilator'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['Ventilator'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['Ventilator'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
            'delta' : {'reference': temp['Ventilator'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True,'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':False},
title={'text':f"COVID-19 Patients On Ventilator<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
else:
df = vis.get_icu_capacity_phu()
df = df.loc[df.PHU == PHU[region]]
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="on ventilator", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
temp = df.loc[df['confirmed_positive_ventilator'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['confirmed_positive_ventilator'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive_ventilator'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive_ventilator'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
                'delta' : {'reference': temp['confirmed_positive_ventilator'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"COVID-19 Patients On Ventilator<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="on ventilator", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
## Cases
def new_cases_plot(region='ontario'):
if region == 'ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['New positives'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d",'reference': df['New positives'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['New positives'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=df['New positives'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"New Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h")
else:
df = vis.get_phus()
df = df.loc[df.region == PHU[region]]
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="new cases", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['value'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d",'reference': df['value'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['value'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=df['value'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"New Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="new cases",phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def active_cases_plot(region='ontario'):
if region == 'ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
df = df.loc[df['Active'].notna()]
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Active'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['Active'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['Active'],line=dict(color='red', width=3), visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['Active'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Active Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h")
div = fig.to_json()
p = Viz.query.filter_by(header="active cases",phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def total_cases_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = | pd.to_datetime(df['Date']) | pandas.to_datetime |
import re
import os
import json
import math
import nltk
import uuid
import pickle
import numpy as np
# import textdistance
import pandas as pd
from collections import Counter
nltk.data.path.append('app/lib/models/wordnet')
from nltk.tokenize import RegexpTokenizer, word_tokenize
from nltk.stem import WordNetLemmatizer, PorterStemmer
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
from nltk.corpus import genesis
from nltk.stem import SnowballStemmer
from nltk.stem.lancaster import LancasterStemmer
from sklearn.metrics import roc_auc_score
from sklearn.feature_extraction.text import CountVectorizer
lemmatizer = WordNetLemmatizer()
stemmer = PorterStemmer()
genesis_ic = wn.ic(genesis, False, 0.0)
# Load the pre-trained models and vectorizers from pickle files
cEXT = pickle.load( open( "app/lib/models/cEXT.p", "rb"))
cNEU = pickle.load( open( "app/lib/models/cNEU.p", "rb"))
cAGR = pickle.load( open( "app/lib/models/cAGR.p", "rb"))
cCON = pickle.load( open( "app/lib/models/cCON.p", "rb"))
cOPN = pickle.load( open( "app/lib/models/cOPN.p", "rb"))
vectorizer_31 = pickle.load( open( "app/lib/models/vectorizer_31.p", "rb"))
vectorizer_30 = pickle.load( open( "app/lib/models/vectorizer_30.p", "rb"))
def preprocess(sentence):
sentence=str(sentence)
sentence = sentence.lower()
sentence=sentence.replace('{html}',"")
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', sentence)
rem_url=re.sub(r'http\S+', '',cleantext)
rem_num = re.sub('[0-9]+', '', rem_url)
tokenizer = RegexpTokenizer(r'\w+')
tokens = tokenizer.tokenize(rem_num)
filtered_words = [w for w in tokens if len(w) > 2 if not w in stopwords.words('english')]
stem_words=[stemmer.stem(w) for w in filtered_words]
lemma_words=[lemmatizer.lemmatize(w) for w in stem_words]
return " ".join(filtered_words)
def apply_nlp(data):
data = data.fillna('')
dirty_text = data.iloc[:,[4,21,22,23,24,25]].copy()
dirty_text = dirty_text.applymap(lambda s:preprocess(s))
data['words']=dirty_text.sum(axis=1).astype(str)
return data
def padding(data):
fellow = data.loc[data['Role:'] == "Fellow"]
coach = data.loc[data['Role:'] == "Coach"]
num_fellow = len(fellow)
num_coach = len(coach)
diff = math.floor(num_fellow/num_coach)
rem = num_fellow%num_coach
c_diff = math.floor(num_coach/num_fellow)
c_rem = num_coach%num_fellow
if(num_fellow > num_coach):
coach = pd.concat([coach]*diff, ignore_index=True)
if(rem>=1):
last = coach.iloc[:rem]
coach = coach.append([last], ignore_index=True)
data = pd.concat([coach, fellow], ignore_index= "true")
data['UID'] = ''
uid = []
for i in range(len(data['UID'])):
x=uuid.uuid4()
uid.append(x)
data['UID']= | pd.DataFrame(uid, columns=['UID']) | pandas.DataFrame |
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
s2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([s, s2])
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
s = Categorical([0, 1.2, 2], ordered=True)
s2 = Categorical([0, 1.2, 2], ordered=True)
result = union_categoricals([s, s2])
expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
tm.assert_categorical_equal(result, expected)
# must exactly match types
s = Categorical([0, 1.2, 2])
s2 = Categorical([2, 3, 4])
msg = 'dtype of categories must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([s, s2])
msg = 'No Categoricals to union'
with tm.assert_raises_regex(ValueError, msg):
union_categoricals([])
def test_union_categoricals_nan(self):
# GH 13759
res = union_categoricals([pd.Categorical([1, 2, np.nan]),
pd.Categorical([3, 2, np.nan])])
exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical(['A', 'B']),
pd.Categorical(['B', 'B', np.nan])])
exp = Categorical(['A', 'B', 'B', 'B', np.nan])
tm.assert_categorical_equal(res, exp)
val1 = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-03-01'),
pd.NaT]
val2 = [pd.NaT, pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-02-01')]
res = union_categoricals([pd.Categorical(val1), pd.Categorical(val2)])
exp = Categorical(val1 + val2,
categories=[pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-03-01'),
pd.Timestamp('2011-02-01')])
tm.assert_categorical_equal(res, exp)
# all NaN
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical(['X'])])
exp = Categorical([np.nan, np.nan, 'X'])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical([np.nan, np.nan])])
exp = Categorical([np.nan, np.nan, np.nan, np.nan])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_empty(self):
# GH 13759
res = union_categoricals([pd.Categorical([]),
pd.Categorical([])])
exp = Categorical([])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([]),
pd.Categorical([1.0])])
exp = Categorical([1.0])
tm.assert_categorical_equal(res, exp)
# to make dtype equal
nanc = pd.Categorical(np.array([np.nan], dtype=np.float64))
res = union_categoricals([nanc,
pd.Categorical([])])
tm.assert_categorical_equal(res, nanc)
def test_union_categorical_same_category(self):
# check fastpath
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([3, 2, 1, np.nan], categories=[1, 2, 3, 4])
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, 4, 3, 2, 1, np.nan],
categories=[1, 2, 3, 4])
tm.assert_categorical_equal(res, exp)
c1 = Categorical(['z', 'z', 'z'], categories=['x', 'y', 'z'])
c2 = Categorical(['x', 'x', 'x'], categories=['x', 'y', 'z'])
res = union_categoricals([c1, c2])
exp = Categorical(['z', 'z', 'z', 'x', 'x', 'x'],
categories=['x', 'y', 'z'])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_ordered(self):
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
res = union_categoricals([c1, c1])
exp = Categorical([1, 2, 3, 1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, np.nan, 3, 2], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_ignore_order(self):
# GH 15219
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
res = union_categoricals([c1, c1], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c1, c1], ignore_order=False)
exp = Categorical([1, 2, 3, 1, 2, 3],
categories=[1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, np.nan, 3, 2])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c2, c1], ignore_order=True,
sort_categories=True)
exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([4, 5, 6], ordered=True)
result = union_categoricals([c1, c2], ignore_order=True)
expected = Categorical([1, 2, 3, 4, 5, 6])
tm.assert_categorical_equal(result, expected)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_sort(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['a', 'b', 'c', 'x', 'y', 'z'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['a', 'b'], categories=['c', 'a', 'b'])
c2 = Categorical(['b', 'c'], categories=['c', 'a', 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
c2 = Categorical(['b', 'c'], categories=['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['x', np.nan])
c2 = Categorical([np.nan, 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', np.nan, np.nan, 'b'],
categories=['b', 'x'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([np.nan])
c2 = Categorical([np.nan])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([np.nan, np.nan], categories=[])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([])
c2 = Categorical([])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['b', 'a'], categories=['b', 'a', 'c'], ordered=True)
c2 = Categorical(['a', 'c'], categories=['b', 'a', 'c'], ordered=True)
with pytest.raises(TypeError):
union_categoricals([c1, c2], sort_categories=True)
def test_union_categoricals_sort_false(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['b', 'a', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
c2 = Categorical(['b', 'c'], categories=['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['x', np.nan])
c2 = Categorical([np.nan, 'b'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['x', np.nan, np.nan, 'b'],
categories=['x', 'b'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([np.nan])
c2 = Categorical([np.nan])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical([np.nan, np.nan], categories=[])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([])
c2 = Categorical([])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical([])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['b', 'a'], categories=['b', 'a', 'c'], ordered=True)
c2 = Categorical(['a', 'c'], categories=['b', 'a', 'c'], ordered=True)
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['b', 'a', 'a', 'c'],
categories=['b', 'a', 'c'], ordered=True)
tm.assert_categorical_equal(result, expected)
def test_union_categorical_unwrap(self):
# GH 14173
c1 = Categorical(['a', 'b'])
c2 = pd.Series(['b', 'c'], dtype='category')
result = union_categoricals([c1, c2])
expected = Categorical(['a', 'b', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c2 = | CategoricalIndex(c2) | pandas.CategoricalIndex |
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), | Series(vals2) | pandas.Series |
import math
import numpy as np
import pandas as pd
import statsmodels.api as sm
def simplify_pops(pops, params):
"""Simplify population"""
# Inserting the 0 on the new list of ages
list_new_age_groups = [0] + params['LIST_NEW_AGE_GROUPS']
pops_ = {}
for gender, pop in pops.items():
# excluding the first column (region ID)
pop_edit = pop.iloc[:, pop.columns != 'code']
        # Convert the column IDs to integers (to make it easy to select the age intervals and sum the population by region)
        list_of_ages = [int(t) for t in pop_edit.columns]
# create the first aggregated age class
temp = pop_edit.iloc[:, [int(t) <= list_new_age_groups[1] for t in list_of_ages]].sum(axis=1)
# add the first column with the region ID
pop_fmt = pd.concat([pop.iloc[:, pop.columns == 'code'], temp], axis=1)
# excluding the processed columns in the previous age aggregation
pop_edit = pop_edit.iloc[:, [int(i) > list_new_age_groups[1] for i in list_of_ages]]
for i in range(1, len(list_new_age_groups) - 1):
            # refresh the list of remaining (not yet aggregated) age columns
            list_of_ages = [int(t) for t in pop_edit.columns]
# selecting the new aggregated age class based on superior limit from list_new_age_groups, SUM by ROW
temp = pop_edit.iloc[:, [int(t) <= list_new_age_groups[i + 1] for t in list_of_ages]].sum(axis=1)
# joining to the previous processed age class
pop_fmt = pd.concat([pop_fmt, temp], axis=1)
# excluding the processed columns in the previous age aggregation
pop_edit = pop_edit.iloc[:, [int(age) > list_new_age_groups[i + 1] for age in list_of_ages]]
# changing the columns names
pop_fmt.columns = ['code'] + list_new_age_groups[1:len(list_new_age_groups)]
pops_[gender] = pop_fmt
return pops_
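# Hedged usage sketch (column names and parameter values are assumptions):
#   pops = {'male': pop_men_df, 'female': pop_women_df}   # columns: 'code', '0', '1', ..., '100'
#   params = {'LIST_NEW_AGE_GROUPS': [14, 29, 59, 100]}
#   simple = simplify_pops(pops, params)
#   # -> each gender frame now has columns ['code', 14, 29, 59, 100], with the
#   #    single-year columns summed per region into those upper-bounded groups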
def format_pops(pops):
"""Rename the columns names to be compatible as the pop simplification modification"""
for pop in pops.values():
list_of_columns = ['code'] + [int(x) for x in list(pop.columns)[1: len(list(pop.columns))]]
pop.columns = list_of_columns
return pops
def pop_age_data(pop, code, age, percent_pop):
"""Select and return the proportion value of population
for a given municipality, gender and age"""
n_pop = pop[pop['code'] == str(code)][age].iloc[0] * percent_pop
rounded = int(round(n_pop))
# for small `percent_pop`, sometimes we get 0
# when it's better to have at least 1 agent
if rounded == 0 and math.ceil(n_pop) == 1:
return 1
return rounded
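# Hedged example (the municipality code and percentages are hypothetical):
#   n_agents = pop_age_data(pops_['male'], code=4106902, age=29, percent_pop=0.01)
# With 2,340 people in that group this yields round(23.4) == 23 agents; a group
# whose scaled value rounds to 0 but ceils to 1 still returns a single agent.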
def load_pops(mun_codes, params, year):
"""Load populations for specified municipal codes."""
pops = {}
for name, gender in [('men', 'male'), ('women', 'female')]:
pop = | pd.read_csv(f'input/pop_{name}_{year}.csv', sep=';') | pandas.read_csv |
from collections import defaultdict, Sized
import numpy as np
import pandas as pd
from pandas._libs.lib import fast_zip
from pandas._libs.parsers import union_categoricals
from pandas.core.dtypes.common import is_numeric_dtype
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph._traversal import connected_components
def get_sequence_length(obj):
if isinstance(obj, str) or not isinstance(obj, Sized):
return -1
elif isinstance(obj, Sized) and all(not isinstance(i, Sized) and pd.isnull(i) for i in obj):
return -2
else:
return len(obj)
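# Quick reference for the sentinel values returned above:
#   get_sequence_length("abc")           -> -1  (strings are not treated as sequences)
#   get_sequence_length(3.14)            -> -1  (non-sized scalar)
#   get_sequence_length([None, np.nan])  -> -2  (sequence containing only nulls)
#   get_sequence_length([1, 2, 3])       ->  3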
def flatten(frame,
index_name=None,
as_index=False,
keep_na=False,
columns=None,
tile_index=False):
"""
    Flatten sequence-valued columns of the input frame so that each element of a
    sequence gets its own row
    Parameters
    ----------
    frame: pandas.DataFrame
    index_name: str
        Name of the extra identifier appended to identify each item uniquely
    as_index: bool or str
        If True, keep the new identifier as an index level; if False, expose it as
        a regular column; a str value is a shorthand for passing index_name
    keep_na: bool or str
        Should non-sequence elements (or sequences full of None) be kept in the
        dataframe as an empty row (the value given is None and the new index value
        is None as well)
    columns: tuple of str
        Flatten only sequences in these columns if not None
    tile_index: bool
        If True, the new identifier restarts at 0 for every original row instead of
        being a single running counter over the flattened frame
    Returns
    -------
    pandas.DataFrame
        The flattened frame (a flattened Series is returned when a Series is passed in)
"""
if isinstance(as_index, bool):
as_column = not as_index
elif isinstance(as_index, str) and index_name is None:
index_name = as_index
as_column = False
else:
raise Exception("as_index must be str or bool, and if str, index_name must be None")
if isinstance(frame, pd.Series):
res = flatten(pd.DataFrame({"X": frame}), index_name, as_column, keep_na, columns, tile_index)
new_frame = res["X"]
new_frame.name = frame.name
return new_frame
if keep_na is True:
keep_na = 'null_index'
elif keep_na is False:
keep_na = 'remove'
assert keep_na in ('null_index', 'as_single_item', 'remove')
assert isinstance(frame, pd.DataFrame), "Can only flatten DataFrame"
if columns is None:
columns = frame.columns
elif not isinstance(columns, (tuple, list)):
columns = [columns]
else:
columns = list(columns)
lengths = frame[columns].applymap(lambda seq: get_sequence_length(seq))
for col in frame.columns:
if col not in columns:
lengths[col] = -1
result_lengths = lengths.max(axis=1)
# Each column element will be expanded on multiple rows,
# even if it is a non-iterable object
    # We must know beforehand how many rows the expansion will take,
    # and we take this length from the maximum sequence size
if keep_na == 'remove':
bool_row_selector = result_lengths > 0
result_lengths = result_lengths[bool_row_selector]
selected_lengths = lengths[bool_row_selector]
frame = frame[bool_row_selector]
nulls = None
else:
nulls = result_lengths < 0
# Non sequence or sequence full of None will give rise to 1 row
result_lengths[nulls] = 1
selected_lengths = lengths
nulls = result_lengths.cumsum()[nulls] - 1
categoricals = {}
frame = frame.copy()
for col in frame.columns:
if hasattr(frame[col], 'cat'):
categoricals[col] = frame[col].cat.categories
frame[col] = frame[col].cat.codes
flattened = {col: [] for col in frame.columns}
for col_name, col in frame.iteritems():
for obj, res_length, length in zip(col.values, result_lengths, selected_lengths[col_name]):
if length >= 0: # we have a normal sequence
flattened[col_name].append(obj if isinstance(obj, pd.Series) else pd.Series(obj))
            # Otherwise it is a non-sequence; create as many rows as needed for it
else:
# -2 means sequence full of None, we put a None instead here
if length == -2:
obj = None
if res_length == 1:
flattened[col_name].append(pd.Series([obj]))
else:
flattened[col_name].append(pd.Series([obj] * res_length))
index = frame.index.repeat(result_lengths) if index_name is not None else None
for col_name in flattened:
flattened[col_name] = pd.concat(flattened[col_name], ignore_index=True)
if index is not None:
flattened[col_name].index = index
flattened = pd.DataFrame(flattened)
# flattened = pd.DataFrame(
# data={col_name: pd.concat(flattened[col_name], ignore_index=True) for col_name in flattened},
# index=frame.index.repeat(result_lengths) if index_name is not None else None)
for name, categories in categoricals.items():
flattened[name] = pd.Categorical.from_codes(flattened[name], categories=categories)
    # Add an index under the name `index_name` to uniquely identify every row
    # of the frame
if index_name is not None:
if index_name in flattened.columns:
flattened.set_index(index_name, append=True, inplace=True)
else:
if tile_index:
new_index_values = np.concatenate([np.arange(s) for s in result_lengths])
flattened[index_name] = new_index_values
else:
new_index_values = np.arange(len(flattened))
flattened[index_name] = new_index_values
flattened[index_name] = flattened[index_name]
flattened.set_index(index_name, append=True, inplace=True)
if keep_na == 'null_index' and nulls is not None:
new_labels = np.arange(len(flattened))
# noinspection PyUnresolvedReferences
new_labels[nulls.values] = -1
flattened.index.set_codes(
new_labels, level=index_name, inplace=True)
if as_column:
flattened.reset_index(index_name, inplace=True)
flattened.reset_index(inplace=True, drop=True)
# flattened.index = flattened.index.remove_unused_levels()
return flattened
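# Minimal usage sketch (the frame below is an assumption, not from the code base):
#   df = pd.DataFrame({"doc_id": [0, 1], "tokens": [["a", "b"], ["c"]]})
#   flat = flatten(df, index_name="token_pos", as_index=False,
#                  columns="tokens", tile_index=True)
#   # -> 3 rows; "doc_id" is repeated per token and "token_pos" restarts at 0 for
#   #    every original row: (0, "a", 0), (0, "b", 1), (1, "c", 0)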
def make_merged_names(left_span_names, right_span_names, left_on, right_on, left_columns, right_columns,
suffixes=('_x', '_y')):
right_columns = set(right_columns) - set(right_on)
left_columns = set(left_columns) - set(left_on)
left_merged = [name + (suffixes[0] if name in right_columns else '') for name in left_span_names]
right_merged = [name + (suffixes[1] if name in left_columns else '') for name in right_span_names]
return left_merged, right_merged
def make_merged_names_map(left_columns, right_columns, left_on, right_on, suffixes=('_x', '_y')):
right_columns = set(right_columns) - set(right_on)
left_columns = set(left_columns) - set(left_on)
left_merged = [name + (suffixes[0] if name in right_columns else '') for name in left_columns]
right_merged = [name + (suffixes[1] if name in left_columns else '') for name in right_columns]
return dict(zip(left_columns, left_merged)), dict(zip(right_columns, right_merged))
def merge_with_spans(
left, right=None,
how='inner',
on=None,
left_on=None,
right_on=None,
suffixes=('_x', '_y'),
span_policy='partial_strict',
placeholder_columns=(),
**kwargs):
"""
Just like pandas.merge, but handles the merging of spans
Any tuple in the "on" column will be considered a (begin, end) span
How to merge those span
Parameters
----------
left: pd.DataFrame
right: pd.DataFrame
how: str
"inner", "outer", "left", "right"
on: list of (str or tuple of str)
left_on: list of (str or tuple of str)
right_on: list of (str or tuple of str)
suffixes: list of str
    span_policy: str
        How to match spans?
        One of "partial", "exact", "partial_strict", or a custom pandas query string
        evaluated against the suffixed begin/end columns
placeholder_columns:
Zero will be put as a value instead of nan for any empty cell in those columns after the merge
kwargs: any
Any kwargs for the pd.merge function
Returns
-------
pd.DataFrame
"""
if right is None:
right = left
left = left.copy()
right = right.copy()
if isinstance(on, str):
on = [on]
if left_on is None:
left_on = on
if right_on is None:
right_on = on
left_columns = left.columns if hasattr(left, 'columns') else [left.name]
right_columns = right.columns if hasattr(right, 'columns') else [right.name]
if left_on is None and right_on is None:
left_on = right_on = list(set(left_columns) & set(right_columns))
left_on_spans = [o for o in left_on if isinstance(o, tuple)]
right_on_spans = [o for o in right_on if isinstance(o, tuple)]
left_on = [c for c in left_on if not isinstance(c, tuple)] # flatten_sequence(left_on)
right_on = [c for c in right_on if not isinstance(c, tuple)] # flatten_sequence(right_on)
left_names, right_names = make_merged_names(
left_columns, right.columns,
left_on=left_on,
right_on=right_on,
left_columns=left_columns, right_columns=right_columns, suffixes=suffixes)
left_names_map = dict(zip(left_columns, left_names))
right_names_map = dict(zip(right_columns, right_names))
categoricals = {}
for left_col, right_col in zip(left_on, right_on):
left_cat = getattr(left[left_col] if hasattr(left, 'columns') else left, 'cat', None)
right_cat = getattr(right[right_col] if hasattr(right, 'columns') else right, 'cat', None)
if left_cat is not None or right_cat is not None:
if (left_cat and right_cat and not (left_cat.categories is right_cat.categories)) or (
(left_cat is None) != (right_cat is None)):
left[left_col] = left[left_col].astype('category')
right[right_col] = right[right_col].astype('category')
cat_merge = union_categoricals([left[left_col], right[right_col]])
if hasattr(left, 'columns'):
left[left_col] = cat_merge[:len(left)]
else:
left = cat_merge[:len(left)]
if hasattr(right, 'columns'):
right[right_col] = cat_merge[len(left):]
else:
right = cat_merge[len(left):]
categoricals[left_names_map[left_col]] = left[left_col].cat.categories
categoricals[right_names_map[right_col]] = right[right_col].cat.categories
if hasattr(left, 'columns'):
left[left_col] = left[left_col].cat.codes
else:
left = left.cat.codes
if hasattr(right, 'columns'):
right[right_col] = right[right_col].cat.codes
else:
right = right.cat.codes
if len(left_on_spans) == 0:
merged = pd.merge(left, right, left_on=left_on, right_on=right_on, suffixes=suffixes, how=how, **kwargs)
else:
if how != 'inner':
left['_left_index'] = np.arange(len(left))
right['_right_index'] = np.arange(len(right))
merged = pd.merge(left, right, left_on=left_on, right_on=right_on, suffixes=suffixes, how='inner', **kwargs)
for i, (left_span_names, right_span_names) in enumerate(zip(left_on_spans, right_on_spans)):
(left_begin, left_end), (right_begin, right_end) = make_merged_names(
left_span_names, right_span_names, left_on=left_on, right_on=right_on,
left_columns=left.columns, right_columns=right_columns, suffixes=suffixes)
merged[f'overlap_size_{i}'] = np.minimum(merged[left_end], merged[right_end]) - np.maximum(merged[left_begin], merged[right_begin])
if span_policy != "none":
results = []
chunk_size = 1000000
for chunk_i in range(0, len(merged), chunk_size):
if span_policy == "partial_strict":
results.append(merged.iloc[chunk_i:chunk_i + chunk_size].query(f'({right_end} > {left_begin} and {left_end} > {right_begin})'))
elif span_policy == "partial":
results.append(merged.iloc[chunk_i:chunk_i + chunk_size].query(f'({right_end} >= {left_begin} and {left_end} >= {right_begin})'))
elif span_policy == "exact":
results.append(merged.iloc[chunk_i:chunk_i + chunk_size].query(f'({left_begin} == {right_begin} and {left_end} == {right_end})'))
else:
results.append(merged.iloc[chunk_i:chunk_i + chunk_size].query(span_policy))
if len(results):
merged = pd.concat(results, sort=False, ignore_index=True)
else:
merged = merged.iloc[:0]
elif span_policy == "none":
pass
else:
raise Exception(f"Unrecognized policy {span_policy}")
if how != 'inner':
if how in ('left', 'outer'):
missing = left[~left['_left_index'].isin(merged['_left_index'])].copy()
missing = missing.rename(left_names_map, axis=1)
for col in right.columns:
if hasattr(right[col], 'cat') and right_names_map[col] not in missing.columns:
missing[right_names_map[col]] = pd.Categorical([None] * len(missing),
categories=right[col].cat.categories)
for col in placeholder_columns:
if col not in left_on and right_names_map.get(col, col) not in left.columns:
missing[right_names_map.get(col, col)] = 0 # -np.arange(len(missing)) - 1
merged = pd.concat([merged, missing.rename(dict(zip(left.columns, left_names)), axis=1)], sort=False,
ignore_index=True)
if how in ('right', 'outer'):
missing = right[~right['_right_index'].isin(merged['_right_index'])].copy()
missing = missing.rename(right_names_map, axis=1)
for col in left.columns:
if hasattr(left[col], 'cat') and left_names_map[col] not in missing.columns:
missing[left_names_map[col]] = pd.Categorical([None] * len(missing),
categories=left[col].cat.categories)
for col in placeholder_columns:
if col not in right_on and left_names_map.get(col, col) not in right.columns:
missing[left_names_map.get(col, col)] = 0 # -np.arange(len(missing)) - 1
merged = pd.concat([merged, missing.rename(dict(zip(right.columns, right_names)), axis=1)], sort=False,
ignore_index=True)
merged = merged.sort_values(['_left_index', '_right_index'])
del merged['_left_index']
del merged['_right_index']
merged = merged.reset_index(drop=True)
for col, categories in categoricals.items():
merged[col] = pd.Categorical.from_codes(merged[col].fillna(-1).astype(int), categories=categories)
return merged
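# Hedged usage sketch (all column names are assumptions): join two annotation
# frames on a shared key plus overlapping (begin, end) character spans.
#   mentions = pd.DataFrame({"doc_id": [0, 0], "begin": [0, 10], "end": [5, 20], "label": ["A", "B"]})
#   sections = pd.DataFrame({"doc_id": [0], "begin": [3], "end": [12], "section": ["intro"]})
#   merged = merge_with_spans(mentions, sections, on=["doc_id", ("begin", "end")],
#                             how="inner", span_policy="partial_strict")
#   # -> one row per overlapping pair, with suffixed begin/end columns and an
#   #    `overlap_size_0` column giving the size of the span intersection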
def make_id_from_merged(*indices_arrays, same_ids=False, apply_on=None):
"""
Compute new ids from connected components by looking at `indices_arrays`
Parameters
----------
indices_arrays: collections.Sequence
1d array of positive integers
same_ids: bool
Do the multiple arrays represent the same ids ? (a 3 in one column should therefore be
connected to a 3 in another, event if they are not on the same row)
apply_on: list of (int, any)
        If given, do not relabel the input arrays; instead, for each (index, vector)
        pair, return the new ids for the values in `vector`, interpreted in the id
        space of the array number `index` in `indices_arrays`
Returns
-------
list of np.ndarray
"""
if not same_ids:
indices_arrays, unique_objects = zip(*(factorize_rows(array, return_categories=True) for array in indices_arrays))
else:
indices_arrays, unique_objects = factorize_rows(indices_arrays, return_categories=True)
unique_objects = [unique_objects] * len(indices_arrays)
offset = max(indices_array.max() for indices_array in indices_arrays) + 1
N = offset * (len(indices_arrays) + 1)
if same_ids:
N = offset
offset = 0
offseted_ids = [s + i * offset for i, s in enumerate(indices_arrays)]
left_ids, right_ids = zip(*[(offseted_ids[i], offseted_ids[j])
for i in range(0, len(indices_arrays) - 1)
for j in range(i + 1, len(indices_arrays))])
left_ids = np.concatenate(left_ids)
right_ids = np.concatenate(right_ids)
_, matches = connected_components(csr_matrix((np.ones(len(left_ids)), (left_ids, right_ids)), shape=(N, N)))
matches = pd.factorize(matches)[0]
if apply_on is None:
return [
matches[s]
for s in offseted_ids
]
else:
return [
matches[factorize_rows(s, categories=unique_objects[i], return_categories=False) + i * offset]
for i, s in apply_on
]
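# Hedged example (column names are assumptions): collapse two mention-id columns
# coming out of a merge into one shared id space, so that ids connected through
# any matching row receive the same new label.
#   new_left, new_right = make_id_from_merged(
#       merged["mention_id_x"], merged["mention_id_y"], same_ids=False)
#   # -> two np.ndarray of new labels drawn from a single shared id space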
def df_to_csr(rows, cols, data=None, n_rows=None, n_cols=None):
"""
    Build a csr_matrix from row/column index Series and an optional data Series
Parameters
----------
data: pd.Series
        Data column (a column full of True values is used if None)
rows: pd.Series
Column containing row indices (can be Categorical and then codes will be used)
cols: pd.Series
Column containing column indices (can be Categorical and then codes will be used)
n_rows: int
n_cols: int
Returns
-------
csr_matrix
"""
if data is None:
data = np.ones(len(rows), dtype=bool)
if hasattr(rows, 'cat'):
n_rows = len(rows.cat.categories)
rows, rows_cat = rows.cat.codes, rows.cat.categories
else:
n_rows = n_rows or (rows.max() + 1 if len(rows) > 0 else 0)
if hasattr(cols, 'cat'):
n_cols = len(cols.cat.categories)
cols, cols_cat = cols.cat.codes, cols.cat.categories
else:
n_cols = n_cols or (cols.max() + 1 if len(cols) > 0 else 0)
return csr_matrix((np.asarray(data), (np.asarray(rows), np.asarray(cols))), shape=(n_rows, n_cols))
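# Example sketch (values are assumptions): build a boolean incidence matrix from
# two categorical index columns; the category codes become the sparse coordinates.
#   rows = pd.Series([0, 0, 1]).astype("category")
#   cols = pd.Series([2, 5, 2]).astype("category")
#   mat = df_to_csr(rows, cols)   # 2 x 2 csr_matrix of True entries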
def df_to_flatarray(rows, data, n_rows=None):
"""
    Scatter a data column into a dense flat array at the given row positions
Parameters
----------
data: pd.Series
        Data column
rows: pd.Series
Column containing row indices (can be Categorical and then codes will be used)
n_rows: int
Returns
-------
np.ndarray
"""
if hasattr(rows, 'cat'):
n_rows = len(rows.cat.categories)
rows, rows_cat = rows.cat.codes, rows.cat.categories
else:
n_rows = n_rows or (rows.max() + 1)
res = np.zeros(n_rows, dtype=data.dtype)
res[rows] = np.asarray(data)
return res
def csr_to_df(csr, row_categories=None, col_categories=None, row_name=None, col_name=None, value_name=None):
"""
Convert a csr_matrix to a dataframe
Parameters
----------
csr: csr_matrix
row_categories: any
Categories to rebuild the real object from their row indices
col_categories: any
Categories to rebuild the real object from their col indices
row_name: str
What name to give to the column built from the row indices
col_name: str
What name to give to the column built from the col indices
value_name:
What name to give to the column built from the values
If None, no value column will be built
Returns
-------
pd.DataFrame
"""
csr = csr.tocoo()
rows, cols, values = csr.row, csr.col, csr.data
if isinstance(row_categories, pd.DataFrame):
rows_df = row_categories.iloc[rows]
elif isinstance(row_categories, pd.Series):
rows_df = pd.DataFrame({row_categories.name: row_categories.iloc[rows]})
elif isinstance(row_categories, pd.CategoricalDtype):
rows_df = pd.DataFrame({row_name: pd.Categorical.from_codes(rows, dtype=row_categories)})
else:
rows_df = pd.DataFrame({row_name: rows})
if isinstance(col_categories, pd.DataFrame):
cols_df = col_categories.iloc[cols]
elif isinstance(col_categories, pd.Series):
cols_df = pd.DataFrame({col_categories.name: col_categories.iloc[cols]})
elif isinstance(col_categories, pd.CategoricalDtype):
cols_df = pd.DataFrame({col_name: pd.Categorical.from_codes(cols, dtype=col_categories)})
else:
cols_df = pd.DataFrame({col_name: cols})
res = (rows_df.reset_index(drop=True), cols_df.reset_index(drop=True))
if value_name is not None:
res = res + (pd.DataFrame({value_name: values}),)
return pd.concat(res, axis=1)
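# Round-trip sketch (names are assumptions): convert a sparse matrix back to a
# long-format frame, one row per stored entry.
#   long_df = csr_to_df(mat, row_name="doc", col_name="term", value_name="count")
#   # -> DataFrame with columns ["doc", "term", "count"]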
def factorize_rows(rows, categories=None, group_nans=True, subset=None, freeze_categories=True, return_categories=True):
if not isinstance(rows, list):
was_list = False
all_rows = [rows]
else:
all_rows = rows
was_list = True
del rows
not_null_subset = (subset if subset is not None else all_rows[0].columns if hasattr(all_rows[0], 'columns') else [all_rows[0].name])
cat_arrays = [[] for _ in not_null_subset]
for rows in (categories, *all_rows) if categories is not None else all_rows:
for (col_name, col), dest in zip(([(0, rows)] if len(rows.shape) == 1 else rows[subset].items() if subset is not None else rows.items()), cat_arrays):
dest.append(np.asarray(col))
cat_arrays = [np.concatenate(arrays) for arrays in cat_arrays]
is_not_nan = None
if not group_nans:
is_not_nan = ~pd.isna(np.stack(cat_arrays, axis=1)).any(1)
cat_arrays = [arrays[is_not_nan] for arrays in cat_arrays]
if len(cat_arrays) > 1:
relative_values, unique_values = pd.factorize(fast_zip(cat_arrays))
else:
relative_values, unique_values = pd.factorize(cat_arrays[0])
if freeze_categories and categories is not None:
relative_values[relative_values >= len(categories)] = -1
if not group_nans:
new_relative_values = np.full(is_not_nan.shape, fill_value=-1, dtype=relative_values.dtype)
new_relative_values[is_not_nan] = relative_values
new_relative_values[~is_not_nan] = len(unique_values) + np.arange((~is_not_nan).sum())
relative_values = new_relative_values
offset = len(categories) if categories is not None else 0
res = []
for rows in all_rows:
new_rows = relative_values[offset:offset + len(rows)]
if isinstance(rows, (pd.DataFrame, pd.Series)):
new_rows = pd.Series(new_rows)
new_rows.index = rows.index
new_rows.name = "+".join(not_null_subset)
res.append(new_rows)
offset += len(rows)
if categories is None and return_categories:
if isinstance(all_rows[0], pd.DataFrame):
if len(cat_arrays) > 1:
categories = pd.DataFrame(dict(zip(not_null_subset, [np.asarray(l) for l in zip(*unique_values)])))
else:
categories = | pd.DataFrame({not_null_subset[0]: unique_values}) | pandas.DataFrame |
"""Pytest fixtures."""
import pytest
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from hcrystalball.wrappers import ProphetWrapper
from hcrystalball.wrappers import ExponentialSmoothingWrapper
from hcrystalball.wrappers import TBATSWrapper
from hcrystalball.wrappers import SarimaxWrapper
from hcrystalball.wrappers import get_sklearn_wrapper
from hcrystalball.ensemble import StackingEnsemble, SimpleEnsemble
import pandas._testing as tm
random_state = np.random.RandomState(123)
tm.N = 100 # 100 rows
tm.K = 1 # 1 column
@pytest.fixture(scope="module")
def wrapper_instance(request):
if request.param == "prophet":
return ProphetWrapper(daily_seasonality=False, weekly_seasonality=False, yearly_seasonality=False)
elif request.param == "smoothing":
return ExponentialSmoothingWrapper(trend="add")
elif request.param == "tbats":
return TBATSWrapper(use_arma_errors=False, use_box_cox=False)
elif request.param == "sklearn":
return get_sklearn_wrapper(LinearRegression, lags=4)
elif request.param == "sarimax":
return SarimaxWrapper(order=(1, 1, 0), seasonal_order=(1, 1, 1, 2))
elif request.param == "stacking_ensemble":
return StackingEnsemble(
base_learners=[
ExponentialSmoothingWrapper(name="smoot_exp1", trend="add"),
ExponentialSmoothingWrapper(name="smoot_exp2"),
],
meta_model=LinearRegression(),
horizons_as_features=False,
weekdays_as_features=False,
)
elif request.param == "simple_ensemble":
return SimpleEnsemble(
base_learners=[
ExponentialSmoothingWrapper(name="smoot_exp1", trend="add"),
ExponentialSmoothingWrapper(name="smoot_exp2"),
]
)
@pytest.fixture(scope="module")
def wrapper_instance_capped(request):
if request.param.split(";")[0] == "prophet":
return ProphetWrapper(
daily_seasonality=False,
weekly_seasonality=False,
yearly_seasonality=False,
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
)
elif request.param.split(";")[0] == "smoothing":
return ExponentialSmoothingWrapper(
trend="add",
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
)
elif request.param.split(";")[0] == "tbats":
return TBATSWrapper(
use_arma_errors=False,
use_box_cox=False,
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
)
elif request.param.split(";")[0] == "sklearn":
return get_sklearn_wrapper(
LinearRegression,
lags=4,
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
)
elif request.param.split(";")[0] == "sarimax":
return SarimaxWrapper(
order=(1, 1, 0),
seasonal_order=(1, 1, 1, 2),
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
)
elif request.param.split(";")[0] == "stacking_ensemble":
return StackingEnsemble(
base_learners=[
ExponentialSmoothingWrapper(
name="smoot_exp1",
trend="add",
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
),
ExponentialSmoothingWrapper(
name="smoot_exp2",
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
),
],
meta_model=LinearRegression(),
horizons_as_features=False,
weekdays_as_features=False,
train_n_splits=1,
train_horizon=10,
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
)
elif request.param.split(";")[0] == "simple_ensemble":
return SimpleEnsemble(
base_learners=[
ExponentialSmoothingWrapper(
name="smoot_exp1",
trend="add",
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
),
ExponentialSmoothingWrapper(
name="smoot_exp2",
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
),
]
)
@pytest.fixture(scope="module")
def X_y_linear_trend(request):
if request.param[-1] not in ("D", "W", "M", "Q", "Y"):
raise ValueError("Invalid `X_y_with_freq` fixture param.")
X = pd.DataFrame(
pd.date_range(start="2019-01-01", periods=100, freq=request.param.split("freq_")[1][0]),
columns=["date"],
)
if "negative" in request.param:
y = pd.Series(np.linspace(start=80, stop=-19, num=100))
else:
y = pd.Series(np.linspace(start=1, stop=100, num=100))
if "more_cols" in request.param:
X["trend"] = y + 10
X["one_hot"] = np.repeat([1, 2, 3, 4], len(X) / 4)
if "country_col" in request.param:
X["country"] = "DE"
if "ndarray" in request.param:
y = y.values
if "NaN_y" in request.param:
y[::9] = np.nan
if "Inf_y" in request.param:
y[::15] = np.inf
y[::16] = -np.inf
return X.set_index("date"), y
@pytest.fixture(scope="module")
def X_y_optional(request):
X = pd.DataFrame(index=pd.date_range(start="2019-01-01", periods=300))
if request.param == "just_X":
y = None
else:
y = np.arange(X.shape[0])
return X, y
@pytest.fixture(scope="module")
def X_with_holidays():
from hcrystalball.feature_extraction import HolidayTransformer
X = pd.DataFrame(index=pd.date_range(start="2019-01-01", periods=300))
holidays = HolidayTransformer(country_code="DE").fit_transform(X)
return X.join(holidays)
@pytest.fixture(
scope="module",
params=[
"series",
"series_with_NaN",
"series_with_Inf",
"series_with_name",
"series_with_index_name",
"dataframe",
"dataframe_with_NaN",
"dataframe_with_Inf",
"dataframe_with_name",
"dataframe_with_index_name",
"dataframe_multicolumn",
"dataframe_integer_index",
"random_string",
"emtpy_series",
"empty_dataframe",
],
)
def ts_data(request):
if "series" in request.param:
if "empty" in request.param:
result = pd.Series()
else:
result = tm.makeTimeSeries(freq="M")
elif "dataframe" in request.param:
if "empty" in request.param:
result = | pd.DataFrame() | pandas.DataFrame |
# vim: set fdm=indent:
'''
___
/ | ____ ___ ____ _____ ____ ____
/ /| | / __ `__ \/ __ `/_ / / __ \/ __ \
/ ___ |/ / / / / / /_/ / / /_/ /_/ / / / /
/_/ |_/_/ /_/ /_/\__,_/ /___/\____/_/ /_/
______ __
/ ____/___ ________ _________ ______/ /_
/ /_ / __ \/ ___/ _ \/ ___/ __ `/ ___/ __/
/ __/ / /_/ / / / __/ /__/ /_/ (__ ) /_
/_/ \____/_/ \___/\___/\__,_/____/\__/
___ __ __
/ | _____________ / /__ _________ _/ /_____ _____
/ /| |/ ___/ ___/ _ \/ / _ \/ ___/ __ `/ __/ __ \/ ___/
/ ___ / /__/ /__/ __/ / __/ / / /_/ / /_/ /_/ / /
/_/ |_\___/\___/\___/_/\___/_/ \__,_/\__/\____/_/
GITHUB:
https://github.com/aws-samples/simple-forecat-solution/
USAGE:
streamlit run -- ./app.py --local-dir LOCAL_DIR [--landing-page-url URL]
OPTIONS:
--local-dir LOCAL_DIR /path/to/ a local directory from which the UI
will look for files.
--landing-page-url URL URL of the AFA landing page
'''
import os
import sys
import io
import glob
import time
import datetime
import base64
import pathlib
import textwrap
import argparse
import re
import json
import logging
import gzip
import gc
import boto3
import numpy as np
import pandas as pd
import awswrangler as wr
import streamlit as st
import plotly.express as pex
import plotly.graph_objects as go
import cloudpickle
import gzip
from collections import OrderedDict, deque, namedtuple
from concurrent import futures
from urllib.parse import urlparse
from toolz.itertoolz import partition_all
from botocore.exceptions import ClientError
from sspipe import p, px
from streamlit import session_state as state
from textwrap import dedent
from stqdm import stqdm
from afa import (load_data, resample, run_pipeline, run_cv_select,
calc_smape, calc_wape,
make_demand_classification, process_forecasts, make_perf_summary,
make_health_summary, GROUP_COLS, EXP_COLS)
from lambdamap import LambdaExecutor, LambdaFunction
from awswrangler.exceptions import NoFilesFound
from streamlit import caching
from streamlit.uploaded_file_manager import UploadedFile
from streamlit.script_runner import RerunException
from st_aggrid import AgGrid, GridOptionsBuilder, JsCode
from joblib import Parallel, delayed
from humanfriendly import format_timespan
ST_STATIC_PATH = pathlib.Path(st.__path__[0]).joinpath("static")
ST_DOWNLOADS_PATH = ST_STATIC_PATH.joinpath("downloads")
LAMBDAMAP_FUNC = "AfaLambdaMapFunction"
LOCAL_DIR = "/home/ec2-user/SageMaker"
if not os.path.exists(ST_DOWNLOADS_PATH):
ST_DOWNLOADS_PATH.mkdir()
FREQ_MAP = OrderedDict(Daily="D", Weekly="W-MON", Monthly="MS")
FREQ_MAP_AFC = OrderedDict(Daily="D", Weekly="W", Monthly="M")
FREQ_MAP_LONG = {
"D": "Daily", "W-MON": "Weekly", "W": "Weekly", "M": "Monthly",
"MS": "Monthly"
}
FREQ_MAP_PD = {
"D": "D",
"W": "W-MON",
"W-SUN": "W-MON",
"W-MON": "W-MON",
"M": "MS",
"MS": "MS"
}
METRIC = "smape"
MAX_LAMBDAS = 1000
def validate(df):
"""Validate a dataset.
"""
err_msgs = []
warn_msgs = []
# check column names
for col in EXP_COLS:
if col not in df:
err_msgs.append(f"missing **{col}** column")
msgs = {
"errors": err_msgs,
"warnings": warn_msgs
}
is_valid_file = len(err_msgs) == 0
return df, msgs, is_valid_file
@st.cache
def load_file(path):
"""
"""
if path.endswith(".csv.gz"):
compression = "gzip"
elif path.endswith(".csv"):
compression = None
else:
raise NotImplementedError
return pd.read_csv(path, dtype={"timestamp": str}, compression=compression)
def _sum(y):
if np.all(pd.isnull(y)):
return np.nan
return np.nansum(y)
def _resample(df2, freq):
df2 = df2.groupby(["channel", "family", "item_id"]) \
.resample(freq) \
.demand \
.sum(min_count=1)
return df2
def process_data(df, freq, chunksize=None):
"""
"""
df["timestamp"] = pd.DatetimeIndex(df["timestamp"])
df.set_index("timestamp", inplace=True)
groups = df.groupby(["channel", "family", "item_id"], sort=False)
if chunksize is None:
chunksize = min(groups.ngroups, 1000)
total = int(np.ceil(groups.ngroups / chunksize))
all_results = []
for chunk in stqdm(partition_all(chunksize, groups), total=total, desc="Progress"):
results = Parallel(n_jobs=-1)(delayed(_resample)(dd, freq) for _, dd in chunk)
all_results.extend(results)
df = pd.concat(all_results) \
.reset_index(["channel", "family", "item_id"])
df.index.name = None
return df
class StreamlitExecutor(LambdaExecutor):
"""Custom LambdaExecutor to display a progress bar in the app.
"""
def map(self, func, payloads, local_mode=False):
"""
"""
if local_mode:
f = func
else:
f = LambdaFunction(func, self._client, self._lambda_arn)
ex = self._executor
wait_for = [ex.submit(f, *p["args"], **p["kwargs"]) for p in payloads]
return wait_for
def display_progress(wait_for, desc=None):
"""
"""
# display progress of the futures
pbar = stqdm(desc=desc, total=len(wait_for))
prev_n_done = 0
n_done = sum(f.done() for f in wait_for)
while n_done != len(wait_for):
diff = n_done - prev_n_done
pbar.update(diff)
prev_n_done = n_done
n_done = sum(f.done() for f in wait_for)
time.sleep(0.25)
diff = n_done - prev_n_done
pbar.update(diff)
return
def run_lambdamap(df, horiz, freq):
"""
"""
payloads = []
freq = FREQ_MAP_PD[freq]
if freq[0] == "W":
cv_periods = None
cv_stride = 2
elif freq[0] == "M":
cv_periods = None
cv_stride = 1
else:
raise NotImplementedError
from toolz.itertoolz import partition
from tqdm.auto import tqdm
#with st.spinner(f":rocket: Launching forecasts via AWS Lambda (λ)..."):
# resample the dataset to the forecast frequency before running
# lambdamap
start = time.time()
df2 = get_df_resampled(df, freq)
print(f"completed in {format_timespan(time.time()-start)}")
groups = df2.groupby(GROUP_COLS, as_index=False, sort=False)
# generate payload
for _, dd in groups:
payloads.append(
{"args": (dd, horiz, freq),
"kwargs": {"metric": "smape",
"cv_periods": cv_periods, "cv_stride": cv_stride}})
# launch jobs in chunks of 1000
executor = StreamlitExecutor(max_workers=min(MAX_LAMBDAS, len(payloads)),
lambda_arn=LAMBDAMAP_FUNC)
wait_for = executor.map(run_cv_select, payloads)
display_progress(wait_for, "🔥 Generating forecasts")
return wait_for
def get_df_resampled(df, freq):
groups = df.groupby(["channel", "family", "item_id"], sort=False)
chunksize = min(1000, groups.ngroups)
total = int(np.ceil(float(groups.ngroups) / chunksize))
all_results = []
for chunk in stqdm(partition_all(chunksize, groups), total=total,
desc="Batch Preparation Progress"):
results = Parallel(n_jobs=-1)(delayed(_resample)(dd, freq) for _, dd in chunk)
all_results.extend(results)
df2 = pd.concat(all_results) \
.reset_index(["channel", "family", "item_id"])
df2 = _resample(df, freq).reset_index(["channel", "family", "item_id"])
df2.index.name = None
state["report"]["data"]["df2"] = df2
return df2
def display_ag_grid(df, auto_height=False, paginate=False,
comma_cols=None, selection_mode=None, use_checkbox=False):
"""
Parameters
----------
df : pd.DataFrame
auto_height : bool
pagination : bool
comma_cols : tuple or list
Numeric columns to apply comma thousands separator.
"""
gb = GridOptionsBuilder.from_dataframe(df)
#gb.configure_selection("single")
gb.configure_auto_height(auto_height)
gb.configure_pagination(enabled=paginate)
if selection_mode is not None:
gb.configure_selection(selection_mode=selection_mode,
use_checkbox=use_checkbox)
comma_renderer = JsCode(textwrap.dedent("""
function(params) {
return params.value
.toString()
.split( /(?=(?:\d{3})+(?:\.|$))/g ).join( "," )
}
"""))
for col in comma_cols:
gb.configure_column(col, cellRenderer=comma_renderer)
response = AgGrid(df, gridOptions=gb.build(), allow_unsafe_jscode=True)
return response
def valid_launch_freqs():
data_freq = state.report["data"]["freq"]
valid_freqs = ["D", "W", "M"]
if data_freq in ("D",):
# don't allow daily forecasting yet
valid_freqs = valid_freqs[1:]
elif data_freq in ("W","W-MON",):
valid_freqs = valid_freqs[1:]
elif data_freq in ("M","MS",):
valid_freqs = valid_freqs[2:]
else:
raise NotImplementedError
return valid_freqs
def create_presigned_url(s3_path, expiration=3600):
"""Generate a presigned URL to share an S3 object
:param bucket_name: string
:param object_name: string
:param expiration: Time in seconds for the presigned URL to remain valid
:return: Presigned URL as string. If error, returns None.
"""
parsed_url = urlparse(s3_path, allow_fragments=False)
bucket_name = parsed_url.netloc
object_name = parsed_url.path.strip("/")
# Generate a presigned URL for the S3 object
s3_client = boto3.client('s3')
try:
response = s3_client.generate_presigned_url('get_object',
Params={'Bucket': bucket_name,
'Key': object_name},
ExpiresIn=expiration)
except ClientError as e:
logging.error(e)
return None
# The response contains the presigned URL
return response
def make_df_backtests(df_results, parallel=False):
"""Expand df_results to a "long" dataframe with the columns:
channel, family, item_id, timestamp, actual, backtest.
"""
def _expand(dd):
ts = np.hstack(dd["ts_cv"].apply(np.hstack))
ys = np.hstack(dd["y_cv"].apply(np.hstack))
yp = np.hstack(dd["yp_cv"].apply(np.hstack))
df = pd.DataFrame({"timestamp": ts, "demand": ys, "backtest": yp})
return df
groups = df_results.query("rank == 1") \
.groupby(["channel", "family", "item_id"],
as_index=True, sort=False)
if parallel:
df_backtests = groups.parallel_apply(_expand)
else:
df_backtests = groups.apply(_expand)
df_backtests["timestamp"] = pd.DatetimeIndex(df_backtests["timestamp"])
return df_backtests.reset_index(["channel", "family", "item_id"])
def save_report(report_fn):
"""
"""
if "report" not in state or "name" not in state["report"]:
return
if "path" not in state["report"]["data"]:
st.warning(textwrap.dedent(f"""
Warning: unable to save report, no input data was loaded.
"""))
return
start = time.time()
with st.spinner(":hourglass_flowing_sand: Saving Report ..."):
now_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
local_path = f'/tmp/{report_fn}'
# save the report locally
cloudpickle.dump(state["report"], gzip.open(local_path, "wb"))
# upload the report to s3
s3_path = \
f'{state["report"]["afa"]["s3_afa_reports_path"]}/{report_fn}'
parsed_url = urlparse(s3_path, allow_fragments=False)
bucket = parsed_url.netloc
key = parsed_url.path.strip("/")
s3_client = boto3.client("s3")
try:
response = s3_client.upload_file(local_path, bucket, key)
signed_url = create_presigned_url(s3_path)
st.info(textwrap.dedent(f"""
The report can be downloaded [here]({signed_url}).
"""))
except ClientError as e:
logging.error(e)
st.text(f"(completed in {format_timespan(time.time() - start)})")
return
def make_df_reports(bucket, prefix):
s3 = boto3.client("s3")
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
from datetime import date, timedelta
#import seaborn as sns
#import matplotlib.pyplot as plt
#import plotly.express as px
#import plotly.graph_objs as go
df = pd.read_csv('/Users/andresmauriciotrianareina/Documents/GitHub/datainfografias/Propuesta_levantamiento_información_V1.csv')
#df = pd.read_csv('https://raw.githubusercontent.com/andrestrianareina/datainfografias/master/Propuesta_levantamiento_información_V1.csv')
# Se extrae los datos de la Choco
choco = df[df['Departamento'] == 'CHOCO']
# Se cambian algunos nombres de de municipios para usar en el mapa
# guajira['Municipio'].replace(
# to_replace=['ALBANIA'],
# value='ALBANIA_g',
# inplace=True
# )
# guajira['Municipio'].replace(
# to_replace=['VILLANUEVA'],
# value='VILLA_NUEVA',
# inplace=True
# )
# guajira['Municipio'].replace(
# to_replace=['DISTRACCION'],
# value='DISTRACCIÓN',
# inplace=True
# )
##########################
###### covid
#Contagios
covid_contagios_choco = choco[choco['Subtema'] == 'Contagio']
covid_contagios_total_choco = "{:,.0f}".format(covid_contagios_choco['No. De personas/porcentaje/eventos'].sum())
# Fallecidos
covid_fallecidos_choco = choco[choco['Subtema'] == 'Fallecido']
covid_fallecidos_total_choco = "{:,.0f}".format(covid_fallecidos_choco['No. De personas/porcentaje/eventos'].sum())
# Recuperados
covid_recuperados_choco = choco[choco['Subtema'] == 'Recuperado']
covid_recuperados_total_choco = "{:,.0f}".format(covid_recuperados_choco['No. De personas/porcentaje/eventos'].sum())
# activos
covid_activos_choco = choco[choco['Subtema'] == 'Activo']
covid_activos_total_choco = "{:,.0f}".format(covid_activos_choco['No. De personas/porcentaje/eventos'].sum())
# Sin clasificar
covid_sinclasificar_choco= choco[choco['Subtema'] == 'Sin clasificar']
covid_sinclasificar_total_choco = "{:,.0f}".format(covid_sinclasificar_choco['No. De personas/porcentaje/eventos'].sum())
# vacunados
covid_vacunados_choco = choco[choco['Subtema'] == 'Dosis aplicadas']
covid_vacunados_total_choco = "{:,.0f}".format(covid_vacunados_choco['No. De personas/porcentaje/eventos'].sum())
# conexión json para el mapa
import json
from urllib.request import urlopen
#with urlopen('https://raw.githubusercontent.com/caticoa3/colombia_mapa/master/co_2018_MGN_MPIO_POLITICO.geojson') as response:
with urlopen('https://raw.githubusercontent.com/andresmtr/mapa_municipios_colombia_geojonson/master/co_2018_MGN_MPIO_POLITICO_AT.geojson') as response:
counties = json.load(response)
# Mapa covid contagios
locs = covid_contagios_choco['Municipio']
# for loc in counties['features']:
# loc['id'] = loc['properties']['MPIO_CNMBR']
# map_choco = go.Figure(go.Choroplethmapbox(
# geojson=counties,
# locations=locs,
# z=covid_contagios_choco['No. De personas/porcentaje/eventos'],
# colorscale='plotly3',
# colorbar_title="Total contagios"))
# map_choco.update_layout(mapbox_style="carto-positron",
# mapbox_zoom=6.7,
# mapbox_center = {"lat": 11.5, "lon": -73})
locs_choco = covid_contagios_choco['Municipio'].tolist()
z_choco=covid_contagios_choco['No. De personas/porcentaje/eventos'].tolist()
##########################
###### migración
# migrantes venezolanos
migracion_choco = choco[choco['Subtema'] == 'Número de Migrantes venezolanos']
migracion_choco_numero = "{:,.0f}".format(migracion_choco['No. De personas/porcentaje/eventos'].sum())
# porcentaje a nivel nacional
migracion_choco = choco[choco['Subtema'] == 'Porcentaje de Migrantes venezolanos ( comparado contra el nivel nacional)']
migracion_porcentaje_choco = "{:,.2%}".format(migracion_choco['No. De personas/porcentaje/eventos'].sum())
# primero la niñez
migracion_pn_choco = choco[choco['Subtema'] == 'Número de niños con con registro de nacimiento bajo el programa - Primero la niñez']
migracion_pn_numero_choco = "{:,.0f}".format(migracion_pn_choco['No. De personas/porcentaje/eventos'].sum())
# porcentaje primero la niñez
migracion_pn_porcentaje_choco = choco[choco['Subtema'] == 'Porcentaje de niños con con registro de nacimiento bajo el programa - Primero la niñez comparado a nivel nacional']
migracion_pn_porcentaje_choco = "{:,.2%}".format(migracion_pn_porcentaje_choco['No. De personas/porcentaje/eventos'].sum())
##########################
###### educación
# Numero de matriculados
educacion_choco = choco[choco['Subtema'] == 'Número de matriculas en el 2021']
educacion_choco_numero = "{:,.0f}".format(educacion_choco['No. De personas/porcentaje/eventos'].sum())
# porcentaje a nivel nacional
educacion_choco = choco[choco['Subtema'] == 'Porcenaje de matriculas en el 2021 ( comparado frente al nivel nacional)']
educacion_porcentaje_choco = "{:,.2%}".format(educacion_choco['No. De personas/porcentaje/eventos'].sum())
# matriculados venezolanos
educacion_migrante_choco = choco[choco['Subtema'] == 'Número de migrantes venezolanos matriculados en el 2021']
educaion_migrante_numero_choco = "{:,.0f}".format(educacion_migrante_choco['No. De personas/porcentaje/eventos'].sum())
# porcentaje matriculados venezolanos
educacion_migrante_choco = choco[choco['Subtema'] == 'Porcentaje de migrante venezolanos matriculados en 2021 (comparado frente a los migrantes matriculados a nivel nacional)']
educacion_migrante_porcentaje_choco = "{:,.2%}".format(educacion_migrante_choco['No. De personas/porcentaje/eventos'].sum())
##########################
###### servicios publicos
# porcentaje internet
internet_choco = choco[choco['Subtema'] == 'Porcentaje de Hogares con conexión a internet en el departamento']
internet_porcentaje_choco = "{:,.2%}".format(internet_choco['No. De personas/porcentaje/eventos'].sum())
# porcentaje acueducto
acueducto_choco = choco[choco['Subtema'] == 'Porcentaje de Cobertura acueducto en el departamento']
acueducto_porcentaje_choco = "{:,.2%}".format(acueducto_choco['No. De personas/porcentaje/eventos'].sum())
# porcentaje acueducto
alcantarillado_choco = choco[choco['Subtema'] == 'Porcentaje de Cobertura alcantarillado en el departamento']
alcantarillado_porcentaje_choco = "{:,.2%}".format(alcantarillado_choco['No. De personas/porcentaje/eventos'].sum())
##########################
###### desastres naturales
# Numero de inundaciones
inundaciones_choco = choco[choco['Subtema'] == 'Número de Inundaciones en 2021']
inundaciones_choco_numero = "{:,.0f}".format(inundaciones_choco['No. De personas/porcentaje/eventos'].sum())
# Numero de vendavales
vendavales_choco = choco[choco['Subtema'] == 'Número de Vendavales en 2021']
vendavales_choco_numero = "{:,.0f}".format(vendavales_choco['No. De personas/porcentaje/eventos'].sum())
# Numero de afectadas
per_afectadas_choco = choco[choco['Subtema'] == 'Número de personas afectadas por desastres naturales en 2021']
per_afectadas_choco_numero = "{:,.0f}".format(per_afectadas_choco['No. De personas/porcentaje/eventos'].sum())
# porcentaje acueducto
per_afectadas_choco = choco[choco['Subtema'] == 'Porcentaje de personas afectadas por desastres naturales en 2021']
per_afectada_porcentaje_choco = "{:,.2%}".format(per_afectadas_choco['No. De personas/porcentaje/eventos'].sum())
##########################
###### salud
# mortalidad materna
mortalidad_choco = choco[choco['Subtema'] == 'Mortalidad materna por cada 100.000 nacidos']
mortalidad_choco_numero = "{:,.1f}".format(mortalidad_choco['No. De personas/porcentaje/eventos'].sum())
# mortalidad perinatal y neonatal
perinatal_choco = choco[choco['Subtema'] == 'Mortalidad perinatal y neonatal por cada 1000 nacidos vivos']
perinatal_choco_numero = "{:,.1f}".format(perinatal_choco['No. De personas/porcentaje/eventos'].sum())
##########################
###### VBG
# numero de casos
vbg_choco = choco[choco['Subtema'] == 'Número de casos VBG en 2020']
vbg_choco_numero = "{:,.0f}".format(vbg_choco['No. De personas/porcentaje/eventos'].sum())
# porcentaje nacional
vbg_choco = choco[choco['Subtema'] == 'Porcentajes de casos VBG en 2020 comparado a nivel nacional']
vbg_porcentaje_choco = "{:,.2%}".format(vbg_choco['No. De personas/porcentaje/eventos'].sum())
# porcentaje niños
vbg_choco = choco[choco['Subtema'] == 'Porcentaje de casos contra niños y niñas']
vbg_porcentaje_ninos_choco = "{:,.2%}".format(vbg_choco['No. De personas/porcentaje/eventos'].sum())
# porcentaje victimario
vbg_choco = choco[choco['Subtema'] == 'Porcentajes de casos donde el victimario es familiar']
vbg_porcentaje_victimario_choco = "{:,.2%}".format(vbg_choco['No. De personas/porcentaje/eventos'].sum())
# porcentaje mujer
vbg_choco = choco[choco['Subtema'] == 'Porcentaje de casos donde la Victima fue mujer']
vbg_porcentaje_mujer_choco = "{:,.2%}".format(vbg_choco['No. De personas/porcentaje/eventos'].sum())
# porcentaje rural
vbg_choco = choco[choco['Subtema'] == 'Porcentaje de casos ocurridos en zona rural']
vbg_porcentaje_rural_choco = "{:,.2%}".format(vbg_choco['No. De personas/porcentaje/eventos'].sum())
##########################
###### Conflicto armado
# numero de eventos
eventos_choco = choco[choco['Subtema'] == 'Número de Eventos de violecia']
eventos_choco_numero = "{:,.0f}".format(eventos_choco['No. De personas/porcentaje/eventos'].sum())
# numero de personas afectadas
per_afectadas_choco = choco[choco['Subtema'] == 'Personas afectadas por el conflicto armado']
per_afectada_choco_numero = "{:,.0f}".format(per_afectadas_choco['No. De personas/porcentaje/eventos'].sum())
# numero de personas confinamiento
per_confinamiento_choco = choco[choco['Subtema'] == 'Número de personas afectados por confinamiento y/o desplazamiento']
per_confinamiento_choco_numero = "{:,.0f}".format(per_confinamiento_choco['No. De personas/porcentaje/eventos'].sum())
# numero de niños reclutados
ninos_reclutados_choco = choco[choco['Subtema'] == 'Número de niños con Reclutamiento o desvinculación']
ninos_reclutados_choco_numero = "{:,.0f}".format(ninos_reclutados_choco['No. De personas/porcentaje/eventos'].sum())
# numero de MAP-MUSE
MAP_MUSE_choco = choco[choco['Subtema'] == 'Número de Eventos MAP-MUSE']
MAP_MUSE_choco_numero = "{:,.0f}".format(MAP_MUSE_choco['No. De personas/porcentaje/eventos'].sum())
# numero de MAP-MUSE
MAP_MUSE_menores_choco = choco[choco['Subtema'] == 'Número de Eventos MAP-MUSE que involucran menores de edad']
MAP_MUSE_menores_choco_numero = "{:,.0f}".format(MAP_MUSE_menores_choco['No. De personas/porcentaje/eventos'].sum())
# Grafica
grafica = pd.read_csv('https://raw.githubusercontent.com/andrestrianareina/datainfografias/master/Situaci_n_V_ctimas_Minas_Antipersonal_en_Colombia.csv')
choco_mins = grafica[grafica['departamento'] == 'CHOCO']
choco_mins['Cantidad'] = 1
Estado_años_choco = choco_mins.groupby(['ano','estado']).sum().reset_index()
choco_heridos = Estado_años_choco[Estado_años_choco['estado']=='Herido']
choco_heridos_final = pd.concat([choco_heridos['ano'], choco_heridos['Cantidad']], axis=1)
choco_heridos_final.columns = ['Año', 'Heridos']
choco_fallecidos = Estado_años_choco[Estado_años_choco['estado']=='Muerto']
choco_fallecidos_final = | pd.concat([choco_fallecidos['ano'], choco_fallecidos['Cantidad']], axis=1) | pandas.concat |
import pandas as pd
data_av_week = pd.read_csv("data_av_week.csv")
supermarkt_urls = pd.read_csv("supermarkt_urls.csv")
s_details = pd.read_csv("notebooksdetailed_supermarkt_python_mined.csv", header= None)
migros_details = pd.read_csv("notebooksdetailed_Migros_python_mined.csv", header= None)
coop_details = pd.read_csv("notebooksdetailed_Coop_python_mined.csv", header= None)
data_av_week = data_av_week.drop(["Unnamed: 0"], axis=1)
data_av_week = data_av_week.rename({'url':'urls'}, axis=1)
head = ["name_supermarkt", "address", "lat", "long", "key_words", "codes", "postal_code", "address2", "url2"]
s_details.columns = head
s_details = s_details.drop(columns=['address2'])
migros_details.columns = head
migros_details = migros_details.drop(columns=['address2'])
coop_details.columns = head
coop_details = coop_details.drop(columns=['address2'])
# merge the supermarkt data
supermarkt_details = pd.merge(s_details, migros_details, how="outer")
supermarkt_details = pd.merge(supermarkt_details, coop_details, how="outer")
data_week_urls = pd.merge(supermarkt_urls, data_av_week, how="outer", on="urls")
data_names_week_all = pd.merge(supermarkt_details, data_week_urls, how="outer", on="codes")
data_names_week_all.to_csv("all_data_per_week.csv", index=False)
# Per day
data_av_day = pd.read_csv("data_av_day.csv")
data_av_day = data_av_day.drop(["Unnamed: 0"], axis=1)
data_av_day = data_av_day.rename({'url':'urls'}, axis=1)
data_days_urls = | pd.merge(supermarkt_urls, data_av_day, how="outer", on="urls") | pandas.merge |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xgboost as xgb
from keras.layers import Dense
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
from sklearn.linear_model import Lasso
from sklearn.metrics import auc
from sklearn.metrics import roc_curve
from sklearn.model_selection import train_test_split # used for splitting training and testing data
from sklearn.preprocessing import StandardScaler
# define ROC curve method
def plot_roc_curve(y_test, y_pred, name):
# Compute micro-average ROC curve and ROC area
fpr, tpr, _ = roc_curve(y_test.values, y_pred)
roc_auc = auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([-0.02, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve - ' + name)
plt.legend(loc="lower right")
plt.show()
def preprocess(index):
x_train = pd.read_csv('hw07_target' + str(index) + '_training_data.csv')
x_test = pd.read_csv('hw07_target' + str(index) + '_test_data.csv')
y_train = pd.read_csv('hw07_target' + str(index) + '_training_label.csv')
# missing values for columns which has more null elements than non-nulls
miss = x_train.isnull().sum() / len(x_train)
miss = miss[miss > 0.5]
miss.sort_values(inplace=True)
# visualising missing values
# plot the missing value count
# miss = miss.to_frame()
# miss.columns = ['count']
# miss['Name'] = miss.index
# sns.set(style="whitegrid", color_codes=True)
# sns.barplot(x='Name', y='count', data=miss)
# plt.xticks(rotation=90)
# plt.show()
# drop these columns
x_train = x_train.drop(columns=miss.index)
x_test = x_test.drop(columns=miss.index)
# drop ID column
# del x_train['ID']
numeric_data = x_train.select_dtypes(include=[np.number])
non_numeric_data = x_train.select_dtypes(exclude=[np.number])
numeric_data_test = x_test.select_dtypes(include=[np.number])
non_numeric_data_test = x_test.select_dtypes(exclude=[np.number])
print("There are {} numeric and {} categorical columns in train data".format(numeric_data.shape[1],
non_numeric_data.shape[1]))
# Fill na values with column mean values
numeric_data = numeric_data.fillna(numeric_data.mean())
numeric_data_test = numeric_data_test.fillna(numeric_data_test.mean())
# Scale numeric values
ss = StandardScaler()
numeric_data = pd.DataFrame(ss.fit_transform(numeric_data))
numeric_data_test = pd.DataFrame(ss.fit_transform(numeric_data_test))
# Encode non-numeric columns
non_numeric_data = pd.get_dummies(non_numeric_data, columns=non_numeric_data.columns, drop_first=True)
non_numeric_data_test = pd.get_dummies(non_numeric_data_test, columns=non_numeric_data_test.columns,
drop_first=True)
# Dataset to be split
x_training = pd.concat([non_numeric_data, numeric_data], axis=1)
x_real_test = pd.concat([non_numeric_data_test, numeric_data_test], axis=1)
# X_train, X_test, Y_train, Y_test for training and validation, Real test set
X_train, X_test, Y_train, Y_test = train_test_split(x_training, y_train['TARGET'], test_size=0.2, random_state=2020)
return X_train, X_test, Y_train, Y_test, x_real_test
def bestFeatures(x_train, y_train, x_test):
# best k=50 features of the dataset according to F values of features
bestfeatures = SelectKBest(score_func=f_regression, k=50)
fit = bestfeatures.fit(x_train, y_train)
dfscores = | pd.DataFrame(fit.scores_) | pandas.DataFrame |
import pytest
from collections import OrderedDict
import pandas as pd
import dice_ml
from dice_ml.utils import helpers
@pytest.fixture
def public_data_object():
"""
Returns a public data object for the adult income dataset
"""
dataset = helpers.load_adult_income_dataset()
return dice_ml.Data(dataframe=dataset, continuous_features=['age', 'hours_per_week'], outcome_name='income')
@pytest.fixture
def private_data_object():
"""
Returns a private data object containing meta information about the adult income dataset
"""
features_dict = OrderedDict([('age', [17, 90]),
('workclass', ['Government', 'Other/Unknown', 'Private', 'Self-Employed']),
('education', ['Assoc', 'Bachelors', 'Doctorate', 'HS-grad', 'Masters', 'Prof-school', 'School', 'Some-college']),
('marital_status', ['Divorced', 'Married', 'Separated', 'Single', 'Widowed']),
('occupation', ['Blue-Collar', 'Other/Unknown', 'Professional', 'Sales', 'Service', 'White-Collar']),
('race', ['Other', 'White']),
('gender', ['Female', 'Male']),
('hours_per_week', [1, 99])]) # providing an OrderedDict to make it work for Python<=3.6
return dice_ml.Data(features=features_dict, outcome_name='income')
@pytest.fixture
def sample_adultincome_query():
"""
Returns a sample query instance for adult income dataset
"""
return {'age':22, 'workclass':'Private', 'education':'HS-grad', 'marital_status':'Single', 'occupation':'Service',
'race': 'White', 'gender':'Female', 'hours_per_week': 45}
@pytest.fixture
def sample_custom_query_1():
"""
Returns a sample query instance for the custom dataset
"""
return pd.DataFrame({'Categorical': ['a'], 'Numerical': [25]})
@pytest.fixture
def sample_custom_query_2():
"""
Returns a sample query instance for the custom dataset
"""
return | pd.DataFrame({'Categorical': ['b'], 'Numerical': [25]}) | pandas.DataFrame |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
"""
Test related to MultiIndex
"""
import re
import cupy as cp
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.core.column import as_column
from cudf.core.index import as_index
from cudf.tests.utils import assert_eq, assert_neq
def test_multiindex_levels_codes_validation():
levels = [["a", "b"], ["c", "d"]]
# Codes not a sequence of sequences
with pytest.raises(TypeError):
pd.MultiIndex(levels, [0, 1])
with pytest.raises(TypeError):
cudf.MultiIndex(levels, [0, 1])
# Codes don't match levels
with pytest.raises(ValueError):
pd.MultiIndex(levels, [[0], [1], [1]])
with pytest.raises(ValueError):
cudf.MultiIndex(levels, [[0], [1], [1]])
# Largest code greater than number of levels
with pytest.raises(ValueError):
pd.MultiIndex(levels, [[0, 1], [0, 2]])
with pytest.raises(ValueError):
cudf.MultiIndex(levels, [[0, 1], [0, 2]])
# Unequal code lengths
with pytest.raises(ValueError):
pd.MultiIndex(levels, [[0, 1], [0]])
with pytest.raises(ValueError):
cudf.MultiIndex(levels, [[0, 1], [0]])
# Didn't pass levels and codes
with pytest.raises(TypeError):
pd.MultiIndex()
with pytest.raises(TypeError):
cudf.MultiIndex()
# Didn't pass non zero levels and codes
with pytest.raises(ValueError):
pd.MultiIndex([], [])
with pytest.raises(ValueError):
cudf.MultiIndex([], [])
def test_multiindex_construction():
levels = [["a", "b"], ["c", "d"]]
codes = [[0, 1], [1, 0]]
pmi = pd.MultiIndex(levels, codes)
mi = cudf.MultiIndex(levels, codes)
assert_eq(pmi, mi)
pmi = pd.MultiIndex(levels, codes)
mi = cudf.MultiIndex(levels=levels, codes=codes)
assert_eq(pmi, mi)
def test_multiindex_types():
codes = [[0, 1], [1, 0]]
levels = [[0, 1], [2, 3]]
pmi = | pd.MultiIndex(levels, codes) | pandas.MultiIndex |
import math
import os
from collections import OrderedDict
import pandas as pd
import plotly
import plotly.graph_objs as go
from plotly.subplots import make_subplots
from scipy.optimize import minimize
from stravalib import unithelper
from app_tools import *
def get_training_data(client, activities, get_cals=True, before=datetime.date.today()):
race_day = before.replace(hour=0, minute=0, second=0, microsecond=0)
if get_cals:
all_days_before = [(a.start_date_local.date() - race_day.date()).days for a in activities]
all_cals = [client.get_activity(id).calories for id in [a.id for a in activities]]
cum_cals = np.cumsum(all_cals)
else:
all_days_before, cum_cals = None, None
runs = [act for act in activities if act.type == 'Run']
runs = [r for r in runs if
unithelper.miles(r.distance).num > 2 and unithelper.miles_per_hour(r.average_speed).num > 4]
dates = [r.start_date_local.date() for r in runs]
days_before = [(r.start_date_local.date() - race_day.date()).days for r in runs]
dist = [unithelper.miles(r.distance).num for r in runs]
cum = np.cumsum(dist)
pace = [60. / unithelper.miles_per_hour(r.average_speed).num for r in runs] # min/mile
speed = [60 / p for p in pace] # mph
# igood = [i for i in np.arange(len(dist)) if (speed[i] > 4 or dist[i] > 5)]
# days_before, dist, cum, pace, speed = days_before[igood], dist[igood], cum[igood], pace[igood], speed[igood]
return days_before, dist, cum, pace, speed, all_days_before, cum_cals, dates
def create_calbytype_fig(client, activities, before, img_path):
race_day = before.replace(hour=0, minute=0, second=0, microsecond=0)
days_before = np.array([(a.start_date_local.date() - race_day.date()).days for a in activities])
cals = np.array([client.get_activity(id).calories for id in [a.id for a in activities]])
type = np.array([a.type for a in activities])
calbytype_fig = make_subplots(rows=2, cols=1, vertical_spacing=.05, shared_xaxes=True)
current_day_of_week = race_day.weekday() # 0=Monday=Start of training week
cols = plotly.colors.DEFAULT_PLOTLY_COLORS
for i, typ in enumerate(np.unique(type)):
typecals = np.zeros_like(cals)
typecals[type == typ] = cals[type == typ]
calbytype_fig.add_trace(
go.Scatter(x=days_before, y=np.cumsum(typecals), mode='lines', line=dict(color=cols[i]),
showlegend=False, ), row=1, col=1)
calbytype_fig.add_trace(
go.Scatter(x=days_before[type == typ], y=np.cumsum(typecals)[type == typ], mode='markers',
marker=dict(color=cols[i]), showlegend=False), row=1, col=1)
calbytype_fig.add_trace(
go.Histogram(x=days_before[type == typ], name=typ,
xbins=dict(start=-7 * 18 - current_day_of_week, end=7 - current_day_of_week, size=7),
marker_color=cols[i]), row=2, col=1)
calbytype_fig.layout.update(height=750, barmode='stack', # 0.5 in tickvals to place grid between bins
xaxis1=dict(tickmode='array', tickvals=-7 * np.arange(19) - current_day_of_week - .5),
xaxis2=dict(title='Weeks Ago', tickmode='array', tickvals=-7 * np.arange(19),
ticktext=[str(int(i)) for i in abs(-7 * np.arange(19) / 7)]),
yaxis1=dict(title='Calories\n(cumulative)'),
yaxis2=dict(title='Activity Type Count'))
calbytype_fig.update_yaxes(automargin=True)
calbytype_fig.write_html(f'{img_path}calbytype.html')
print('saved calbytype image')
return [calbytype_fig]
def get_past_races(racekeys=None):
races = OrderedDict({})
# trail:
races.update({'Superior 50k 2018': datetime.datetime(2018, 5, 19),
'Driftless 50k 2018': datetime.datetime(2018, 9, 29),
'Superior 50k 2019': datetime.datetime(2019, 5, 18),
'Batona (virtual) 33M 2020': datetime.datetime(2020, 10, 10),
'Dirty German (virtual) 50k 2020': datetime.datetime(2020, 10, 31),
'Stone Mill 50M 2020': datetime.datetime(2020, 11, 14)})
# road:
races.update({'TC Marathon 2014': datetime.datetime(2014, 10, 5),
'Madison Marathon 2014': datetime.datetime(2014, 11, 9),
'TC Marathon 2015': datetime.datetime(2015, 10, 4)})
# remove races not in racekeys
if racekeys is not None:
[races.pop(k) for k in list(races.keys()) if k not in racekeys]
# order chronologically
races = {k: v for k, v in sorted(races.items(), key=lambda item: item[1])}
return races
def manual_tracking_plots(client):
analysis_startdate = datetime.datetime(2020, 9, 12, 0, 0, 0, 0) # hard coded start date
if os.path.isdir('C:/Users/Owner/Dropbox/'):
fn = 'C:/Users/Owner/Dropbox/training_data.xlsx'
elif os.path.isdir('C:/Users/wcapecch/Dropbox/'):
fn = 'C:/Users/wcapecch/Dropbox/training_data.xlsx'
else:
print('cannot locate training data file')
sho = pd.read_excel(fn, sheet_name='shoes', engine='openpyxl')
shoe_options = sho['shoe_options'].values
df = | pd.read_excel(fn, sheet_name='data', engine='openpyxl') | pandas.read_excel |
__author__ = 'saeedamen'
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
Seasonality
Does simple seasonality calculations on data.
"""
import numpy
import pandas
from findatapy.timeseries import Calculations, Filter
from findatapy.util.commonman import CommonMan
from findatapy.util.configmanager import ConfigManager
from findatapy.util.loggermanager import LoggerManager
class Seasonality(object):
def __init__(self):
self.config = ConfigManager()
self.logger = LoggerManager().getLogger(__name__)
return
def time_of_day_seasonality(self, data_frame, years = False):
calculations = Calculations()
if years is False:
return calculations.average_by_hour_min_of_day_pretty_output(data_frame)
set_year = set(data_frame.index.year)
year = sorted(list(set_year))
intraday_seasonality = None
commonman = CommonMan()
for i in year:
temp_seasonality = calculations.average_by_hour_min_of_day_pretty_output(data_frame[data_frame.index.year == i])
temp_seasonality.columns = commonman.postfix_list(temp_seasonality.columns.values, " " + str(i))
if intraday_seasonality is None:
intraday_seasonality = temp_seasonality
else:
intraday_seasonality = intraday_seasonality.join(temp_seasonality)
return intraday_seasonality
def bus_day_of_month_seasonality_from_prices(self, data_frame,
month_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], cum = True,
cal = "FX", partition_by_month = True, add_average = False):
return self.bus_day_of_month_seasonality(self, data_frame,
month_list = month_list, cum = cum,
cal = cal, partition_by_month = partition_by_month,
add_average = add_average, price_index = True)
def bus_day_of_month_seasonality(self, data_frame,
month_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], cum = True,
cal = "FX", partition_by_month = True, add_average = False, price_index = False):
calculations = Calculations()
filter = Filter()
if price_index:
data_frame = data_frame.resample('B') # resample into business days
data_frame = calculations.calculate_returns(data_frame)
data_frame.index = | pandas.to_datetime(data_frame.index) | pandas.to_datetime |
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If its an offset then this will be the time period of each window. Each
window will be a variable sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
Provide validation for our window type, return the window
we have already been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
"""
Provide the groupby facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
groupby = kwargs.pop("groupby", None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object.
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
name : str, optional
name of this function
window : int/array, default to _get_window()
center : bool, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = ensure_float64(arg)
return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(
np.concatenate((x, additional_nans)),
window,
min_periods=self.min_periods,
closed=self.closed,
)
else:
def calc(x):
return func(
x, window, min_periods=self.min_periods, closed=self.closed
)
with np.errstate(all="ignore"):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
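# Illustrative sketch of the centering logic above (an assumed example, not
# normative): for window=3 the offset is (3 - 1) // 2 == 1, so one trailing
# NaN is appended before the kernel runs and _center_window shifts the result
# back so each value is aligned with the middle of its window:
#   >>> s = pd.Series([1.0, 2.0, 3.0, 4.0])
#   >>> s.rolling(3, center=True).mean()
#   0    NaN
#   1    2.0
#   2    3.0
#   3    NaN
#   dtype: float64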
class _Rolling_and_Expanding(_Rolling):
_shared_docs["count"] = dedent(
r"""
The %(name)s count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
)
def count(self):
blocks, obj = self._create_blocks()
# Validate the index
self._get_index()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
window=window,
min_periods=0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
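# Minimal equivalence sketch for count() above (illustrative only): non-null
# flags are summed over the window with min_periods=0, so these two calls
# should agree:
#   >>> s = pd.Series([2, 3, np.nan, 10])
#   >>> s.rolling(2).count()
#   >>> s.notna().astype(int).rolling(2, min_periods=0).sum()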
_shared_docs["apply"] = dedent(
r"""
The %(name)s function's apply function.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``.
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` or ``None`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
The `raw` parameter is required and will show a FutureWarning if
not passed. In the future `raw` will default to False.
.. versionadded:: 0.23.0
*args, **kwargs
Arguments and keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
)
def apply(self, func, raw=None, args=(), kwargs={}):
from pandas import Series
kwargs.pop("_level", None)
window = self._get_window()
offset = _offset(window, self.center)
index_as_array = self._get_index()
# TODO: default is for backward compat
# change to False in the future
if raw is None:
warnings.warn(
"Currently, 'apply' passes the values as ndarrays to the "
"applied function. In the future, this will change to passing "
"it as Series objects. You need to specify 'raw=True' to keep "
"the current behaviour, and you can pass 'raw=False' to "
"silence this warning",
FutureWarning,
stacklevel=3,
)
raw = True
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
if not raw:
arg = Series(arg, index=self.obj.index)
return libwindow.roll_generic(
arg,
window,
minp,
index_as_array,
closed,
offset,
func,
raw,
args,
kwargs,
)
return self._apply(f, func, args=args, kwargs=kwargs, center=False, raw=raw)
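# Hedged usage sketch for apply() above: with raw=True each window reaches
# func as a plain ndarray (fast for NumPy reductions); with raw=False it is
# wrapped in a Series first:
#   >>> s = pd.Series([1.0, 2.0, 3.0, 4.0])
#   >>> s.rolling(2).apply(np.max, raw=True)
#   >>> s.rolling(2).apply(lambda x: x.iloc[-1], raw=False)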
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply("roll_sum", "sum", **kwargs)
_shared_docs["max"] = dedent(
"""
Calculate the %(name)s maximum.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
"""
)
def max(self, *args, **kwargs):
nv.validate_window_func("max", args, kwargs)
return self._apply("roll_max", "max", **kwargs)
_shared_docs["min"] = dedent(
"""
Calculate the %(name)s minimum.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed minimum.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with a Series.
DataFrame.%(name)s : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
Performing a rolling minimum with a window size of 3.
>>> s = pd.Series([4, 3, 5, 2, 6])
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
)
def min(self, *args, **kwargs):
nv.validate_window_func("min", args, kwargs)
return self._apply("roll_min", "min", **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply("roll_mean", "mean", **kwargs)
_shared_docs["median"] = dedent(
"""
Calculate the %(name)s median.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed median.
Returns
-------
Series or DataFrame
Returned type is the same as the original object.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.median : Equivalent method for Series.
DataFrame.median : Equivalent method for DataFrame.
Examples
--------
Compute the rolling median of a series with a window size of 3.
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.rolling(3).median()
0 NaN
1 NaN
2 1.0
3 2.0
4 3.0
dtype: float64
"""
)
def median(self, **kwargs):
return self._apply("roll_median_c", "median", **kwargs)
_shared_docs["std"] = dedent(
"""
Calculate %(name)s standard deviation.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in Series.std is different than the default
`ddof` of 0 in numpy.std.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
"""
)
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(
libwindow.roll_var(arg, window, minp, index_as_array, self.closed, ddof)
)
return self._apply(
f, "std", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
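# Relationship used above (a sketch, not normative): std is computed as the
# square root of the rolling variance, so for a Series s and window w the
# difference below should be zero up to floating-point error:
#   >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
#   >>> (s.rolling(3).std() - s.rolling(3).var() ** 0.5).abs().max()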
_shared_docs["var"] = dedent(
"""
Calculate unbiased %(name)s variance.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in :meth:`Series.var` is different than the
default `ddof` of 0 in :func:`numpy.var`.
A minimum of 1 period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
"""
)
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
return self._apply(
"roll_var", "var", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs[
"skew"
] = """
Unbiased %(name)s skewness.
Parameters
----------
**kwargs
Keyword arguments to be passed into func.
"""
def skew(self, **kwargs):
return self._apply(
"roll_skew", "skew", check_minp=_require_min_periods(3), **kwargs
)
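# The shared skew docstring above carries no example; a minimal illustrative
# call (values invented) would be:
#   >>> s = pd.Series([1.0, 2.0, 4.0, 8.0, 16.0])
#   >>> s.rolling(4).skew()
# which yields NaN for the first three labels and the unbiased sample skewness
# of each complete 4-value window afterwards (at least 3 periods are required,
# see _require_min_periods(3)).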
_shared_docs["kurt"] = dedent(
"""
Calculate unbiased %(name)s kurtosis.
This function uses Fisher's definition of kurtosis without bias.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed kurtosis.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.kurt : Equivalent method for Series.
DataFrame.kurt : Equivalent method for DataFrame.
scipy.stats.skew : Third moment of a probability density.
scipy.stats.kurtosis : Reference SciPy method.
Notes
-----
A minimum of 4 periods is required for the %(name)s calculation.
"""
)
def kurt(self, **kwargs):
return self._apply(
"roll_kurt", "kurt", check_minp=_require_min_periods(4), **kwargs
)
_shared_docs["quantile"] = dedent(
"""
Calculate the %(name)s quantile.
Parameters
----------
quantile : float
Quantile to compute. 0 <= quantile <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.23.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
**kwargs:
For compatibility with other %(name)s methods. Has no effect on
the result.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.quantile : Computes value at the given quantile over all data
in Series.
DataFrame.quantile : Computes values at the given quantile over
requested axis in DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).quantile(.4, interpolation='lower')
0 NaN
1 1.0
2 2.0
3 3.0
dtype: float64
>>> s.rolling(2).quantile(.4, interpolation='midpoint')
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
"""
)
def quantile(self, quantile, interpolation="linear", **kwargs):
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return libwindow.roll_max(
arg, window, minp, index_as_array, self.closed
)
elif quantile == 0.0:
return libwindow.roll_min(
arg, window, minp, index_as_array, self.closed
)
else:
return libwindow.roll_quantile(
arg,
window,
minp,
index_as_array,
self.closed,
quantile,
interpolation,
)
return self._apply(f, "quantile", quantile=quantile, **kwargs)
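# As the branches above show, the 0.0 and 1.0 quantiles reuse the rolling
# min/max kernels, so (illustrative sketch):
#   >>> s = pd.Series([1, 2, 3, 4])
#   >>> s.rolling(2).quantile(1.0).equals(s.rolling(2).max())  # True
#   >>> s.rolling(2).quantile(0.0).equals(s.rolling(2).min())  # True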
_shared_docs[
"cov"
] = """
Calculate the %(name)s sample covariance.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
**kwargs
Keyword arguments to be passed into func.
"""
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype("float64")
Y = Y.astype("float64")
mean = lambda x: x.rolling(
window, self.min_periods, center=self.center
).mean(**kwargs)
count = (X + Y).rolling(window=window, center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
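# Hedged usage sketch for cov() above (count / (count - ddof) is the bias
# adjustment): with pairwise=True on a DataFrame the result is a MultiIndexed
# frame of all column pairs, e.g.
#   >>> df = pd.DataFrame({"X": [51., 49., 47., 46., 50.],
#   ...                    "Y": [35., 30., 32., 31., 36.]})
#   >>> df.rolling(4).cov(pairwise=True)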
_shared_docs["corr"] = dedent(
"""
Calculate %(name)s correlation.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self.
pairwise : bool, default None
Calculate pairwise combinations of columns within a
DataFrame. If `other` is not specified, defaults to `True`,
otherwise defaults to `False`.
Not relevant for :class:`~pandas.Series`.
**kwargs
Unused.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the
%(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.corr : Equivalent method for Series.
DataFrame.corr : Equivalent method for DataFrame.
%(name)s.cov : Similar method to calculate covariance.
numpy.corrcoef : NumPy Pearson's correlation calculation.
Notes
-----
This function uses Pearson's definition of correlation
(https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).
When `other` is not specified, the output will be self correlation (e.g.
all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
set to `True`.
Function will return ``NaN`` for correlations of equal valued sequences;
this is the result of a 0/0 division error.
When `pairwise` is set to `False`, only matching columns between `self` and
`other` will be used.
When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
with the original index on the first level, and the `other` DataFrame
columns on the second level.
In the case of missing elements, only complete pairwise observations
will be used.
Examples
--------
The below example shows a rolling calculation with a window size of
four matching the equivalent function call using :meth:`numpy.corrcoef`.
>>> v1 = [3, 3, 3, 5, 8]
>>> v2 = [3, 4, 4, 4, 8]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> # numpy returns a 2X2 array, the correlation coefficient
>>> # is the number at entry [0][1]
>>> print(fmt.format(np.corrcoef(v1[:-1], v2[:-1])[0][1]))
0.333333
>>> print(fmt.format(np.corrcoef(v1[1:], v2[1:])[0][1]))
0.916949
>>> s1 = pd.Series(v1)
>>> s2 = pd.Series(v2)
>>> s1.rolling(4).corr(s2)
0 NaN
1 NaN
2 NaN
3 0.333333
4 0.916949
dtype: float64
The below example shows a similar rolling calculation on a
DataFrame using the pairwise option.
>>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\
[46., 31.], [50., 36.]])
>>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))
[[1. 0.6263001]
[0.6263001 1. ]]
>>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))
[[1. 0.5553681]
[0.5553681 1. ]]
>>> df = pd.DataFrame(matrix, columns=['X','Y'])
>>> df
X Y
0 51.0 35.0
1 49.0 30.0
2 47.0 32.0
3 46.0 31.0
4 50.0 36.0
>>> df.rolling(4).corr(pairwise=True)
X Y
0 X NaN NaN
Y NaN NaN
1 X NaN NaN
Y NaN NaN
2 X NaN NaN
Y NaN NaN
3 X 1.000000 0.626300
Y 0.626300 1.000000
4 X 1.000000 0.555368
Y 0.555368 1.000000
"""
)
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
b = b.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
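# Sketch of the definition implemented in _get_corr above: rolling correlation
# is the rolling covariance divided by the product of the two rolling standard
# deviations (illustrative only):
#   >>> s1, s2 = pd.Series([3, 3, 3, 5, 8]), pd.Series([3, 4, 4, 4, 8])
#   >>> r = s1.rolling(4)
#   >>> (r.corr(s2) - r.cov(s2) / (r.std() * s2.rolling(4).std())).abs().max()
# should be ~0.0 up to floating-point error.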
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(
self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)
)
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError(
"invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on)
)
def validate(self):
super().validate()
# we allow rolling on a datetimelike index
if (self.obj.empty or self.is_datetimelike) and isinstance(
self.window, (str, ABCDateOffset, timedelta)
):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError(
"center is not implemented "
"for datetimelike and offset "
"based windows"
)
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = "freq"
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError(
"closed only implemented for datetimelike " "and offset based windows"
)
def _validate_monotonic(self):
"""
Validate on is_monotonic.
"""
if not self._on.is_monotonic:
formatted = self.on or "index"
raise ValueError("{0} must be " "monotonic".format(formatted))
def _validate_freq(self):
"""
Validate & return window frequency.
"""
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError(
"passed window {0} is not "
"compatible with a datetimelike "
"index".format(self.window)
)
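# Hedged example of the offset-window path validated above: with a monotonic
# datetime index, a string window such as '2D' is converted via to_offset and
# then to nanoseconds, e.g.
#   >>> idx = pd.date_range("2020-01-01", periods=5, freq="D")
#   >>> pd.Series([0., 1., 2., 3., 4.], index=idx).rolling("2D").sum()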
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.rolling
DataFrame.rolling
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="rolling")
@Appender(_shared_docs["count"])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply("roll_count", "count")
return super().count()
@Substitution(name="rolling")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_rolling_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_rolling_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_rolling_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_rolling_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
return super().skew(**kwargs)
_agg_doc = dedent(
"""
Examples
--------
The example below will show a rolling calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> import scipy.stats
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False)))
3.999946
>>> s = pd.Series(arr)
>>> s.rolling(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 3.999946
dtype: float64
"""
)
@Appender(_agg_doc)
@Substitution(name="rolling")
@Appender(_shared_docs["kurt"])
def kurt(self, **kwargs):
return super().kurt(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["quantile"])
def quantile(self, quantile, interpolation="linear", **kwargs):
return super().quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["cov"])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["corr"])
def corr(self, other=None, pairwise=None, **kwargs):
return super().corr(other=other, pairwise=pairwise, **kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provide a rolling groupby implementation.
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Rolling
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried thru to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super()._gotitem(key, ndim, subset=subset)
def _validate_monotonic(self):
"""
Validate that on is monotonic;
we don't care for groupby.rolling
because we have already validated at a higher
level.
"""
pass
class Expanding(_Rolling_and_Expanding):
"""
Provide expanding transformations.
.. versionadded:: 0.18.0
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : bool, default False
Set the labels at the center of the window.
axis : int or str, default 0
Returns
-------
a Window sub-classed for the particular operation
See Also
--------
rolling : Provides rolling window calculations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
"""
_attributes = ["min_periods", "center", "axis"]
def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs):
super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
"""
Get the window length over which to perform some operation.
Parameters
----------
other : object, default None
The other object that is involved in the operation.
Such an object is involved for operations like covariance.
Returns
-------
window : int
The window length.
"""
axis = self.obj._get_axis(self.axis)
length = len(axis) + (other is not None) * len(axis)
min_length = self.min_periods or -1
return max(length, min_length)
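# Consequence of _get_window above (a sketch, not normative): an expanding
# window behaves like a rolling window spanning the whole object, so
#   >>> s = pd.Series([0., 1., 2., 3., 4.])
#   >>> s.expanding(2).sum().equals(s.rolling(len(s), min_periods=2).sum())
# should evaluate to True.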
_agg_see_also_doc = dedent(
"""
See Also
--------
DataFrame.expanding.aggregate
DataFrame.rolling.aggregate
DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="expanding")
@Appender(_shared_docs["count"])
def count(self, **kwargs):
return super().count(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_expanding_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_expanding_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_expanding_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_expanding_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
# -*- coding:utf-8 -*-
__author__ = 'yangjian'
"""
"""
import copy
import inspect
from collections import OrderedDict
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.metrics import get_scorer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from hypernets.experiment import Experiment
from hypernets.tabular import dask_ex as dex
from hypernets.tabular import drift_detection as dd
from hypernets.tabular.cache import cache
from hypernets.tabular.data_cleaner import DataCleaner
from hypernets.tabular.ensemble import GreedyEnsemble, DaskGreedyEnsemble
from hypernets.tabular.feature_importance import permutation_importance_batch, select_by_feature_importance
from hypernets.tabular.feature_selection import select_by_multicollinearity
from hypernets.tabular.general import general_estimator, general_preprocessor
from hypernets.tabular.lifelong_learning import select_valid_oof
from hypernets.tabular.pseudo_labeling import sample_by_pseudo_labeling
from hypernets.utils import logging, const
logger = logging.get_logger(__name__)
DEFAULT_EVAL_SIZE = 0.3
def _set_log_level(log_level):
logging.set_level(log_level)
# if log_level >= logging.ERROR:
# import logging as pylogging
# pylogging.basicConfig(level=log_level)
class StepNames:
DATA_CLEAN = 'data_clean'
FEATURE_GENERATION = 'feature_generation'
MULITICOLLINEARITY_DETECTION = 'multicollinearity_detection'
DRIFT_DETECTION = 'drift_detection'
FEATURE_IMPORTANCE_SELECTION = 'feature_selection'
SPACE_SEARCHING = 'space_searching'
ENSEMBLE = 'ensemble'
TRAINING = 'training'
PSEUDO_LABELING = 'pseudo_labeling'
FEATURE_RESELECTION = 'feature_reselection'
FINAL_SEARCHING = 'two_stage_searching'
FINAL_ENSEMBLE = 'final_ensemble'
FINAL_TRAINING = 'final_train'
class ExperimentStep(BaseEstimator):
def __init__(self, experiment, name):
super(ExperimentStep, self).__init__()
self.name = name
self.experiment = experiment
# fitted
self.input_features_ = None
self.status_ = None # None(not fit) or True(fit succeed) or False(fit failed)
def step_progress(self, *args, **kwargs):
if self.experiment is not None:
self.experiment.step_progress(*args, **kwargs)
@property
def task(self):
return self.experiment.task if self.experiment is not None else None
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
self.input_features_ = X_train.columns.to_list()
# self.status_ = True
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def transform(self, X, y=None, **kwargs):
raise NotImplementedError()
# return X
def is_transform_skipped(self):
return False
def get_fitted_params(self):
return {'input_features': self.input_features_}
# override this to remove 'experiment' from estimator __expr__
@classmethod
def _get_param_names(cls):
params = super()._get_param_names()
return filter(lambda x: x != 'experiment', params)
def __getstate__(self):
state = super().__getstate__()
# Don't pickle experiment
if 'experiment' in state.keys():
state['experiment'] = None
return state
def _repr_df_(self):
init_params = self.get_params()
fitted_params = self.get_fitted_params()
init_df = pd.Series(init_params, name='value').to_frame()
init_df['kind'] = 'settings'
fitted_df = pd.Series(fitted_params, name='value').to_frame()
fitted_df['kind'] = 'fitted'
df = pd.concat([init_df, fitted_df], axis=0)
df['key'] = df.index
df = df.set_index(['kind', 'key'])
return df
def _repr_html_(self):
df = self._repr_df_()
html = f'<h2>{self.name}</h2>{df._repr_html_()}'
return html
class FeatureSelectStep(ExperimentStep):
def __init__(self, experiment, name):
super().__init__(experiment, name)
# fitted
self.selected_features_ = None
def transform(self, X, y=None, **kwargs):
if self.selected_features_ is not None:
if logger.is_debug_enabled():
msg = f'{self.name} transform from {len(X.columns.tolist())} to {len(self.selected_features_)} features'
logger.debug(msg)
X = X[self.selected_features_]
return X
def cache_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
if self.selected_features_ is not None:
features = self.selected_features_
X_train = X_train[features]
if X_test is not None:
X_test = X_test[features]
if X_eval is not None:
X_eval = X_eval[features]
if logger.is_info_enabled():
logger.info(f'{self.name} cache_transform: {len(X_train.columns)} columns kept.')
else:
if logger.is_info_enabled():
logger.info(f'{self.name} cache_transform: {len(X_train.columns)} columns kept (do nothing).')
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def is_transform_skipped(self):
return self.selected_features_ is None
def get_fitted_params(self):
if self.selected_features_ is None:
unselected = None
else:
unselected = list(filter(lambda _: _ not in self.selected_features_, self.input_features_))
return {**super().get_fitted_params(),
'selected_features': self.selected_features_,
'unselected_features': unselected}
class DataCleanStep(FeatureSelectStep):
def __init__(self, experiment, name, data_cleaner_args=None,
cv=False, train_test_split_strategy=None, random_state=None):
super().__init__(experiment, name)
self.data_cleaner_args = data_cleaner_args if data_cleaner_args is not None else {}
self.cv = cv
self.train_test_split_strategy = train_test_split_strategy
self.random_state = random_state
# fitted
self.data_cleaner_ = None
self.detector_ = None
self.data_shapes_ = None
@cache(arg_keys='X_train,y_train,X_test,X_eval,y_eval',
strategy='transform', transformer='cache_transform',
attrs_to_restore='input_features_,selected_features_,data_cleaner_,detector_')
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
# 1. Clean Data
if self.cv and X_eval is not None and y_eval is not None:
logger.info(f'{self.name} cv enabled, so concat train data and eval data')
X_train = dex.concat_df([X_train, X_eval], axis=0)
y_train = dex.concat_df([y_train, y_eval], axis=0)
X_eval = None
y_eval = None
data_cleaner = DataCleaner(**self.data_cleaner_args)
logger.info(f'{self.name} fit_transform with train data')
X_train, y_train = data_cleaner.fit_transform(X_train, y_train)
self.step_progress('fit_transform train set')
if X_test is not None:
logger.info(f'{self.name} transform test data')
X_test = data_cleaner.transform(X_test)
self.step_progress('transform X_test')
if not self.cv:
if X_eval is None or y_eval is None:
eval_size = self.experiment.eval_size
if self.train_test_split_strategy == 'adversarial_validation' and X_test is not None:
logger.debug('DriftDetector.train_test_split')
detector = dd.DriftDetector()
detector.fit(X_train, X_test)
self.detector_ = detector
X_train, X_eval, y_train, y_eval = \
detector.train_test_split(X_train, y_train, test_size=eval_size)
else:
if self.task == const.TASK_REGRESSION or dex.is_dask_object(X_train):
X_train, X_eval, y_train, y_eval = \
dex.train_test_split(X_train, y_train, test_size=eval_size,
random_state=self.random_state)
else:
X_train, X_eval, y_train, y_eval = \
dex.train_test_split(X_train, y_train, test_size=eval_size,
random_state=self.random_state, stratify=y_train)
if self.task != const.TASK_REGRESSION:
y_train_uniques = set(y_train.unique()) if hasattr(y_train, 'unique') else set(y_train)
y_eval_uniques = set(y_eval.unique()) if hasattr(y_eval, 'unique') else set(y_eval)
assert y_train_uniques == y_eval_uniques, \
'The classes of `y_train` and `y_eval` must be equal. Try to increase eval_size.'
self.step_progress('split into train set and eval set')
else:
X_eval, y_eval = data_cleaner.transform(X_eval, y_eval)
self.step_progress('transform eval set')
selected_features = X_train.columns.to_list()
data_shapes = {'X_train.shape': X_train.shape,
'y_train.shape': y_train.shape,
'X_eval.shape': None if X_eval is None else X_eval.shape,
'y_eval.shape': None if y_eval is None else y_eval.shape,
'X_test.shape': None if X_test is None else X_test.shape
}
if dex.exist_dask_object(X_train, y_train, X_eval, y_eval, X_test):
data_shapes = {k: dex.compute(v) if v is not None else None
for k, v in data_shapes.items()}
logger.info(f'{self.name} keep {len(selected_features)} columns')
self.selected_features_ = selected_features
self.data_cleaner_ = data_cleaner
self.data_shapes_ = data_shapes
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def cache_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
# 1. Clean Data
if self.cv and X_eval is not None and y_eval is not None:
logger.info(f'{self.name} cv enabled, so concat train data and eval data')
X_train = dex.concat_df([X_train, X_eval], axis=0)
y_train = dex.concat_df([y_train, y_eval], axis=0)
X_eval = None
y_eval = None
data_cleaner = self.data_cleaner_
logger.info(f'{self.name} transform train data')
X_train, y_train = data_cleaner.transform(X_train, y_train)
self.step_progress('fit_transform train set')
if X_test is not None:
logger.info(f'{self.name} transform test data')
X_test = data_cleaner.transform(X_test)
self.step_progress('transform X_test')
if not self.cv:
if X_eval is None or y_eval is None:
eval_size = self.experiment.eval_size
if self.train_test_split_strategy == 'adversarial_validation' and X_test is not None:
logger.debug('DriftDetector.train_test_split')
detector = self.detector_
X_train, X_eval, y_train, y_eval = \
detector.train_test_split(X_train, y_train, test_size=eval_size)
else:
if self.task == const.TASK_REGRESSION or dex.is_dask_object(X_train):
X_train, X_eval, y_train, y_eval = \
dex.train_test_split(X_train, y_train, test_size=eval_size,
random_state=self.random_state)
else:
X_train, X_eval, y_train, y_eval = \
dex.train_test_split(X_train, y_train, test_size=eval_size,
random_state=self.random_state, stratify=y_train)
if self.task != const.TASK_REGRESSION:
y_train_uniques = set(y_train.unique()) if hasattr(y_train, 'unique') else set(y_train)
y_eval_uniques = set(y_eval.unique()) if hasattr(y_eval, 'unique') else set(y_eval)
assert y_train_uniques == y_eval_uniques, \
'The classes of `y_train` and `y_eval` must be equal. Try to increase eval_size.'
self.step_progress('split into train set and eval set')
else:
X_eval, y_eval = data_cleaner.transform(X_eval, y_eval)
self.step_progress('transform eval set')
selected_features = self.selected_features_
data_shapes = {'X_train.shape': X_train.shape,
'y_train.shape': y_train.shape,
'X_eval.shape': None if X_eval is None else X_eval.shape,
'y_eval.shape': None if y_eval is None else y_eval.shape,
'X_test.shape': None if X_test is None else X_test.shape
}
if dex.exist_dask_object(X_train, y_train, X_eval, y_eval, X_test):
data_shapes = {k: dex.compute(v) if v is not None else None
for k, v in data_shapes.items()}
logger.info(f'{self.name} keep {len(selected_features)} columns')
self.data_shapes_ = data_shapes
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def transform(self, X, y=None, **kwargs):
return self.data_cleaner_.transform(X, y, **kwargs)
def get_fitted_params(self):
dc = self.data_cleaner_
def get_reason(c):
if dc is None:
return 'unknown'
if dc.dropped_constant_columns_ is not None and c in dc.dropped_constant_columns_:
return 'constant'
elif dc.dropped_idness_columns_ is not None and c in dc.dropped_idness_columns_:
return 'idness'
elif dc.dropped_duplicated_columns_ is not None and c in dc.dropped_duplicated_columns_:
return 'duplicated'
else:
return 'others'
params = super().get_fitted_params()
data_shapes = self.data_shapes_ if self.data_shapes_ is not None else {}
unselected_features = params.get('unselected_features', [])
if dc is not None:
unselected_reason = {f: get_reason(f) for f in unselected_features}
else:
unselected_reason = None
return {**params,
**data_shapes,
'unselected_reason': unselected_reason,
}
class TransformerAdaptorStep(ExperimentStep):
def __init__(self, experiment, name, transformer_creator, **kwargs):
assert transformer_creator is not None
self.transformer_creator = transformer_creator
self.transformer_kwargs = kwargs
super(TransformerAdaptorStep, self).__init__(experiment, name)
# fitted
self.transformer_ = None
@cache(arg_keys='X_train, y_train, X_test, X_eval, y_eval',
strategy='transform', transformer='cache_transform',
attrs_to_restore='transformer_kwargs,transformer_')
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
logger.info(f'{self.name} fit')
init_kwargs = self.transformer_kwargs.copy()
if 'task' in init_kwargs.keys():
init_kwargs['task'] = self.task
transformer = self.transformer_creator(**init_kwargs)
transformer.fit(X_train, y_train, **kwargs)
self.transformer_ = transformer
return self.cache_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval,
**kwargs)
def cache_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
logger.info(f'{self.name} cache_transform')
transformer = self.transformer_
X_train = transformer.transform(X_train)
if X_eval is not None:
X_eval = transformer.transform(X_eval, y_eval)
if X_test is not None:
X_test = transformer.transform(X_test)
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def transform(self, X, y=None, **kwargs):
logger.info(f'{self.name} transform')
if y is None:
return self.transformer_.transform(X)
else:
return self.transformer_.transform(X, y)
def __getattribute__(self, item):
try:
return super(TransformerAdaptorStep, self).__getattribute__(item)
except AttributeError as e:
transformer_kwargs = self.transformer_kwargs
if item in transformer_kwargs.keys():
return transformer_kwargs[item]
else:
raise e
def __dir__(self):
transformer_kwargs = self.transformer_kwargs
return set(super(TransformerAdaptorStep, self).__dir__()).union(set(transformer_kwargs.keys()))
class FeatureGenerationStep(TransformerAdaptorStep):
def __init__(self, experiment, name,
trans_primitives=None,
continuous_cols=None,
datetime_cols=None,
categories_cols=None,
latlong_cols=None,
text_cols=None,
max_depth=1,
feature_selection_args=None):
from hypernets.tabular.feature_generators import FeatureGenerationTransformer
drop_cols = []
if text_cols is not None:
drop_cols += list(text_cols)
if latlong_cols is not None:
drop_cols += list(latlong_cols)
super(FeatureGenerationStep, self).__init__(experiment, name,
FeatureGenerationTransformer,
trans_primitives=trans_primitives,
fix_input=False,
continuous_cols=continuous_cols,
datetime_cols=datetime_cols,
categories_cols=categories_cols,
latlong_cols=latlong_cols,
text_cols=text_cols,
drop_cols=drop_cols if len(drop_cols) > 0 else None,
max_depth=max_depth,
feature_selection_args=feature_selection_args,
task=None, # fixed by super
)
def get_fitted_params(self):
t = self.transformer_
return {**super(FeatureGenerationStep, self).get_fitted_params(),
'trans_primitives': t.trans_primitives if t is not None else None,
'output_feature_names': t.transformed_feature_names_ if t is not None else None,
}
class MulticollinearityDetectStep(FeatureSelectStep):
def __init__(self, experiment, name):
super().__init__(experiment, name)
# fitted
self.corr_linkage_ = None
@cache(arg_keys='X_train',
strategy='transform', transformer='cache_transform',
attrs_to_restore='input_features_,selected_features_,corr_linkage_')
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
corr_linkage, remained, dropped = select_by_multicollinearity(X_train)
self.step_progress('calc correlation')
if dropped:
self.selected_features_ = remained
X_train = X_train[self.selected_features_]
if X_eval is not None:
X_eval = X_eval[self.selected_features_]
if X_test is not None:
X_test = X_test[self.selected_features_]
self.step_progress('drop features')
else:
self.selected_features_ = None
self.corr_linkage_ = corr_linkage
logger.info(f'{self.name} drop {len(dropped)} columns, {len(remained)} kept')
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def get_fitted_params(self):
return {**super().get_fitted_params(),
'corr_linkage': self.corr_linkage_,
}
class DriftDetectStep(FeatureSelectStep):
def __init__(self, experiment, name, remove_shift_variable, variable_shift_threshold,
threshold, remove_size, min_features, num_folds):
super().__init__(experiment, name)
self.remove_shift_variable = remove_shift_variable
self.variable_shift_threshold = variable_shift_threshold
self.threshold = threshold
self.remove_size = remove_size if 1.0 > remove_size > 0 else 0.1
self.min_features = min_features if min_features > 1 else 10
self.num_folds = num_folds if num_folds > 1 else 5
# fitted
self.history_ = None
self.scores_ = None
@cache(arg_keys='X_train,X_test',
strategy='transform', transformer='cache_transform',
attrs_to_restore='input_features_,selected_features_,history_,scores_')
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
if X_test is not None:
features, history, scores = dd.feature_selection(X_train, X_test,
remove_shift_variable=self.remove_shift_variable,
variable_shift_threshold=self.variable_shift_threshold,
auc_threshold=self.threshold,
min_features=self.min_features,
remove_size=self.remove_size,
cv=self.num_folds)
dropped = set(X_train.columns.to_list()) - set(features)
if dropped:
self.selected_features_ = features
X_train = X_train[features]
X_test = X_test[features]
if X_eval is not None:
X_eval = X_eval[features]
else:
self.selected_features_ = None
self.history_ = history
self.scores_ = scores
logger.info(f'{self.name} drop {len(dropped)} columns, {len(features)} kept')
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def get_fitted_params(self):
return {**super().get_fitted_params(),
'history': self.history_,
'scores': self.scores_,
}
class FeatureImportanceSelectionStep(FeatureSelectStep):
def __init__(self, experiment, name, strategy, threshold, quantile, number):
super(FeatureImportanceSelectionStep, self).__init__(experiment, name)
self.strategy = strategy
self.threshold = threshold
self.quantile = quantile
self.number = number
# fitted
self.importances_ = None
@cache(arg_keys='X_train,y_train',
strategy='transform', transformer='cache_transform',
attrs_to_restore='input_features_,selected_features_,importances_')
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
preprocessor = general_preprocessor(X_train)
estimator = general_estimator(X_train, task=self.task)
estimator.fit(preprocessor.fit_transform(X_train, y_train), y_train)
importances = estimator.feature_importances_
self.step_progress('training general estimator')
selected, unselected = \
select_by_feature_importance(importances, self.strategy,
threshold=self.threshold,
quantile=self.quantile,
number=self.number)
features = X_train.columns.to_list()
selected_features = [features[i] for i in selected]
unselected_features = [features[i] for i in unselected]
self.step_progress('select by importances')
if unselected_features:
X_train = X_train[selected_features]
if X_eval is not None:
X_eval = X_eval[selected_features]
if X_test is not None:
X_test = X_test[selected_features]
self.step_progress('drop features')
logger.info(f'{self.name} drop {len(unselected_features)} columns, {len(selected_features)} kept')
self.selected_features_ = selected_features if len(unselected_features) > 0 else None
self.importances_ = importances
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def get_fitted_params(self):
return {**super().get_fitted_params(),
'importances': self.importances_,
}
class PermutationImportanceSelectionStep(FeatureSelectStep):
def __init__(self, experiment, name, scorer, estimator_size,
strategy, threshold, quantile, number):
assert scorer is not None
super().__init__(experiment, name)
self.scorer = scorer
self.estimator_size = estimator_size
self.strategy = strategy
self.threshold = threshold
self.quantile = quantile
self.number = number
# fixed
self.importances_ = None
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
best_trials = hyper_model.get_top_trials(self.estimator_size)
estimators = [hyper_model.load_estimator(trial.model_file) for trial in best_trials]
self.step_progress('load estimators')
if X_eval is None or y_eval is None:
importances = permutation_importance_batch(estimators, X_train, y_train, self.scorer, n_repeats=5)
else:
importances = permutation_importance_batch(estimators, X_eval, y_eval, self.scorer, n_repeats=5)
# feature_index = np.argwhere(importances.importances_mean < self.threshold)
# selected_features = [feat for i, feat in enumerate(X_train.columns.to_list()) if i not in feature_index]
# unselected_features = list(set(X_train.columns.to_list()) - set(selected_features))
selected, unselected = select_by_feature_importance(importances.importances_mean,
self.strategy,
threshold=self.threshold,
quantile=self.quantile,
number=self.number)
if len(selected) > 0:
selected_features = [importances.columns[i] for i in selected]
unselected_features = [importances.columns[i] for i in unselected]
else:
msg = f'{self.name}: all features would be dropped with importances {importances.importances_mean}, ' \
f'so nothing is dropped. Please change the selection settings and try again.'
logger.warning(msg)
selected_features = importances.columns
unselected_features = []
self.step_progress('calc importance')
if unselected_features:
X_train = X_train[selected_features]
if X_eval is not None:
X_eval = X_eval[selected_features]
if X_test is not None:
X_test = X_test[selected_features]
self.step_progress('drop features')
logger.info(f'{self.name} drop {len(unselected_features)} columns, {len(selected_features)} kept')
self.selected_features_ = selected_features if len(unselected_features) > 0 else None
self.importances_ = importances
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def get_fitted_params(self):
return {**super().get_fitted_params(),
'importances': self.importances_,
}
class SpaceSearchStep(ExperimentStep):
def __init__(self, experiment, name, cv=False, num_folds=3):
super().__init__(experiment, name)
self.cv = cv
self.num_folds = num_folds
# fitted
self.history_ = None
self.best_reward_ = None
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
if not dex.is_dask_object(X_eval):
kwargs['eval_set'] = (X_eval, y_eval)
model = copy.deepcopy(self.experiment.hyper_model) # copy from original hyper_model instance
model.search(X_train, y_train, X_eval, y_eval, cv=self.cv, num_folds=self.num_folds, **kwargs)
if model.get_best_trial() is None or model.get_best_trial().reward == 0:
raise RuntimeError('No available trial was found. Please change the experiment settings and try again.')
logger.info(f'{self.name} best_reward: {model.get_best_trial().reward}')
self.history_ = model.history
self.best_reward_ = model.get_best_trial().reward
return model, X_train, y_train, X_test, X_eval, y_eval
def transform(self, X, y=None, **kwargs):
return X
def is_transform_skipped(self):
return True
def get_fitted_params(self):
return {**super().get_fitted_params(),
'best_reward': self.best_reward_,
'history': self.history_,
}
class DaskSpaceSearchStep(SpaceSearchStep):
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
X_train, y_train, X_test, X_eval, y_eval = \
[v.persist() if dex.is_dask_object(v) else v for v in (X_train, y_train, X_test, X_eval, y_eval)]
return super().fit_transform(hyper_model, X_train, y_train, X_test, X_eval, y_eval, **kwargs)
class EstimatorBuilderStep(ExperimentStep):
def __init__(self, experiment, name):
super().__init__(experiment, name)
# fitted
self.estimator_ = None
def transform(self, X, y=None, **kwargs):
return X
def is_transform_skipped(self):
return True
def get_fitted_params(self):
return {**super().get_fitted_params(),
'estimator': self.estimator_,
}
class EnsembleStep(EstimatorBuilderStep):
def __init__(self, experiment, name, scorer=None, ensemble_size=7):
assert ensemble_size > 1
super().__init__(experiment, name)
self.scorer = scorer if scorer is not None else get_scorer('neg_log_loss')
self.ensemble_size = ensemble_size
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
best_trials = hyper_model.get_top_trials(self.ensemble_size)
estimators = [hyper_model.load_estimator(trial.model_file) for trial in best_trials]
ensemble = self.get_ensemble(estimators, X_train, y_train)
if all(['oof' in trial.memo.keys() for trial in best_trials]):
logger.info('ensemble with oofs')
oofs = self.get_ensemble_predictions(best_trials, ensemble)
assert oofs is not None
if hasattr(oofs, 'shape'):
y_, oofs_ = select_valid_oof(y_train, oofs)
ensemble.fit(None, y_, oofs_)
else:
ensemble.fit(None, y_train, oofs)
else:
ensemble.fit(X_eval, y_eval)
self.estimator_ = ensemble
logger.info(f'ensemble info: {ensemble}')
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
def get_ensemble(self, estimators, X_train, y_train):
return GreedyEnsemble(self.task, estimators, scoring=self.scorer, ensemble_size=self.ensemble_size)
def get_ensemble_predictions(self, trials, ensemble):
oofs = None
for i, trial in enumerate(trials):
if 'oof' in trial.memo.keys():
oof = trial.memo['oof']
if oofs is None:
if len(oof.shape) == 1:
oofs = np.zeros((oof.shape[0], len(trials)), dtype=np.float64)
else:
oofs = np.zeros((oof.shape[0], len(trials), oof.shape[-1]), dtype=np.float64)
oofs[:, i] = oof
return oofs
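# Rough sketch of the shapes assembled above (names are illustrative, not part
# of hypernets): with binary-task oofs of shape (n,), `oofs` ends up as an
# (n, n_trials) matrix with one column per trial; with multiclass oofs of
# shape (n, k) it becomes (n, n_trials, k), e.g.
#   oofs = np.zeros((n_samples, n_trials))   # binary case
#   oofs[:, i] = trial.memo['oof']           # one column per trial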
class DaskEnsembleStep(EnsembleStep):
def get_ensemble(self, estimators, X_train, y_train):
if dex.exist_dask_object(X_train, y_train):
predict_kwargs = {}
if all(['use_cache' in inspect.signature(est.predict).parameters.keys()
for est in estimators]):
predict_kwargs['use_cache'] = False
return DaskGreedyEnsemble(self.task, estimators, scoring=self.scorer,
ensemble_size=self.ensemble_size,
predict_kwargs=predict_kwargs)
return super().get_ensemble(estimators, X_train, y_train)
def get_ensemble_predictions(self, trials, ensemble):
if isinstance(ensemble, DaskGreedyEnsemble):
oofs = [trial.memo.get('oof') for trial in trials]
return oofs if any([oof is not None for oof in oofs]) else None
return super().get_ensemble_predictions(trials, ensemble)
class FinalTrainStep(EstimatorBuilderStep):
def __init__(self, experiment, name, retrain_on_wholedata=False):
super().__init__(experiment, name)
self.retrain_on_wholedata = retrain_on_wholedata
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
if self.retrain_on_wholedata:
trial = hyper_model.get_best_trial()
X_all = dex.concat_df([X_train, X_eval], axis=0)
y_all = dex.concat_df([y_train, y_eval], axis=0)
estimator = hyper_model.final_train(trial.space_sample, X_all, y_all, **kwargs)
else:
estimator = hyper_model.load_estimator(hyper_model.get_best_trial().model_file)
self.estimator_ = estimator
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
class PseudoLabelStep(ExperimentStep):
def __init__(self, experiment, name, estimator_builder,
strategy=None, proba_threshold=None, proba_quantile=None, sample_number=None,
resplit=False, random_state=None):
super().__init__(experiment, name)
assert hasattr(estimator_builder, 'estimator_')
self.estimator_builder = estimator_builder
self.strategy = strategy
self.proba_threshold = proba_threshold
self.proba_quantile = proba_quantile
self.sample_number = sample_number
self.resplit = resplit
self.random_state = random_state
self.plot_sample_size = 3000
# fitted
self.test_proba_ = None
self.pseudo_label_stat_ = None
def transform(self, X, y=None, **kwargs):
return X
def is_transform_skipped(self):
return True
def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
# build estimator
hyper_model, X_train, y_train, X_test, X_eval, y_eval = \
self.estimator_builder.fit_transform(hyper_model, X_train, y_train, X_test=X_test,
X_eval=X_eval, y_eval=y_eval, **kwargs)
estimator = self.estimator_builder.estimator_
# pseudo-label selection starts here
X_pseudo = None
y_pseudo = None
test_proba = None
pseudo_label_stat = None
if self.task in [const.TASK_BINARY, const.TASK_MULTICLASS] and X_test is not None:
proba = estimator.predict_proba(X_test)
classes = estimator.classes_
X_pseudo, y_pseudo = sample_by_pseudo_labeling(X_test, classes, proba,
strategy=self.strategy,
threshold=self.proba_threshold,
quantile=self.proba_quantile,
number=self.sample_number,
)
pseudo_label_stat = self.stat_pseudo_label(y_pseudo, classes)
test_proba = dex.compute(proba)[0] if dex.is_dask_object(proba) else proba
if test_proba.shape[0] > self.plot_sample_size:
test_proba, _ = dex.train_test_split(test_proba,
train_size=self.plot_sample_size,
random_state=self.random_state)
if X_pseudo is not None:
X_train, y_train, X_eval, y_eval = \
self.merge_pseudo_label(X_train, y_train, X_eval, y_eval, X_pseudo, y_pseudo)
self.test_proba_ = test_proba
self.pseudo_label_stat_ = pseudo_label_stat
return hyper_model, X_train, y_train, X_test, X_eval, y_eval
@staticmethod
def stat_pseudo_label(y_pseudo, classes):
stat = OrderedDict()
if dex.is_dask_object(y_pseudo):
u = dex.da.unique(y_pseudo, return_counts=True)
u = dex.compute(u)[0]
else:
u = np.unique(y_pseudo, return_counts=True)
u = {c: n for c, n in zip(*u)}
for c in classes:
stat[c] = u[c] if c in u.keys() else 0
return stat
def merge_pseudo_label(self, X_train, y_train, X_eval, y_eval, X_pseudo, y_pseudo, **kwargs):
if self.resplit:
x_list = [X_train, X_pseudo]
y_list = [y_train, | pd.Series(y_pseudo) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Analyze the SRAG data and export the statistics used to generate Figure 1.
Requires the CSV output of filter_SRAG.py to run.
"""
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
from scipy.stats import norm, binom
def median_estimate(X, CI):
n = len(X)
lmd = binom.ppf((1-CI)/2, n, 0.5)
mmd = binom.ppf((1+CI)/2, n, 0.5)
Xo = np.sort(X)
return np.median(Xo), Xo[int(lmd)], Xo[int(mmd)-1]
def freq_estimate(X, CI):
n = len(X)
P = (X==True).sum()
lmd = binom.ppf((1-CI)/2, n, P/n)
mmd = binom.ppf((1+CI)/2, n, P/n)
return P/n, lmd/n, mmd/n
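# --- Illustrative usage sketch (editor's addition, synthetic data only) ---
# Demonstrates the two estimators above: the median with its order-statistic
# (Binomial-based) 95% CI, and a proportion with its Binomial 95% CI.
def _demo_estimates():
    rng = np.random.default_rng(0)
    x = rng.normal(loc=10.0, scale=2.0, size=200)       # synthetic durations
    med, md_lo, md_hi = median_estimate(x, 0.95)         # median and CI bounds
    frac, fr_lo, fr_hi = freq_estimate(x > 10.0, 0.95)   # share above 10 and CI bounds
    return (med, md_lo, md_hi), (frac, fr_lo, fr_hi)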
def create_filter_cont(data, ycol, xcols, fname, col_extra=None, CI=0.95):
lme = norm.ppf((1-CI)/2)
mme = norm.ppf((1+CI)/2)
data = data[~pd.isna(data[ycol])]
saida = {'name': [], 'mean': [], 'CIme_L':[], 'CIme_H':[], 'median':[], \
'CImd_L':[], 'CImd_H':[]}
saida['name'].append('All')
saida['mean'].append(np.mean(data[ycol]))
saida['CIme_L'].append(np.mean(data[ycol]) + lme*np.std(data[ycol])/len(data[ycol]))
saida['CIme_H'].append(np.mean(data[ycol]) + mme*np.std(data[ycol])/len(data[ycol]))
med, cl, ch = median_estimate(data[ycol], CI)
saida['median'].append(med)
saida['CImd_L'].append(cl)
saida['CImd_H'].append(ch)
if col_extra != None:
for val_extra in data[col_extra].unique():
data_extra = data[data[col_extra]==val_extra]
saida['name'].append('All_'+str(val_extra))
saida['mean'].append(np.mean(data_extra[ycol]))
saida['CIme_L'].append(np.mean(data_extra[ycol]) + lme*np.std(data_extra[ycol])/len(data_extra[ycol]))
saida['CIme_H'].append(np.mean(data_extra[ycol]) + mme*np.std(data_extra[ycol])/len(data_extra[ycol]))
med, cl, ch = median_estimate(data_extra[ycol], CI)
saida['median'].append(med)
saida['CImd_L'].append(cl)
saida['CImd_H'].append(ch)
for xcol in xcols:
for val in data[xcol].unique():
if val is np.nan:
data_fil = data[pd.isna(data[xcol])]
else:
data_fil = data[data[xcol]==val]
data_fil = data_fil[~pd.isna(data_fil[ycol])]
saida['name'].append(str(xcol)+'_'+str(val))
saida['mean'].append(np.mean(data_fil[ycol]))
saida['CIme_L'].append(np.mean(data_fil[ycol]) + lme*np.std(data_fil[ycol])/len(data_fil[ycol]))
saida['CIme_H'].append(np.mean(data_fil[ycol]) + mme*np.std(data_fil[ycol])/len(data_fil[ycol]))
med, cl, ch = median_estimate(data_fil[ycol], CI)
saida['median'].append(med)
saida['CImd_L'].append(cl)
saida['CImd_H'].append(ch)
if col_extra != None:
for val_extra in data_fil[col_extra].unique():
data_extra = data_fil[data_fil[col_extra]==val_extra]
saida['name'].append(str(xcol)+'_'+str(val)+'_'+str(val_extra))
saida['mean'].append(np.mean(data_extra[ycol]))
saida['CIme_L'].append(np.mean(data_extra[ycol]) + lme*np.std(data_extra[ycol])/len(data_extra[ycol]))
saida['CIme_H'].append(np.mean(data_extra[ycol]) + mme*np.std(data_extra[ycol])/len(data_extra[ycol]))
med, cl, ch = median_estimate(data_extra[ycol], CI)
saida['median'].append(med)
saida['CImd_L'].append(cl)
saida['CImd_H'].append(ch)
saida = pd.DataFrame(saida)
saida.to_csv(fname, index=False)
def create_filter_binary(data, ycol, xcols, fname, CI=0.95):
lme = norm.ppf((1-CI)/2)
mme = norm.ppf((1+CI)/2)
data = data[~pd.isna(data[ycol])]
saida = {'name': [], 'mean': [], 'CIme_L':[], 'CIme_H':[]}
mea, cl, ch = freq_estimate(data[ycol], CI)
saida['name'].append('All')
saida['mean'].append(mea)
saida['CIme_L'].append(cl)
saida['CIme_H'].append(ch)
for xcol in xcols:
for val in data[xcol].unique():
if val is np.nan:
data_fil = data[pd.isna(data[xcol])]
else:
data_fil = data[data[xcol]==val]
data_fil = data_fil[~pd.isna(data_fil[ycol])]
mea, cl, ch = freq_estimate(data_fil[ycol], CI)
saida['name'].append(str(xcol)+'_'+str(val))
saida['mean'].append(mea)
saida['CIme_L'].append(cl)
saida['CIme_H'].append(ch)
saida = pd.DataFrame(saida)
saida.to_csv(fname, index=False)
path = '../Results/'
ref = datetime.date(2019, 12, 31)
max_dur = 90
data0 = pd.read_csv('../Data/SRAG_filtered_morb.csv')
for col in data0.columns:
if (col[:2] == 'DT') or (col[:4] == 'DOSE'):
data0.loc[:,col] = pd.to_datetime(data0[col], format='%Y/%m/%d', errors='coerce')
ages = [0, 18, 30, 40, 50, 65, 75, 85, np.inf]
nsep = len(ages) - 1
data0['AGEGRP'] = ''
for i in range(nsep):
if i == nsep-1:
data0.loc[(data0.NU_IDADE_N>=ages[i]),'AGEGRP'] = 'AG85+'
else:
data0.loc[(data0.NU_IDADE_N>=ages[i])&(data0.NU_IDADE_N<ages[i+1]), 'AGEGRP'] = 'AG{}t{}'.format(ages[i],ages[i+1])
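# --- Illustrative note (editor's addition): equivalent banding via pd.cut ---
# The loop above assigns half-open age bands [lo, hi); pd.cut with right=False does the
# same thing, shown here on a throwaway Series rather than applied to data0.
_example_age_bands = pd.cut(pd.Series([17, 45, 90]), bins=ages, right=False)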
trad_raca = {1:'Branca', 2:'Preta', 3:'Amarela', 4:'Parda', 5:'Indigena'}
data0['RACA'] = data0['CS_RACA'].map(trad_raca)
ibpv = [data0.ibp.quantile(x) for x in [0.0,0.2,0.4,0.6,0.8,1.0]]
names = [ 'BDI' + i for i in ['0', '1', '2', '3', '4']]
data0['BDIGRP'] = ''
for i in range(5):
if i == 4:
data0.loc[(data0.ibp>=ibpv[i]),'BDIGRP'] = names[i]
else:
data0.loc[(data0.ibp>=ibpv[i])&(data0.ibp<ibpv[i+1]), 'BDIGRP'] = names[i]
gr_risco = ['PNEUMOPATI', 'IMUNODEPRE', 'OBESIDADE', 'SIND_DOWN', \
'RENAL', 'NEUROLOGIC', 'DIABETES', 'PUERPERA', 'OUT_MORBI', \
'HEMATOLOGI', 'ASMA', 'HEPATICA', 'CARDIOPATI']
data0['COMOR'] = 'NO'
for risco in gr_risco:
data0.loc[data0[risco]==1,'COMOR'] = 'YES'
data0['MORTE'] = 'OTHER'
data0.loc[data0.EVOLUCAO==2, 'MORTE'] = "MORTE"
data0.loc[data0.EVOLUCAO==1, 'MORTE'] = "CURA"
#removing unknown outcomes
data0 = data0[data0.MORTE !='OTHER']
data0['VACINA'] = (data0.VACINA_COV == 1)
data0['TSM'] = (data0.DT_EVOLUCA-data0.DT_SIN_PRI).dt.days
data0.loc[data0.MORTE!="MORTE", 'TSM'] = np.nan
data0['TSH'] = (data0.DT_INTERNA-data0.DT_SIN_PRI).dt.days
data0['TSI'] = (data0.DT_ENTUTI-data0.DT_SIN_PRI).dt.days
create_filter_cont(data0, 'UTI_dur', ['AGEGRP', 'CS_SEXO', 'RACA', 'BDIGRP', 'VACINA', 'COMOR' ], path + 'ICU_dur.csv', 'MORTE' )
create_filter_cont(data0, 'HOSP_dur', ['AGEGRP', 'CS_SEXO', 'RACA', 'BDIGRP', 'VACINA', 'COMOR' ], path + 'HOSP_dur.csv', 'MORTE' )
create_filter_cont(data0, 'TSM', ['AGEGRP', 'CS_SEXO', 'RACA', 'BDIGRP', 'VACINA', 'COMOR' ], path + 'TimeSintomasMorte.csv', 'MORTE' )
create_filter_cont(data0, 'TSH', ['AGEGRP', 'CS_SEXO', 'RACA', 'BDIGRP', 'VACINA', 'COMOR' ], path + 'TimeSintomasInterna.csv', 'MORTE' )
create_filter_cont(data0, 'TSI', ['AGEGRP', 'CS_SEXO', 'RACA', 'BDIGRP', 'VACINA', 'COMOR' ], path + 'TimeSintomasICU.csv', 'MORTE' )
data_m = data0[data0.MORTE != 'OTHER'].copy()  # copy to avoid SettingWithCopyWarning on the next assignment
data_m['MORTE'] = (data_m.MORTE=='MORTE')
create_filter_binary(data_m[~pd.isna(data_m.DT_ENTUTI)], 'MORTE', ['AGEGRP', 'CS_SEXO', 'RACA', 'BDIGRP', 'VACINA', 'COMOR' ], path + 'Mortalidade_ICU.csv')
create_filter_binary(data_m[pd.isna(data_m.DT_ENTUTI)], 'MORTE', ['AGEGRP', 'CS_SEXO', 'RACA', 'BDIGRP', 'VACINA', 'COMOR' ], path + 'Mortalidade_HOSP.csv')
data0['THI'] = (data0.DT_ENTUTI-data0.DT_INTERNA).dt.days
data0['DirICU'] = (data0.THI == 0)
create_filter_binary(data0, 'DirICU', ['AGEGRP', 'CS_SEXO', 'RACA', 'BDIGRP', 'VACINA', 'COMOR' ], path + 'Direct_to_ICU.csv')
dataind = data0[data0.THI != 0].copy()  # copy before assigning the 'frac' column below
dataind['frac'] = (~ | pd.isna(dataind.DT_ENTUTI) | pandas.isna |
# Module: Regression
# Author: <NAME> <<EMAIL>>
# License: MIT
# Release: PyCaret 2.1
# Last modified : 17/08/2020
def setup(data,
target,
train_size = 0.7,
sampling = True,
sample_estimator = None,
categorical_features = None,
categorical_imputation = 'constant',
ordinal_features = None,
high_cardinality_features = None,
high_cardinality_method = 'frequency',
numeric_features = None,
numeric_imputation = 'mean',
date_features = None,
ignore_features = None,
normalize = False,
normalize_method = 'zscore',
transformation = False,
transformation_method = 'yeo-johnson',
handle_unknown_categorical = True,
unknown_categorical_method = 'least_frequent',
pca = False,
pca_method = 'linear',
pca_components = None,
ignore_low_variance = False,
combine_rare_levels = False,
rare_level_threshold = 0.10,
bin_numeric_features = None,
remove_outliers = False,
outliers_threshold = 0.05,
remove_multicollinearity = False,
multicollinearity_threshold = 0.9,
remove_perfect_collinearity = False, #added in pycaret==2.0.0
create_clusters = False,
cluster_iter = 20,
polynomial_features = False,
polynomial_degree = 2,
trigonometry_features = False,
polynomial_threshold = 0.1,
group_features = None,
group_names = None,
feature_selection = False,
feature_selection_threshold = 0.8,
feature_selection_method = 'classic',
feature_interaction = False,
feature_ratio = False,
interaction_threshold = 0.01,
transform_target = False,
transform_target_method = 'box-cox',
data_split_shuffle = True, #added in pycaret==2.0.0
folds_shuffle = False, #added in pycaret==2.0.0
n_jobs = -1, #added in pycaret==2.0.0
use_gpu = False, #added in pycaret==2.1
html = True, #added in pycaret==2.0.0
session_id = None,
log_experiment = False, #added in pycaret==2.0.0
experiment_name = None, #added in pycaret==2.0.0
log_plots = False, #added in pycaret==2.0.0
log_profile = False, #added in pycaret==2.0.0
log_data = False, #added in pycaret==2.0.0
silent = False,
verbose = True, #added in pycaret==2.0.0
profile = False):
"""
This function initializes the environment in pycaret and creates the transformation
pipeline to prepare the data for modeling and deployment. setup() must be called before
executing any other function in pycaret. It takes two mandatory parameters:
dataframe {array-like, sparse matrix} and name of the target column.
All other parameters are optional.
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> experiment_name = setup(data = boston, target = 'medv')
'boston' is a pandas.DataFrame and 'medv' is the name of the target column.
Parameters
----------
data : pandas.DataFrame
Shape (n_samples, n_features) where n_samples is the number of samples and n_features is the number of features.
target: string
Name of target column to be passed in as string.
train_size: float, default = 0.7
Size of the training set. By default, 70% of the data will be used for training
and validation. The remaining data will be used for test / hold-out set.
sampling: bool, default = True
When the sample size exceeds 25,000 samples, pycaret will build a base estimator
at various sample sizes from the original dataset. This will return a performance
plot of R2 values at various sample levels, that will assist in deciding the
preferred sample size for modeling. The desired sample size must then be entered
for training and validation in the pycaret environment. When sample_size entered
is less than 1, the remaining dataset (1 - sample) is used for fitting the model
only when finalize_model() is called.
sample_estimator: object, default = None
If None, Linear Regression is used by default.
categorical_features: string, default = None
If the inferred data types are not correct, categorical_features can be used to
overwrite the inferred type. If when running setup the type of 'column1' is
inferred as numeric instead of categorical, then this parameter can be used
to overwrite the type by passing categorical_features = ['column1'].
categorical_imputation: string, default = 'constant'
If missing values are found in categorical features, they will be imputed with
a constant 'not_available' value. The other available option is 'mode' which
imputes the missing value using the most frequent value in the training dataset.
ordinal_features: dictionary, default = None
When the data contains ordinal features, they must be encoded differently using
the ordinal_features param. If the data has a categorical variable with values
of 'low', 'medium', 'high' and it is known that low < medium < high, then it can
be passed as ordinal_features = { 'column_name' : ['low', 'medium', 'high'] }.
The list sequence must be in increasing order from lowest to highest.
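An illustrative call (reusing the 'boston' frame from the example above; 'column1' is a
placeholder column name, not an actual boston column):
>>> experiment_name = setup(data = boston, target = 'medv', ordinal_features = {'column1' : ['low', 'medium', 'high']})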
high_cardinality_features: string, default = None
When the data contains features with high cardinality, they can be compressed
into fewer levels by passing them as a list of column names with high cardinality.
Features are compressed using method defined in high_cardinality_method param.
high_cardinality_method: string, default = 'frequency'
When the method is set to 'frequency', it will replace the original value of the feature
with its frequency distribution and convert the feature into numeric. The other
available method is 'clustering', which performs clustering on the statistical
attributes of the data and replaces the original value of the feature with the cluster label.
The number of clusters is determined using a combination of Calinski-Harabasz and
Silhouette criterion.
numeric_features: string, default = None
If the inferred data types are not correct, numeric_features can be used to
overwrite the inferred type. If when running setup the type of 'column1' is
inferred as a categorical instead of numeric, then this parameter can be used
to overwrite by passing numeric_features = ['column1'].
numeric_imputation: string, default = 'mean'
If missing values are found in numeric features, they will be imputed with the
mean value of the feature. The other available option is 'median' which imputes
the value using the median value in the training dataset.
date_features: string, default = None
If the data has a DateTime column that is not automatically detected when running
setup, this parameter can be used by passing date_features = 'date_column_name'.
It can work with multiple date columns. Date columns are not used in modeling.
Instead, feature extraction is performed and date columns are dropped from the
dataset. If the date column includes a time stamp, features related to time will
also be extracted.
ignore_features: string, default = None
If any feature should be ignored for modeling, it can be passed to the param
ignore_features. The ID and DateTime columns when inferred, are automatically
set to ignore for modeling.
normalize: bool, default = False
When set to True, the feature space is transformed using the normalized_method
param. Generally, linear algorithms perform better with normalized data; however,
the results may vary and it is advised to run multiple experiments to evaluate
the benefit of normalization.
normalize_method: string, default = 'zscore'
Defines the method to be used for normalization. By default, normalize method
is set to 'zscore'. The standard zscore is calculated as z = (x - u) / s. The
other available options are:
'minmax' : scales and translates each feature individually such that it is in
the range of 0 - 1.
'maxabs' : scales and translates each feature individually such that the maximal
absolute value of each feature will be 1.0. It does not shift/center
the data, and thus does not destroy any sparsity.
'robust' : scales and translates each feature according to the Interquartile range.
When the dataset contains outliers, robust scaler often gives better
results.
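An illustrative call enabling robust scaling (reusing the 'boston' frame from the
example above):
>>> experiment_name = setup(data = boston, target = 'medv', normalize = True, normalize_method = 'robust')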
transformation: bool, default = False
When set to True, a power transformation is applied to make the data more normal /
Gaussian-like. This is useful for modeling issues related to heteroscedasticity or
other situations where normality is desired. The optimal parameter for stabilizing
variance and minimizing skewness is estimated through maximum likelihood.
transformation_method: string, default = 'yeo-johnson'
Defines the method for transformation. By default, the transformation method is set
to 'yeo-johnson'. The other available option is 'quantile' transformation. Both
the transformation transforms the feature set to follow a Gaussian-like or normal
distribution. Note that the quantile transformer is non-linear and may distort linear
correlations between variables measured at the same scale.
handle_unknown_categorical: bool, default = True
When set to True, unknown categorical levels in new / unseen data are replaced by
the most or least frequent level as learned in the training data. The method is
defined under the unknown_categorical_method param.
unknown_categorical_method: string, default = 'least_frequent'
Method used to replace unknown categorical levels in unseen data. Method can be
set to 'least_frequent' or 'most_frequent'.
pca: bool, default = False
When set to True, dimensionality reduction is applied to project the data into
a lower dimensional space using the method defined in pca_method param. In
supervised learning pca is generally performed when dealing with high feature
space and memory is a constraint. Note that not all datasets can be decomposed
efficiently using a linear PCA technique and that applying PCA may result in loss
of information. As such, it is advised to run multiple experiments with different
pca_methods to evaluate the impact.
pca_method: string, default = 'linear'
The 'linear' method performs Linear dimensionality reduction using Singular Value
Decomposition. The other available options are:
kernel : dimensionality reduction through the use of an RBF kernel.
incremental : replacement for 'linear' pca when the dataset to be decomposed is
too large to fit in memory
pca_components: int/float, default = None
Number of components to keep. When None, it defaults to 0.99 for the 'linear' method
and to half the number of original features otherwise. If pca_components is a float,
it is treated as a target percentage for information retention. When pca_components
is an integer it is treated as the number of features to be kept. pca_components must
be strictly less than the original number of features in the dataset.
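An illustrative call keeping 10 components with kernel PCA (reusing the 'boston' frame
from the example above; 10 is an arbitrary choice):
>>> experiment_name = setup(data = boston, target = 'medv', pca = True, pca_method = 'kernel', pca_components = 10)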
ignore_low_variance: bool, default = False
When set to True, all categorical features with statistically insignificant variances
are removed from the dataset. The variance is calculated using the ratio of unique
values to the number of samples, and the ratio of the most common value to the
frequency of the second most common value.
combine_rare_levels: bool, default = False
When set to True, all levels in categorical features below the threshold defined
in rare_level_threshold param are combined together as a single level. There must be
atleast two levels under the threshold for this to take effect. rare_level_threshold
represents the percentile distribution of level frequency. Generally, this technique
is applied to limit a sparse matrix caused by high numbers of levels in categorical
features.
rare_level_threshold: float, default = 0.1
Percentile distribution below which rare categories are combined. Only comes into
effect when combine_rare_levels is set to True.
bin_numeric_features: list, default = None
When a list of numeric features is passed they are transformed into categorical
features using KMeans, where values in each bin have the same nearest center of a
1D k-means cluster. The number of clusters is determined based on the 'sturges'
method. It is only optimal for Gaussian data and underestimates the number of bins
for large non-Gaussian datasets.
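An illustrative call (reusing the 'boston' frame from the example above; 'age' is used
here only as an example of a numeric column to bin):
>>> experiment_name = setup(data = boston, target = 'medv', bin_numeric_features = ['age'])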
remove_outliers: bool, default = False
When set to True, outliers from the training data are removed using PCA linear
dimensionality reduction using the Singular Value Decomposition technique.
outliers_threshold: float, default = 0.05
The percentage / proportion of outliers in the dataset can be defined using
the outliers_threshold param. By default, 0.05 is used which means 0.025 of the
values on each side of the distribution's tail are dropped from training data.
remove_multicollinearity: bool, default = False
When set to True, the variables with inter-correlations higher than the threshold
defined under the multicollinearity_threshold param are dropped. When two features
are highly correlated with each other, the feature that is less correlated with
the target variable is dropped.
multicollinearity_threshold: float, default = 0.9
Threshold used for dropping the correlated features. Only comes into effect when
remove_multicollinearity is set to True.
remove_perfect_collinearity: bool, default = False
When set to True, perfect collinearity (features with correlation = 1) is removed
from the dataset. When two features are 100% correlated, one of them is randomly
dropped from the dataset.
create_clusters: bool, default = False
When set to True, an additional feature is created where each instance is assigned
to a cluster. The number of clusters is determined using a combination of
Calinski-Harabasz and Silhouette criterion.
cluster_iter: int, default = 20
Number of iterations used to create a cluster. Each iteration represents cluster
size. Only comes into effect when create_clusters param is set to True.
polynomial_features: bool, default = False
When set to True, new features are created based on all polynomial combinations
that exist within the numeric features in a dataset to the degree defined in
polynomial_degree param.
polynomial_degree: int, default = 2
Degree of polynomial features. For example, if an input sample is two dimensional
and of the form [a, b], the polynomial features with degree = 2 are:
[1, a, b, a^2, ab, b^2].
trigonometry_features: bool, default = False
When set to True, new features are created based on all trigonometric combinations
that exist within the numeric features in a dataset to the degree defined in the
polynomial_degree param.
polynomial_threshold: float, default = 0.1
This is used to compress a sparse matrix of polynomial and trigonometric features.
Polynomial and trigonometric features whose feature importance based on the
combination of Random Forest, AdaBoost and Linear correlation falls within the
percentile of the defined threshold are kept in the dataset. Remaining features
are dropped before further processing.
group_features: list or list of list, default = None
When a dataset contains features that have related characteristics, the group_features
param can be used for statistical feature extraction. For example, if a dataset has
numeric features that are related with each other (i.e 'Col1', 'Col2', 'Col3'), a list
containing the column names can be passed under group_features to extract statistical
information such as the mean, median, mode and standard deviation.
group_names: list, default = None
When group_features is passed, a name of the group can be passed into the group_names
param as a list containing strings. The length of a group_names list must be equal to the
length of group_features. When the length doesn't match or the name is not passed, new
features are sequentially named such as group_1, group_2 etc.
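An illustrative call (the column and group names below are placeholders taken from the
description above, not actual boston columns):
>>> experiment_name = setup(data = boston, target = 'medv', group_features = ['Col1', 'Col2', 'Col3'], group_names = ['group_A'])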
feature_selection: bool, default = False
When set to True, a subset of features are selected using a combination of various
permutation importance techniques including Random Forest, Adaboost and Linear
correlation with the target variable. The size of the subset is dependent on the
feature_selection_threshold param. Generally, this is used to constrain the feature space
in order to improve efficiency in modeling. When polynomial_features and
feature_interaction are used, it is highly recommended to define the
feature_selection_threshold param with a lower value. The feature selection algorithm
is 'classic' by default but can be set to 'boruta', which will lead PyCaret to
use the Boruta selection algorithm.
feature_selection_threshold: float, default = 0.8
Threshold used for feature selection (including newly created polynomial features).
A higher value will result in a higher feature space. It is recommended to do multiple
trials with different values of feature_selection_threshold specially in cases where
polynomial_features and feature_interaction are used. Setting a very low value may be
efficient but could result in under-fitting.
feature_selection_method: str, default = 'classic'
Can be either 'classic' or 'boruta'. Selects the algorithm responsible for
choosing a subset of features. For the 'classic' selection method, PyCaret will use various
permutation importance techniques. For the 'boruta' algorithm, PyCaret will create
an instance of boosted trees model, which will iterate with permutation over all
features and choose the best ones based on the distributions of feature importance.
More in: https://pdfs.semanticscholar.org/85a8/b1d9c52f9f795fda7e12376e751526953f38.pdf%3E
feature_interaction: bool, default = False
When set to True, it will create new features by interacting (a * b) for all numeric
variables in the dataset including polynomial and trigonometric features (if created).
This feature is not scalable and may not work as expected on datasets with large
feature space.
feature_ratio: bool, default = False
When set to True, it will create new features by calculating the ratios (a / b) of all
numeric variables in the dataset. This feature is not scalable and may not work as
expected on datasets with large feature space.
interaction_threshold: float, default = 0.01
Similar to polynomial_threshold, it is used to compress a sparse matrix of newly
created features through interaction. Features whose importance based on the
combination of Random Forest, AdaBoost and Linear correlation falls within the
percentile of the defined threshold are kept in the dataset. Remaining features
are dropped before further processing.
transform_target: bool, default = False
When set to True, target variable is transformed using the method defined in
transform_target_method param. Target transformation is applied separately from
feature transformations.
transform_target_method: string, default = 'box-cox'
'Box-cox' and 'yeo-johnson' methods are supported. Box-Cox requires input data to
be strictly positive, while Yeo-Johnson supports both positive and negative data.
When transform_target_method is 'box-cox' and target variable contains negative
values, method is internally forced to 'yeo-johnson' to avoid exceptions.
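An illustrative call transforming the target with Yeo-Johnson (reusing the 'boston'
frame from the example above):
>>> experiment_name = setup(data = boston, target = 'medv', transform_target = True, transform_target_method = 'yeo-johnson')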
data_split_shuffle: bool, default = True
If set to False, prevents shuffling of rows when splitting data.
folds_shuffle: bool, default = False
If set to False, prevents shuffling of rows when using cross validation.
n_jobs: int, default = -1
The number of jobs to run in parallel (for functions that support parallel
processing). -1 means using all processors. To run all functions on a single
processor, set n_jobs to None.
use_gpu: bool, default = False
If set to True, algorithms that support GPU are trained using the GPU.
html: bool, default = True
If set to False, prevents runtime display of monitor. This must be set to False
when using an environment that doesn't support HTML.
session_id: int, default = None
If None, a random seed is generated and returned in the Information grid. The
unique number is then distributed as a seed in all functions used during the
experiment. This can be used for later reproducibility of the entire experiment.
log_experiment: bool, default = False
When set to True, all metrics and parameters are logged on MLFlow server.
experiment_name: str, default = None
Name of experiment for logging. When set to None, 'reg' is by default used as
alias for the experiment name.
log_plots: bool, default = False
When set to True, specific plots are logged in MLflow as a png file. By default,
it is set to False.
log_profile: bool, default = False
When set to True, data profile is also logged on MLflow as a html file. By default,
it is set to False.
log_data: bool, default = False
When set to True, train and test dataset are logged as csv.
silent: bool, default = False
When set to True, confirmation of data types is not required. All preprocessing will
be performed assuming automatically inferred data types. Not recommended for direct use
except for established pipelines.
verbose: bool, default = True
Information grid is not printed when verbose is set to False.
profile: bool, default = False
If set to true, a data profile for Exploratory Data Analysis will be displayed
in an interactive HTML report.
Returns
-------
info_grid
Information grid is printed.
environment
This function returns various outputs that are stored in variable
as tuple. They are used by other functions in pycaret.
"""
#exception checking
import sys
from pycaret.utils import __version__
ver = __version__()
import logging
# create logger
global logger
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("PyCaret Regression Module")
logger.info('version ' + str(ver))
logger.info("Initializing setup()")
#generate USI for mlflow tracking
import secrets
global USI
USI = secrets.token_hex(nbytes=2)
logger.info('USI: ' + str(USI))
logger.info("""setup(data={}, target={}, train_size={}, sampling={}, sample_estimator={}, categorical_features={}, categorical_imputation={}, ordinal_features={},
high_cardinality_features={}, high_cardinality_method={}, numeric_features={}, numeric_imputation={}, date_features={}, ignore_features={}, normalize={},
normalize_method={}, transformation={}, transformation_method={}, handle_unknown_categorical={}, unknown_categorical_method={}, pca={}, pca_method={},
pca_components={}, ignore_low_variance={}, combine_rare_levels={}, rare_level_threshold={}, bin_numeric_features={}, remove_outliers={}, outliers_threshold={},
remove_multicollinearity={}, multicollinearity_threshold={}, remove_perfect_collinearity={}, create_clusters={}, cluster_iter={},
polynomial_features={}, polynomial_degree={}, trigonometry_features={}, polynomial_threshold={}, group_features={},
group_names={}, feature_selection={}, feature_selection_threshold={}, feature_interaction={}, feature_ratio={}, interaction_threshold={}, transform_target={},
transform_target_method={}, data_split_shuffle={}, folds_shuffle={}, n_jobs={}, html={}, session_id={}, log_experiment={},
experiment_name={}, log_plots={}, log_profile={}, log_data={}, silent={}, verbose={}, profile={})""".format(\
str(data.shape), str(target), str(train_size), str(sampling), str(sample_estimator), str(categorical_features), str(categorical_imputation), str(ordinal_features),\
str(high_cardinality_features), str(high_cardinality_method), str(numeric_features), str(numeric_imputation), str(date_features), str(ignore_features),\
str(normalize), str(normalize_method), str(transformation), str(transformation_method), str(handle_unknown_categorical), str(unknown_categorical_method), str(pca),\
str(pca_method), str(pca_components), str(ignore_low_variance), str(combine_rare_levels), str(rare_level_threshold), str(bin_numeric_features), str(remove_outliers),\
str(outliers_threshold), str(remove_multicollinearity), str(multicollinearity_threshold), str(remove_perfect_collinearity), str(create_clusters), str(cluster_iter),\
str(polynomial_features), str(polynomial_degree), str(trigonometry_features), str(polynomial_threshold), str(group_features), str(group_names),\
str(feature_selection), str(feature_selection_threshold), str(feature_interaction), str(feature_ratio), str(interaction_threshold), str(transform_target),\
str(transform_target_method), str(data_split_shuffle), str(folds_shuffle), str(n_jobs), str(html), str(session_id),\
str(log_experiment), str(experiment_name), str(log_plots), str(log_profile), str(log_data), str(silent), str(verbose), str(profile)))
#logging environment and libraries
logger.info("Checking environment")
from platform import python_version, platform, python_build, machine
try:
logger.info("python_version: " + str(python_version()))
except:
logger.warning("cannot find platform.python_version")
try:
logger.info("python_build: " + str(python_build()))
except:
logger.warning("cannot find platform.python_build")
try:
logger.info("machine: " + str(machine()))
except:
logger.warning("cannot find platform.machine")
try:
logger.info("platform: " + str(platform()))
except:
logger.warning("cannot find platform.platform")
try:
import psutil
logger.info("Memory: " + str(psutil.virtual_memory()))
logger.info("Physical Core: " + str(psutil.cpu_count(logical=False)))
logger.info("Logical Core: " + str(psutil.cpu_count(logical=True)))
except:
logger.warning("cannot find psutil installation. memory not traceable. Install psutil using pip to enable memory logging. ")
logger.info("Checking libraries")
try:
from pandas import __version__
logger.info("pd==" + str(__version__))
except:
logger.warning("pandas not found")
try:
from numpy import __version__
logger.info("numpy==" + str(__version__))
except:
logger.warning("numpy not found")
try:
from sklearn import __version__
logger.info("sklearn==" + str(__version__))
except:
logger.warning("sklearn not found")
try:
from xgboost import __version__
logger.info("xgboost==" + str(__version__))
except:
logger.warning("xgboost not found")
try:
from lightgbm import __version__
logger.info("lightgbm==" + str(__version__))
except:
logger.warning("lightgbm not found")
try:
from catboost import __version__
logger.info("catboost==" + str(__version__))
except:
logger.warning("catboost not found")
try:
from mlflow.version import VERSION
import warnings
warnings.filterwarnings('ignore')
logger.info("mlflow==" + str(VERSION))
except:
logger.warning("mlflow not found")
#run_time
import datetime, time
runtime_start = time.time()
logger.info("Checking Exceptions")
#checking data type
if hasattr(data,'shape') is False:
sys.exit('(Type Error): data passed must be of type pandas.DataFrame')
#checking train size parameter
if type(train_size) is not float:
sys.exit('(Type Error): train_size parameter only accepts float value.')
#checking sampling parameter
if type(sampling) is not bool:
sys.exit('(Type Error): sampling parameter only accepts True or False.')
#checking target parameter
if target not in data.columns:
sys.exit('(Value Error): Target parameter doesnt exist in the data provided.')
#checking session_id
if session_id is not None:
if type(session_id) is not int:
sys.exit('(Type Error): session_id parameter must be an integer.')
#checking profile parameter
if type(profile) is not bool:
sys.exit('(Type Error): profile parameter only accepts True or False.')
#checking normalize parameter
if type(normalize) is not bool:
sys.exit('(Type Error): normalize parameter only accepts True or False.')
#checking transformation parameter
if type(transformation) is not bool:
sys.exit('(Type Error): transformation parameter only accepts True or False.')
#checking categorical imputation
allowed_categorical_imputation = ['constant', 'mode']
if categorical_imputation not in allowed_categorical_imputation:
sys.exit("(Value Error): categorical_imputation param only accepts 'constant' or 'mode' ")
#ordinal_features
if ordinal_features is not None:
if type(ordinal_features) is not dict:
sys.exit("(Type Error): ordinal_features must be of type dictionary with column name as key and ordered values as list. ")
#ordinal features check
if ordinal_features is not None:
data_cols = data.columns
data_cols = data_cols.drop(target)
ord_keys = ordinal_features.keys()
for i in ord_keys:
if i not in data_cols:
sys.exit("(Value Error) Column name passed as a key in ordinal_features param doesnt exist. ")
for k in ord_keys:
if data[k].nunique() != len(ordinal_features.get(k)):
sys.exit("(Value Error) Levels passed in ordinal_features param doesnt match with levels in data. ")
for i in ord_keys:
value_in_keys = ordinal_features.get(i)
value_in_data = list(data[i].unique().astype(str))
for j in value_in_keys:
if j not in value_in_data:
text = "Column name '" + str(i) + "' doesnt contain any level named '" + str(j) + "'."
sys.exit(text)
#high_cardinality_features
if high_cardinality_features is not None:
if type(high_cardinality_features) is not list:
sys.exit("(Type Error): high_cardinality_features param only accepts name of columns as a list. ")
if high_cardinality_features is not None:
data_cols = data.columns
data_cols = data_cols.drop(target)
for i in high_cardinality_features:
if i not in data_cols:
sys.exit("(Value Error): Column type forced is either target column or doesn't exist in the dataset.")
#checking numeric imputation
allowed_numeric_imputation = ['mean', 'median']
if numeric_imputation not in allowed_numeric_imputation:
sys.exit("(Value Error): numeric_imputation param only accepts 'mean' or 'median' ")
#checking normalize method
allowed_normalize_method = ['zscore', 'minmax', 'maxabs', 'robust']
if normalize_method not in allowed_normalize_method:
sys.exit("(Value Error): normalize_method param only accepts 'zscore', 'minxmax', 'maxabs' or 'robust'. ")
#checking transformation method
allowed_transformation_method = ['yeo-johnson', 'quantile']
if transformation_method not in allowed_transformation_method:
sys.exit("(Value Error): transformation_method param only accepts 'yeo-johnson' or 'quantile' ")
#handle unknown categorical
if type(handle_unknown_categorical) is not bool:
sys.exit('(Type Error): handle_unknown_categorical parameter only accepts True or False.')
#unknown categorical method
unknown_categorical_method_available = ['least_frequent', 'most_frequent']
if unknown_categorical_method not in unknown_categorical_method_available:
sys.exit("(Type Error): unknown_categorical_method only accepts 'least_frequent' or 'most_frequent'.")
#check pca
if type(pca) is not bool:
sys.exit('(Type Error): PCA parameter only accepts True or False.')
#pca method check
allowed_pca_methods = ['linear', 'kernel', 'incremental',]
if pca_method not in allowed_pca_methods:
sys.exit("(Value Error): pca method param only accepts 'linear', 'kernel', or 'incremental'. ")
#pca components check
if pca is True:
if pca_method != 'linear':
if pca_components is not None:
if(type(pca_components)) is not int:
sys.exit("(Type Error): pca_components parameter must be integer when pca_method is not 'linear'. ")
#pca components check 2
if pca is True:
if pca_method != 'linear':
if pca_components is not None:
if pca_components > len(data.columns)-1:
sys.exit("(Type Error): pca_components parameter cannot be greater than original features space.")
#pca components check 3
if pca is True:
if pca_method == 'linear':
if pca_components is not None:
if type(pca_components) is not float:
if pca_components > len(data.columns)-1:
sys.exit("(Type Error): pca_components parameter cannot be greater than original features space or float between 0 - 1.")
#check ignore_low_variance
if type(ignore_low_variance) is not bool:
sys.exit('(Type Error): ignore_low_variance parameter only accepts True or False.')
#check combine_rare_levels
if type(combine_rare_levels) is not bool:
sys.exit('(Type Error): combine_rare_levels parameter only accepts True or False.')
#check rare_level_threshold
if type(rare_level_threshold) is not float:
sys.exit('(Type Error): rare_level_threshold must be a float between 0 and 1. ')
#bin numeric features
if bin_numeric_features is not None:
all_cols = list(data.columns)
all_cols.remove(target)
for i in bin_numeric_features:
if i not in all_cols:
sys.exit("(Value Error): Column type forced is either target column or doesn't exist in the dataset.")
#check transform_target
if type(transform_target) is not bool:
sys.exit('(Type Error): transform_target parameter only accepts True or False.')
#transform_target_method
allowed_transform_target_method = ['box-cox', 'yeo-johnson']
if transform_target_method not in allowed_transform_target_method:
sys.exit("(Value Error): transform_target_method param only accepts 'box-cox' or 'yeo-johnson'. ")
#remove_outliers
if type(remove_outliers) is not bool:
sys.exit('(Type Error): remove_outliers parameter only accepts True or False.')
#outliers_threshold
if type(outliers_threshold) is not float:
sys.exit('(Type Error): outliers_threshold must be a float between 0 and 1. ')
#remove_multicollinearity
if type(remove_multicollinearity) is not bool:
sys.exit('(Type Error): remove_multicollinearity parameter only accepts True or False.')
#multicollinearity_threshold
if type(multicollinearity_threshold) is not float:
sys.exit('(Type Error): multicollinearity_threshold must be a float between 0 and 1. ')
#create_clusters
if type(create_clusters) is not bool:
sys.exit('(Type Error): create_clusters parameter only accepts True or False.')
#cluster_iter
if type(cluster_iter) is not int:
sys.exit('(Type Error): cluster_iter must be an integer greater than 1. ')
#polynomial_features
if type(polynomial_features) is not bool:
sys.exit('(Type Error): polynomial_features only accepts True or False. ')
#polynomial_degree
if type(polynomial_degree) is not int:
sys.exit('(Type Error): polynomial_degree must be an integer. ')
#trigonometry_features
if type(trigonometry_features) is not bool:
sys.exit('(Type Error): trigonometry_features only accepts True or False. ')
#polynomial threshold
if type(polynomial_threshold) is not float:
sys.exit('(Type Error): polynomial_threshold must be a float between 0 and 1. ')
#group features
if group_features is not None:
if type(group_features) is not list:
sys.exit('(Type Error): group_features must be of type list. ')
if group_names is not None:
if type(group_names) is not list:
sys.exit('(Type Error): group_names must be of type list. ')
#cannot drop target
if ignore_features is not None:
if target in ignore_features:
sys.exit("(Value Error): cannot drop target column. ")
#feature_selection
if type(feature_selection) is not bool:
sys.exit('(Type Error): feature_selection only accepts True or False. ')
#feature_selection_threshold
if type(feature_selection_threshold) is not float:
sys.exit('(Type Error): feature_selection_threshold must be a float between 0 and 1. ')
#feature_selection_method
if feature_selection_method not in ['boruta', 'classic']:
sys.exit("(Type Error): feature_selection_method must be string 'boruta', 'classic'")
#feature_interaction
if type(feature_interaction) is not bool:
sys.exit('(Type Error): feature_interaction only accepts True or False. ')
#feature_ratio
if type(feature_ratio) is not bool:
sys.exit('(Type Error): feature_ratio only accepts True or False. ')
#interaction_threshold
if type(interaction_threshold) is not float:
sys.exit('(Type Error): interaction_threshold must be a float between 0 and 1. ')
#forced type check
all_cols = list(data.columns)
all_cols.remove(target)
#categorical
if categorical_features is not None:
for i in categorical_features:
if i not in all_cols:
sys.exit("(Value Error): Column type forced is either target column or doesn't exist in the dataset.")
#numeric
if numeric_features is not None:
for i in numeric_features:
if i not in all_cols:
sys.exit("(Value Error): Column type forced is either target column or doesn't exist in the dataset.")
#date features
if date_features is not None:
for i in date_features:
if i not in all_cols:
sys.exit("(Value Error): Column type forced is either target column or doesn't exist in the dataset.")
#drop features
if ignore_features is not None:
for i in ignore_features:
if i not in all_cols:
sys.exit("(Value Error): Feature ignored is either target column or doesn't exist in the dataset.")
#silent
if type(silent) is not bool:
sys.exit("(Type Error): silent parameter only accepts True or False. ")
#remove_perfect_collinearity
if type(remove_perfect_collinearity) is not bool:
sys.exit('(Type Error): remove_perfect_collinearity parameter only accepts True or False.')
#html
if type(html) is not bool:
sys.exit('(Type Error): html parameter only accepts True or False.')
#folds_shuffle
if type(folds_shuffle) is not bool:
sys.exit('(Type Error): folds_shuffle parameter only accepts True or False.')
#data_split_shuffle
if type(data_split_shuffle) is not bool:
sys.exit('(Type Error): data_split_shuffle parameter only accepts True or False.')
#log_experiment
if type(log_experiment) is not bool:
sys.exit('(Type Error): log_experiment parameter only accepts True or False.')
#log_plots
if type(log_plots) is not bool:
sys.exit('(Type Error): log_plots parameter only accepts True or False.')
#log_data
if type(log_data) is not bool:
sys.exit('(Type Error): log_data parameter only accepts True or False.')
#log_profile
if type(log_profile) is not bool:
sys.exit('(Type Error): log_profile parameter only accepts True or False.')
logger.info("Preloading libraries")
#pre-load libraries
import pandas as pd
import ipywidgets as ipw
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
import os
#pandas option
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
#global html_param
global html_param
#create html_param
html_param = html
#silent parameter to also set sampling to False
if silent:
sampling = False
logger.info("Preparing display monitor")
#progress bar
if sampling:
max = 10 + 3
else:
max = 3
progress = ipw.IntProgress(value=0, min=0, max=max, step=1 , description='Processing: ')
if verbose:
if html_param:
display(progress)
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
monitor = pd.DataFrame( [ ['Initiated' , '. . . . . . . . . . . . . . . . . .', timestampStr ],
['Status' , '. . . . . . . . . . . . . . . . . .' , 'Loading Dependencies' ],
['ETC' , '. . . . . . . . . . . . . . . . . .', 'Calculating ETC'] ],
columns=['', ' ', ' ']).set_index('')
if verbose:
if html_param:
display(monitor, display_id = 'monitor')
logger.info("Importing libraries")
#general dependencies
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import metrics
import random
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
#setting sklearn config to print all parameters including default
import sklearn
sklearn.set_config(print_changed_only=False)
#define highlight function for function grid to display
def highlight_max(s):
is_max = s == True
return ['background-color: yellow' if v else '' for v in is_max]
#cufflinks
import cufflinks as cf
cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
#ignore warnings
import warnings
warnings.filterwarnings('ignore')
logger.info("Declaring global variables")
#declaring global variables to be accessed by other functions
global X, y, X_train, X_test, y_train, y_test, seed, prep_pipe, target_inverse_transformer, experiment__,\
preprocess, folds_shuffle_param, n_jobs_param, create_model_container, master_model_container,\
display_container, exp_name_log, logging_param, log_plots_param, data_before_preprocess, target_param,\
gpu_param
logger.info("Copying data for preprocessing")
#copy original data for pandas profiler
data_before_preprocess = data.copy()
#generate seed to be used globally
if session_id is None:
seed = random.randint(150,9000)
else:
seed = session_id
"""
preprocessing starts here
"""
monitor.iloc[1,1:] = 'Preparing Data for Modeling'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
#define parameters for preprocessor
logger.info("Declaring preprocessing parameters")
#categorical features
if categorical_features is None:
cat_features_pass = []
else:
cat_features_pass = categorical_features
#numeric features
if numeric_features is None:
numeric_features_pass = []
else:
numeric_features_pass = numeric_features
#drop features
if ignore_features is None:
ignore_features_pass = []
else:
ignore_features_pass = ignore_features
#date features
if date_features is None:
date_features_pass = []
else:
date_features_pass = date_features
#categorical imputation strategy
if categorical_imputation == 'constant':
categorical_imputation_pass = 'not_available'
elif categorical_imputation == 'mode':
categorical_imputation_pass = 'most frequent'
#transformation method strategy
if transformation_method == 'yeo-johnson':
trans_method_pass = 'yj'
elif transformation_method == 'quantile':
trans_method_pass = 'quantile'
#pass method
if pca_method == 'linear':
pca_method_pass = 'pca_liner'
elif pca_method == 'kernel':
pca_method_pass = 'pca_kernal'
elif pca_method == 'incremental':
pca_method_pass = 'incremental'
elif pca_method == 'pls':
pca_method_pass = 'pls'
#pca components
if pca is True:
if pca_components is None:
if pca_method == 'linear':
pca_components_pass = 0.99
else:
pca_components_pass = int((len(data.columns)-1)*0.5)
else:
pca_components_pass = pca_components
else:
pca_components_pass = 0.99
if bin_numeric_features is None:
apply_binning_pass = False
features_to_bin_pass = []
else:
apply_binning_pass = True
features_to_bin_pass = bin_numeric_features
#trigonometry
if trigonometry_features is False:
trigonometry_features_pass = []
else:
trigonometry_features_pass = ['sin', 'cos', 'tan']
#group features
#=============#
#apply grouping
if group_features is not None:
apply_grouping_pass = True
else:
apply_grouping_pass = False
#group features listing
if apply_grouping_pass is True:
if type(group_features[0]) is str:
group_features_pass = []
group_features_pass.append(group_features)
else:
group_features_pass = group_features
else:
group_features_pass = [[]]
#group names
if apply_grouping_pass is True:
if (group_names is None) or (len(group_names) != len(group_features_pass)):
group_names_pass = list(np.arange(len(group_features_pass)))
group_names_pass = ['group_' + str(i) for i in group_names_pass]
else:
group_names_pass = group_names
else:
group_names_pass = []
#feature interactions
if feature_interaction or feature_ratio:
apply_feature_interactions_pass = True
else:
apply_feature_interactions_pass = False
interactions_to_apply_pass = []
if feature_interaction:
interactions_to_apply_pass.append('multiply')
if feature_ratio:
interactions_to_apply_pass.append('divide')
#unknown categorical
if unknown_categorical_method == 'least_frequent':
unknown_categorical_method_pass = 'least frequent'
elif unknown_categorical_method == 'most_frequent':
unknown_categorical_method_pass = 'most frequent'
#ordinal_features
if ordinal_features is not None:
apply_ordinal_encoding_pass = True
else:
apply_ordinal_encoding_pass = False
if apply_ordinal_encoding_pass is True:
ordinal_columns_and_categories_pass = ordinal_features
else:
ordinal_columns_and_categories_pass = {}
if high_cardinality_features is not None:
apply_cardinality_reduction_pass = True
else:
apply_cardinality_reduction_pass = False
if high_cardinality_method == 'frequency':
cardinal_method_pass = 'count'
elif high_cardinality_method == 'clustering':
cardinal_method_pass = 'cluster'
if apply_cardinality_reduction_pass:
cardinal_features_pass = high_cardinality_features
else:
cardinal_features_pass = []
if silent:
display_dtypes_pass = False
else:
display_dtypes_pass = True
#transform target method
if transform_target_method == 'box-cox':
transform_target_method_pass = 'bc'
elif transform_target_method == 'yeo-johnson':
transform_target_method_pass = 'yj'
logger.info("Importing preprocessing module")
#import library
import pycaret.preprocess as preprocess
logger.info("Creating preprocessing pipeline")
data = preprocess.Preprocess_Path_One(train_data = data,
target_variable = target,
categorical_features = cat_features_pass,
apply_ordinal_encoding = apply_ordinal_encoding_pass,
ordinal_columns_and_categories = ordinal_columns_and_categories_pass,
apply_cardinality_reduction = apply_cardinality_reduction_pass,
cardinal_method = cardinal_method_pass,
cardinal_features = cardinal_features_pass,
numerical_features = numeric_features_pass,
time_features = date_features_pass,
features_todrop = ignore_features_pass,
numeric_imputation_strategy = numeric_imputation,
categorical_imputation_strategy = categorical_imputation_pass,
scale_data = normalize,
scaling_method = normalize_method,
Power_transform_data = transformation,
Power_transform_method = trans_method_pass,
apply_untrained_levels_treatment= handle_unknown_categorical,
untrained_levels_treatment_method = unknown_categorical_method_pass,
apply_pca = pca,
pca_method = pca_method_pass,
pca_variance_retained_or_number_of_components = pca_components_pass,
apply_zero_nearZero_variance = ignore_low_variance,
club_rare_levels = combine_rare_levels,
rara_level_threshold_percentage = rare_level_threshold,
apply_binning = apply_binning_pass,
features_to_binn = features_to_bin_pass,
remove_outliers = remove_outliers,
outlier_contamination_percentage = outliers_threshold,
outlier_methods = ['pca'], #pca hardcoded
remove_multicollinearity = remove_multicollinearity,
maximum_correlation_between_features = multicollinearity_threshold,
remove_perfect_collinearity = remove_perfect_collinearity,
cluster_entire_data = create_clusters,
range_of_clusters_to_try = cluster_iter,
apply_polynomial_trigonometry_features = polynomial_features,
max_polynomial = polynomial_degree,
trigonometry_calculations = trigonometry_features_pass,
top_poly_trig_features_to_select_percentage = polynomial_threshold,
apply_grouping = apply_grouping_pass,
features_to_group_ListofList = group_features_pass,
group_name = group_names_pass,
apply_feature_selection = feature_selection,
feature_selection_top_features_percentage = feature_selection_threshold,
feature_selection_method = feature_selection_method,
apply_feature_interactions = apply_feature_interactions_pass,
feature_interactions_to_apply = interactions_to_apply_pass,
feature_interactions_top_features_to_select_percentage=interaction_threshold,
display_types = display_dtypes_pass,
target_transformation = transform_target,
target_transformation_method = transform_target_method_pass,
random_state = seed)
progress.value += 1
logger.info("Preprocessing pipeline created successfully")
if hasattr(preprocess.dtypes, 'replacement'):
label_encoded = preprocess.dtypes.replacement
label_encoded = str(label_encoded).replace("'", '')
label_encoded = str(label_encoded).replace("{", '')
label_encoded = str(label_encoded).replace("}", '')
else:
label_encoded = 'None'
try:
res_type = ['quit','Quit','exit','EXIT','q','Q','e','E','QUIT','Exit']
res = preprocess.dtypes.response
if res in res_type:
sys.exit("(Process Exit): setup has been interupted with user command 'quit'. setup must rerun." )
except:
pass
#save prep pipe
prep_pipe = preprocess.pipe
#save target inverse transformer
try:
target_inverse_transformer = preprocess.pt_target.p_transform_target
except:
target_inverse_transformer = None
logger.info("No inverse transformer found")
logger.info("Creating grid variables")
#generate values for grid show
missing_values = data_before_preprocess.isna().sum().sum()
if missing_values > 0:
missing_flag = True
else:
missing_flag = False
if normalize is True:
normalize_grid = normalize_method
else:
normalize_grid = 'None'
if transformation is True:
transformation_grid = transformation_method
else:
transformation_grid = 'None'
if pca is True:
pca_method_grid = pca_method
else:
pca_method_grid = 'None'
if pca is True:
pca_components_grid = pca_components_pass
else:
pca_components_grid = 'None'
if combine_rare_levels:
rare_level_threshold_grid = rare_level_threshold
else:
rare_level_threshold_grid = 'None'
if bin_numeric_features is None:
numeric_bin_grid = False
else:
numeric_bin_grid = True
if remove_outliers is False:
outliers_threshold_grid = None
else:
outliers_threshold_grid = outliers_threshold
if remove_multicollinearity is False:
multicollinearity_threshold_grid = None
else:
multicollinearity_threshold_grid = multicollinearity_threshold
if create_clusters is False:
cluster_iter_grid = None
else:
cluster_iter_grid = cluster_iter
if polynomial_features:
polynomial_degree_grid = polynomial_degree
else:
polynomial_degree_grid = None
if polynomial_features or trigonometry_features:
polynomial_threshold_grid = polynomial_threshold
else:
polynomial_threshold_grid = None
if feature_selection:
feature_selection_threshold_grid = feature_selection_threshold
else:
feature_selection_threshold_grid = None
if feature_interaction or feature_ratio:
interaction_threshold_grid = interaction_threshold
else:
interaction_threshold_grid = None
if ordinal_features is not None:
ordinal_features_grid = True
else:
ordinal_features_grid = False
if handle_unknown_categorical:
unknown_categorical_method_grid = unknown_categorical_method
else:
unknown_categorical_method_grid = None
if group_features is not None:
group_features_grid = True
else:
group_features_grid = False
if high_cardinality_features is not None:
high_cardinality_features_grid = True
else:
high_cardinality_features_grid = False
if high_cardinality_features_grid:
high_cardinality_method_grid = high_cardinality_method
else:
high_cardinality_method_grid = None
learned_types = preprocess.dtypes.learent_dtypes
learned_types.drop(target, inplace=True)
float_type = 0
cat_type = 0
for i in preprocess.dtypes.learent_dtypes:
if 'float' in str(i):
float_type += 1
elif 'object' in str(i):
cat_type += 1
elif 'int' in str(i):
float_type += 1
#target transformation method
if transform_target is False:
transform_target_method_grid = None
else:
transform_target_method_grid = preprocess.pt_target.function_to_apply
"""
preprocessing ends here
"""
#reset pandas option
pd.reset_option("display.max_rows")
pd.reset_option("display.max_columns")
logger.info("Creating global containers")
#create an empty list for pickling later.
experiment__ = []
#create folds_shuffle_param
folds_shuffle_param = folds_shuffle
#create n_jobs_param
n_jobs_param = n_jobs
#create create_model_container
create_model_container = []
#create master_model_container
master_model_container = []
#create display container
display_container = []
#create logging parameter
logging_param = log_experiment
#create exp_name_log param in case logging is False
exp_name_log = 'no_logging'
#create an empty log_plots_param
if log_plots:
log_plots_param = True
else:
log_plots_param = False
# create target param
target_param = target
# create gpu param
gpu_param = use_gpu
#sample estimator
if sample_estimator is None:
model = LinearRegression(n_jobs=n_jobs_param)
else:
model = sample_estimator
model_name = str(model).split("(")[0]
if 'CatBoostRegressor' in model_name:
model_name = 'CatBoostRegressor'
#creating variables to be used later in the function
X = data.drop(target,axis=1)
y = data[target]
progress.value += 1
if sampling is True and data.shape[0] > 25000:
split_perc = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99]
split_perc_text = ['10%','20%','30%','40%','50%','60%', '70%', '80%', '90%', '100%']
split_perc_tt = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99]
split_perc_tt_total = []
split_percent = []
metric_results = []
metric_name = []
counter = 0
for i in split_perc:
progress.value += 1
t0 = time.time()
'''
MONITOR UPDATE STARTS
'''
perc_text = split_perc_text[counter]
monitor.iloc[1,1:] = 'Fitting Model on ' + perc_text + ' sample'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
'''
MONITOR UPDATE ENDS
'''
X_, X__, y_, y__ = train_test_split(X, y, test_size=1-i, random_state=seed, shuffle=data_split_shuffle)
X_train, X_test, y_train, y_test = train_test_split(X_, y_, test_size=1-train_size, random_state=seed, shuffle=data_split_shuffle)
model.fit(X_train,y_train)
pred_ = model.predict(X_test)
r2 = metrics.r2_score(y_test,pred_)
metric_results.append(r2)
metric_name.append('R2')
split_percent.append(i)
t1 = time.time()
'''
Time calculation begins
'''
tt = t1 - t0
total_tt = tt / i
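# Descriptive note (comment only): tt is the wall time for fitting on fraction i of
# the data, so total_tt linearly extrapolates to a full-data fit; each remaining
# fraction below is then costed as total_tt * remain and summed into the ETC monitor.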
split_perc_tt.pop(0)
for remain in split_perc_tt:
ss = total_tt * remain
split_perc_tt_total.append(ss)
ttt = sum(split_perc_tt_total) / 60
ttt = np.around(ttt, 2)
if ttt < 1:
ttt = str(np.around((ttt * 60), 2))
ETC = ttt + ' Seconds Remaining'
else:
ttt = str (ttt)
ETC = ttt + ' Minutes Remaining'
monitor.iloc[2,1:] = ETC
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
'''
Time calculation Ends
'''
split_perc_tt_total = []
counter += 1
model_results = pd.DataFrame({'Sample' : split_percent, 'Metric' : metric_results, 'Metric Name': metric_name})
fig = px.line(model_results, x='Sample', y='Metric', color='Metric Name', line_shape='linear', range_y = [0,1])
fig.update_layout(plot_bgcolor='rgb(245,245,245)')
title= str(model_name) + ' Metric and Sample %'
fig.update_layout(title={'text': title, 'y':0.95,'x':0.45,'xanchor': 'center','yanchor': 'top'})
fig.show()
monitor.iloc[1,1:] = 'Waiting for input'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
print('Please Enter the sample % of data you would like to use for modeling. Example: Enter 0.3 for 30%.')
print('Press Enter if you would like to use 100% of the data.')
print(' ')
sample_size = input("Sample Size: ")
if sample_size == '' or sample_size == '1':
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1-train_size, random_state=seed, shuffle=data_split_shuffle)
else:
sample_n = float(sample_size)
X_selected, X_discard, y_selected, y_discard = train_test_split(X, y, test_size=1-sample_n,
random_state=seed, shuffle=data_split_shuffle)
X_train, X_test, y_train, y_test = train_test_split(X_selected, y_selected, test_size=1-train_size,
random_state=seed, shuffle=data_split_shuffle)
else:
monitor.iloc[1,1:] = 'Splitting Data'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1-train_size, random_state=seed, shuffle=data_split_shuffle)
progress.value += 1
'''
Final display Starts
'''
clear_output()
if verbose:
print(' ')
if profile:
print('Setup Successfully Completed. Loading Profile Now... Please Wait!')
else:
if verbose:
print('Setup Successfully Completed.')
functions = pd.DataFrame ( [ ['session_id', seed ],
['Transform Target ', transform_target],
['Transform Target Method', transform_target_method_grid],
['Original Data', data_before_preprocess.shape ],
['Missing Values ', missing_flag],
['Numeric Features ', str(float_type) ],
['Categorical Features ', str(cat_type) ],
['Ordinal Features ', ordinal_features_grid],
['High Cardinality Features ', high_cardinality_features_grid],
['High Cardinality Method ', high_cardinality_method_grid],
['Sampled Data', '(' + str(X_train.shape[0] + X_test.shape[0]) + ', ' + str(data_before_preprocess.shape[1]) + ')' ],
['Transformed Train Set', X_train.shape ],
['Transformed Test Set',X_test.shape ],
['Numeric Imputer ', numeric_imputation],
['Categorical Imputer ', categorical_imputation],
['Normalize ', normalize ],
['Normalize Method ', normalize_grid ],
['Transformation ', transformation ],
['Transformation Method ', transformation_grid ],
['PCA ', pca],
['PCA Method ', pca_method_grid],
['PCA Components ', pca_components_grid],
['Ignore Low Variance ', ignore_low_variance],
['Combine Rare Levels ', combine_rare_levels],
['Rare Level Threshold ', rare_level_threshold_grid],
['Numeric Binning ', numeric_bin_grid],
['Remove Outliers ', remove_outliers],
['Outliers Threshold ', outliers_threshold_grid],
['Remove Multicollinearity ', remove_multicollinearity],
['Multicollinearity Threshold ', multicollinearity_threshold_grid],
['Clustering ', create_clusters],
['Clustering Iteration ', cluster_iter_grid],
['Polynomial Features ', polynomial_features],
['Polynomial Degree ', polynomial_degree_grid],
['Trigonometry Features ', trigonometry_features],
['Polynomial Threshold ', polynomial_threshold_grid],
['Group Features ', group_features_grid],
['Feature Selection ', feature_selection],
['Feature Selection Threshold ', feature_selection_threshold_grid],
['Feature Interaction ', feature_interaction],
['Feature Ratio ', feature_ratio],
['Interaction Threshold ', interaction_threshold_grid],
], columns = ['Description', 'Value'] )
functions_ = functions.style.apply(highlight_max)
if verbose:
if html_param:
display(functions_)
else:
print(functions_.data)
if profile:
try:
import pandas_profiling
pf = pandas_profiling.ProfileReport(data_before_preprocess)
clear_output()
display(pf)
except:
print('Data Profiler Failed. No output to show, please continue with Modeling.')
'''
Final display Ends
'''
#log into experiment
experiment__.append(('Regression Setup Config', functions))
experiment__.append(('X_training Set', X_train))
experiment__.append(('y_training Set', y_train))
experiment__.append(('X_test Set', X_test))
experiment__.append(('y_test Set', y_test))
experiment__.append(('Transformation Pipeline', prep_pipe))
try:
experiment__.append(('Target Inverse Transformer', target_inverse_transformer))
except:
pass
#end runtime
runtime_end = time.time()
runtime = np.array(runtime_end - runtime_start).round(2)
if logging_param:
logger.info("Logging experiment in MLFlow")
import mlflow
from pathlib import Path
if experiment_name is None:
exp_name_ = 'reg-default-name'
else:
exp_name_ = experiment_name
URI = secrets.token_hex(nbytes=4)
exp_name_log = exp_name_
try:
mlflow.create_experiment(exp_name_log)
except:
pass
#mlflow logging
mlflow.set_experiment(exp_name_log)
run_name_ = 'Session Initialized ' + str(USI)
with mlflow.start_run(run_name=run_name_) as run:
# Get active run to log as tag
RunID = mlflow.active_run().info.run_id
k = functions.copy()
k.set_index('Description',drop=True,inplace=True)
kdict = k.to_dict()
params = kdict.get('Value')
mlflow.log_params(params)
#set tag of compare_models
mlflow.set_tag("Source", "setup")
import secrets
URI = secrets.token_hex(nbytes=4)
mlflow.set_tag("URI", URI)
mlflow.set_tag("USI", USI)
mlflow.set_tag("Run Time", runtime)
mlflow.set_tag("Run ID", RunID)
# Log the transformation pipeline
logger.info("SubProcess save_model() called ==================================")
save_model(prep_pipe, 'Transformation Pipeline', verbose=False)
logger.info("SubProcess save_model() end ==================================")
mlflow.log_artifact('Transformation Pipeline' + '.pkl')
os.remove('Transformation Pipeline.pkl')
# Log pandas profile
if log_profile:
import pandas_profiling
pf = pandas_profiling.ProfileReport(data_before_preprocess)
pf.to_file("Data Profile.html")
mlflow.log_artifact("Data Profile.html")
os.remove("Data Profile.html")
clear_output()
display(functions_)
# Log training and testing set
if log_data:
X_train.join(y_train).to_csv('Train.csv')
X_test.join(y_test).to_csv('Test.csv')
mlflow.log_artifact("Train.csv")
mlflow.log_artifact("Test.csv")
os.remove('Train.csv')
os.remove('Test.csv')
logger.info("create_model_container: " + str(len(create_model_container)))
logger.info("master_model_container: " + str(len(master_model_container)))
logger.info("display_container: " + str(len(display_container)))
logger.info("setup() succesfully completed......................................")
return X, y, X_train, X_test, y_train, y_test, seed, prep_pipe, target_inverse_transformer,\
experiment__, folds_shuffle_param, n_jobs_param, html_param, create_model_container,\
master_model_container, display_container, exp_name_log, logging_param, log_plots_param, USI,\
data_before_preprocess, target_param
def compare_models(exclude = None,
include = None, #added in pycaret==2.0.0
fold = 10,
round = 4,
sort = 'R2',
n_select = 1, #added in pycaret==2.0.0
budget_time = 0, #added in pycaret==2.1.0
turbo = True,
verbose = True): #added in pycaret==2.0.0
"""
This function trains all the models available in the model library and scores them
using Kfold Cross Validation. The output prints a score grid with MAE, MSE,
RMSE, R2, RMSLE and MAPE (averaged across folds), determined by the fold parameter.
This function returns the best model based on the metric defined in the sort parameter.
To select the top N models, use the n_select parameter, which is set to 1 by default.
Where n_select > 1, it will return a list of trained model objects.
When turbo is set to True, 'kr', 'ard' and 'mlp' are excluded due to their longer
training times. By default the turbo param is set to True.
Example
--------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> experiment_name = setup(data = boston, target = 'medv')
>>> best_model = compare_models()
This will return the averaged score grid of all models except 'kr', 'ard'
and 'mlp'. When turbo param is set to False, all models including 'kr',
'ard' and 'mlp' are used, but this may result in longer training times.
>>> best_model = compare_models(exclude = ['knn','gbr'], turbo = False)
This will return a comparison of all models except K Nearest Neighbour and
Gradient Boosting Regressor.
>>> best_model = compare_models(exclude = ['knn','gbr'] , turbo = True)
This will return a comparison of all models except K Nearest Neighbour,
Gradient Boosting Regressor, Kernel Ridge Regressor, Automatic Relevance
Determination and Multi Level Perceptron.
Parameters
----------
exclude: list of strings, default = None
In order to omit certain models from the comparison, model IDs can be passed as
a list of strings in the exclude param.
include: list of strings, default = None
In order to run only certain models for the comparison, the model ID's can be
passed as a list of strings in include param.
fold: integer, default = 10
Number of folds to be used in Kfold CV. Must be at least 2.
round: integer, default = 4
Number of decimal places the metrics in the score grid will be rounded to.
sort: string, default = 'R2'
The scoring measure specified is used for sorting the average score grid.
Available options are 'MAE', 'MSE', 'RMSE', 'R2', 'RMSLE' and 'MAPE'.
n_select: int, default = 1
Number of top_n models to return. Use a negative argument for bottom selection;
for example, n_select = -3 means the bottom 3 models.
budget_time: int or float, default = 0
If set above 0, will terminate execution of the function after budget_time minutes have
passed and return results up to that point.
turbo: Boolean, default = True
When turbo is set to True, it excludes estimators that have longer
training times.
verbose: Boolean, default = True
Score grid is not printed when verbose is set to False.
Returns
-------
score_grid
A table containing the scores of the model across the kfolds.
Scoring metrics used are MAE, MSE, RMSE, R2, RMSLE and MAPE
Mean and standard deviation of the scores across the folds is
also returned.
Warnings
--------
- compare_models(), though attractive, can be time consuming with large
datasets. By default turbo is set to True, which excludes models that
have longer training times. Setting the turbo parameter to False may result
in very high training times on datasets where the number of samples exceeds
10,000.
"""
'''
ERROR HANDLING STARTS HERE
'''
import logging
try:
hasattr(logger, 'name')
except:
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing compare_models()")
logger.info("""compare_models(exclude={}, include={}, fold={}, round={}, sort={}, n_select={}, turbo={}, verbose={})""".\
format(str(exclude), str(include), str(fold), str(round), str(sort), str(n_select), str(turbo), str(verbose)))
logger.info("Checking exceptions")
#exception checking
import sys
#checking error for exclude (string)
available_estimators = ['lr', 'lasso', 'ridge', 'en', 'lar', 'llar', 'omp', 'br', 'ard', 'par',
'ransac', 'tr', 'huber', 'kr', 'svm', 'knn', 'dt', 'rf', 'et', 'ada', 'gbr',
'mlp', 'xgboost', 'lightgbm', 'catboost']
if exclude != None:
for i in exclude:
if i not in available_estimators:
sys.exit('(Value Error): Estimator Not Available. Please see docstring for list of available estimators.')
if include != None:
for i in include:
if i not in available_estimators:
sys.exit('(Value Error): Estimator Not Available. Please see docstring for list of available estimators.')
#include and exclude together check
if include is not None:
if exclude is not None:
sys.exit('(Type Error): Cannot use exclude parameter when include is used to compare models.')
#checking fold parameter
if type(fold) is not int:
sys.exit('(Type Error): Fold parameter only accepts integer value.')
#checking round parameter
if type(round) is not int:
sys.exit('(Type Error): Round parameter only accepts integer value.')
#checking n_select parameter
if type(n_select) is not int:
sys.exit('(Type Error): n_select parameter only accepts integer value.')
#checking budget_time parameter
if type(budget_time) is not int and type(budget_time) is not float:
sys.exit('(Type Error): budget_time parameter only accepts integer or float values.')
#checking sort parameter
allowed_sort = ['MAE', 'MSE', 'RMSE', 'R2', 'RMSLE', 'MAPE']
if sort not in allowed_sort:
sys.exit('(Value Error): Sort method not supported. See docstring for list of available parameters.')
'''
ERROR HANDLING ENDS HERE
'''
logger.info("Preloading libraries")
#pre-load libraries
import pandas as pd
import time, datetime
import ipywidgets as ipw
from IPython.display import display, HTML, clear_output, update_display
pd.set_option('display.max_columns', 500)
logger.info("Preparing display monitor")
#progress bar
if exclude is None:
len_of_exclude = 0
else:
len_of_exclude = len(exclude)
if turbo:
len_mod = 22 - len_of_exclude
else:
len_mod = 25 - len_of_exclude
#n_select param
if type(n_select) is list:
n_select_num = len(n_select)
else:
n_select_num = abs(n_select)
if n_select_num > len_mod:
n_select_num = len_mod
if include is not None:
wl = len(include)
bl = len_of_exclude
len_mod = wl - bl
if include is not None:
opt = 10
else:
opt = 30
#display
progress = ipw.IntProgress(value=0, min=0, max=(fold*len_mod)+opt+n_select_num, step=1 , description='Processing: ')
master_display = pd.DataFrame(columns=['Model', 'MAE','MSE','RMSE', 'R2', 'RMSLE', 'MAPE', 'TT (Sec)'])
#display monitor only when html_param is set to True
if verbose:
if html_param:
display(progress)
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
monitor = pd.DataFrame( [ ['Initiated' , '. . . . . . . . . . . . . . . . . .', timestampStr ],
['Status' , '. . . . . . . . . . . . . . . . . .' , 'Loading Dependencies' ],
['Estimator' , '. . . . . . . . . . . . . . . . . .' , 'Compiling Library' ],
['ETC' , '. . . . . . . . . . . . . . . . . .', 'Calculating ETC'] ],
columns=['', ' ', ' ']).set_index('')
#display only when html_param is set to True
if verbose:
if html_param:
display(monitor, display_id = 'monitor')
display_ = display(master_display, display_id=True)
display_id = display_.display_id
#ignore warnings
import warnings
warnings.filterwarnings('ignore')
#general dependencies
import numpy as np
import random
from sklearn import metrics
from sklearn.model_selection import KFold
import pandas.io.formats.style
logger.info("Copying training dataset")
#Storing X_train and y_train in data_X and data_y parameter
data_X = X_train.copy()
data_y = y_train.copy()
#reset index
data_X.reset_index(drop=True, inplace=True)
data_y.reset_index(drop=True, inplace=True)
progress.value += 1
logger.info("Importing libraries")
#import sklearn dependencies
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import Lars
from sklearn.linear_model import LassoLars
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import BayesianRidge
from sklearn.linear_model import ARDRegression
from sklearn.linear_model import PassiveAggressiveRegressor
from sklearn.linear_model import RANSACRegressor
from sklearn.linear_model import TheilSenRegressor
from sklearn.linear_model import HuberRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.neural_network import MLPRegressor
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
try:
import lightgbm as lgb
except:
logger.info("LightGBM import failed")
progress.value += 1
'''
MONITOR UPDATE STARTS
'''
monitor.iloc[1,1:] = 'Loading Estimator'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
'''
MONITOR UPDATE ENDS
'''
logger.info("Importing untrained models")
#creating model object
lr = LinearRegression(n_jobs=n_jobs_param)
lasso = Lasso(random_state=seed)
ridge = Ridge(random_state=seed)
en = ElasticNet(random_state=seed)
lar = Lars()
llar = LassoLars()
omp = OrthogonalMatchingPursuit()
br = BayesianRidge()
ard = ARDRegression()
par = PassiveAggressiveRegressor(random_state=seed)
ransac = RANSACRegressor(min_samples=0.5, random_state=seed)
tr = TheilSenRegressor(random_state=seed, n_jobs=n_jobs_param)
huber = HuberRegressor()
kr = KernelRidge()
svm = SVR()
knn = KNeighborsRegressor(n_jobs=n_jobs_param)
dt = DecisionTreeRegressor(random_state=seed)
rf = RandomForestRegressor(random_state=seed, n_jobs=n_jobs_param)
et = ExtraTreesRegressor(random_state=seed, n_jobs=n_jobs_param)
ada = AdaBoostRegressor(random_state=seed)
gbr = GradientBoostingRegressor(random_state=seed)
mlp = MLPRegressor(random_state=seed)
xgboost = XGBRegressor(random_state=seed, n_jobs=n_jobs_param, verbosity=0)
lightgbm = lgb.LGBMRegressor(random_state=seed, n_jobs=n_jobs_param)
catboost = CatBoostRegressor(random_state=seed, silent = True, thread_count=n_jobs_param)
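# Comment only: of the estimators created above, kr, ard and mlp are the ones dropped
# later when turbo=True, since they have the longest cross-validation times.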
logger.info("Import successful")
progress.value += 1
model_dict = {'Linear Regression' : 'lr',
'Lasso Regression' : 'lasso',
'Ridge Regression' : 'ridge',
'Elastic Net' : 'en',
'Least Angle Regression' : 'lar',
'Lasso Least Angle Regression' : 'llar',
'Orthogonal Matching Pursuit' : 'omp',
'Bayesian Ridge' : 'br',
'Automatic Relevance Determination' : 'ard',
'Passive Aggressive Regressor' : 'par',
'Random Sample Consensus' : 'ransac',
'TheilSen Regressor' : 'tr',
'Huber Regressor' : 'huber',
'Kernel Ridge' : 'kr',
'Support Vector Machine' : 'svm',
'K Neighbors Regressor' : 'knn',
'Decision Tree' : 'dt',
'Random Forest' : 'rf',
'Extra Trees Regressor' : 'et',
'AdaBoost Regressor' : 'ada',
'Gradient Boosting Regressor' : 'gbr',
'Multi Level Perceptron' : 'mlp',
'Extreme Gradient Boosting' : 'xgboost',
'Light Gradient Boosting Machine' : 'lightgbm',
'CatBoost Regressor' : 'catboost'}
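# Illustrative lookup (comment only): model_dict maps display names back to library IDs,
# e.g. model_dict.get('Light Gradient Boosting Machine') returns 'lightgbm', which is the
# form create_model() expects when the top n_select models are re-fitted further below.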
model_library = [lr, lasso, ridge, en, lar, llar, omp, br, ard, par, ransac, tr, huber, kr,
svm, knn, dt, rf, et, ada, gbr, mlp, xgboost, lightgbm, catboost]
model_names = ['Linear Regression',
'Lasso Regression',
'Ridge Regression',
'Elastic Net',
'Least Angle Regression',
'Lasso Least Angle Regression',
'Orthogonal Matching Pursuit',
'Bayesian Ridge',
'Automatic Relevance Determination',
'Passive Aggressive Regressor',
'Random Sample Consensus',
'TheilSen Regressor',
'Huber Regressor',
'Kernel Ridge',
'Support Vector Machine',
'K Neighbors Regressor',
'Decision Tree',
'Random Forest',
'Extra Trees Regressor',
'AdaBoost Regressor',
'Gradient Boosting Regressor',
'Multi Level Perceptron',
'Extreme Gradient Boosting',
'Light Gradient Boosting Machine',
'CatBoost Regressor']
#checking for exclude models
model_library_str = ['lr', 'lasso', 'ridge', 'en', 'lar', 'llar', 'omp', 'br', 'ard',
'par', 'ransac', 'tr', 'huber', 'kr', 'svm', 'knn', 'dt', 'rf',
'et', 'ada', 'gbr', 'mlp', 'xgboost', 'lightgbm', 'catboost']
model_library_str_ = ['lr', 'lasso', 'ridge', 'en', 'lar', 'llar', 'omp', 'br', 'ard',
'par', 'ransac', 'tr', 'huber', 'kr', 'svm', 'knn', 'dt', 'rf',
'et', 'ada', 'gbr', 'mlp', 'xgboost', 'lightgbm', 'catboost']
if exclude is not None:
if turbo:
internal_exclude = ['kr', 'ard', 'mlp']
compiled_exclude = exclude + internal_exclude
exclude = list(set(compiled_exclude))
else:
exclude = exclude
for i in exclude:
model_library_str_.remove(i)
si = []
for i in model_library_str_:
s = model_library_str.index(i)
si.append(s)
model_library_ = []
model_names_= []
for i in si:
model_library_.append(model_library[i])
model_names_.append(model_names[i])
model_library = model_library_
model_names = model_names_
if exclude is None and turbo is True:
model_library = [lr, lasso, ridge, en, lar, llar, omp, br, par, ransac, tr, huber,
svm, knn, dt, rf, et, ada, gbr, xgboost, lightgbm, catboost]
model_names = ['Linear Regression',
'Lasso Regression',
'Ridge Regression',
'Elastic Net',
'Least Angle Regression',
'Lasso Least Angle Regression',
'Orthogonal Matching Pursuit',
'Bayesian Ridge',
'Passive Aggressive Regressor',
'Random Sample Consensus',
'TheilSen Regressor',
'Huber Regressor',
'Support Vector Machine',
'K Neighbors Regressor',
'Decision Tree',
'Random Forest',
'Extra Trees Regressor',
'AdaBoost Regressor',
'Gradient Boosting Regressor',
'Extreme Gradient Boosting',
'Light Gradient Boosting Machine',
'CatBoost Regressor']
#checking for include models
if include is not None:
model_library = []
model_names = []
for i in include:
if i == 'lr':
model_library.append(lr)
model_names.append('Linear Regression')
elif i == 'lasso':
model_library.append(lasso)
model_names.append('Lasso Regression')
elif i == 'ridge':
model_library.append(ridge)
model_names.append('Ridge Regression')
elif i == 'en':
model_library.append(en)
model_names.append('Elastic Net')
elif i == 'lar':
model_library.append(lar)
model_names.append('Least Angle Regression')
elif i == 'llar':
model_library.append(llar)
model_names.append('Lasso Least Angle Regression')
elif i == 'omp':
model_library.append(omp)
model_names.append('Orthogonal Matching Pursuit')
elif i == 'br':
model_library.append(br)
model_names.append('Bayesian Ridge')
elif i == 'ard':
model_library.append(ard)
model_names.append('Automatic Relevance Determination')
elif i == 'par':
model_library.append(par)
model_names.append('Passive Aggressive Regressor')
elif i == 'ransac':
model_library.append(ransac)
model_names.append('Random Sample Consensus')
elif i == 'tr':
model_library.append(tr)
model_names.append('TheilSen Regressor')
elif i == 'huber':
model_library.append(huber)
model_names.append('Huber Regressor')
elif i == 'kr':
model_library.append(kr)
model_names.append('Kernel Ridge')
elif i == 'svm':
model_library.append(svm)
model_names.append('Support Vector Machine')
elif i == 'knn':
model_library.append(knn)
model_names.append('K Neighbors Regressor')
elif i == 'dt':
model_library.append(dt)
model_names.append('Decision Tree')
elif i == 'rf':
model_library.append(rf)
model_names.append('Random Forest')
elif i == 'et':
model_library.append(et)
model_names.append('Extra Trees Regressor')
elif i == 'ada':
model_library.append(ada)
model_names.append('AdaBoost Regressor')
elif i == 'gbr':
model_library.append(gbr)
model_names.append('Gradient Boosting Regressor')
elif i == 'mlp':
model_library.append(mlp)
model_names.append('Multi Level Perceptron')
elif i == 'xgboost':
model_library.append(xgboost)
model_names.append('Extreme Gradient Boosting')
elif i == 'lightgbm':
model_library.append(lightgbm)
model_names.append('Light Gradient Boosting Machine')
elif i == 'catboost':
model_library.append(catboost)
model_names.append('CatBoost Regressor')
progress.value += 1
'''
MONITOR UPDATE STARTS
'''
monitor.iloc[1,1:] = 'Initializing CV'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
'''
MONITOR UPDATE ENDS
'''
#cross validation setup starts here
logger.info("Defining folds")
kf = KFold(fold, random_state=seed, shuffle=folds_shuffle_param)
logger.info("Declaring metric variables")
score_mae =np.empty((0,0))
score_mse =np.empty((0,0))
score_rmse =np.empty((0,0))
score_rmsle =np.empty((0,0))
score_r2 =np.empty((0,0))
score_mape =np.empty((0,0))
score_training_time=np.empty((0,0))
avgs_mae =np.empty((0,0))
avgs_mse =np.empty((0,0))
avgs_rmse =np.empty((0,0))
avgs_rmsle =np.empty((0,0))
avgs_r2 =np.empty((0,0))
avgs_mape =np.empty((0,0))
avgs_training_time=np.empty((0,0))
def calculate_mape(actual, prediction):
mask = actual != 0
return (np.fabs(actual[mask] - prediction[mask]) / actual[mask]).mean()
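# Worked example (comment only): with actual = np.array([2.0, 0.0, 4.0]) and
# prediction = np.array([1.0, 5.0, 5.0]) the zero-actual row is masked out, giving
# mean([0.5, 0.25]) = 0.375, i.e. a 37.5% mean absolute percentage error.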
#create URI (before loop)
import secrets
URI = secrets.token_hex(nbytes=4)
name_counter = 0
model_store = []
total_runtime_start = time.time()
total_runtime = 0
over_time_budget = False
if budget_time and budget_time > 0:
logger.info(f"Time budget is {budget_time} minutes")
for model in model_library:
logger.info("Initializing " + str(model_names[name_counter]))
#run_time
runtime_start = time.time()
progress.value += 1
'''
MONITOR UPDATE STARTS
'''
monitor.iloc[2,1:] = model_names[name_counter]
monitor.iloc[3,1:] = 'Calculating ETC'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
'''
MONITOR UPDATE ENDS
'''
fold_num = 1
model_store_by_fold = []
for train_i , test_i in kf.split(data_X,data_y):
logger.info("Initializing Fold " + str(fold_num))
progress.value += 1
t0 = time.time()
total_runtime += (t0 - total_runtime_start)/60
logger.info(f"Total runtime is {total_runtime} minutes")
over_time_budget = budget_time and budget_time > 0 and total_runtime > budget_time
if over_time_budget:
logger.info(f"Total runtime {total_runtime} is over time budget by {total_runtime - budget_time}, breaking loop")
break
total_runtime_start = t0
'''
MONITOR UPDATE STARTS
'''
monitor.iloc[1,1:] = 'Fitting Fold ' + str(fold_num) + ' of ' + str(fold)
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
'''
MONITOR UPDATE ENDS
'''
Xtrain,Xtest = data_X.iloc[train_i], data_X.iloc[test_i]
ytrain,ytest = data_y.iloc[train_i], data_y.iloc[test_i]
time_start=time.time()
logger.info("Fitting Model")
model_store_by_fold.append(model.fit(Xtrain,ytrain))
logger.info("Evaluating Metrics")
time_end=time.time()
pred_ = model.predict(Xtest)
try:
pred_ = target_inverse_transformer.inverse_transform(np.array(pred_).reshape(-1,1))
ytest = target_inverse_transformer.inverse_transform(np.array(ytest).reshape(-1,1))
pred_ = np.nan_to_num(pred_)
ytest = np.nan_to_num(ytest)
except:
logger.info("No inverse transformer found")
logger.info("Compiling Metrics")
mae = metrics.mean_absolute_error(ytest,pred_)
mse = metrics.mean_squared_error(ytest,pred_)
rmse = np.sqrt(mse)
r2 = metrics.r2_score(ytest,pred_)
rmsle = np.sqrt(np.mean(np.power(np.log(np.array(abs(pred_))+1) - np.log(np.array(abs(ytest))+1), 2)))
mape = calculate_mape(ytest,pred_)
training_time=time_end-time_start
score_mae = np.append(score_mae,mae)
score_mse = np.append(score_mse,mse)
score_rmse = np.append(score_rmse,rmse)
score_rmsle = np.append(score_rmsle,rmsle)
score_r2 =np.append(score_r2,r2)
score_mape = np.append(score_mape,mape)
score_training_time=np.append(score_training_time,training_time)
'''
TIME CALCULATION SUB-SECTION STARTS HERE
'''
t1 = time.time()
tt = (t1 - t0) * (fold-fold_num) / 60
tt = np.around(tt, 2)
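# Descriptive note (comment only): the ETC assumes each remaining fold takes roughly
# as long as the fold just finished, i.e. per-fold wall time * folds left, in minutes.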
if tt < 1:
tt = str(np.around((tt * 60), 2))
ETC = tt + ' Seconds Remaining'
else:
tt = str (tt)
ETC = tt + ' Minutes Remaining'
fold_num += 1
'''
MONITOR UPDATE STARTS
'''
monitor.iloc[3,1:] = ETC
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
'''
MONITOR UPDATE ENDS
'''
if over_time_budget:
break
model_store.append(model_store_by_fold[0])
logger.info("Calculating mean and std")
avgs_mae = np.append(avgs_mae,np.mean(score_mae))
avgs_mse = np.append(avgs_mse,np.mean(score_mse))
avgs_rmse = np.append(avgs_rmse,np.mean(score_rmse))
avgs_rmsle = np.append(avgs_rmsle,np.mean(score_rmsle))
avgs_r2 = np.append(avgs_r2,np.mean(score_r2))
avgs_mape = np.append(avgs_mape,np.mean(score_mape))
avgs_training_time = np.append(avgs_training_time,np.mean(score_training_time))
logger.info("Creating metrics dataframe")
compare_models_ = pd.DataFrame({'Model':model_names[name_counter], 'MAE':avgs_mae, 'MSE':avgs_mse,
'RMSE':avgs_rmse, 'R2':avgs_r2, 'RMSLE':avgs_rmsle, 'MAPE':avgs_mape, 'TT (Sec)':avgs_training_time})
master_display = pd.concat([master_display, compare_models_],ignore_index=True)
master_display = master_display.round(round)
if sort == 'R2':
master_display = master_display.sort_values(by=sort,ascending=False)
else:
master_display = master_display.sort_values(by=sort,ascending=True)
master_display.reset_index(drop=True, inplace=True)
if verbose:
if html_param:
update_display(master_display, display_id = display_id)
#end runtime
runtime_end = time.time()
runtime = np.array(runtime_end - runtime_start).round(2)
"""
MLflow logging starts here
"""
if logging_param:
logger.info("Creating MLFlow logs")
import mlflow
from pathlib import Path
import os
run_name = model_names[name_counter]
with mlflow.start_run(run_name=run_name) as run:
# Get active run to log as tag
RunID = mlflow.active_run().info.run_id
params = model.get_params()
for i in list(params):
v = params.get(i)
if len(str(v)) > 250:
params.pop(i)
mlflow.log_params(params)
#set tag of compare_models
mlflow.set_tag("Source", "compare_models")
mlflow.set_tag("URI", URI)
mlflow.set_tag("USI", USI)
mlflow.set_tag("Run Time", runtime)
mlflow.set_tag("Run ID", RunID)
#Log top model metrics
mlflow.log_metric("MAE", avgs_mae[0])
mlflow.log_metric("MSE", avgs_mse[0])
mlflow.log_metric("RMSE", avgs_rmse[0])
mlflow.log_metric("R2", avgs_r2[0])
mlflow.log_metric("RMSLE", avgs_rmsle[0])
mlflow.log_metric("MAPE", avgs_mape[0])
mlflow.log_metric("TT", avgs_training_time[0])
# Log model and transformation pipeline
from copy import deepcopy
# get default conda env
from mlflow.sklearn import get_default_conda_env
default_conda_env = get_default_conda_env()
default_conda_env['name'] = str(exp_name_log) + '-env'
default_conda_env.get('dependencies').pop(-3)
dependencies = default_conda_env.get('dependencies')[-1]
from pycaret.utils import __version__
dep = 'pycaret==' + str(__version__())
dependencies['pip'] = [dep]
# define model signature
from mlflow.models.signature import infer_signature
signature = infer_signature(data_before_preprocess.drop([target_param], axis=1))
input_example = data_before_preprocess.drop([target_param], axis=1).iloc[0].to_dict()
# log model as sklearn flavor
prep_pipe_temp = deepcopy(prep_pipe)
prep_pipe_temp.steps.append(['trained model', model])
mlflow.sklearn.log_model(prep_pipe_temp, "model", conda_env = default_conda_env, signature = signature, input_example = input_example)
del(prep_pipe_temp)
score_mae =np.empty((0,0))
score_mse =np.empty((0,0))
score_rmse =np.empty((0,0))
score_rmsle =np.empty((0,0))
score_r2 =np.empty((0,0))
score_mape =np.empty((0,0))
score_training_time=np.empty((0,0))
avgs_mae = np.empty((0,0))
avgs_mse = np.empty((0,0))
avgs_rmse = np.empty((0,0))
avgs_rmsle = np.empty((0,0))
avgs_r2 = np.empty((0,0))
avgs_mape = np.empty((0,0))
avgs_training_time=np.empty((0,0))
name_counter += 1
progress.value += 1
def highlight_min(s):
if s.name=='R2': # R2 is best when maximized; the other metrics when minimized
to_highlight = s == s.max()
else:
to_highlight = s == s.min()
return ['background-color: yellow' if v else '' for v in to_highlight]
def highlight_cols(s):
color = 'lightgrey'
return 'background-color: %s' % color
compare_models_ = master_display.style.apply(highlight_min,subset=['MAE','MSE','RMSE','R2','RMSLE','MAPE'])\
.applymap(highlight_cols, subset = ['TT (Sec)'])
compare_models_ = compare_models_.set_precision(round)
compare_models_ = compare_models_.set_properties(**{'text-align': 'left'})
compare_models_ = compare_models_.set_table_styles([dict(selector='th', props=[('text-align', 'left')])])
progress.value += 1
monitor.iloc[1,1:] = 'Compiling Final Model'
monitor.iloc[3,1:] = 'Almost Finished'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
sorted_model_names = list(compare_models_.data['Model'])
n_select = n_select if n_select <= len(sorted_model_names) else len(sorted_model_names)
if n_select < 0:
sorted_model_names = sorted_model_names[n_select:]
else:
sorted_model_names = sorted_model_names[:n_select]
model_store_final = []
logger.info("Finalizing top_n models")
logger.info("SubProcess create_model() called ==================================")
for i in sorted_model_names:
monitor.iloc[2,1:] = i
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
progress.value += 1
k = model_dict.get(i)
m = create_model(estimator=k, verbose = False, system=False, cross_validation=True)
model_store_final.append(m)
logger.info("SubProcess create_model() end ==================================")
if len(model_store_final) == 1:
model_store_final = model_store_final[0]
clear_output()
if verbose:
if html_param:
display(compare_models_)
else:
print(compare_models_.data)
pd.reset_option("display.max_columns")
#store in display container
display_container.append(compare_models_.data)
logger.info("create_model_container: " + str(len(create_model_container)))
logger.info("master_model_container: " + str(len(master_model_container)))
logger.info("display_container: " + str(len(display_container)))
logger.info(str(model_store_final))
logger.info("compare_models() succesfully completed......................................")
return model_store_final
def create_model(estimator = None,
ensemble = False,
method = None,
fold = 10,
round = 4,
cross_validation = True, #added in pycaret==2.0.0
verbose = True,
system = True, #added in pycaret==2.0.0
**kwargs): #added in pycaret==2.0.0
"""
This function creates a model and scores it using Kfold Cross Validation.
The output prints a score grid that shows MAE, MSE, RMSE, RMSLE, R2 and
MAPE by fold (default = 10 Fold).
This function returns a trained model object.
setup() function must be called before using create_model()
Example
-------
>>> from pycaret.datasets import get_data
>>> boston = get_data('boston')
>>> experiment_name = setup(data = boston, target = 'medv')
>>> lr = create_model('lr')
This will create a trained Linear Regression model.
Parameters
----------
estimator : string / object, default = None
Enter the ID of an estimator available in the model library, or pass an untrained
model object consistent with the scikit-learn fit / predict API to train and
evaluate it. List of estimators in the model library (ID - Name):
* 'lr' - Linear Regression
* 'lasso' - Lasso Regression
* 'ridge' - Ridge Regression
* 'en' - Elastic Net
* 'lar' - Least Angle Regression
* 'llar' - Lasso Least Angle Regression
* 'omp' - Orthogonal Matching Pursuit
* 'br' - Bayesian Ridge
* 'ard' - Automatic Relevance Determination
* 'par' - Passive Aggressive Regressor
* 'ransac' - Random Sample Consensus
* 'tr' - TheilSen Regressor
* 'huber' - Huber Regressor
* 'kr' - Kernel Ridge
* 'svm' - Support Vector Machine
* 'knn' - K Neighbors Regressor
* 'dt' - Decision Tree
* 'rf' - Random Forest
* 'et' - Extra Trees Regressor
* 'ada' - AdaBoost Regressor
* 'gbr' - Gradient Boosting Regressor
* 'mlp' - Multi Level Perceptron
* 'xgboost' - Extreme Gradient Boosting
* 'lightgbm' - Light Gradient Boosting
* 'catboost' - CatBoost Regressor
ensemble: Boolean, default = False
True would result in an ensemble of estimator using the method parameter defined.
method: String, 'Bagging' or 'Boosting', default = None.
method must be defined when ensemble is set to True. Default method is set to None.
fold: integer, default = 10
Number of folds to be used in Kfold CV. Must be at least 2.
round: integer, default = 4
Number of decimal places the metrics in the score grid will be rounded to.
cross_validation: bool, default = True
When cross_validation is set to False, the fold parameter is ignored and the model
is trained on the entire training dataset. No metric evaluation is returned.
verbose: Boolean, default = True
Score grid is not printed when verbose is set to False.
system: Boolean, default = True
Must remain True at all times. Only to be changed by internal functions.
**kwargs:
Additional keyword arguments to pass to the estimator.
Returns
-------
score_grid
A table containing the scores of the model across the kfolds.
Scoring metrics used are MAE, MSE, RMSE, RMSLE, R2 and MAPE.
Mean and standard deviation of the scores across the folds are
also returned.
model
Trained model object.
"""
'''
ERROR HANDLING STARTS HERE
'''
import logging
try:
hasattr(logger, 'name')
except:
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing create_model()")
logger.info("""create_model(estimator={}, ensemble={}, method={}, fold={}, round={}, cross_validation={}, verbose={}, system={})""".\
format(str(estimator), str(ensemble), str(method), str(fold), str(round), str(cross_validation), str(verbose), str(system)))
logger.info("Checking exceptions")
#exception checking
import sys
#run_time
import datetime, time
runtime_start = time.time()
#checking error for estimator (string)
available_estimators = ['lr', 'lasso', 'ridge', 'en', 'lar', 'llar', 'omp', 'br', 'ard', 'par',
'ransac', 'tr', 'huber', 'kr', 'svm', 'knn', 'dt', 'rf', 'et', 'ada', 'gbr',
'mlp', 'xgboost', 'lightgbm', 'catboost']
#only raise exception if estimator is of type string.
if type(estimator) is str:
if estimator not in available_estimators:
sys.exit('(Value Error): Estimator Not Available. Please see docstring for list of available estimators.')
#checking error for ensemble:
if type(ensemble) is not bool:
sys.exit('(Type Error): Ensemble parameter can only take argument as True or False.')
#checking error for method:
#1 Check When method given and ensemble is not set to True.
if ensemble is False and method is not None:
sys.exit('(Type Error): Method parameter only accepts value when ensemble is set to True.')
#2 Check when ensemble is set to True and method is not passed.
if ensemble is True and method is None:
sys.exit("(Type Error): Method parameter missing. Pass method = 'Bagging' or 'Boosting'.")
#3 Check when ensemble is set to True and method is passed but not allowed.
available_method = ['Bagging', 'Boosting']
if ensemble is True and method not in available_method:
sys.exit("(Value Error): Method parameter only accepts two values 'Bagging' or 'Boosting'.")
#checking fold parameter
if type(fold) is not int:
sys.exit('(Type Error): Fold parameter only accepts integer value.')
#checking round parameter
if type(round) is not int:
sys.exit('(Type Error): Round parameter only accepts integer value.')
#checking verbose parameter
if type(verbose) is not bool:
sys.exit('(Type Error): Verbose parameter can only take argument as True or False.')
#checking system parameter
if type(system) is not bool:
sys.exit('(Type Error): System parameter can only take argument as True or False.')
#checking cross_validation parameter
if type(cross_validation) is not bool:
sys.exit('(Type Error): cross_validation parameter can only take argument as True or False.')
'''
ERROR HANDLING ENDS HERE
'''
logger.info("Preloading libraries")
#pre-load libraries
import pandas as pd
import ipywidgets as ipw
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
logger.info("Preparing display monitor")
#progress bar
progress = ipw.IntProgress(value=0, min=0, max=fold+4, step=1 , description='Processing: ')
master_display = | pd.DataFrame(columns=['MAE','MSE','RMSE', 'R2', 'RMSLE', 'MAPE']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from datetime import datetime
import pandas as pd
import numpy as np
from findy.database.schema.fundamental.finance import BalanceSheet
from findy.database.plugins.eastmoney.common import to_report_period_type
from findy.database.plugins.eastmoney.finance.base_china_stock_finance_recorder import BaseChinaStockFinanceRecorder
from findy.utils.convert import to_float
balance_sheet_map = {
# Current assets
#
# 货币资金
"Monetaryfund": "cash_and_cash_equivalents",
# 应收票据
"Billrec": "note_receivable",
# 应收账款
"Accountrec": "accounts_receivable",
# 预付款项
"Advancepay": "advances_to_suppliers",
# 其他应收款
"Otherrec": "other_receivables",
# 存货
"Inventory": "inventories",
# 一年内到期的非流动资产
"Nonlassetoneyear": "current_portion_of_non_current_assets",
# 其他流动资产
"Otherlasset": "other_current_assets",
# 流动资产合计
"Sumlasset": "total_current_assets",
# Non-current assets
#
# 可供出售金融资产
"Saleablefasset": "fi_assets_saleable",
# 长期应收款
"Ltrec": "long_term_receivables",
# 长期股权投资
"Ltequityinv": "long_term_equity_investment",
# 投资性房地产
"Estateinvest": "real_estate_investment",
# 固定资产
"Fixedasset": "fixed_assets",
# 在建工程
"Constructionprogress": "construction_in_process",
# 无形资产
"Intangibleasset": "intangible_assets",
# 商誉
"Goodwill": "goodwill",
# 长期待摊费用
"Ltdeferasset": "long_term_prepaid_expenses",
# 递延所得税资产
"Deferincometaxasset": "deferred_tax_assets",
# 其他非流动资产
"Othernonlasset": "other_non_current_assets",
# 非流动资产合计
"Sumnonlasset": "total_non_current_assets",
# 资产总计
"Sumasset": "total_assets",
# Current liabilities
#
# 短期借款
"Stborrow": "short_term_borrowing",
# 吸收存款及同业存放
"Deposit": "accept_money_deposits",
# 应付账款
"Accountpay": "accounts_payable",
# 预收款项
"Advancereceive": "advances_from_customers",
# 应付职工薪酬
"Salarypay": "employee_benefits_payable",
# 应交税费
"Taxpay": "taxes_payable",
# 应付利息
"Interestpay": "interest_payable",
# 其他应付款
"Otherpay": "other_payable",
# 一年内到期的非流动负债
"Nonlliaboneyear": "current_portion_of_non_current_liabilities",
# 其他流动负债
"Otherlliab": "other_current_liabilities",
# 流动负债合计
"Sumlliab": "total_current_liabilities",
# Non-current liabilities
#
# 长期借款
"Ltborrow": "long_term_borrowing",
# 长期应付款
"Ltaccountpay": "long_term_payable",
# 递延收益
"Deferincome": "deferred_revenue",
# 递延所得税负债
"Deferincometaxliab": "deferred_tax_liabilities",
# 其他非流动负债
"Othernonlliab": "other_non_current_liabilities",
# 非流动负债合计
"Sumnonlliab": "total_non_current_liabilities",
# 负债合计
"Sumliab": "total_liabilities",
# Owners' equity (or shareholders' equity)
#
# 实收资本(或股本)
"Sharecapital": "capital",
# 资本公积
"Capitalreserve": "capital_reserve",
# 专项储备
"Specialreserve": "special_reserve",
# 盈余公积
"Surplusreserve": "surplus_reserve",
# 未分配利润
"Retainedearning": "undistributed_profits",
# 归属于母公司股东权益合计
"Sumparentequity": "equity",
# 少数股东权益
"Minorityequity": "equity_as_minority_interest",
# 股东权益合计
"Sumshequity": "total_equity",
# 负债和股东权益合计
"Sumliabshequity": "total_liabilities_and_equity",
# Bank-specific items
# Assets
# 现金及存放中央银行款项
"Cashanddepositcbank": "fi_cash_and_deposit_in_central_bank",
# 存放同业款项
"Depositinfi": "fi_deposit_in_other_fi",
# 贵金属
"Preciousmetal": "fi_expensive_metals",
# 拆出资金
"Lendfund": "fi_lending_to_other_fi",
# 以公允价值计量且其变动计入当期损益的金融资产
"Fvaluefasset": "fi_financial_assets_effect_current_income",
# 衍生金融资产
"Derivefasset": "fi_financial_derivative_asset",
# 买入返售金融资产
"Buysellbackfasset": "fi_buying_sell_back_fi__asset",
# 应收账款
#
# 应收利息
"Interestrec": "fi_interest_receivable",
# 发放贷款及垫款
"Loanadvances": "fi_disbursing_loans_and_advances",
# 可供出售金融资产
#
# 持有至到期投资
"Heldmaturityinv": "fi_held_to_maturity_investment",
# 应收款项类投资
"Investrec": "fi_account_receivable_investment",
# 投资性房地产
#
# 固定资产
#
# 无形资产
#
# 商誉
#
# 递延所得税资产
#
# 其他资产
"Otherasset": "fi_other_asset",
# 资产总计
#
# Liabilities
#
# 向中央银行借款
"Borrowfromcbank": "fi_borrowings_from_central_bank",
# 同业和其他金融机构存放款项
"Fideposit": "fi_deposit_from_other_fi",
# 拆入资金
"Borrowfund": "fi_borrowings_from_fi",
# 以公允价值计量且其变动计入当期损益的金融负债
"Fvaluefliab": "fi_financial_liability_effect_current_income",
# 衍生金融负债
"Derivefliab": "fi_financial_derivative_liability",
# 卖出回购金融资产款
"Sellbuybackfasset": "fi_sell_buy_back_fi_asset",
# 吸收存款
"Acceptdeposit": "fi_savings_absorption",
# 存款证及应付票据
"Cdandbillrec": "fi_notes_payable",
# 应付职工薪酬
#
# 应交税费
#
# 应付利息
#
# 预计负债
"Anticipateliab": "fi_estimated_liabilities",
# 应付债券
"Bondpay": "fi_bond_payable",
# 其他负债
"Otherliab": "fi_other_liability",
# 负债合计
#
# Owners' equity (or shareholders' equity)
# 股本
"Shequity": "fi_capital",
# 其他权益工具
"Otherequity": "fi_other_equity_instruments",
# 其中:优先股
"Preferredstock": "fi_preferred_stock",
# 资本公积
#
# 盈余公积
#
# 一般风险准备
"Generalriskprepare": "fi_generic_risk_reserve",
# 未分配利润
#
# 归属于母公司股东权益合计
#
# 股东权益合计
#
# 负债及股东权益总计
# Securities-broker-specific items
# Assets
#
# 货币资金
#
# 其中: 客户资金存款
"Clientfund": "fi_client_fund",
# 结算备付金
"Settlementprovision": "fi_deposit_reservation_for_balance",
# 其中: 客户备付金
"Clientprovision": "fi_client_deposit_reservation_for_balance",
# 融出资金
"Marginoutfund": "fi_margin_out_fund",
# 以公允价值计量且其变动计入当期损益的金融资产
#
# 衍生金融资产
#
# 买入返售金融资产
#
# 应收利息
#
# 应收款项
"Receivables": "fi_receivables",
# 存出保证金
"Gdepositpay": "fi_deposit_for_recognizance",
# 可供出售金融资产
#
# 持有至到期投资
#
# 长期股权投资
#
# 固定资产
#
# 在建工程
#
# 无形资产
#
# 商誉
#
# 递延所得税资产
#
# 其他资产
#
# 资产总计
#
# Liabilities
#
# 短期借款
#
# 拆入资金
#
# 以公允价值计量且其变动计入当期损益的金融负债
#
# 衍生金融负债
#
# 卖出回购金融资产款
#
# 代理买卖证券款
"Agenttradesecurity": "fi_receiving_as_agent",
# 应付账款
#
# 应付职工薪酬
#
# 应交税费
#
# 应付利息
#
# 应付短期融资款
"Shortfinancing": "fi_short_financing_payable",
# 预计负债
#
# 应付债券
#
# 递延所得税负债
#
# 其他负债
#
# 负债合计
#
# Owners' equity (or shareholders' equity)
#
# 股本
#
# 资本公积
#
# 其他权益工具
#
# 盈余公积
#
# 一般风险准备
#
# 交易风险准备
"Traderiskprepare": "fi_trade_risk_reserve",
# 未分配利润
#
# 归属于母公司股东权益合计
#
# 少数股东权益
#
# 股东权益合计
#
# 负债和股东权益总计
# Insurance-specific items
# 应收保费
"Premiumrec": "fi_premiums_receivable",
"Rirec": "fi_reinsurance_premium_receivable",
# 应收分保合同准备金
"Ricontactreserverec": "fi_reinsurance_contract_reserve",
# 保户质押贷款
"Insuredpledgeloan": "fi_policy_pledge_loans",
# 定期存款
"Tdeposit": "fi_time_deposit",
# 可供出售金融资产
#
# 持有至到期投资
#
# 应收款项类投资
#
# 应收账款
#
# 长期股权投资
#
# 存出资本保证金
"Capitalgdepositpay": "fi_deposit_for_capital_recognizance",
# 投资性房地产
#
# 固定资产
#
# 无形资产
#
# 商誉
#
# 递延所得税资产
#
# 其他资产
#
# 独立账户资产
"Independentasset": "fi_capital_in_independent_accounts",
# 资产总计
#
# 负债
#
# 短期借款
#
# 同业及其他金融机构存放款项
#
# 拆入资金
#
# 以公允价值计量且其变动计入当期损益的金融负债
#
# 衍生金融负债
#
# 卖出回购金融资产款
#
# 吸收存款
#
# 代理买卖证券款
#
# 应付账款
#
# 预收账款
"Advancerec": "fi_advance_from_customers",
# 预收保费
"Premiumadvance": "fi_advance_premium",
# 应付手续费及佣金
"Commpay": "fi_fees_and_commissions_payable",
# 应付分保账款
"Ripay": "fi_dividend_payable_for_reinsurance",
# 应付职工薪酬
#
# 应交税费
#
# 应付利息
#
# 预计负债
#
# 应付赔付款
"Claimpay": "fi_claims_payable",
# 应付保单红利
"Policydivipay": "fi_policy_holder_dividend_payable",
# 保户储金及投资款
"Insureddepositinv": "fi_policy_holder_deposits_and_investment_funds",
# 保险合同准备金
"Contactreserve": "fi_contract_reserve",
# 长期借款
#
# 应付债券
#
# 递延所得税负债
#
# 其他负债
#
# 独立账户负债
"Independentliab": "fi_independent_liability",
# 负债合计
#
# 所有者权益(或股东权益)
#
# 股本
#
# 资本公积
#
# 盈余公积
#
# 一般风险准备
#
# 未分配利润
#
# 归属于母公司股东权益总计
#
# 少数股东权益
#
# 股东权益合计
#
# 负债和股东权益总计
}
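# Illustrative usage (comment only; the tiny frame is hypothetical): format() below
# renames the raw EastMoney fields to the BalanceSheet schema columns in one pass, e.g.
# raw = pd.DataFrame({'Monetaryfund': [1.2e9], 'Sumasset': [3.4e9]})
# raw.rename(columns=balance_sheet_map) # -> columns: cash_and_cash_equivalents, total_assets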
class ChinaStockBalanceSheetRecorder(BaseChinaStockFinanceRecorder):
data_schema = BalanceSheet
url = 'https://emh5.eastmoney.com/api/CaiWuFenXi/GetZiChanFuZhaiBiaoList'
finance_report_type = 'ZiChanFuZhaiBiaoList'
data_type = 3
def format(self, entity, df):
cols = list(df.columns)
str_cols = ['Title']
date_cols = [self.get_original_time_field()]
float_cols = list(set(cols) - set(str_cols) - set(date_cols))
for column in float_cols:
df[column] = df[column].apply(lambda x: to_float(x[0]))
df.rename(columns=balance_sheet_map, inplace=True)
df.update(df.select_dtypes(include=[np.number]).fillna(0))
if 'timestamp' not in df.columns:
df['timestamp'] = pd.to_datetime(df[self.get_original_time_field()])
elif not pd.api.types.is_datetime64_any_dtype(df['timestamp']):
df['timestamp'] = | pd.to_datetime(df['timestamp']) | pandas.to_datetime |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import json
import numpy as np
from IPython import embed
import os
from collections import OrderedDict
import pandas as pd
from warnings import warn
def sigm_tf(x):
return 1./(1 + np.exp(-1 * x))
#def sigm(x):
# return 2./(1 + np.exp(-2 * x)) - 1
def flatten(l):
return [item for sublist in l for item in sublist]
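# Quick sanity examples (comments only): sigm_tf(0.0) == 0.5 and
# flatten([[1, 2], [3]]) == [1, 2, 3]; flatten is used below to merge the
# per-network target-name lists into a single flat list.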
class QuaLiKizMultiNN():
def __init__(self, nns):
self._nns = nns
feature_names = nns[0]._feature_names
for nn in self._nns:
if len(nn._target_names) == 1:
name = nn._target_names[0]
else:
raise NotImplementedError('Multitarget not implemented yet')
if np.all(nn._feature_names.ne(feature_names)):
raise Exception('Supplied NNs have different feature names')
if np.any(self._feature_min > self._feature_max):
raise Exception('Feature min > feature max')
self._target_min = pd.concat(
[nn._target_min for nn in self._nns])
self._target_max = pd.concat(
[nn._target_max for nn in self._nns])
@property
def _target_names(self):
targets = []
for nn in self._nns:
targets.extend(list(nn._target_names))
return targets
def get_output(self, input, output_pandas=True, clip_low=True, clip_high=True, low_bound=None, high_bound=None, **kwargs):
results = pd.DataFrame()
feature_max = -np.inf
feature_min = np.inf
out_tot = np.empty((input.shape[0], len(self._nns)))
out_name = []
nn_input, kwargs['safe'], clip_low, clip_high, low_bound, high_bound = \
determine_settings(self, input, kwargs['safe'], clip_low, clip_high, low_bound, high_bound)
for ii, nn in enumerate(self._nns):
if len(nn._target_names) == 1:
out = nn.get_output(input, clip_low=False, clip_high=False, **kwargs)
out_tot[:, ii] = np.squeeze(out)
if output_pandas:
out_name.extend(out.columns.values)
else:
raise NotImplementedError('Multitarget not implemented yet')
out_tot = clip_to_bounds(out_tot, clip_low=clip_low, clip_high=clip_high, low_bound=low_bound, high_bound=high_bound)
if output_pandas == True:
results = pd.DataFrame(out_tot, columns=out_name)
else:
results = out_tot
return results
@property
def _target_names(self):
return flatten([list(nn._target_names) for nn in self._nns])
@property
def _feature_names(self):
return self._nns[0]._feature_names
@property
def _feature_max(self):
feature_max = pd.Series(np.full_like(self._nns[0]._feature_max, np.inf),
index=self._nns[0]._feature_max.index)
for nn in self._nns:
feature_max = nn._feature_max.combine(feature_max, min)
return feature_max
@property
def _feature_min(self):
feature_min = pd.Series(np.full_like(self._nns[0]._feature_min, -np.inf),
index=self._nns[0]._feature_min.index)
for nn in self._nns:
feature_min = nn._feature_min.combine(feature_min, max)
return feature_min
class QuaLiKizComboNN():
def __init__(self, target_names, nns, combo_func):
self._nns = nns
feature_names = nns[0]._feature_names
for nn in self._nns:
if np.all(nn._feature_names.ne(feature_names)):
raise Exception('Supplied NNs have different feature names')
if np.any(self._feature_min > self._feature_max):
raise Exception('Feature min > feature max')
self._combo_func = combo_func
self._target_names = target_names
self._target_min = pd.Series(
self._combo_func(*[nn._target_min.values for nn in nns]),
index=self._target_names)
self._target_max = pd.Series(
self._combo_func(*[nn._target_max.values for nn in nns]),
index=self._target_names)
def get_output(self, input, output_pandas=True, clip_low=True, clip_high=True, low_bound=None, high_bound=None, **kwargs):
nn_input, kwargs['safe'], clip_low, clip_high, low_bound, high_bound = \
determine_settings(self, input, kwargs['safe'], clip_low, clip_high, low_bound, high_bound)
output = self._combo_func(*[nn.get_output(input, output_pandas=False, clip_low=False, clip_high=False, **kwargs) for nn in self._nns])
output = clip_to_bounds(output, clip_low=clip_low, clip_high=clip_high, low_bound=low_bound, high_bound=high_bound)
if output_pandas is True:
output = pd.DataFrame(output, columns=self._target_names)
return output
@property
def _feature_names(self):
return self._nns[0]._feature_names
@property
def _feature_max(self):
feature_max = pd.Series(np.full_like(self._nns[0]._feature_max, np.inf),
index=self._nns[0]._feature_max.index)
for nn in self._nns:
feature_max = nn._feature_max.combine(feature_max, min)
return feature_max
@property
def _feature_min(self):
feature_min = pd.Series(np.full_like(self._nns[0]._feature_min, -np.inf),
index=self._nns[0]._feature_min.index)
for nn in self._nns:
feature_min = nn._feature_min.combine(feature_min, max)
return feature_min
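# Illustrative construction (comment only; the member network names are hypothetical):
# a combo network returning the sum of two single-output networks could be built as
# QuaLiKizComboNN(pd.Series(['efe_GB']), [nn_itg, nn_tem], lambda x, y: x + y)
# where combo_func is applied element-wise to the un-clipped outputs of the member NNs.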
class QuaLiKizDuoNN():
def __init__(self, target_names, nn1, nn2, combo_funcs):
self._nn1 = nn1
self._nn2 = nn2
if np.any(self._feature_min > self._feature_max):
raise Exception('Feature min > feature max')
if np.all(nn1._feature_names.ne(nn2._feature_names)):
raise Exception('Supplied NNs have different feature names')
if not len(target_names) == len(combo_funcs):
raise Exception('len(target_names) = {:d} and len(combo_funcs) = {:d}'
.format(len(target_names), len(combo_funcs)))
self._combo_funcs = combo_funcs
self._target_names = target_names
def get_output(self, input, **kwargs):
output = pd.DataFrame()
output1 = self._nn1.get_output(input, **kwargs)
output2 = self._nn2.get_output(input, **kwargs)
for target_name, combo_func in zip(self._target_names, self._combo_funcs):
output[target_name] = np.squeeze(combo_func(output1, output2))
return output
@property
def _feature_names(self):
return self._nn1._feature_names
@property
def _feature_max(self):
return self._nn1._feature_max.combine(self._nn2._feature_max, min)
@property
def _feature_min(self):
return self._nn1._feature_min.combine(self._nn2._feature_min, max)
class QuaLiKizNDNN():
def __init__(self, nn_dict, target_names_mask=None, layer_mode=None):
""" General ND fully-connected multilayer perceptron neural network
Initialize this class using a nn_dict. This dict is usually read
directly from JSON, and has a specific structure. Generate this JSON
file using the supplied function in QuaLiKiz-Tensorflow
"""
parsed = {}
if layer_mode is None:
try:
import qlknn
except ImportError:
layer_mode = 'classic'
else:
layer_mode = 'intel'
elif layer_mode == 'intel':
import qlknn
elif layer_mode == 'cython':
import cython_mkl_ndnn
# Read and parse the json. E.g. put arrays in arrays and the rest in a dict
for name, value in nn_dict.items():
if name == 'hidden_activation' or name == 'output_activation':
parsed[name] = value
elif value.__class__ == list:
parsed[name] = np.array(value)
else:
parsed[name] = dict(value)
# These variables do not depend on the amount of layers in the NN
for setname in ['feature', 'target']:
setattr(self, '_' + setname + '_names', pd.Series(parsed.pop(setname + '_names')))
for setname in ['feature', 'target']:
for subset in ['min', 'max']:
setattr(self, '_'.join(['', setname, subset]), pd.Series(parsed.pop('_'.join([setname, subset])))[getattr(self, '_' + setname + '_names')])
for subset in ['bias', 'factor']:
setattr(self, '_'.join(['_feature_prescale', subset]), pd.Series(parsed['prescale_' + subset])[self._feature_names])
setattr(self, '_'.join(['_target_prescale', subset]), pd.Series(parsed.pop('prescale_' + subset))[self._target_names])
self.layers = []
# Now find out the number of layers in our NN, and save the weights and biases
activations = parsed['hidden_activation'] + [parsed['output_activation']]
for ii in range(1, len(activations) + 1):
try:
name = 'layer' + str(ii)
weight = parsed.pop(name + '/weights/Variable:0')
bias = parsed.pop(name + '/biases/Variable:0')
activation = activations.pop(0)
if layer_mode == 'classic':
if activation == 'tanh':
act = np.tanh
elif activation == 'relu':
act = _act_relu
elif activation == 'none':
act = _act_none
self.layers.append(QuaLiKizNDNN.NNLayer(weight, bias, act))
elif layer_mode == 'intel':
self.layers.append(qlknn.Layer(weight, bias, activation))
elif layer_mode == 'cython':
self.layers.append(cython_mkl_ndnn.Layer(weight, bias, activation))
except KeyError:
# This name does not exist in the JSON,
# so our previously read layer was the output layer
break
if len(activations) == 0:
del parsed['hidden_activation']
del parsed['output_activation']
try:
self._clip_bounds = parsed['_metadata']['clip_bounds']
except KeyError:
self._clip_bounds = False
self._target_names_mask = target_names_mask
# Ignore metadata
try:
self._metadata = parsed.pop('_metadata')
except KeyError:
pass
if any(parsed):
warn('nn_dict not fully parsed! ' + str(parsed))
def apply_layers(self, input, output=None):
""" Apply all NN layers to the given input
The given input has to be array-like, but can be of size 1
"""
input = np.ascontiguousarray(input)
# 3x30 network:
#14.1 µs ± 913 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
#20.9 µs ± 2.43 µs per loop (mean ± std. dev. of 7 runs, 100000 loops each)
#19.1 µs ± 240 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
#2.67 µs ± 29.7 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
for layer in self.layers:
output = np.empty([input.shape[0], layer._weights.shape[1]])
output = layer.apply(input, output)
input = output
return input
class NNLayer():
""" A single (hidden) NN layer
A hidden NN layer just does
output = activation(weight * input + bias)
Where weight is generally a matrix; output, input and bias a vector
and activation a (sigmoid) function.
"""
def __init__(self, weight, bias, activation):
self._weights = weight
self._biases = bias
self._activation = activation
#@jit(float64[:,:](float64[:,:]), nopython=True)
#def _apply_layer(input):
# preactivation = np.dot(input, weight) + bias
# result = activation(preactivation)
# return result
#self.apply = lambda input: activation(np.dot(input, weight) + bias)
#_create_apply(weight, bias, activation)
def apply(self, input, output=None):
preactivation = np.dot(input, self._weights) + self._biases
result = self._activation(preactivation)
return result
def shape(self):
return self._weights.shape
def __str__(self):
return ('NNLayer shape ' + str(self.shape()))
def get_output(self, input, clip_low=True, clip_high=True, low_bound=None, high_bound=None, safe=True, output_pandas=True):
""" Calculate the output given a specific input
This function accepts inputs in the form of a dict with
as keys the name of the specific input variable (usually
at least the feature_names) and as values 1xN same-length
arrays.
"""
#49.1 ns ± 1.53 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
nn_input, safe, clip_low, clip_high, low_bound, high_bound = \
determine_settings(self, input, safe, clip_low, clip_high, low_bound, high_bound)
#nn_input = self._feature_prescale_factors.values[np.newaxis, :] * nn_input + self._feature_prescale_biases.values
#14.3 µs ± 1.08 µs per loop (mean ± std. dev. of 7 runs, 100000 loops each)
nn_input = _prescale(nn_input,
self._feature_prescale_factor.values,
self._feature_prescale_bias.values)
# Apply all NN layers and re-scale the outputs
# 104 µs ± 19.7 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
# 70.9 µs ± 384 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each) (only apply layers)
output = (self.apply_layers(nn_input) - np.atleast_2d(self._target_prescale_bias)) / np.atleast_2d(self._target_prescale_factor)
#for name in self._target_names:
# nn_output = (np.squeeze(self.apply_layers(nn_input)) - self._target_prescale_biases[name]) / self._target_prescale_factors[name]
# output[name] = nn_output
output = clip_to_bounds(output, clip_low=clip_low, clip_high=clip_high, low_bound=low_bound, high_bound=high_bound)
# 118 µs ± 3.83 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
if output_pandas:
output = pd.DataFrame(output, columns=self._target_names)
# 47.4 ns ± 1.79 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)
if self._target_names_mask is not None:
output.columns = self._target_names_mask
return output
@classmethod
def from_json(cls, json_file, **kwargs):
with open(json_file) as file_:
dict_ = json.load(file_)
nn = QuaLiKizNDNN(dict_, **kwargs)
return nn
@property
def l2_norm(self):
l2_norm = 0
for layer in self.layers:
l2_norm += np.sum(np.square(layer._weights))
l2_norm /= 2
return l2_norm
@property
def l1_norm(self):
l1_norm = 0
for layer in self.layers:
l1_norm += np.sum(np.abs(layer._weights))
return l1_norm
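# Usage sketch (the file name and the single-row input are illustrative only):
# nn = QuaLiKizNDNN.from_json('nn.json')
# inp = pd.DataFrame({name: [1.0] for name in nn._feature_names})
# out = nn.get_output(inp, safe=True)  # DataFrame with one row per input row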
def clip_to_bounds(output, clip_low, clip_high, low_bound, high_bound):
if clip_low:
for ii, bound in enumerate(low_bound):
output[:, ii][output[:, ii] < bound] = bound
if clip_high:
for ii, bound in enumerate(high_bound):
output[:, ii][output[:, ii] > bound] = bound
return output
def determine_settings(network, input, safe, clip_low, clip_high, low_bound, high_bound):
if safe:
if input.__class__ == pd.DataFrame:
nn_input = input[network._feature_names]
else:
raise Exception('Please pass a pandas.DataFrame for safe mode')
if low_bound is not None:
low_bound = low_bound[network._target_names].values
if high_bound is not None:
high_bound = high_bound[network._target_names].values
else:
if input.__class__ == pd.DataFrame:
nn_input = input.values
elif input.__class__ == np.ndarray:
nn_input = input
if clip_low is True and (low_bound is None):
low_bound = network._target_min.values
if clip_high is True and (high_bound is None):
high_bound = network._target_max.values
return nn_input, safe, clip_low, clip_high, low_bound, high_bound
#@jit(float64[:,:](float64[:,:], float64[:], float64[:]), nopython=True)
def _prescale(nn_input, factors, biases):
return np.atleast_2d(factors) * nn_input + biases
# #return factors[np.newaxis, :] * nn_input + biases
#
#@jit(float64[:,:](float64[:,:]), nopython=True)
def _act_none(x):
return x
#
#@jit(float64[:,:](float64[:,:]), nopython=True)
def _act_relu(x):
return x * (x > 0)
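# Quick check of the activation above: _act_relu(np.array([-1.0, 0.5])) -> array([-0. , 0.5]);
# negative entries are zeroed, positives pass through unchanged.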
#
##@jit(float64[:,:](float64[:,:], float64[:,:,:]), nopython=True)
##def _apply_layers(self, input, layers):
## for layer in layers:
## input = layer.apply(input)
## return input
#
#def _create_apply(weight, bias, activation):
# #self.weight = weight
# #self.bias = bias
# #self.activation = activation
# #if activation is None:
# # @jit(float64[:,:](float64[:,:]), nopython=True)
# # def _apply_layer(input):
# # preactivation = np.dot(input, weight) + bias
# # result = preactivation
# # return result
# #else:
# @jit(float64[:,:](float64[:,:]), nopython=True)
# def _apply_layer(input):
# preactivation = np.dot(input, weight) + bias
# result = activation(preactivation)
# return result
#
# return _apply_layer
if __name__ == '__main__':
# Test the function
root = os.path.dirname(os.path.realpath(__file__))
#nn1 = QuaLiKizNDNN.from_json(os.path.join(root, 'nn_efe_GB.json'))
#nn2 = QuaLiKizNDNN.from_json(os.path.join(root, 'nn_efi_GB.json'))
#nn3 = QuaLiKizDuoNN('nn_eftot_GB', nn1, nn2, lambda x, y: x + y)
#nn = QuaLiKizMultiNN([nn1, nn2])
nn = QuaLiKizNDNN.from_json('nn.json', layer_mode='intel')
scann = 100
input = pd.DataFrame()
import pandas as pd
import numpy as np
lower_thresh = 0.007
upper_thresh = 0.03
print('############################### Reading generated predictions #############################\n')
test_status = pd.read_csv('data/test_status.csv', usecols=['ID', 'probability'])
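# (Hypothetical continuation, not present in the source: the truncated script
# presumably uses the two thresholds above to bucket 'probability', e.g.
# p < lower_thresh, lower_thresh <= p < upper_thresh, p >= upper_thresh.)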
# -*- coding: utf-8 -*-
import pandas as pd
import os
DATASET_FOLDER = '../../datasets/'
def gen_worldbank_countries():
# Write a csv with the countries of interest and their associated country codes
df_des = dataframe_flood()
df = pd.DataFrame(df_des.groupby('Country')[['Country', 'ISO']].head(1))
df.rename(columns={'Country': 'name', 'ISO':'code'}, inplace=True)
df.to_csv(f'{DATASET_FOLDER}worldbank_countries.csv', index=False)
def dataframe_flood():
# Return a dataframe containing flood-type disasters
try:
df = pd.read_excel(f'{DATASET_FOLDER}emdat_public_2020_09_12_query_uid-tAnKEX.xlsx', index_col=0)
except FileNotFoundError:
print(f"The file 'emdat_public_2020_09_12_query_uid-tAnKEX.xlsx' "
f"is not in the folder {DATASET_FOLDER}")
raise
# Clean up the dataframe (the first rows are not of interest)
df.columns = list(df.iloc[5])
df = df.iloc[6:]
# Keep only flood-type disasters
df = df[df['Disaster Type'] == 'Flood']
return(df)
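# Usage sketch (assumes the EM-DAT Excel file is present in DATASET_FOLDER):
# floods = dataframe_flood()
# print(floods[['Country', 'ISO']].drop_duplicates().head())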
def gen_dataset():
'''Create the two files: historique_precipitation_clean.csv and projection_precipitation_clean.csv
'''
def abreviation2nombre(abr):
lst_abr = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
return(lst_abr.index(abr)+1)
dir_precipitation = DATASET_FOLDER+'precipitation/'
df_hist = pd.DataFrame()
df_pred = pd.DataFrame()
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
import pandas as pd
import numpy as np
import ray
from .utils import (
_build_row_lengths,
_build_col_widths,
_build_coord_df)
from pandas.core.indexing import convert_to_index_sliceable
class _IndexMetadata(object):
"""Wrapper for Pandas indexes in Ray DataFrames. Handles all of the
metadata specific to the axis of partition (setting indexes,
calculating the index within partition of a value, etc.). This
implementation assumes the underlying index lies across multiple
partitions.
IMPORTANT NOTE: Currently all operations, as implemented, are inplace.
WARNING: Currently, the `_lengths` item is the source of truth for an
_IndexMetadata object, since it is easy to manage, and that the coord_df
item may be deprecated in the future. As such, it is _very_ important that
any functions that mutate the coord_df splits in anyway first modify the
lengths. Otherwise bad things might happen!
"""
def __init__(self, dfs=None, index=None, axis=0, lengths_oid=None,
coord_df_oid=None):
"""Inits a IndexMetadata from Ray DataFrame partitions
Args:
dfs ([ObjectID]): ObjectIDs of dataframe partitions
index (pd.Index): Index of the Ray DataFrame.
axis: Axis of partition (0=row partitions, 1=column partitions)
Returns:
A IndexMetadata backed by the specified pd.Index, partitioned off
specified partitions
"""
assert (lengths_oid is None) == (coord_df_oid is None), \
"Must pass both or neither of lengths_oid and coord_df_oid"
if dfs is not None and lengths_oid is None:
if axis == 0:
lengths_oid = _build_row_lengths.remote(dfs)
else:
lengths_oid = _build_col_widths.remote(dfs)
coord_df_oid = _build_coord_df.remote(lengths_oid, index)
self._lengths = lengths_oid
self._coord_df = coord_df_oid
self._index_cache = index
self._cached_index = False
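# Construction sketch (the partition ObjectIDs and index names are placeholders;
# this only illustrates the constructor signature defined above):
# row_meta = _IndexMetadata(dfs=row_partition_oids, index=pd.RangeIndex(n_rows), axis=0)
# col_meta = _IndexMetadata(dfs=col_partition_oids, index=column_index, axis=1)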
def _get__lengths(self):
if isinstance(self._lengths_cache, ray.ObjectID) or \
(isinstance(self._lengths_cache, list) and
isinstance(self._lengths_cache[0], ray.ObjectID)):
self._lengths_cache = ray.get(self._lengths_cache)
return self._lengths_cache
def _set__lengths(self, lengths):
self._lengths_cache = lengths
_lengths = property(_get__lengths, _set__lengths)
def _get__coord_df(self):
"""Get the coordinate dataframe wrapped by this _IndexMetadata.
Since we may have had an index set before our coord_df was
materialized, we'll have to apply it to the newly materialized df
"""
if isinstance(self._coord_df_cache, ray.ObjectID):
self._coord_df_cache = ray.get(self._coord_df_cache)
if self._cached_index:
self._coord_df_cache.index = self._index_cache
self._cached_index = False
return self._coord_df_cache
def _set__coord_df(self, coord_df):
"""Set the coordinate dataframe wrapped by this _IndexMetadata.
Sometimes we set the _IndexMetadata's coord_df outside of the
constructor, generally using fxns like drop(). This produces a modified
index, so we need to reflect the change on the index cache.
If the set _IndexMetadata is an OID instead (due to a copy or whatever
reason), we fall back relying on `_index_cache`.
"""
if not isinstance(coord_df, ray.ObjectID):
self._index_cache = coord_df.index
self._coord_df_cache = coord_df
_coord_df = property(_get__coord_df, _set__coord_df)
def _get_index(self):
"""Get the index wrapped by this _IndexMetadata.
The only time `self._index_cache` would be None is in a newly created
_IndexMetadata object without a specified `index` parameter (See the
_IndexMetadata constructor for more details)
"""
if isinstance(self._coord_df_cache, ray.ObjectID):
return self._index_cache
else:
return self._coord_df_cache.index
def _set_index(self, new_index):
"""Set the index wrapped by this _IndexMetadata.
It is important to always set `_index_cache` even if the coord_df is
materialized due to the possibility that it is set to an OID later on.
This design is more straightforward than caching indexes on setting the
coord_df to an OID due to the possibility of an OID-to-OID change.
"""
new_index = pd.DataFrame(index=new_index)
import pandas as pd
import numpy as np
from vol_processor import interpolate_return_volspread
import statsmodels.api as sm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from os import mkdir, getcwd
from os.path import exists, join
data_dir = join(getcwd(),'data/')
img_dir = join(getcwd(),'images/')
def fit_return_volspread(stock_index,ticker,date,plot_result,**kwargs):
"""
Fit returns in terms of the vol_spread signal
"""
df, mov_avg_window, exc_delay_t, tstamps = interpolate_return_volspread(stock_index,\
ticker,date)
regressors = [
'Const',
'Vol_spread',
'Vol_spread_t-tau',
'Dummy_bot'
]
X = pd.DataFrame(columns=regressors)
# Estimate decay time of initial return
tau_decay_ret0 = mov_avg_window/4
X['Vol_spread'] = df['Vol_spread']* (df['Time'] > tau_decay_ret0).astype(float)
#X['Vol_spread_t-tau0'] = X['Vol_spread'].shift(int(tau_decay_ret0)).fillna(0)
X['Vol_spread_t-tau'] = X['Vol_spread'].shift(mov_avg_window).fillna(0)
# Data belong to beginning of trading?
X['Dummy_bot'] = df['Return'].iloc[0]* np.exp(-df['Time']/tau_decay_ret0)*\
(df['Time'] < tau_decay_ret0).astype(float)
X['Const'] = 1.0  # intercept column (sm.add_constant would instead return a new frame with a 'const' column)
# Dummies to distinguish vol_spread drastic jumps
abs_dvol_spread = abs(df['Vol_spread'].diff().fillna(0))
q = abs_dvol_spread.quantile(0.995)
id_jumps = np.array([0] + abs_dvol_spread[abs_dvol_spread > q].index.tolist())
# If there are consecutive indexes in id_jumps, keep only first and last
# This is convenient, e.g., to catch the end of trading effect
consc = np.split(id_jumps, np.where(np.diff(id_jumps) != 1)[0]+1)
new_ids = [x.tolist() if len(x) <= 2 else [x[0],x[-1]] for x in consc]
first_consc = [x[0] for x in new_ids if len(x) > 2]
# flatten new_ids
id_jumps = [0]+[item for sublist in new_ids for item in sublist]
for i in range(len(id_jumps)-1):
df_dummies = pd.DataFrame(df.shape[0]*[0])
df_dummies.loc[id_jumps[i]:id_jumps[i+1]] = 1
time_jump = df['Time'].loc[id_jumps[i+1]]
if time_jump in exc_delay_t:
# If the time of jump coincides with an excess request delay, identify
# this type of dummy
regressors += ['Dummy_t'+ str(int(time_jump))+'_req_delay']
elif id_jumps[i+1] in first_consc:
regressors += ['Dummy_t'+ str(int(time_jump))+'+2consc']
else:
regressors += ['Dummy_t'+ str(int(time_jump))]
X = pd.concat([X,df_dummies],axis=1)
X.columns = regressors
# Fit Return to vol_spread and dummies
ols = sm.OLS(df['Return'],X.astype(float)).fit()
if plot_result:
dir_save = img_dir +'ret_vs_vol/'+ date +'/'
if not exists(dir_save):
mkdir(dir_save)
if kwargs['tformat'] == 'tstamps':
df['Time'] = tstamps['Time']
ax = df.plot(x='Time',y='Return',style='k',\
title='Intraday Returns for '+ ticker,legend=False)
#df.plot(ax=ax,x='Time',y='Vol_spread',style='g',legend=False)
zero = pd.DataFrame({'Time':df['Time'].values,'Return 0':np.zeros(df.shape[0])})
zero.plot(ax=ax,x='Time',y='Return 0',style='--k',legend=False)
ax.plot(df['Time'],ols.fittedvalues,'c',label='OLS')
fig = ax.get_figure()
fig.set_size_inches(8, 6)
fig.savefig(dir_save+ticker+'.png',bbox_inches='tight',dpi=100)
plt.close(fig)
else:
return ols, X['Vol_spread'].mean(), X['Vol_spread'].std(),\
df['Return'].loc[int(tau_decay_ret0):].sum(),\
np.sum(ols.fittedvalues[int(tau_decay_ret0):])
def plot_return_vs_volspread(stock_index,date):
"""
Plot returns and volume spread together with the fitted values of
return explained by the volume spread
"""
tickers = pd.read_csv(data_dir+ stock_index+ '/'+ stock_index+'_atoms.csv',\
usecols=['Ticker'])
tickers = tickers['Ticker'].values.tolist()
for ticker in tickers:
try:
print('Ploting return vs volSpread for {}'.format(ticker))
fit_return_volspread(stock_index,ticker,date,True,tformat='tstamps')
except (KeyError,IOError):
continue
def return_volspread_stat(stock_index,date):
"""Calculate requested statistics on the stock index"""
cols = [
'Ticker',
'P_const > |t|',
'P_vol_spread > |t|',
'P_vol_spread_t-tau > |t|',
'P_bot > |t|',
'# extra params',
'Traded as | Vol_spread (avg,std)',
'Total avg ret (t>tau0)',
'Total avg ret (pred)',
'R^2'
]
df_stat = pd.DataFrame(columns=cols)
# -*- coding: utf-8 -*-
"""PyGraal_Livrable_2_ITERATION_1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1doBNy8ywSlzrGvYFNBsLZGvAUpFTYhcR
Pour réaliser cette première itération, nous repartons du dataset auquel ont été apporté les modifications suivantes:
- Suppression des NaNs sur la variable cible ('Q5')
- Remplacement des NaNs par le mode pour chaque variable correspondant à des questions à choix unique
- Encodage des colonnes des questions à choix multiples par 0/1 selon NaN ou valeur
- Réduction du Dataset aux entrées des participants professionnels (ayant une profession précise, hors 'étudiant', 'sans emploi' et 'autre')
"""
from google.colab import drive
drive.mount('/content/drive')
# Commented out IPython magic to ensure Python compatibility.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set_theme()
df = pd.read_csv('/content/drive/MyDrive/PyGraal/df_pro.csv', index_col=0)
"""# Analyse de la variable cible
Comme vu précédemment, la valeur cible est 'Q5' (poste actuellement occupé) et après un premier traitement, elle contient 10 valeurs uniques.
"""
plt.figure (figsize=(10,10))
plt.pie(df['Q5'].value_counts(),
autopct = lambda x: str(round(x, 2)) + '%',
labels=df['Q5'].value_counts().index,
pctdistance=0.7,
shadow =True,
colors = ['#dddddd', '#81d8d0','#ffff66', '#ff7f50',
'#a0db8e', '#c0d6e4', '#ffc0cb', '#ffa500', '#808080' , '#6897bb'])
plt.title("Distribution du panel de répondants par poste hors étudiant/autres/sans emploi",fontsize=15, fontweight='bold');
"""Le domaine de la Data est en constante évolution, ses métiers également. De ce fait, nous allons restreindre notre analyse au 5 principaux métiers, qui représentent à eux seuls près de 80% du panel professionnel interrogé.
Pour appuyer cette réflexion, nous nous sommes inspirés de cet article précisant qu'au sein même du métier de Data Scientist il y avait des différenciations:
https://www.journaldunet.com/solutions/dsi/1506951-de-l-avenir-du-metier-de-data-scientist-e/
## Filtre du data set sur les 5 principaux métiers
"""
#Liste Top 5 des professions du panel de répondant
top_5 =df['Q5'].value_counts().head().index.tolist()
top_5
#Création du df_top5
df_top5 = df[df['Q5'].isin (top_5)]
print('Notre dataset contient à présent', df_top5.shape[0], 'entrées et', df_top5.shape[1],'colonnes.')
"""Pour rappel, notre objectif est de créer un modèle capable de proposer un poste en fonction de compétences et d'outils associés.
Par conséquent, en analysant les questions posées, nous pouvons supprimer une partie des colonnes.
"""
#Aperçu du data set
df.sample(2)
"""## Voici la liste des colonnes concernées et notre raisonnement:
## Colonnes à supprimer
Q1 -> Age -> Dans un souci d'éthique, cette variable ne peut-être un élément différenciant pour une suggestion de poste
Q2 -> Genre-> Dans un souci d'éthique, cette variable ne peut-être un élément différenciant pour une suggestion de poste
Q3 -> Pays -> Dans un souci d'éthique, cette variable ne peut-être un élément différenciant pour une suggestion de poste
Q4 -> Niveau d'études-> Dans un souci d'éthique, cette variable ne peut-être un élément différenciant pour une suggestion de poste
Q8 -> Langage de programmation que la personne recommanderait -> Il s'agit d'une recommandation donc point de vue subjectif et non d'une compétence liée à un poste précis
Q11 -> Computing platform -> Il s'agit d'une habitude donc point de vue subjectif et non d'une compétence liée à un poste précis
Q12 -> Hardware / GPU ? TPU -> Il s'agit d'une habitude donc point de vue subjectif et non d'une compétence liée à un poste précis
Q13 -> Nb fois TPU used -> Il s'agit d'une habitude donc point de vue subjectif et non d'une compétence liée à un poste précis
Q18 -> Computer vision methods -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
Q19 -> NLP Methods -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
Q20 taille entreprise-> Question liée à l'entreprise et non au poste du répondant
Q21 combien de personnes en data
Q22 ML implémenté
Q24 salaire
Q25 combien d’argent dépensé
27A -> cloud computing products -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
28A -> ML products -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
Q30 -> Big Data products -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
Q32 -> BI Tools -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
Q34-A -> Automated ML Tools -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
Q36 -> Plateforme de publication -> Il ne s'agit pas d'une compétence technique ou d'un outil lié au poste
Q37 -> Plateforme de formation -> Il ne s'agit pas d'une compétence technique ou d'un outil lié au poste
Q38 -> Primary Tools for Data Analysis -> Il s'agit d'une réponse par texte libre
Q39 -> media favori -> Il ne s'agit pas d'une compétence technique ou d'un outil lié au poste
Q26_B à Q35_B -> Question dépendant d'une réponse précédente et n'ayant pas été posée à tout le panel
## Suppression des colonnes questions B
"""
#1) Sélection des questions avec 'B:'
quest_B =[]
for i in df_top5.columns.tolist():
if 'B:' in i:
quest_B.append(i)
print('Il y a',len(quest_B),'colonnes de questions partie B.')
quest_B
#2) Suppression des colonnes dans le DataFrame
df_top5 = df_top5.drop(quest_B, axis=1)
print('Notre dataset contient à présent', df_top5.shape[0], 'entrées et',df_top5.shape[1], 'colonnes.')
#Les 91 colonnes ont bien été supprimées.
"""##Suppression des autres colonnes
"""
#Recherche des colonnes avec + de 2 valeurs
#quest_u regroupe les colonnes des questions choix unique
#quest_u regroupe les colonnes des questions choix multiple
quest_u =[]
quest_m =[]
for i in df:
if len(df[i].unique())>2:
quest_u.append(i)
else:
quest_m.append(i)
#Création de la liste des colonnes à supprimer
col_to_drop =[]
for i in ['Q1', 'Q2', 'Q3', 'Q4', 'Q8',
'Q11', 'Q12', 'Q13', 'Q18', 'Q19',
'Q20', 'Q21', 'Q22', 'Q24', 'Q25', 'Q27A', 'Q28A',
'Q34A', 'Q36', 'Q37', 'Q38', 'Q39']:
if i not in col_to_drop:
if i in quest_u:
col_to_drop.append(i)
else:
for j in quest_m:
if i in j:
col_to_drop.append(j)
print('Nombre de colonnes à supprimer :', len(col_to_drop))
print(col_to_drop)
#Suppression de ces colonnes et création d'un nouveau df
df_clean = df_top5.drop(col_to_drop,axis=1)
print('Notre dataset contient à présent', df_clean.shape[0], 'entrées et',df_clean.shape[1], 'colonnes.')
df_clean.sample(2)
"""## Encodage des colonnes restantes:
L'ensemble des questions à choix multiple a déjà été traité précédemment.
Il nous reste à encoder Q6 et Q15.
"""
#Q6 Années d'expérience en programmation est une variable ordinale => encodage de 0 à 6
print(df_clean['Q6'].unique().tolist())
df_clean['Q6'] = df_clean['Q6'].replace(['I have never written code', '< 1 years', '1-2 years', '3-5 years', '5-10 years', '10-20 years', '20+ years'], [0,1,2,3,4,5,6])
#Vérif Q6
print(df_clean['Q6'].unique().tolist())
#Q15 Années d'expérience en programmation est une variable ordinale => encodage de 0 à 8
print(df_clean['Q15'].unique().tolist())
df_clean['Q15'] = df_clean['Q15'].replace(['I do not use machine learning methods','Under 1 year', '1-2 years','2-3 years','3-4 years','4-5 years','5-10 years','10-20 years','20 or more years'], [0,1,2,3,4,5,6,7,8])
#Vérif Q15
print(df_clean['Q15'].unique().tolist())
df_clean.sample(2)
#Retravail des intitulés de colonnes pour supprimer les espaces et caractères spéciaux
#df_clean = df_clean.rename(columns=lambda x: x.replace(' ', '_'))
#df_clean = df_clean.rename(columns=lambda x: x.replace(':', '_'))
"""# Itération 1: Choix du modèle d'apprentissage"""
#Création du vecteur 'target' contenant la variable cible 'Q5' et d'un Data Frame 'feats' contenant les différentes features.
target = df_clean['Q5']
feats=df_clean.drop('Q5', axis=1)
#Séparation du dataset en train set et test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(feats, target, test_size=0.2, random_state=200)
"""Notre variable cible étant une variable catégorielle composée de classes, nous utiliserons par la suite des modèles de classification. """
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
sc= StandardScaler()
X_train_scaled = sc.fit_transform(X_train)
X_test_scaled = sc.transform(X_test)
"""### Méthode Arbre de Décision"""
dtc = DecisionTreeClassifier()
dtc.fit(X_train_scaled, y_train)
dtc_y_pred = dtc.predict(X_test_scaled)
print('Score Train set du DecisionTree :',round(dtc.score(X_train_scaled, y_train),3)*100,'%')
print('Score Test set du DecisionTree :', round(dtc.score(X_test_scaled, y_test),5)*100,'%')
from sklearn.metrics import classification_report
print('Rapport de Classification Arbre de Décision')
print(classification_report(y_test, dtc_y_pred))
print("Matrice de confusion de l'Arbre de Décision")
pd.crosstab(y_test, dtc_y_pred, rownames=['Classe réelle'], colnames=['Classe prédite'])
"""### Méthide de Régression Logistique"""
lr = LogisticRegression()
lr.fit(X_train_scaled, y_train)
lr_y_pred = lr.predict(X_test_scaled)
print('Logistic Regression train set score:',round(lr.score(X_train_scaled, y_train),4)*100,'%')
print('Logistic Regression test set score:',round(lr.score(X_test_scaled, y_test),3)*100,'%')
print('Logistic Regression classification report')
print(classification_report(y_test, lr_y_pred))
print("Logistic Regression confusion matrix")
pd.crosstab(y_test, lr_y_pred, rownames=['Actual class'], colnames=['Predicted class'])
"""## Méthode des plus proches voisins (Minkowski, n=3)
"""
knn = KNeighborsClassifier(n_neighbors=3, metric='minkowski')
knn.fit(X_train_scaled, y_train)
knn_y_pred = knn.predict(X_test_scaled)
print('k-Nearest Neighbors train set score:',round(knn.score(X_train_scaled, y_train),4)*100,'%')
print('k-Nearest Neighbors test set score:',round(knn.score(X_test_scaled, y_test),3)*100,'%')
print('k-Nearest Neighbors classification report')
print(classification_report(y_test, knn_y_pred))
print("k-Nearest Neighbors confusion matrix")
pd.crosstab(y_test, knn_y_pred, rownames=['Actual class'], colnames=['Predicted class'])
"""## Méthode SVM - Support Vector Machine"""
from sklearn import model_selection
from sklearn import svm
svm = svm.SVC()
svm.fit(X_train_scaled, y_train)
svm_y_pred = svm.predict(X_test_scaled)
print('SVM model train set score:', round(svm.score(X_train_scaled, y_train), 4)*100, '%')
print('SVM model test set score:', round(svm.score(X_test_scaled, y_test), 4)*100, '%')
print('SVM model classification report')
print(classification_report(y_test, svm_y_pred))
print("SVM model confusion matrix")
pd.crosstab(y_test, svm_y_pred, rownames=['Actual class'], colnames=['Predicted class'])
"""## Méthode - Random Forest"""
rf = RandomForestClassifier ()
rf.fit(X_train_scaled, y_train)
rf_y_pred = rf.predict(X_test_scaled)
print('Random Forest model train set score:', round(rf.score(X_train_scaled, y_train), 4)*100, '%')
print('Random Forest model test set score:', round(rf.score(X_test_scaled, y_test), 4)*100, '%')
print('Random Forest model classification report')
print(classification_report(y_test, rf_y_pred))
print("Random Forest model confusion matrix")
pd.crosstab(y_test, rf_y_pred, rownames=['Actual class'], colnames=['Predicted class'])
"""## Comparaison des scores de chaque modèle entraîné"""
Data = {'Method' : ['dtc','lr','knn','svm','rf'],
'Method_Name' : ['DecisionTreeClassifier','LogisticRegression','KNeighborsClassifier','svm.SVC','RandomForestClassifier'],
'Score_Train' : [0.975, 0.5825, 0.6313, 0.7425, 0.9751],
'Score_Test' : [0.4036, 0.5430, 0.3820, 0.5433, 0.5337],
'precision(macro_avg)' : [0.37, 0.51, 0.40, 0.54, 0.52],
'recall(macro_avg)' : [0.37, 0.49, 0.35, 0.48, 0.46],
'f1_score(macro_avg)' : [0.37, 0.50, 0.35, 0.49, 0.47],
'precision(weighted_avg)' : [0.41, 0.53, 0.42, 0.54, 0.53],
'recall(weighted_avg)' : [0.40, 0.54, 0.38, 0.54, 0.53],
'f1_score(weighted_avg)' : [0.40, 0.53, 0.38, 0.53, 0.52]}
Scores = pd.DataFrame(Data)
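#Hedged sketch: assuming the fitted models dtc, lr, knn, svm and rf are still in
#memory, the comparison table above can be rebuilt programmatically instead of
#hard-coding the scores (the Scores_auto name below is only for illustration).
from sklearn.metrics import f1_score
models = {'dtc': dtc, 'lr': lr, 'knn': knn, 'svm': svm, 'rf': rf}
rows = []
for name, model in models.items():
    y_pred = model.predict(X_test_scaled)
    rows.append({'Method': name,
                 'Score_Train': round(model.score(X_train_scaled, y_train), 4),
                 'Score_Test': round(model.score(X_test_scaled, y_test), 4),
                 'f1_score(macro_avg)': round(f1_score(y_test, y_pred, average='macro'), 2),
                 'f1_score(weighted_avg)': round(f1_score(y_test, y_pred, average='weighted'), 2)})
Scores_auto = pd.DataFrame(rows)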
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy.testing as npt
import os.path
import pandas as pd
import pkgutil
from io import StringIO
import sys
from tabulate import tabulate
import unittest
#find parent directory and import model
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
print("parent_dir")
print(parent_dir)
sys.path.append(parent_dir)
try:
    from beerex_exe import Beerex, BeerexOutputs
except ImportError:
    # fall back to the package-relative import when these tests run inside the package
    from ..beerex_exe import Beerex, BeerexOutputs
print("sys.path")
print(sys.path)
# load transposed qaqc data for inputs and expected outputs
# this works for both local nosetests and travis deploy
#input details
try:
if __package__ is not None:
csv_data = pkgutil.get_data(__package__, 'beerex_qaqc_in_transpose.csv')
data_inputs = StringIO(csv_data)
pd_obj_inputs = pd.read_csv(data_inputs, index_col=0, engine='python')
else:
csv_transpose_path_in = os.path.join(os.path.dirname(__file__), "beerex_qaqc_in_transpose.csv")
#print(csv_transpose_path_in)
pd_obj_inputs = pd.read_csv(csv_transpose_path_in, index_col=0, engine='python')
pd_obj_inputs['csrfmiddlewaretoken'] = 'test'
#with open('./beerex_qaqc_in_transpose.csv') as f:
#csv_data = csv.reader(f)
finally:
pass
#print("beerex inputs")
#print(pd_obj_inputs.shape)
#print('beerex expected output keys ' + str(pd_obj_inputs.columns.values.tolist()))
#print(tabulate(pd_obj_inputs.iloc[:,0:5], headers='keys', tablefmt='plain'))
#print(tabulate(pd_obj_inputs.iloc[:,6:10], headers='keys', tablefmt='plain'))
#print(tabulate(pd_obj_inputs.iloc[:,11:13], headers='keys', tablefmt='plain'))
#print(tabulate(pd_obj_inputs.iloc[:,14:17], headers='keys', tablefmt='plain'))
# load transposed qaqc data for expected outputs
try:
if __package__ is not None:
data_exp_outputs = StringIO(pkgutil.get_data(__package__, './beerex_qaqc_exp_transpose.csv'))
pd_obj_exp = pd.read_csv(data_exp_outputs, index_col=0, engine='python')
else:
#csv_transpose_path_exp = "./beerex_qaqc_exp_transpose.csv"
csv_transpose_path_exp = os.path.join(os.path.dirname(__file__), "beerex_qaqc_exp_transpose.csv")
#print(csv_transpose_path_exp)
pd_obj_exp = pd.read_csv(csv_transpose_path_exp, index_col=0, engine='python')
finally:
pass
#print("beerex expected outputs")
#print('beerex expected output dimensions ' + str(pd_obj_exp.shape))
#print('beerex expected output keys ' + str(pd_obj_exp.columns.values.tolist()))
#print(tabulate(pd_obj_exp.iloc[:,0:5], headers='keys', tablefmt='plain'))
#print(tabulate(pd_obj_exp.iloc[:,6:10], headers='keys', tablefmt='plain'))
#print(tabulate(pd_obj_exp.iloc[:,11:14], headers='keys', tablefmt='plain'))
#print(tabulate(pd_obj_exp.iloc[:,15:16], headers='keys', tablefmt='plain'))
# create an instance of trex object with qaqc data
beerex_output_empty = BeerexOutputs()
beerex_calc = Beerex(pd_obj_inputs, pd_obj_exp)
beerex_calc.execute_model()
inputs_json, outputs_json, exp_out_json = beerex_calc.get_dict_rep()
#print("beerex output")
#print(inputs_json)
#print("####")
#######print(beerex_calc)
test = {}
######beerex_calc.execute_model()
class TestBeerex(unittest.TestCase):
"""
Integration tests for beerex.
"""
def setUp(self):
"""
Setup routine for beerex.
:return:
"""
pass
def tearDown(self):
"""
Teardown routine for beerex.
:return:
"""
pass
def test_assert_output_series(self):
""" Verify that each output variable is a pd.Series """
try:
num_variables = len(beerex_calc.pd_obj_out.columns)
result = pd.Series(False, index=list(range(num_variables)), dtype='bool')
expected = pd.Series(True, index=list(range(num_variables)), dtype='bool')
for i in range(num_variables):
column_name = beerex_calc.pd_obj_out.columns[i]
output = getattr(beerex_calc, column_name)
if isinstance(output, pd.Series):
result[i] = True
tab = pd.concat([result,expected], axis=1)
print('model output properties as pandas series')
print(tabulate(tab, headers='keys', tablefmt='fancy_grid'))
npt.assert_array_equal(result, expected)
finally:
pass
return
def test_assert_output_series_dtypes(self):
""" Verify that each output variable is the correct dtype """
try:
num_variables = len(beerex_calc.pd_obj_out.columns)
#get the string of the type that is expected and the type that has resulted
result = pd.Series(False, index=list(range(num_variables)), dtype='bool')
expected = pd.Series(True, index=list(range(num_variables)), dtype='bool')
for i in range(num_variables):
column_name = beerex_calc.pd_obj_out.columns[i]
output_result = getattr(beerex_calc, column_name)
column_dtype_result = output_result.dtype.name
output_expected = getattr(beerex_output_empty, column_name)
output_expected2 = getattr(beerex_calc.pd_obj_out, column_name)
column_dtype_expected = output_expected.dtype.name
if column_dtype_result == column_dtype_expected:
result[i] = True
#tab = pd.concat([result,expected], axis=1)
if(result[i] != expected[i]):
print(i)
print(column_name)
print(str(result[i]) + "/" + str(expected[i]))
print(column_dtype_result + "/" + column_dtype_expected)
print('result')
print(output_result)
print('expected')
print(output_expected2)
#print(tabulate(tab, headers='keys', tablefmt='fancy_grid'))
npt.assert_array_equal(result, expected)
finally:
pass
return
def test_eec_spray(self):
"""
Integration test for beerex.eec_spray
"""
try:
self.blackbox_method_int('eec_spray')
finally:
pass
return
def test_eec_soil(self):
"""
Integration test for beerex.eec_soil
"""
try:
self.blackbox_method_int('eec_soil')
finally:
pass
return
def test_eec_seed(self):
"""
Integration test for beerex.eec_seed
"""
try:
self.blackbox_method_int('eec_seed')
finally:
pass
return
def test_eec_tree(self):
"""
Integration test for beerex.eec_tree
"""
try:
self.blackbox_method_int('eec_tree')
finally:
pass
return
def test_eec_method(self):
"""
Integration test for beerex.eec_method
"""
try:
self.blackbox_method_int('eec')
finally:
pass
return
def test_lw1_total_dose(self):
"""
Integration test for beerex.lw1_total_dose
"""
try:
self.blackbox_method_int('lw1_total_dose')
finally:
pass
return
def test_lw2_total_dose(self):
"""
Integration test for beerex.lw2_total_dose
"""
try:
self.blackbox_method_int('lw2_total_dose')
finally:
pass
return
def test_lw3_total_dose(self):
"""
Integration test for beerex.lw3_total_dose
"""
try:
self.blackbox_method_int('lw3_total_dose')
finally:
pass
return
def test_lw4_total_dose(self):
"""
Integration test for beerex.lw4_total_dose
"""
try:
self.blackbox_method_int('lw4_total_dose')
finally:
pass
return
def test_lw5_total_dose(self):
"""
Integration test for beerex.lw5_total_dose
"""
try:
self.blackbox_method_int('lw5_total_dose')
finally:
pass
return
def test_ld6_total_dose(self):
"""
Integration test for beerex.ld6_total_dose
"""
try:
self.blackbox_method_int('ld6_total_dose')
finally:
pass
return
def test_lq1_total_dose(self):
"""
Integration test for beerex.lq1_total_dose
"""
try:
self.blackbox_method_int('lq1_total_dose')
finally:
pass
return
def test_lq2_total_dose(self):
"""
Integration test for beerex.lq2_total_dose
"""
try:
self.blackbox_method_int('lq2_total_dose')
finally:
pass
return
def test_lq3_total_dose(self):
"""
Integration test for beerex.lq3_total_dose
"""
try:
self.blackbox_method_int('lq3_total_dose')
finally:
pass
return
def test_lq4_total_dose(self):
"""
Integration test for beerex.lq4_total_dose
"""
try:
self.blackbox_method_int('lq4_total_dose')
finally:
pass
return
def test_aw_cell_total_dose(self):
"""
Integration test for beerex.aw_cell_total_dose
"""
try:
self.blackbox_method_int('aw_cell_total_dose')
finally:
pass
return
def test_aw_brood_total_dose(self):
"""
Integration test for beerex.aw_brood_total_dose
"""
try:
self.blackbox_method_int('aw_brood_total_dose')
finally:
pass
return
def test_aw_comb_total_dose(self):
"""
Integration test for beerex.aw_comb_total_dose
"""
try:
self.blackbox_method_int('aw_comb_total_dose')
finally:
pass
return
def test_aw_pollen_total_dose(self):
"""
Integration test for beerex.aw_pollen_total_dose
"""
try:
self.blackbox_method_int('aw_pollen_total_dose')
finally:
pass
return
def test_aw_nectar_total_dose(self):
"""
Integration test for beerex.aw_nectar_total_dose
"""
try:
self.blackbox_method_int('aw_nectar_total_dose')
finally:
pass
return
def test_aw_winter_total_dose(self):
"""
Integration test for beerex.aw_winter_total_dose
"""
try:
self.blackbox_method_int('aw_winter_total_dose')
finally:
pass
return
def test_ad_total_dose(self):
"""
Integration test for beerex.ad_total_dose
"""
try:
self.blackbox_method_int('ad_total_dose')
finally:
pass
return
def test_aq_total_dose(self):
"""
Integration test for beerex.aq_total_dose
"""
try:
self.blackbox_method_int('aq_total_dose')
finally:
pass
return
def test_lw1_acute_rq(self):
"""
Integration test for beerex.lw1_acute_rq
"""
try:
self.blackbox_method_int('lw1_acute_rq')
finally:
pass
return
def test_lw2_acute_rq(self):
"""
Integration test for beerex.lw2_acute_rq
"""
try:
self.blackbox_method_int('lw2_acute_rq')
finally:
pass
return
def test_lw3_acute_rq(self):
"""
Integration test for beerex.lw3_acute_rq
"""
try:
self.blackbox_method_int('lw3_acute_rq')
finally:
pass
return
def test_lw4_acute_rq(self):
"""
Integration test for beerex.lw4_acute_rq
"""
try:
self.blackbox_method_int('lw4_acute_rq')
finally:
pass
return
def test_lw5_acute_rq(self):
"""
Integration test for beerex.lw5_acute_rq
"""
try:
self.blackbox_method_int('lw5_acute_rq')
finally:
pass
return
def test_ld6_acute_rq(self):
"""
Integration test for beerex.ld6_acute_rq
"""
try:
self.blackbox_method_int('ld6_acute_rq')
finally:
pass
return
def test_lq1_acute_rq(self):
"""
Integration test for beerex.lq1_acute_rq
"""
try:
self.blackbox_method_int('lq1_acute_rq')
finally:
pass
return
def test_lq2_acute_rq(self):
"""
Integration test for beerex.lq2_acute_rq
"""
try:
self.blackbox_method_int('lq2_acute_rq')
finally:
pass
return
def test_lq3_acute_rq(self):
"""
Integration test for beerex.lq3_acute_rq
"""
try:
self.blackbox_method_int('lq3_acute_rq')
finally:
pass
return
def test_lq4_acute_rq(self):
"""
Integration test for beerex.lq4_acute_rq
"""
try:
self.blackbox_method_int('lq4_acute_rq')
finally:
pass
def test_aw_cell_acute_rq(self):
"""
Integration test for beerex.aw_cell_acute_rq
"""
try:
self.blackbox_method_int('aw_cell_acute_rq')
finally:
pass
def test_aw_brood_acute_rq(self):
"""
Integration test for beerex.aw_brood_acute_rq
"""
try:
self.blackbox_method_int('aw_brood_acute_rq')
finally:
pass
def test_aw_comb_acute_rq(self):
"""
Integration test for beerex.aw_comb_acute_rq
"""
try:
self.blackbox_method_int('aw_comb_acute_rq')
finally:
pass
def test_aw_pollen_acute_rq(self):
"""
Integration test for beerex.aw_pollen_acute_rq
"""
try:
self.blackbox_method_int('aw_pollen_acute_rq')
finally:
pass
def test_aw_nectar_acute_rq(self):
"""
Integration test for beerex.aw_nectar_acute_rq
"""
try:
self.blackbox_method_int('aw_nectar_acute_rq')
finally:
pass
def test_aw_winter_acute_rq(self):
"""
Integration test for beerex.aw_winter_acute_rq
"""
try:
self.blackbox_method_int('aw_winter_acute_rq')
finally:
pass
def test_ad_acute_rq(self):
"""
Integration test for beerex.ad_acute_rq
"""
try:
self.blackbox_method_int('ad_acute_rq')
finally:
pass
def test_aq_acute_rq(self):
"""
Integration test for beerex.aq_acute_rq
"""
try:
self.blackbox_method_int('aq_acute_rq')
finally:
pass
return
def test_lw1_chronic_rq(self):
"""
Integration test for beerex.lw1_chronic_rq
"""
try:
self.blackbox_method_int('lw1_chronic_rq')
finally:
pass
return
def test_lw2_chronic_rq(self):
"""
Integration test for beerex.lw2_chronic_rq
"""
try:
self.blackbox_method_int('lw2_chronic_rq')
finally:
pass
return
def test_lw3_chronic_rq(self):
"""
Integration test for beerex.lw3_chronic_rq
"""
try:
self.blackbox_method_int('lw3_chronic_rq')
finally:
pass
return
def test_lw4_chronic_rq(self):
"""
Integration test for beerex.lw4_chronic_rq
"""
try:
self.blackbox_method_int('lw4_chronic_rq')
finally:
pass
return
def test_lw5_chronic_rq(self):
"""
Integration test for beerex.lw5_chronic_rq
"""
try:
self.blackbox_method_int('lw5_chronic_rq')
finally:
pass
return
def test_ld6_chronic_rq(self):
"""
Integration test for beerex.ld6_chronic_rq
"""
try:
self.blackbox_method_int('ld6_chronic_rq')
finally:
pass
return
def test_lq1_chronic_rq(self):
"""
Integration test for beerex.lq1_chronic_rq
"""
try:
self.blackbox_method_int('lq1_chronic_rq')
finally:
pass
return
def test_lq2_chronic_rq(self):
"""
Integration test for beerex.lq2_chronic_rq
"""
try:
self.blackbox_method_int('lq2_chronic_rq')
finally:
pass
return
def test_lq3_chronic_rq(self):
"""
Integration test for beerex.lq3_chronic_rq
"""
try:
self.blackbox_method_int('lq3_chronic_rq')
finally:
pass
return
def test_lq4_chronic_rq(self):
"""
Integration test for beerex.lq4_chronic_rq
"""
try:
self.blackbox_method_int('lq4_chronic_rq')
finally:
pass
def test_aw_cell_chronic_rq(self):
"""
Integration test for beerex.aw_cell_chronic_rq
"""
try:
self.blackbox_method_int('aw_cell_chronic_rq')
finally:
pass
def test_aw_brood_chronic_rq(self):
"""
Integration test for beerex.aw_brood_chronic_rq
"""
try:
self.blackbox_method_int('aw_brood_chronic_rq')
finally:
pass
def test_aw_comb_chronic_rq(self):
"""
Integration test for beerex.aw_comb_chronic_rq
"""
try:
self.blackbox_method_int('aw_comb_chronic_rq')
finally:
pass
def test_aw_pollen_chronic_rq(self):
"""
Integration test for beerex.aw_pollen_chronic_rq
"""
try:
self.blackbox_method_int('aw_pollen_chronic_rq')
finally:
pass
def test_aw_nectar_chronic_rq(self):
"""
Integration test for beerex.aw_nectar_chronic_rq
"""
try:
self.blackbox_method_int('aw_nectar_chronic_rq')
finally:
pass
def test_aw_winter_chronic_rq(self):
"""
Integration test for beerex.aw_winter_chronic_rq
"""
try:
self.blackbox_method_int('aw_winter_chronic_rq')
finally:
pass
def test_ad_chronic_rq(self):
"""
Integration test for beerex.ad_chronic_rq
"""
try:
self.blackbox_method_int('ad_chronic_rq')
finally:
pass
def test_aq_chronic_rq(self):
"""
Integration test for beerex.aq_chronic_rq
"""
try:
self.blackbox_method_int('aq_chronic_rq')
finally:
pass
return
def blackbox_method_int(self, output):
"""
Helper method to reuse code for testing numpy array outputs from Beerex model
:param output: String; Pandas Series name (e.g. column name) without '_out'
:return:
"""
pd.set_option('display.float_format','{:.4E}'.format) # display model output in scientific notation
result = beerex_calc.pd_obj_out["out_" + output]
expected = beerex_calc.pd_obj_exp["exp_" + output]
tab = pd.concat([result, expected], axis=1)
import copy
import datetime
from datetime import datetime
from datetime import timedelta
import altair as alt
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import streamlit as st
import gsheet
from streamlit import caching
import cufflinks as cf
LOCAL = False
if LOCAL:
cf.go_offline()
#See gsheet for fetching local creds
def st_config():
"""Configure Streamlit view option and read in credential f
ile if needed check if user and password are correct"""
st.set_page_config(layout="wide")
pw = st.sidebar.text_input("Enter password:")
if pw == st.secrets["PASSWORD"]: #CHANGE CHANGE CHANGE BEFORE PUSHING!
return st.secrets["GSHEETS_KEY"]
else:
return None
@st.cache
def read_data(creds,ws,gs):
"""Read court tracking data in and drop duplicate case numbers"""
try:
df = gsheet.read_data(gsheet.open_sheet(gsheet.init_sheets(creds),ws,gs))
# df.drop_duplicates("Case Number",inplace=True) #Do we want to drop duplicates???
return df
except Exception as e:
return None
def convert(x):
try:
return x.date()
except:
return None
def convert_date(df,col):
"""Helper function to convert a col to a date"""
df[col] = pd.to_datetime(df[col]).apply(lambda x: convert(x))
return df
def agg_checklist(df_r):
"""Aggegrates a dataframe with multi indexes (but one level) seperated by a ', ' into a data frame with single indexes"""
df_r["result"]=df_r.index
df_b = pd.concat([pd.Series(row['count'], row['result'].split(', ')) for _,row in df_r.iterrows()]).reset_index().groupby("index").sum()
df_a = pd.concat([pd.Series(row['cases'], row['result'].split(', ')) for _,row in df_r.iterrows()]).reset_index().groupby("index").agg(lambda x: ", ".join(x))
df_r = df_b.merge(df_a,right_index=True,left_index=True)
return df_r
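#Illustrative example (assumed toy data): if df_r contains the rows
# "Yes, No" -> count 2, cases "1,2" and "Yes" -> count 1, cases "3",
#agg_checklist splits the multi-answers and returns one row per single answer:
# "No" -> count 2, cases "1,2" and "Yes" -> count 3, cases "1,2, 3".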
def agg_cases(df,col,i,pie=False):
"""Aggregates a df by col and aggregates case numbers into new column seperated by ',' or <br> depending on pie flag """
df_r = df.groupby([col,"Case Number"]).count().iloc[:,i]
df_r.name = "count"
df_r = pd.DataFrame(df_r)
df_a = pd.DataFrame(df_r.to_records())
df_r = df_r.groupby(level=0).sum()
if pie:
df_r["cases"] = df_a.groupby(col)["Case Number"].agg(lambda x: ',<br>'.join(x))
else:
df_r["cases"] = df_a.groupby(col)["Case Number"].agg(lambda x: ','.join(x))
return df_r
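#For example (assumed usage): agg_cases(cl, "Status of Call", 0) yields one row per
#status with the row count ("count") and the case numbers joined by "," ("cases");
#with pie=True the join uses ",<br>" so the Plotly hover text wraps onto new lines.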
def volunteer_details(cl):
"""Compute and Render Volunteer Information section"""
df = agg_cases(cl,"Caller Name",0,True)
#total minutes on phone per caller
df1 = cl.groupby("Caller Name")["Length Call (minutes)"].sum()
#Calls over time
df2 = agg_cases(cl,"Date Contact Made or Attempted",0,True)
#calls made per tracker
# with st.beta_expander("Volunteer Information"):
cols = st.beta_columns([1,1])
fig = px.pie(df, values='count', names=df.index, title='Volunteer Call Count',hover_data=["cases"])
fig.update_traces(textinfo='value')
cols[0].plotly_chart(fig)
fig1 = px.pie(df1, values='Length Call (minutes)', names=df1.index, title='Volunteer Call Time',hover_data=["Length Call (minutes)"])
fig1.update_traces(textinfo='value')
cols[1].plotly_chart(fig1)
#Summary table
#completed calls
cl_s = cl.loc[cl["Status of Call"]=="Spoke with tenant call completed"]
cl_s = pd.DataFrame(cl_s.groupby("Caller Name")["count"].sum())
#combine non completed and completed
df = df.merge(cl_s,on="Caller Name")
cols = st.beta_columns([1,1,1,1])
cols[0].markdown("**Name**")
cols[1].markdown("**Call Count**")
cols[2].markdown("**Tenants Spoken To**")
cols[3].markdown("**Time on Calls**")
for i,row in df.iterrows():
cols = st.beta_columns([1,1,1,1])
cols[0].text(i)
cols[1].text(row["count_x"])
cols[2].text(row["count_y"])
cols[3].text(df1.loc[i])
#So it would be each question and then you would lead each response with the name/case# and then the info?
def render_qualitative_data(cl):
# with st.beta_expander("Qualitative Data"):
# min_date = cl["Date Contact Made or Attempted"].min()-timedelta(days=7)
# max_date = datetime.today().date()+timedelta(days=90) #df[col].max()+timedelta(days=31) #lets just go a month out actually lets do today
# start_date,end_date = date_options(min_date,max_date,"2")
cl.reset_index(inplace=True)
display = [
"Defendant",
"Case Number",
"Notes ",
"Other Eviction Details",
"LL mentioned eviction details",
"Rental Assistance Programs Applied",
"Rental Assistance Application Issues",
"Health Issues",
"Repair notes",
"Want to Call Code?",
"Feedback about RRT",
"Talking with Media"
]
cl = cl[display]
cl.replace("Unknown","",inplace=True)
for col in cl.columns:
if not((col == "Defendant") or (col == "Case Number")):
st.markdown(f"## {col}")
for i,entry in enumerate(cl[col]):
if entry != "":
st.markdown(f"**{cl.at[i,'Defendant']}/{cl.at[i,'Case Number']}:** {entry}")
#for idx,row in cl.iterrows():
# st.markdown(f"**{row['Defendant']}**")
# text = ""
# for i,col in enumerate(cl.columns):
# if row[col] != "":
# text += row[col] + ", "
# st.markdown(f"{text}")
# for i,col in enumerate(cl.columns):
# cols[i].markdown(f"**{col}**")
# for idx,row in cl.iterrows():
# cols = st.beta_columns(len(cl.columns))
# for i,col in enumerate(cl.columns):
# cols[i].text(row[col])
#UI start date end date filtering assume dataframe already in date format.date()
def date_options(min_date,max_date,key):
quick_date_input = st.selectbox("Date Input",["Custom Date Range","Previous Week","Previous 2 Weeks","Previous Month (4 weeks)"],1)
if quick_date_input == "Previous Week":
start_date = (
datetime.today() - timedelta(weeks=1)
).date()
end_date = datetime.today().date()
if quick_date_input == "Previous 2 Weeks":
start_date = (
datetime.today() - timedelta(weeks=2)
).date()
end_date = datetime.today().date()
if quick_date_input == "Previous Month (4 weeks)":
start_date = (
datetime.today() - timedelta(weeks=4)
).date()
end_date = datetime.today().date()
if quick_date_input == "User Input":
key1 = key + "a"
key2 = key + "b"
cols = st.beta_columns(2)
start_date = cols[0].date_input("Start Date",min_value=min_date,max_value=max_date,value=min_date,key=key1)#,format="MM/DD/YY")
end_date = cols[1].date_input("End Date",min_value=min_date,max_value=max_date,value=datetime.today().date(),key=key2)#,format="MM/DD/YY")
return start_date,end_date
def filter_dates(df,start_date,end_date,col):
return df.loc[(df[col] >= start_date) & (df[col] <= end_date)]
def yes_no_qs(df_cc):
with st.beta_expander("Trends Over Time"):
display = ['Still living at address?','Knows about moratorium?','Knows about the eviction?','Eviction for Non-Payment?','LL mentioned eviction?','Rental Assistance Applied?','Repairs issues?']
df_cc["Date"] = pd.to_datetime(df_cc['Date Contact Made or Attempted'])
for col in display:
df_cc_agg = (
df_cc
.groupby([col,pd.Grouper(key='Date', freq='M')])
.agg("nunique")['Case Number']
.reset_index()
.sort_values('Date')
)
df_cc_agg = df_cc_agg.set_index(df_cc_agg["Date"])
df_cc_agg = df_cc_agg.pivot_table("Case Number", index=df_cc_agg.index, columns=col,aggfunc='first')
if "Unknown" not in df_cc_agg.columns:
df_cc_agg["Unknown"] = 0
df_cc_agg["Yes"].fillna(0,inplace=True)
df_cc_agg["No"].fillna(0,inplace=True)
df_cc_agg["Unknown"].fillna(0,inplace=True)
df_cc_agg["Yes %"] = (df_cc_agg["Yes"] / (df_cc_agg["Yes"]+df_cc_agg["Unknown"]+df_cc_agg["No"])*100)
df_cc_agg["No %"] = (df_cc_agg["No"] / (df_cc_agg["Yes"]+df_cc_agg["Unknown"]+df_cc_agg["No"])*100)
df_cc_agg["Unknown %"] = (df_cc_agg["Unknown"] / (df_cc_agg["Yes"]+df_cc_agg["Unknown"]+df_cc_agg["No"])*100)
#round percentages
df_cc_agg[["Yes %","No %","Unknown %"]] = df_cc_agg[["Yes %","No %","Unknown %"]].round(decimals=1).astype(object)
df_cc_agg.columns.name = None
st.markdown(f"### {col}")
cols = st.beta_columns(2)
cols[1].line_chart(
df_cc_agg[["Yes %","No %","Unknown %"]],
use_container_width = True,
height = 200
)
df_cc_agg.index = df_cc_agg.index.map(lambda x: x.strftime("%B"))
cols[0].write(df_cc_agg)
#change try excepts to check empty and return if none for df_r and cs agg cases
def overview(el,cl,cc,df_cc,df_fu,pir):
# with st.beta_expander("Data Overview for all Tenants"):
#Date filter
#Call Status break downs not unique cases..
with st.beta_expander("Call Data by Date"):
min_date = cl["Date Contact Made or Attempted"].min()-timedelta(days=7)
max_date = datetime.today().date()+timedelta(days=90) #df[col].max()+timedelta(days=31) #lets just go a month out actually lets do today
start_date,end_date = date_options(min_date,max_date,"1")
cl_f = filter_dates(cl,start_date,end_date,"Date Contact Made or Attempted")
df_cc = cl_f.loc[cl_f["Status of Call"].eq("Spoke with tenant call completed")].drop_duplicates("Case Number")
ev_ff = filter_dates(ev,start_date,end_date,"date_filed")
ev_h = filter_dates(ev,start_date,end_date,"date")
cols = st.beta_columns([1,1,1])
cols[0].markdown(f"### :phone: Calls made: {len(cl_f)} ")
cols[1].markdown(f"### :mantelpiece_clock: Time on calls: {cl_f.groupby('Caller Name')['Length Call (minutes)'].sum().sum()}m")
cols[2].markdown(f"### :ballot_box_with_check: Tenants Spoken to: {len(df_cc['Case Number'].unique())}") #Do we want to only have unique case numbers?
cols = st.beta_columns([1,1,1,1])
cols[0].markdown(f"### :muscle: Cases Called: {len(cl_f['Case Number'].unique())}")
cols[1].markdown(f"### :open_file_folder: Filings:{len(ev_ff['case_number'].unique())}")
cols[2].markdown(f"### :female-judge: Hearings:{len(ev_h['case_number'].unique())}")
cols[3].markdown(f"### :telephone_receiver::smiley: Number of callers:{len(cl_f['Caller Name'].unique())}")
st.text("")
#Completed Calls
#Completed call Yes/No break downs:
display = ['Still living at address?','Knows about moratorium?','Knows about the eviction?','Eviction for Non-Payment?','LL mentioned eviction?','Rental Assistance Applied?','Repairs issues?']
dfs= []
columns = df_cc.columns
for i,col in enumerate(columns):
if col in display:
try: #if agg columns had no data
df_r = agg_cases(df_cc,col,i)
except:
df_r = None
if df_r is not None:
df_r.columns = ["Count","Cases"]
df_r = df_r.reset_index(level=[0]) #
dfs.append(df_r)
st.text("")
st.text("")
cols = st.beta_columns(len(display))
for i, df in enumerate(dfs):
cols[i].markdown(f"#### {display[i]}")
cols[i].text("")
#Yes/ No / Unknown ?s
for i, df in enumerate(dfs): #Sort change to ["Yes","No","Unknown"]
#clean up blanks
for vals in df.values:
cols[i].markdown(f"{vals[0]}: {vals[1]}/{df['Count'].sum()}")
#Call Status Pie Chart and table/ Completed calls
try:
cs = agg_cases(cl_f,"Status of Call",0,True)
except:
cs = None
if cs is not None:
fig = px.pie(cs, values="count", names=cs.index, title="Call Status Break Down",hover_data=["cases"])
fig.update_traces(textinfo='value')
cols = st.beta_columns([2,1])
cols[0].plotly_chart(fig)
#Call Status numbers
cols[1].text("")
cols[1].text("")
cols[1].markdown("#### Break downs for call status:")
cols[1].write(cs["count"])
cols[1].text("")
cols[1].text("")
#Case number contact bar graph
# cl_s["Status"] = "Spoke with tenant call completed"
# cl_a = pd.DataFrame(cl_f.groupby("Caller Name")["count"].sum())
# cl_a["Status"] = "All calls"
# cl_ff = pd.concat([cl_a,cl_s])
# fig = px.bar(cl_ff,x=cl_ff.index,y="count",color="Status")
# fig = px.bar(cl_s,x=cl_s.index,y="count",color="Status")
# st.plotly_chart(fig,use_container_width=True)
#Completed call information
#volunteer details
volunteer_details(cl_f)
st.write("")
st.write("")
st.markdown("## Click Checkbox Below for Qualitative Data")
if st.checkbox("Qualitative Data"):
render_qualitative_data(cl_f)
def side_bar(cl,df_cc,el,cc,df_fu,ev_s):
"""Compute and render data for the sidebar (Excludes Sidebar UI)"""
st.sidebar.markdown(f"### Total calls made: {len(cl)} ")
st.sidebar.markdown(f"### Total time on calls: {cl.groupby('Caller Name')['Length Call (minutes)'].sum().sum()} minutes")
st.sidebar.markdown(f"### Tenants Spoken to: {len(df_cc['Case Number'].unique())}") #Do we want to only have unique case numbers?
st.sidebar.markdown(f"### Emails Sent: {len(el['Case Number'].unique())-len(el.loc[el['Email Method'].eq('')])}") #Errors are logged as "" in Email log gsheet
st.sidebar.markdown(f"### Cases Called: {len(cl['Case Number'].unique())}")
st.sidebar.markdown(f"### Cases Not Yet Called: {len(cc.loc[~cc['unique search'].eq('')])}")
st.sidebar.markdown(f"### Calls to Follow Up: {len(df_fu['Case Number'].unique())}")
st.sidebar.markdown(f"### Settings Today to 90 Days Out: {len(ev_s['case_number'].unique())}")
def activity_graph(pir,cl,ev):
with st.beta_expander(" Volunteer Activity vs. Court Activity"):
#call counts vs. not called counts vs filing counts vs contact counts (with phone numbers) all unique
#for contact counts aggregate by week take max date -6 days and sum unique cases with that filter (add 7 to max date to get day contacts came in)
#filter completed vs non completed calls
df_cc = cl.loc[cl["Status of Call"].eq("Spoke with tenant call completed")].drop_duplicates("Case Number")
df_nc = cl.loc[~cl["Status of Call"].eq("Spoke with tenant call completed")].drop_duplicates("Case Number")
#aggregate by day/week/month
#only look at date range when we started making calls to now
min_date = pd.to_datetime(cl["Date Contact Made or Attempted"]).min().date()
max_date = datetime.today().date()
ev = filter_dates(ev,min_date,max_date,"date_filed")
pir = filter_dates(pir,min_date,max_date,"File Date")
choice = st.radio(
"Aggregate by day/week/month",
["day","week","month"],
index=1
)
if choice == "day":
freq = "D" #B ? for biz day
if choice == "week":
freq = "W-SUN" #week mon- sunday
if choice == "month":
freq = "M" #month starting on 1st
#set up time index, aggregate my freq, merge, and display graphs
#aggegrate and build dfs
#new contacts
pir['Date'] = pd.to_datetime(pir['File Date']) + pd.to_timedelta(7, unit='d') #get in a week after file date
df_pir = (
pir.groupby(pd.Grouper(key='Date', freq=freq))
.agg("nunique")[["Cell Phone","Home Phone"]]
.reset_index()
.sort_values('Date')
)
df_pir = df_pir.set_index(df_pir["Date"])
df_pir["New Contacts"] = df_pir["Cell Phone"] + df_pir["Home Phone"]
#call counts`
cl['Date'] = pd.to_datetime(cl['Date Contact Made or Attempted'])
df_cl = (
cl.groupby(pd.Grouper(key='Date', freq=freq))
.agg("count")[["Case Number","Defendant"]]
.reset_index()
.sort_values('Date')
)
df_cl = df_cl.set_index(df_cl["Date"])
df_cl["Cases Called"] = df_cl["Case Number"]
#completed calls
cl_cc = cl.loc[cl["Status of Call"].eq("Spoke with tenant call completed")]
df_cc = (
cl_cc.groupby(pd.Grouper(key='Date', freq=freq))
.agg("count")[["Case Number","Defendant"]]
.reset_index()
.sort_values('Date')
)
df_cc = df_cc.set_index(df_cc["Date"])
df_cl["Tenants Spoken With"] = df_cc["Case Number"] #can just add back into call counts df so we dont have to double merge
#filings
ev['Date'] = pd.to_datetime(ev['date_filed'])
df_ev = (
ev.groupby(pd.Grouper(key='Date', freq=freq))
.agg("nunique")[["case_number","defendants"]]
.reset_index()
.sort_values('Date')
)
df_ev = df_ev.set_index(df_ev["Date"])
df_ev["Cases Filed"] = df_ev["case_number"]
#hearings
ev['Date'] = pd.to_datetime(ev['date'])
df_evh = (
ev.groupby(pd.Grouper(key='Date', freq=freq))
.agg("nunique")[["case_number","defendants"]]
.reset_index()
.sort_values('Date')
)
df_evh = df_evh.set_index(df_evh["Date"])
df_ev["Cases Heard"] = df_evh["case_number"]
#merge em
df=df_cl.merge(df_pir,right_index=True,left_index=True,how="outer")
df=df.merge(df_ev,right_index=True,left_index=True,how="outer")
#plot em
st.plotly_chart(
df[["New Contacts","Cases Called","Tenants Spoken With","Cases Filed","Cases Heard"]].iplot(
# df[["New Contacts","Cases Called","Tenants Spoken With","Cases Filed"]].iplot(
kind='lines',
size = 5,
rangeslider=True,
asFigure=True
),
use_container_width = True,
height = 200
)
#maybe sort in render page and then drop duplicates so follow ups get dropped?
def render_page(el,cl,cc,ev,pir,ev_s):
"""Compute sub data frames for page rendering and call sub render functions"""
#Make sub data frames
#Follow up calls to make: not unique for case number Looks at cases still in follow up list (Follow up list is generated and maintained in community lawyer) A call is taken out if a case is dismissed (from PIR integration) or a volunteer marks completed call or do not call back
df_fu = cl.loc[cl["Case Number"].isin(cc.loc[~cc['unique search follow up'].eq("")]["Case Number"])]
#Calls to make: not unique for case number
df_c2m = cc.loc[~cc['unique search'].eq("")]
#Completed Calls: for overview (only completed calls info) unique for case number
df_cc = cl.loc[cl["Status of Call"].eq("Spoke with tenant call completed")].drop_duplicates("Case Number")
df_cc.replace("","Unknown",inplace=True)#replace "" entries with unknown
#Completed Calls: for list (includes follow up calls) not unique for case number
df_cc_fu = cl.loc[cl["Case Number"].isin(df_cc["Case Number"])]
#Not yet contacted cases
df_nc = cc.loc[~cc['unique search'].eq("")] #Have calls that were not made yet or were never made in All data set
#All cases: includes email logs call logs and calls not yet made information
df_ac = pd.concat([df_nc,cl,el])
#Contact Failed not in contact list (cc) follow up not in call completed list (df_cc)
df_cf = cl.loc[~cl["Case Number"].isin(df_cc["Case Number"]) & ~cl["Case Number"].isin(df_fu["Case Number"])]
#clean volunteer names
cl["Caller Name"] = cl["Caller Name"].str.rstrip(" ")
#filter for settings for week in advance
start_date = datetime.today().date()
end_date = datetime.today().date()+timedelta(days=90)
ev_s = filter_dates(ev_s,start_date,end_date,"setting_date")
#Render sub-pages
side_bar(cl,df_cc,el,cc,df_fu,ev_s)
overview(el,cl,cc,df_cc,df_fu,pir)
yes_no_qs(df_cc)
activity_graph(pir,cl,ev)
if __name__ == "__main__":
#Read in data
if LOCAL:
st.set_page_config(layout="wide")
el = pd.read_csv('../data/RRT_contacts_cl - Email_log.csv')
cl = pd.read_csv('../data/RRT_contacts_cl - Call_log.csv')
cc = pd.read_csv('../data/RRT_contacts_cl - Contact_list.csv')
ev = pd.read_csv('../data/Court_scraper_evictions_archive - evictions_archive.csv')
ev_s = pd.read_csv('../data/Court_scraper_eviction_scheduler - eviction_scheduler.csv')
import pandas as pd
import numpy as np
import json
from . import Tagsets
from ..Utils.CorpusParser import CorpusParser
from ..Utils.Utils import increment
class HMM(object):
"""
Data structure to represent a Hidden Markov Model
"""
def __init__(self, q=None, a=None, b=None, smoothing='laplace', alpha=1, tag_count=None):
"""
Init the data structure.
:param q: set of states.
:param a: transition probability matrix. Each a[i, j] represents the probability of moving from state i to j.
:param b: observation likelihoods. b[tag, word] = likelihood of 'word' being of class 'tag'
:param smoothing: Smoothing technique. Needed to deal with unknown words.
- None: No smoothing is used. b[tag, word] = count(word, tag) / count(tag)
- Laplace: Additive smoothing. b[tag, word] = (count(word, tag) + alpha) / (count(tag) + alpha * size(vocabulary)).
:param alpha: Additive smoothing constant used when smoothing='laplace'.
:param tag_count: Tag count. Used for trained Models using LaPlace smoothing.
"""
if smoothing not in ['laplace', 'max', 'none']:
    raise ValueError("smoothing must be one of 'laplace', 'max' or 'none'")
self._smoothing = smoothing
self._alpha = alpha
self.q = q
self._a = a
self._b = b
self.tag_count = tag_count
self.trained = a is not None and b is not None # If the user provides a and b, then we already have a model.
def train(self, sentences=None, root=None, fileids='.*', encoding='utf8'):
"""
Trains the Hidden Markov Model.
:param sentences: Tagged sentences. If provided, the others arguments will be ignored.
:param root: Directory.
:param fileids: List of files that have to be read. '.*' if all files have to be parsed.
:param encoding: File encoding. UTF-8 by default.
"""
if sentences is None and root is None:
return -1
bigram_states = {} # Counts the frequency of two states appearing one after the other.
tag_word_count = {} # Counts how many times has a tag.
if sentences is None:
reader = CorpusParser(root, fileids, encoding)
sentences = reader.tagged_sentences()
for sentence in sentences:
current = Tagsets.START_TAG
for word in sentence:
# Each word is a tuple ("cat", "NN")
token = word[0]
tag = word[1]
last = current # Last state (t_{i - 1})
current = tag # Current state (t_i)
tag_word_count = increment(tag_word_count, tag, token)
bigram_states = increment(bigram_states, current, last)
bigram_states = increment(bigram_states, Tagsets.END_TAG, current) # Link the last word with the stop tag
self.q = tuple([Tagsets.START_TAG]) + tuple(bigram_states.keys())
self._a = self.compute_a(bigram_states)
self._b = self.compute_b(tag_word_count)
self.tag_count = self.compute_tag_count(tag_word_count)
self.trained = True
def compute_a(self, dictionary):
"""
Given a dictionary with the bigrams of states, computes the matrix A.
a[i, j] = p(t_i | t_j) = C(t_j, t_i)/C(t_j)
Where:
C(t_j, t_i): How many times t_j is followed by t_i.
C(t_j): Number of t_j occurrences.
:param dictionary: Dictionary of bigram states with their count dictionary[s_i][s_j] = count
:return: transition probability matrix.
"""
n = len(self.q)
a = np.zeros((n, n))
for s_i in self.q:
for s_j in self.q:
if s_j in dictionary and s_i in dictionary[s_j]:
i = self.q.index(s_i)
j = self.q.index(s_j)
a[i, j] = dictionary[s_j][s_i] / dictionary[s_j][0]
return pd.DataFrame(a, columns=self.q, index=self.q)
def compute_b(self, dictionary):
"""
Given a dictionary with the count of how many times a word has a tag, computes the matrix B.
b[w, t] = p(w | t) = C(t, w) / C(t)
C(t, w): how many times the word w has the tag t.
C(t): Count of tag t.
:param dictionary: Dictionary of words and tags counts.
:return: observation likelihood matrix.
"""
dict_b = {} # We temporarily use a dictionary instead of a matrix because we don't have a list of words.
unique_words = []
for t in dictionary.keys():
for w in dictionary[t].keys():
if w != 0:
dict_b[w, t] = dictionary[t][w] / dictionary[t][0]
if w not in unique_words:
unique_words.append(w)
rows = len(self.q)
cols = len(unique_words)
b = np.zeros((rows, cols))
for (w, t) in dict_b:
i = self.q.index(t)
j = unique_words.index(w)
if self._smoothing == 'none' or self._smoothing == 'max':
b[i, j] = dict_b[w, t]
elif self._smoothing == 'laplace':
if t in dictionary:
count_t = dictionary[t][0]
else:
count_t = 0
if t in dictionary and w in dictionary[t]:
count_t_w = dictionary[t][w]
else:
count_t_w = 0
b[i, j] = (count_t_w + self._alpha) / (count_t + self._alpha * len(unique_words))
return pd.DataFrame(b, columns=unique_words, index=self.q)
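# Hedged usage sketch: HMM.train() also accepts pre-tagged sentences directly,
# each sentence being a list of (token, tag) tuples, so a toy model can be fitted
# without pointing at a corpus directory (the tags below are illustrative only):
#
#   model = HMM(smoothing='laplace', alpha=1)
#   model.train(sentences=[[('the', 'DT'), ('cat', 'NN'), ('sleeps', 'VBZ')]])
#   model._a   # tag-transition probabilities (pandas DataFrame)
#   model._b   # per-tag word observation likelihoods (pandas DataFrame)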
import pandas as pd
from datetime import date
import requests
from tokenfile import *
contributors_filtered = pd.read_csv("Complete_steps/contributorsfiltered.csv")
from os import getcwd
import sys
sys.path.append(getcwd() + '/..') # Add src/ dir to import path
import traceback
import logging
from os.path import join
from datetime import date, timedelta, datetime
import networkx as nx
import pandas as pd
from pymongo import MongoClient
from bson.objectid import ObjectId
from mdutils.mdutils import MdUtils
import libs.networkAnalysis as na
import libs.pandasLib as pl
from libs.mongoLib import getContentDocsPerPlatform, getAllDocs, getMinMaxDay
from initialProcessing.createGraph import getGraphRequirments
from libs.osLib import loadYaml
def getDayFinisher(myDate):
date_suffix = ["th", "st", "nd", "rd"]
if myDate % 10 in [1, 2, 3] and myDate not in [11, 12, 13]:
return date_suffix[myDate % 10]
else:
return date_suffix[0]
if __name__ == '__main__':
# Set up logger
root = logging.getLogger()
root.setLevel(logging.DEBUG)
# Load config
configDir = '../../configs/'
config = loadYaml(join(configDir, 'export.yaml'))
platforms = config['platforms']
exportKeys = {plt: dt for plt, dt in config['keysToExport'].items()}
minYear, maxYear = 2009, 2020
singleFile = False
try:
# Set up DB
client = MongoClient()
db = client['digitalMe']
collectionCont = db['content']
collectionEnt = db['entities']
collectionLoc = db['locations']
logging.info(f'Loading data from DB')
data = getGraphRequirments(collectionEnt, collectionLoc, collectionCont, platforms,
timeLimits=(minYear, maxYear))
nodesPerClass = {
'time': data['temporalPeriod'],
'content': [x['_id'] for x in data['contentList']],
'tag': [x['_id'] for x in data['entitiesList']],
'spatial': [x['_id'] for x in data['locationsList']],
}
logging.info(
f'Data acquired, creating graph (temporal period: {data["temporalPeriod"][0]} -> {data["temporalPeriod"][-1]})')
# Transform lists to dataframe for faster operations
data['contentDf'] = pd.DataFrame(data['contentList']).set_index('_id')
data['contentDf'].timestamp = data['contentDf'].timestamp.apply(lambda x: [d.date() for d in x])
# data['contentDf'] = data['contentDf'][['platform', 'type', 'timestamp', 'body', 'tags', 'locations']]
data['locationDf'] = pd.DataFrame(data['locationsList']).set_index('_id')
data['entityDf'] = pd.DataFrame(data['entitiesList'])
# try and add training data points versus F1 score graph
# author: <NAME>, <NAME>, <NAME>, <NAME>
# date: 2020-06-05
'''This script will read X_test and predicted label values for test comments from the theme model from the specified directory
and will predict the subthemes for these comments. The output dataframe will be saved it in specified directory.
There are 2 parameters Input Path and Output Path where you want to write the evaluations of the subtheme predictions.
Usage: predict_subtheme.py --input_file=<input_file> --output_dir=<destination_dir_path>
Example:
python src/models/predict_theme.py --input_file='theme_question1_test' --output_dir=data/output/theme_predictions/
python src/models/predict_theme.py --input_file='theme_question2' --output_dir=data/output/theme_predictions/
python src/models/predict_theme.py --input_file='theme_question1_2015' --output_dir=data/output/theme_predictions/
Options:
--input_file String for which predictions needs to be made
--output_dir=<destination_dir_path> Directory for saving predictions
'''
import numpy as np
import pandas as pd
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
import tensorflow.compat.v1 as tf
import os
tf.disable_v2_behavior()
from docopt import docopt
opt = docopt(__doc__)
def main(input_file, output_dir):
"""
Takes the input_file and calls make_predictions class with
output_dir as arguments
"""
if input_file not in ["theme_question1_test", "theme_question2", "theme_question1_2015"]:
raise TypeError("The input_file options are 'theme_question1_test', 'theme_question2' or 'theme_question1_2015.\n")
assert os.path.exists(output_dir), "The path entered for output_dir does not exist. Make sure to enter correct path \n"
print("----START: predict_theme.py----")
mp = make_predictions()
mp.predict(input_file=input_file, output_dir=output_dir)
print('Thanks for your patience, the predictions have been saved!\n')
print("----END: predict_theme.py----")
return
class make_predictions:
def load_data(self, input_file="theme_question1_test"):
"""
Loads data according to input_file argument
Parameters
----------
input_file: (str) (theme_question2, theme_question1_2015, default value: theme_question1_test)
Returns
-------
numpy array/ pandas dataframe
"""
if input_file == 'theme_question1_test':
self.padded_docs = np.load('data/interim/question1_models/advance/X_test_padded.npy')
self.output_name = 'theme_question1_test'
self.y_test = pd.read_excel('data/interim/question1_models/advance/y_test.xlsx')
assert len(self.y_test) > 0, 'no records in y_test'
self.y_test = self.y_test.iloc[:,:12]
self.y_train = pd.read_excel('data/interim/question1_models/advance/y_train.xlsx')
assert len(self.y_train) > 0, 'no records in y_train'
self.y_train = self.y_train.iloc[:,:12]
elif (input_file == 'theme_question1_2015'):
self.padded_docs = np.load('data/interim/question1_models/advance/data_2015_padded.npy')
self.output_name = 'theme_question1_2015'
else:
self.padded_docs = np.load('data/interim/question2_models/comments_q2_padded.npy')
self.output_name = 'theme_question2'
print('\nLoading: files were sucessfuly loaded.')
def themewise_results(self, Ytrue, Ypred, Ytrain):
'''Calculate accuracies for theme classification
Parameters
----------
Ytrue : array of shape (n_obeservations, n_labels)
Correct labels for the 12 text classifications
Ypred : array of shape (n_obeservations, n_labels)
Predicted labels for the 12 text classifications
Returns
-------
overall_results : dataframes of overall evaluation metrics
theme_results : dataframe of evaluation metrics by class
'''
# Calculate individual accuracies and evaluation metrics for each class
labels = ['CPD', 'CB', 'EWC', 'Exec', 'FWE', 'SP', 'RE', 'Sup', 'SW',
'TEPE', 'VMG', 'OTH']
Y_count = []
pred_count = []
Y_count_train = []
accuracies = []
precision = []
recall = []
f1 = []
for i in np.arange(Ytrue.shape[1]):
Y_count.append(np.sum(Ytrue.iloc[:, i] == 1))
pred_count.append(np.sum(Ypred[:, i] == 1))
Y_count_train.append(np.sum(Ytrain.iloc[:, i] == 1))
accuracies.append(accuracy_score(Ytrue.iloc[:, i], Ypred[:, i]))
precision.append(precision_score(Ytrue.iloc[:, i], Ypred[:, i]))
recall.append(recall_score(Ytrue.iloc[:, i], Ypred[:, i]))
f1.append(f1_score(Ytrue.iloc[:, i], Ypred[:, i]))
theme_results = pd.DataFrame({'Label': labels,
'Y_count': Y_count,
'Pred_count': pred_count,
'Y_count_train' : Y_count_train,
'Accuarcy': accuracies,
'Precision': precision,
'Recall': recall,
'F1 Score': f1})
return theme_results
def predict(self, input_file, output_dir):
"""
Predicts the themes depending on the input_file and saved results using the
output_dir
"""
"Predicts the theme for comments based on input file"
# Loading padded document for prediction
self.load_data(input_file)
#Loading the model
theme_model = tf.keras.models.load_model('models/Theme_Model/theme_model')
#Predictions
print("**Making the predictions**")
pred = theme_model.predict(self.padded_docs)
pred = (pred > 0.4)*1
if input_file == 'theme_question1_test':
accuracy = []
precision = []
recall = []
accuracy = accuracy_score(self.y_test, pred)
precision = precision_score(self.y_test, pred, average='micro')
recall = recall_score(self.y_test, pred, average='micro')
f1 = f1_score(self.y_test, pred, average='micro')
results = pd.DataFrame(data={'Accuracy':accuracy, 'Precision':precision, 'Recall':recall, 'F1 Score':f1}, index=['theme_test_results'])
import pkg_resources
from unittest.mock import sentinel
import pandas as pd
import pytest
import osmo_jupyter.dataset.combine as module
@pytest.fixture
def test_picolog_file_path():
return pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_picolog.csv"
)
@pytest.fixture
def test_calibration_file_path():
return pkg_resources.resource_filename(
"osmo_jupyter", "test_fixtures/test_calibration_log.csv"
)
class TestOpenAndCombineSensorData:
def test_interpolates_data_correctly(
self, test_calibration_file_path, test_picolog_file_path
):
combined_data = module.open_and_combine_picolog_and_calibration_data(
calibration_log_filepaths=[test_calibration_file_path],
picolog_log_filepaths=[test_picolog_file_path],
).reset_index() # move timestamp index to a column
# calibration log has 23 columns, but we only need to check that picolog data is interpolated correctly
subset_combined_data_to_compare = combined_data[
[
"timestamp",
"equilibration status",
"setpoint temperature (C)",
"PicoLog temperature (C)",
]
]
expected_interpolation = pd.DataFrame(
[
{
"timestamp": "2019-01-01 00:00:00",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39,
},
{
"timestamp": "2019-01-01 00:00:01",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 39.5,
},
{
"timestamp": "2019-01-01 00:00:03",
"equilibration status": "equilibrated",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
{
"timestamp": "2019-01-01 00:00:04",
"equilibration status": "waiting",
"setpoint temperature (C)": 40,
"PicoLog temperature (C)": 40,
},
]
).astype(
subset_combined_data_to_compare.dtypes
) # coerce datatypes to match
pd.testing.assert_frame_equal(
subset_combined_data_to_compare, expected_interpolation
)
class TestGetEquilibrationBoundaries:
@pytest.mark.parametrize(
"input_equilibration_status, expected_boundaries",
[
(
{ # Use full timestamps to show that it works at second resolution
pd.to_datetime("2019-01-01 00:00:00"): "waiting",
pd.to_datetime("2019-01-01 00:00:01"): "equilibrated",
pd.to_datetime("2019-01-01 00:00:02"): "equilibrated",
pd.to_datetime("2019-01-01 00:00:03"): "waiting",
},
[
{
"start_time": pd.to_datetime("2019-01-01 00:00:01"),
"end_time": pd.to_datetime("2019-01-01 00:00:02"),
}
],
),
(
{ # Switch to using only years as the timestamp for terseness and readability
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
}
],
),
(
{
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
pd.to_datetime("2023"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
pd.to_datetime("2021"): "waiting",
pd.to_datetime("2022"): "equilibrated",
pd.to_datetime("2023"): "waiting",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
},
{
"start_time": pd.to_datetime("2022"),
"end_time": pd.to_datetime("2022"),
},
],
),
(
{
pd.to_datetime("2019"): "equilibrated",
pd.to_datetime("2020"): "waiting",
},
[
{
"start_time": pd.to_datetime("2019"),
"end_time": pd.to_datetime("2019"),
}
],
),
(
{
pd.to_datetime("2019"): "waiting",
pd.to_datetime("2020"): "equilibrated",
},
[
{
"start_time": pd.to_datetime("2020"),
"end_time": pd.to_datetime("2020"),
}
],
),
(
{
pd.to_datetime("2019"): "equilibrated",
pd.to_datetime("2020"): "waiting",
pd.to_datetime("2021"): "equilibrated",
},
[
{
"start_time": pd.to_datetime("2019"),
"end_time": pd.to_datetime("2019"),
},
{
"start_time": pd.to_datetime("2021"),
"end_time": | pd.to_datetime("2021") | pandas.to_datetime |
import pandas as pd
import numpy as np
class TriangleDisplay():
def __repr__(self):
if (self.values.shape[0], self.values.shape[1]) == (1, 1):
data = self._repr_format()
return data.to_string()
else:
data = 'Valuation: ' + self.valuation_date.strftime('%Y-%m') + \
'\nGrain: ' + 'O' + self.origin_grain + \
'D' + self.development_grain + \
'\nShape: ' + str(self.shape) + \
'\nIndex: ' + str(self.key_labels) + \
'\nColumns: ' + str(list(self.vdims))
return data
def _repr_html_(self):
''' Jupyter/Ipython HTML representation '''
if (self.values.shape[0], self.values.shape[1]) == (1, 1):
data = self._repr_format()
if np.nanmean(abs(data)) < 10:
fmt_str = '{0:,.4f}'
elif np.nanmean(abs(data)) < 1000:
fmt_str = '{0:,.2f}'
else:
fmt_str = '{:,.0f}'
if len(self.ddims) > 1 and type(self.ddims[0]) is int:
data.columns = [['Development Lag'] * len(self.ddims),
self.ddims]
default = data.to_html(max_rows=pd.options.display.max_rows,
max_cols=pd.options.display.max_columns,
float_format=fmt_str.format) \
.replace('nan', '')
return default.replace(
'<th></th>\n <th>{}</th>'.format(
list(data.columns)[0]),
'<th>Origin</th>\n <th>{}</th>'.format(
list(data.columns)[0]))
else:
data = pd.Series([self.valuation_date.strftime('%Y-%m'),
'O' + self.origin_grain + 'D'
+ self.development_grain,
self.shape, self.key_labels, list(self.vdims)],
index=['Valuation:', 'Grain:', 'Shape:',
'Index:', "Columns:"],
name='Triangle Summary').to_frame()
pd.options.display.precision = 0
return data.to_html(max_rows=pd.options.display.max_rows,
max_cols=pd.options.display.max_columns)
def _repr_format(self):
if type(self.odims[0]) == np.datetime64:
origin = pd.Series(self.odims).dt.to_period(self.origin_grain)
else:
origin = pd.Series(self.odims)
out = pd.DataFrame(self.values[0, 0], index=origin, columns=self.ddims)
if str(out.columns[0]).find('-') > 0 and not \
isinstance(out.columns, pd.PeriodIndex):
out.columns = [item.replace('-9999', '-Ult')
for item in out.columns]
if len(out.drop_duplicates()) != 1:
return out
else:
return out.drop_duplicates().set_index(pd.Index(['(All)']))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=E1101
"""
Created on Saturday, March 14 15:23 2020
@author: khayes847
"""
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
def kmeans_score(data, n_groups):
"""
This function will perform KMeans clustering on the included dataset,
using the n_groups numbers as the number of clusters. It will then
find and return the predicted clusters' Silhouette Score.
Parameters:
data: The dataset in question.
n_groups (int): The number of clusters.
Returns:
score (float): The Silhouette Score for the clustered dataset.
"""
k_means = KMeans(n_clusters=n_groups, random_state=42)
k_means.fit(data)
labels = k_means.labels_
score = float(silhouette_score(data, labels))
return score
def agg_score(data, n_groups, score=True):
"""
Performs Agglomerative Hierarchical Clustering on data using
the specified number of components. If "Score" is selected,
returns the Silhouette Score. Otherwise, produces the cluster labels,
and adds them to the original dataset. For convenience, the function
also performs the data cleaning steps that don't require the log, outlier-
capping, or scaling transformations.
Parameters:
data: The dataset in question.
n_groups (int): The number of clusters.
score (bool): Whether the function will return the Silhouette
Score. If 'True', the function will return the Silhouette
Score. If 'False', the function will add the clustered labels
to the dataset, then save and return the dataset.
Returns:
score_val (float): The Silhouette Score for the clustered dataset.
target: The target labels as a pandas dataframe.
"""
agg_comp = AgglomerativeClustering(n_clusters=n_groups)
agg_comp.fit(data)
labels = agg_comp.labels_
if score:
score_val = float(silhouette_score(data, labels))
return score_val
data = pd.read_csv("data/shoppers.csv")
# Combining datasets
target = pd.DataFrame(labels, columns=['Target'])
return target
def pca_95(data):
"""
This function performs PCA dimension reduction on data, in
order to determine whether doing so will improve clustering.
The number of dimensions is determined as the number that will
retain at least 95% variance.
Parameters:
data: The dataset in question.
Returns:
data: The transformed dataset.
"""
pca = PCA(n_components=.95, random_state=42)
pca_array = pca.fit_transform(data)
data_pca = pd.DataFrame(pca_array)
return data_pca
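# Hedged sketch (not part of the original analysis): after fitting, PCA also
# exposes how many components were kept and how much variance they explain,
# which helps interpret the frame returned by pca_95. 'toy' is synthetic and
# purely illustrative.
def _pca_95_inspect_example():
    """Illustrative only: reports kept components and retained variance."""
    rng = np.random.RandomState(0)
    toy = pd.DataFrame(rng.rand(50, 10))
    pca = PCA(n_components=.95, random_state=42)
    pca.fit(toy)
    return pca.n_components_, pca.explained_variance_ratio_.sum()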
def sil_score(data):
"""
This function performs Agglomerative Hierarchical Clustering and KMeans
clustering on pre- and post-PCA data into a range of two to ten clusters.
For each of the four cluster methods, it compiles a list of Silhouette
Scores at each cluster number, and graphs them using a line graph.
This is done in order to determine which cluster produces the highest
Silhouette Score, as well as how many clusters we should use.
Parameters:
data: The dataset in question.
Returns:
None
"""
data_pca = pca_95(data)
n_list = list(range(2, 10))
kmeans_no_pca = []
kmeans_pca = []
agg_no_pca = []
agg_pca = []
for number in n_list:
score = kmeans_score(data, n_groups=number)
kmeans_no_pca.append(score)
score = agg_score(data, n_groups=number)
agg_no_pca.append(score)
score = kmeans_score(data_pca, n_groups=number)
kmeans_pca.append(score)
score = agg_score(data_pca, n_groups=number)
agg_pca.append(score)
plot_sil_scores(kmeans_no_pca, agg_no_pca, kmeans_pca, agg_pca, n_list)
def plot_sil_scores(kmeans_no_pca, agg_no_pca, kmeans_pca, agg_pca, n_list):
"""
Plots Silhouette Scores for KMeans and Agglomerative Hierarchical
Clustering both pre- and post-PCA against the number of clusters
used to obtain each score.
Parameters:
kmeans_no_pca: The list of Silhouette Scores for
the KMeans clustering without PCA.
agg_no_pca: The list of Silhouette Scores for the
Agglomerative Hierarchical clustering without PCA.
kmeans_pca: The list of Silhouette Scores for the
KMeans clustering with PCA.
agg_pca: The list of Silhouette Scores for the
Agglomerative Hierarchical clustering with PCA.
n_list: A list describing the range of cluster numbers used
(from two to ten).
Returns:
None
"""
plt.figure(figsize=(16, 8))
plt.plot(n_list, kmeans_no_pca, label='KMeans')
plt.plot(n_list, agg_no_pca, label='Agglomerative Hierarchical')
plt.plot(n_list, kmeans_pca, label='KMeans W/ PCA')
plt.plot(n_list, agg_pca, label='Agglomerative Hierarchical W/ PCA')
plt.xlabel('Number of Clusters')
plt.ylabel('Silhouette Score')
plt.legend()
plt.title("Comparison of Clustering Methods")
plt.show()
def get_targets(data, n_groups):
"""
In order to obtain our final set of labels for the data,
this function transforms the data first using PCA,
then Agglomerative Hierarchical Clustering. It returns
the target labels as a pandas dataframe.
Parameters:
data: The dataset in question.
n_groups (int): The number of clusters the function will form.
Returns:
target: The target labels as a pandas dataframe.
"""
data = pca_95(data)
target = agg_score(data, n_groups, score=False)
return target
def train_test(x_val, y_val, test=.25, rs_val=42):
"""
    This function takes in the feature and target datasets.
It then splits them into training and test datasets, according
to the test size and random state specified. It stratifies the
test datasets according to the target values, in order to
maintain target value ratios.
Parameters:
x_val: The feature dataset.
y_val: The target dataset.
test (float): The percentage of the datasets that will be split
into the test dataset.
rs_val (int): The random_state value for the train_test_split
function.
Returns:
x_train: The features for the training dataset.
x_test: The features for the test dataset.
y_train: The targets for the training dataset.
y_test: The targets for the test dataset.
"""
x_train, x_test, y_train, y_test = train_test_split(x_val, y_val,
test_size=test,
random_state=rs_val,
stratify=y_val)
return x_train, x_test, y_train, y_test
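# Hedged sketch (not part of the original analysis): one way to confirm that
# the stratified split preserved the class ratios is to compare normalised
# value counts before and after splitting. The helper name is illustrative.
def _train_test_ratio_check(x_val, y_val):
    """Illustrative only: class ratios for the full, train, and test sets."""
    _, _, y_train, y_test = train_test(x_val, y_val)
    return (y_val.value_counts(normalize=True),
            y_train.value_counts(normalize=True),
            y_test.value_counts(normalize=True))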
def visualize_feature_importance(x_val, y_val):
"""
In order to determine the important features in the
    classification method, this function fits a random forest
    classifier to the included data. It then graphs
the relative importances of the ten most influential
features.
Parameters:
x_val: The dataset features
y_val: The dataset labels
Returns:
None
"""
# Determining feature importance using Random Forests
clf = RandomForestClassifier(n_estimators=100,
random_state=42).fit(x_val, y_val)
feature_importances = (pd.DataFrame(clf.feature_importances_,
index=x_val.columns,
columns=['importance'])
.sort_values('importance', ascending=True))
feature_importances = feature_importances.iloc[-10:]
# Graphing feature importance
ax_val = feature_importances.plot(kind='barh', figsize=(20, 10),
legend=None)
ax_val.set_xlabel('Importance', fontsize=16)
ax_val.set_ylabel('Features', fontsize=16)
plt.yticks(fontsize=14)
plt.xticks(fontsize=14)
plt.title('Feature Importance Determined By Random Forests', fontsize=20)
plt.show()
def graph_differences_cat(data, target, features, overlap=False):
"""
In order to label the target categories properly, we have to describe
    each target group's relationship to the important variables. This function
will create three pairs of bar graphs describing the relationship
(both cumulative and by percentage) between the cluster labels and
the included categorical variables. If we are plotting 'Browser_Other' and
'TrafficType_20', for the purposes of determining overlap between these
features, the function will also describe the relationship between the
cluster labels and datapoints belonging to both categories using a
stacked bar graph if 'overlap' is defined as True.
Parameters:
data: The dataset features.
target: The dataset target labels
features: A list of features to graph.
overlap (bool): If set to true, will stack the relationship between cluster
label and datapoints in both the 'Browser_Other' and
'TrafficType_20' categories.
Returns:
None
"""
# pylint: disable=W0612
# Create "groupby" dataset for graphing
data = data.join(target)
if overlap:
data['Both'] = ((data['Browser_Other'] == 1) &
(data['TrafficType_20'] == 1)).astype(int)
features += ['Both']
data_grouped = pd.DataFrame(data['Target'].value_counts())
for col in features:
data_grouped[col] = pd.DataFrame(data.groupby('Target')[col].sum())
data_grouped[f'{col}_percentage'] = (data_grouped[col] /
data_grouped['Target'])
if overlap:
features = features[:2]
# Create graphs
x_pos = [0, 1, 2]
for col in features:
figure, axes = plt.subplots(nrows=1, ncols=2, figsize=(16, 8))
plt.subplot(1, 2, 1)
plt.bar(x_pos, data_grouped[col], color='green')
if overlap:
plt.bar(x_pos, data_grouped['Both'], color='blue')
plt.legend(['Both features', 'Only one feature'])
plt.xlabel("Cluster label", fontsize=16)
plt.ylabel("Datapoint Quantity", fontsize=16)
plt.title("Quantity Per Cluster", fontsize=16)
plt.xticks(x_pos, data_grouped.index, fontsize=12)
plt.yticks(fontsize=12)
plt.subplot(1, 2, 2)
plt.bar(x_pos, data_grouped[f'{col}_percentage'], color='green')
if overlap:
plt.bar(x_pos, data_grouped['Both_percentage'], color='blue')
plt.legend(['Both features', 'Only one feature'])
plt.xlabel("Cluster label", fontsize=16)
plt.ylabel("Datapoint Percentage", fontsize=16)
plt.title("Percentage Per Cluster", fontsize=16)
plt.xticks(x_pos, data_grouped.index, fontsize=12)
plt.yticks(fontsize=12)
figure.tight_layout(pad=3.0)
plt.suptitle(col, fontsize=20)
plt.show()
# pylint: enable=W0612
def cluster_1_composition(data, target):
"""
    For the purposes of fully understanding the composition of cluster '1',
    this function will determine the percentage of cluster '1' datapoints that
    belong to both the 'Browser_Other' and 'TrafficType_20'
categories, the percentage that belong to only one category, and the
percentage that belong to neither category.
Parameters:
data: The dataset features.
target: The dataset target labels.
Returns:
None
"""
data = data.join(target)
data = data.loc[data['Target'] == 1]
data['Both'] = ((data['Browser_Other'] == 1) &
(data['TrafficType_20'] == 1)).astype(int)
data['Browser_Other_Only'] = ((data['Browser_Other'] == 1) &
(data['TrafficType_20'] == 0)).astype(int)
data['TrafficType_20_Only'] = ((data['Browser_Other'] == 0) &
(data['TrafficType_20'] == 1)).astype(int)
data['Neither'] = ((data['Browser_Other'] == 0) &
(data['TrafficType_20'] == 0)).astype(int)
data = data[['Both', 'Browser_Other_Only',
'TrafficType_20_Only', 'Neither']]
data_grouped = (pd.DataFrame(data.sum(), columns=['Number']))/len(data)
# Create Graph
x_pos = [0, 1, 2, 3]
plt.figure(figsize=(16, 8))
plt.bar(x_pos, data_grouped['Number'], color='green')
plt.xlabel("Feature Overlap Category", fontsize=16)
plt.ylabel("Datapoint Percentage", fontsize=16)
plt.title('Cluster "1" Distribution', fontsize=16)
plt.xticks(x_pos, data_grouped.index, fontsize=12)
plt.yticks(fontsize=12)
plt.show()
def plot_continuous(data, target, new_cluster_0=False):
"""
In order to label the new clusters, we will need to analyze each cluster
with regards to the most important continuous variables,
'ProductRelated_Duration', 'ExitRates', and 'ProductRelated'. We will
divide each into quantiles of 10, and plot the distributions of each
cluster using bar plots. Since the clusters are unbalanced, we will
look at the total percentage of each cluster allocated to each quantile.
Parameters:
data: The dataset features.
target: The dataset target labels.
    new_cluster_0 (bool): If True, removes all Cluster "0" and Cluster "2"
                          datapoints that have neither 'Browser_Other' nor
                          'TrafficType_20', for easier comparison with
                          Cluster "1".
Returns:
None
"""
data2 = data.join(target)
features = ['ProductRelated_Duration', 'ExitRates', 'ProductRelated']
for col in features:
if new_cluster_0:
            data2 = data2.loc[~((data2['Target'] == 0) &
                                (~((data2['Browser_Other'] == 1) |
                                   (data2['TrafficType_20'] == 1))))]
            data2 = data2.loc[~((data2['Target'] == 2) &
                                (~((data2['Browser_Other'] == 1) |
                                   (data2['TrafficType_20'] == 1))))]
data2 = data2.reset_index(drop=True)
data_grouped = pd.DataFrame(data2['Target'])
data_grouped['quantiles'] = pd.qcut(data2[col],
q=10, labels=list(range(10)))
enc = OneHotEncoder()
data_array = enc.fit_transform(data_grouped[['quantiles']]).toarray()
enc_data = | pd.DataFrame(data_array) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + | pd.offsets.Minute(1) | pandas.offsets.Minute |
#######################
# Header.R
from datetime import time
from operator import index
from os import path, times
import numpy as np
import pandas as pd
import os
import logging
from pathlib import Path
from pandas.core.reshape.merge import merge
from powergenome.util import regions_to_keep
from powergenome.us_state_abbrev import (state2abbr, abbr2state)
path_in = r"..\data\load_profiles_data\input" # fix
#read in state proportions
#how much state load should be distributed to GenXRegion
# pop = pd.read_parquet(path_in + "\GenX_State_Pop_Weight.parquet")
pop = pd.read_parquet(path_in + "\ipm_state_pop_weight_20210517.parquet")
states = pop.drop_duplicates(subset=["State"])["State"]
states_abb = list(map(state2abbr, states))
pop["State"] = list(map(state2abbr, pop["State"]))
states_eastern_abbr = ["ME","VT","NH","MA","RI","CT","NY","PA","NJ","DE","MD","DC","MI","IN","OH","KY","WV","VA","NC","SC","GA","FL"]
states_central_abbr = ["IL","MO","TN","AL","MS","WI","AR","LA","TX","OK","KS","NE","SD","ND","IA","MN"]
states_mountain_abbr = ["MT","WY","CO","NM","AZ","UT","ID"]
states_pacific_abbr = ["CA","NV","OR","WA"]
states_eastern = list(map(abbr2state, states_eastern_abbr))
states_central = list(map(abbr2state, states_central_abbr))
states_mountain = list(map(abbr2state, states_mountain_abbr))
states_pacific = list(map(abbr2state, states_pacific_abbr))
# some parameters
stated_states = ["New Jersey", "New York", "Virginia"]
# Date Jan 29, 2021
# (2) PA, NJ, VA, NY, MI all have EV and heat pump stocks from NZA DD case
# consistent with their economywide decarbonization goals.
# https://www.c2es.org/content/state-climate-policy/
# Date Feb 10, 2021
# Remove high electrification growth in PA and MI in stated policies;
# they don't have clean energy goals, so it would be confusing/inconsistent to require high electrification in these states.
# So our new "Stated Policies" definition for electrification is states
# with BOTH economywide emissions goals + 100% carbon-free electricity standards
# = NY, NJ, VA.
stated_states_abbr = list(map(state2abbr, stated_states))
#years = ["2022", "2025", "2030", "2040", "2050"]
cases = ["current_policy", "stated_policy", "deep_decarbonization"]
running_sector = ['Residential','Residential', 'Commercial', 'Commercial','Transportation','Transportation','Transportation', 'Transportation']
running_subsector = ['space heating and cooling','water heating', 'space heating and cooling', 'water heating','light-duty vehicles','medium-duty trucks','heavy-duty trucks','transit buses']
Nsubsector = len(running_subsector)
logger = logging.getLogger(__name__)
#Define function for adjusting time-difference
def addhour(x):
x += 1
x = x.replace(8761, 1)
return x
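# Illustrative note (not from the original script): addhour shifts an hour
# index forward by one and wraps the value past 8760 back to hour 1, treating
# the 8760-hour year as circular, e.g.:
#   addhour(pd.Series([1, 8759, 8760]))  ->  pd.Series([2, 8760, 1])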
def SolveThreeUnknowns(a1, b1, c1, d1, a2, b2, c2, d2, a3, b3, c3, d3):
D = a1*b2*c3 + b1*c2*a3 + c1*a2*b3 - a1*c2*b3 - b1*a2*c3 - c1*b2*a3
Dx = d1*b2*c3 + b1*c2*d3 + c1*d2*b3 - d1*c2*b3 - b1*d2*c3 - c1*b2*d3
Dy = a1*d2*c3 + d1*c2*a3 + c1*a2*d3 - a1*c2*d3 - d1*a2*c3 - c1*d2*a3
Dz = a1*b2*d3 + b1*d2*a3 + d1*a2*b3 - a1*d2*b3 - b1*a2*d3 - d1*b2*a3
Sx = Dx/D
Sy = Dy/D
Sz = Dz/D
d = {'Sx':Sx, 'Sy':Sy, 'Sz':Sz}
return pd.DataFrame(d)
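# Hedged sketch (not from the original script): SolveThreeUnknowns applies
# Cramer's rule to the system a_i*x + b_i*y + c_i*z = d_i, vectorised over the
# inputs. A quick sanity check for a single system is to compare against
# numpy's solver; the numbers below are made up for illustration.
def _cramer_check_example():
    """Illustrative only: 2x + y - z = 1, x + 3y + 2z = 4, 3x - 2y + z = 2."""
    sol = SolveThreeUnknowns(
        np.array([2.0]), np.array([1.0]), np.array([-1.0]), np.array([1.0]),
        np.array([1.0]), np.array([3.0]), np.array([2.0]), np.array([4.0]),
        np.array([3.0]), np.array([-2.0]), np.array([1.0]), np.array([2.0]))
    check = np.linalg.solve(
        np.array([[2.0, 1.0, -1.0], [1.0, 3.0, 2.0], [3.0, -2.0, 1.0]]),
        np.array([1.0, 4.0, 2.0]))
    return sol[['Sx', 'Sy', 'Sz']].to_numpy().ravel(), check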
def SolveTwoUnknowns(a1, b1, c1, a2, b2, c2):
D = a1*b2 - a2*b1
Dx = c1*b2 - c2*b1
Dy = a1*c2 - a2*c1
Sx = Dx/D
Sy = Dy/D
d = {'Sx':Sx, 'Sy':Sy}
return pd.DataFrame(d)
def CreateOutputFolder(case_folder):
path = case_folder / "extra_outputs"
if not os.path.exists(path):
os.makedirs(path)
######################################
# CreatingBaseLoad.R
def CreateBaseLoad(years, regions, output_folder, path_growthrate):
path_processed = path_in
path_result = output_folder.__str__()
years = years
regions = regions
path_growthrate = path_growthrate
## Method 3: annually
EFS_2020_LoadProf = pd.read_parquet(path_in + "\EFS_REF_load_2020.parquet")
EFS_2020_LoadProf = pd.merge(EFS_2020_LoadProf, pop, on = ["State"])
EFS_2020_LoadProf = EFS_2020_LoadProf.assign(weighted = EFS_2020_LoadProf["LoadMW"]*EFS_2020_LoadProf["State Prop"])
EFS_2020_LoadProf = EFS_2020_LoadProf.groupby(["Year", "GenX.Region", "LocalHourID", "Sector", "Subsector"], as_index = False).agg({"weighted" : "sum"})
# Read in 2019 Demand
Original_Load_2019 = pd.read_parquet(path_in + "\ipm_load_curves_2019_EST.parquet")
# Reorganize Demand
Original_Load_2019 = Original_Load_2019.melt(id_vars="LocalHourID").rename(columns={"variable" : "GenX.Region", "value": "LoadMW_original"})
Original_Load_2019 = Original_Load_2019.groupby(["LocalHourID"], as_index = False).agg({"LoadMW_original" : "sum"})
ratio_A = Original_Load_2019["LoadMW_original"].sum() / EFS_2020_LoadProf["weighted"].sum()
EFS_2020_LoadProf = EFS_2020_LoadProf.assign(weighted = EFS_2020_LoadProf["weighted"]*ratio_A)
Base_Load_2019 = EFS_2020_LoadProf.rename(columns ={"weighted" : "LoadMW"})
# Read in the Growth Rate
GrowthRate = pd.read_parquet(path_in + "\ipm_growthrate_2019.parquet")
try:
GrowthRate = pd.read_parquet(path_growthrate)
except:
pass
# Create Base loads
Base_Load_2019 = Base_Load_2019[Base_Load_2019["GenX.Region"].isin(regions)]
Base_Load_2019.loc[(Base_Load_2019["Sector"] == "Industrial") & (Base_Load_2019["Subsector"].isin(["process heat", "machine drives"])), "Subsector"] = "other"
Base_Load_2019 = Base_Load_2019[Base_Load_2019["Subsector"] == "other"]
Base_Load_2019 = Base_Load_2019.groupby(["Year", "LocalHourID", "GenX.Region", "Sector"], as_index= False).agg({'LoadMW' : 'sum'})
Base_Load = Base_Load_2019
for y in years:
ScaleFactor = GrowthRate.assign(ScaleFactor = (1+GrowthRate["growth_rate"])**(int(y) - 2019)) \
.drop(columns = "growth_rate")
Base_Load_temp = pd.merge(Base_Load_2019, ScaleFactor, on = ["GenX.Region"])
Base_Load_temp = Base_Load_temp.assign(Year = y, LoadMW = Base_Load_temp["LoadMW"]*Base_Load_temp["ScaleFactor"])\
.drop(columns = "ScaleFactor")
Base_Load = Base_Load.append(Base_Load_temp, ignore_index=True)
Base_Load.to_parquet(path_result + "\Base_Load.parquet", index = False)
del Base_Load, Base_Load_2019, Base_Load_temp, ScaleFactor,GrowthRate, Original_Load_2019
#####################################
# Add_Electrification.R
def AddElectrification(years, regions, electrification, output_folder, path_stock):
path_processed = path_in
path_result = output_folder.__str__()
path_stock = path_stock
years = years
electrification = electrification
regions = regions
#Creating Time-series
SCENARIO_STOCK = pd.read_parquet(path_processed + "\SCENARIO_STOCK.parquet")
SCENARIO_STOCK = SCENARIO_STOCK[(SCENARIO_STOCK["YEAR"].isin(years)) & (SCENARIO_STOCK["SCENARIO"].isin(electrification))]
SCENARIO_STOCK_temp = pd.DataFrame()
for year, case in zip(years, electrification):
SCENARIO_STOCK_temp = SCENARIO_STOCK_temp.append(SCENARIO_STOCK[(SCENARIO_STOCK["YEAR"] == year) & (SCENARIO_STOCK["SCENARIO"] == case)])
SCENARIO_STOCK = SCENARIO_STOCK_temp
del SCENARIO_STOCK_temp
try:
CUSTOM_STOCK = pd.read_parquet(path_stock)
CUSTOM_STOCK = CUSTOM_STOCK[(CUSTOM_STOCK["YEAR"].isin(years)) & (CUSTOM_STOCK["SCENARIO"].isin(electrification))]
SCENARIO_STOCK = SCENARIO_STOCK.append(CUSTOM_STOCK)
except:
pass
#Method 1 Calculate from Type1 and Type 2
for i in range(0, Nsubsector):
timeseries = pd.read_parquet(path_processed + "\\" + running_sector[i] + "_" + running_subsector[i] + "_Incremental_Factor.parquet")
timeseries = timeseries[["State", "Year", "LocalHourID", "Unit", "Factor_Type1", "Factor_Type2" ]]
stock_temp = SCENARIO_STOCK[(SCENARIO_STOCK["SECTOR"] == running_sector[i]) & (SCENARIO_STOCK["SUBSECTOR"] == running_subsector[i])]
stock_temp = stock_temp[["SCENARIO", "STATE", "YEAR", "AGG_STOCK_TYPE1", "AGG_STOCK_TYPE2"]].rename(columns={"STATE" : "State", "YEAR" : "Year"})
years_pd = pd.Series(years)
IF_years = pd.Series(timeseries["Year"].unique())
for year in years_pd:
exists = year in IF_years.values
if not exists:
diff = np.array(IF_years - year)
index = diff[np.where(diff <= 0)].argmax()
year_approx = IF_years[index]
timeseries_temp = timeseries[timeseries["Year"] == year_approx]
timeseries_temp["Year"] = year
logger.warning("No incremental factor available for year " + str(year) + ": using factors from year " + str(year_approx) + ".")
timeseries = timeseries.append(timeseries_temp)
timeseries = pd.merge(timeseries, stock_temp, on = ["State", "Year"])
timeseries = timeseries.assign(LoadMW = timeseries["AGG_STOCK_TYPE1"]*timeseries["Factor_Type1"] + timeseries["AGG_STOCK_TYPE2"]*timeseries["Factor_Type2"])
timeseries = timeseries[["SCENARIO", "State", "Year", "LocalHourID", "LoadMW"]].dropna()
timeseries.to_parquet(path_result + "\\" + running_sector[i] + "_" + running_subsector[i] + "_Scenario_Timeseries_Method1.parquet", index = False)
del timeseries, stock_temp
##########################
# Read in time series and combine them
Method = "Method1"
Res_SPH = pd.read_parquet(path_result + "\Residential_space heating and cooling_Scenario_Timeseries_" + Method + ".parquet")
Res_SPH = Res_SPH.rename(columns={"LoadMW" : "Res_SPH_LoadMW"})
Res_SPH_sum = Res_SPH
Res_SPH_sum = Res_SPH.groupby(["SCENARIO", "State", "Year"], as_index = False)["Res_SPH_LoadMW"].agg({"Total_Res_SPH_TWh" : "sum"})
Res_SPH_sum["Total_Res_SPH_TWh"] = 10**-6*Res_SPH_sum["Total_Res_SPH_TWh"]
Res_WH = | pd.read_parquet(path_result + "\Residential_water heating_Scenario_Timeseries_" + Method +".parquet") | pandas.read_parquet |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Experiment file for comparing simple versions of multi-fidelity optimizers"""
from pickle import dump
from warnings import warn, simplefilter
import mf2
import numpy as np
import pandas as pd
import xarray as xr
from collections import namedtuple
from operator import itemgetter
from scipy.optimize import minimize
from sklearn.linear_model import LinearRegression
from pyprojroot import here
from experiments import scale_to_function, create_subsampling_error_grid, mlcs
from functools import wraps
from time import time
def timing(f):
@wraps(f)
def wrap(*args, **kw):
start = time()
result = f(*args, **kw)
end = time()
print(f'func {f.__name__}: {end-start:2.4f} sec')
return result
return wrap
save_dir = here('files/2020-11-05-simple-mfbo/')
save_dir.mkdir(parents=True, exist_ok=True)
# TODO: de-duplicate (already present in processing.py)
def fit_lin_reg(da: xr.DataArray, calc_SSE: bool=False):
"""Return lin-reg coefficients after training index -> value"""
series = da.to_series().dropna()
X = np.array(series.index.tolist())[:,:2] # remove rep_idx (3rd column)
y = np.log10(series.values)
reg = LinearRegression().fit(X, y)
if not calc_SSE:
return reg
pred_y = reg.predict(X)
SSE = np.sum((pred_y - y)**2)
return reg, SSE
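# Hedged sketch (not from the original experiment): fit_lin_reg expects a
# DataArray whose first two index levels are the coordinates regressed on
# (the third, repetition, level is dropped) and whose values are positive so
# the log10 is defined. The dimension names below are assumptions made purely
# for illustration.
def _fit_lin_reg_example():
    """Illustrative only: fits the index -> log10(value) trend on toy data."""
    rng = np.random.RandomState(1)
    toy = xr.DataArray(
        rng.uniform(0.1, 1.0, size=(4, 5, 3)),
        dims=['n_high', 'n_low', 'rep_idx'],
        coords={'n_high': np.arange(2, 6),
                'n_low': np.arange(3, 8),
                'rep_idx': np.arange(3)})
    reg, sse = fit_lin_reg(toy, calc_SSE=True)
    return reg.coef_, sse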
@timing
def proto_EG_multifid_bo(func, budget, cost_ratio, doe_n_high, doe_n_low, num_reps=50):
np.random.seed(20160501)
N_RAND_SAMPLES = 100
if doe_n_high + cost_ratio*doe_n_low >= budget:
raise ValueError('Budget should not be exhausted after DoE')
Entry = namedtuple('Entry', 'budget time_since_high_eval tau fidelity candidate fitness')
entries = []
#make mf-DoE
high_x, low_x = mlcs.bi_fidelity_doe(func.ndim, doe_n_high, doe_n_low)
high_x, low_x = scale_to_function(func, [high_x, low_x])
high_y, low_y = func.high(high_x), \
func.low(low_x)
#subtract mf-DoE from budget
budget -= (doe_n_high + doe_n_low*cost_ratio)
#create archive
archive = mlcs.CandidateArchive.from_bi_fid_DoE(high_x, low_x, high_y, low_y)
proto_eg = mlcs.ProtoEG(archive, num_reps=num_reps)
proto_eg.subsample_errorgrid()
mfm = mlcs.MultiFidelityModel(fidelities=['high', 'low'], archive=archive,
kernel='Matern', scaling='off')
time_since_high_eval = 0
while budget > 0:
tau = calc_tau_from_EG(proto_eg.error_grid['mses'], cost_ratio)
# compare \tau with current count t to select fidelity, must be >= 1
fidelity = 'high' if 1 <= tau <= time_since_high_eval else 'low'
# predict best place to evaluate:
if fidelity == 'high':
#best predicted low-fid only datapoint for high-fid (to maintain hierarchical model)
candidates = select_high_fid_only_candidates(archive)
candidate_predictions = [
(cand, mfm.models['high'].predict(cand.reshape(1, -1)))
for cand in candidates
]
x = min(candidate_predictions, key=itemgetter(1))[0].ravel()
time_since_high_eval = 0
budget -= 1
else: # elif fidelity == 'low':
# simple optimization for low-fid
x = minimize(
lambda x: mfm.models['high'].predict(x.reshape(1, -1)),
x0=np.random.uniform(func.l_bound, func.u_bound).reshape(-1, ),
bounds=func.bounds.T,
).x
while x in archive: # resample to ensure a new candidate is added to the archive
# print(f'Existing candidate {x} ...')
random_candidates = scale_to_function(func, np.random.rand(N_RAND_SAMPLES, func.ndim))
fitnesses = mfm.models['high'].predict(random_candidates)
x = random_candidates[np.argmin(fitnesses)]
# print(f'... replaced by {x}')
time_since_high_eval += 1
budget -= cost_ratio
#evaluate best place
y = func[fidelity](x.reshape(1, -1))[0]
archive.addcandidate(candidate=x.flatten(), fitness=y, fidelity=fidelity)
# update model & error grid
mfm.retrain()
if budget > 0: # prevent unnecessary computation
proto_eg.update_errorgrid_with_sample(x, fidelity=fidelity)
# logging
entries.append(Entry(budget, time_since_high_eval, tau, fidelity, x, y))
return mfm, pd.DataFrame.from_records(entries, columns=Entry._fields), archive
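# Worked example of the fidelity rule above (illustrative numbers only): with
# cost_ratio = 0.2 a high-fidelity evaluation costs as much as five low-fidelity
# ones. If calc_tau_from_EG returns tau = 3, 'high' is chosen only once at least
# three consecutive 'low' evaluations have been spent since the last 'high' one
# (1 <= tau <= time_since_high_eval); otherwise the cheaper 'low' fidelity keeps
# being sampled, so roughly tau low evaluations are made per high one while tau
# stays put.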
@timing
def simple_multifid_bo(func, budget, cost_ratio, doe_n_high, doe_n_low, num_reps=50):
np.random.seed(20160501)
if doe_n_high + cost_ratio*doe_n_low >= budget:
raise ValueError('Budget should not be exhausted after DoE')
Entry = namedtuple('Entry', 'budget time_since_high_eval tau fidelity candidate fitness')
entries = []
#make mf-DoE
high_x, low_x = mlcs.bi_fidelity_doe(func.ndim, doe_n_high, doe_n_low)
high_x, low_x = scale_to_function(func, [high_x, low_x])
high_y, low_y = func.high(high_x), \
func.low(low_x)
#subtract mf-DoE from budget
budget -= (doe_n_high + doe_n_low*cost_ratio)
#create archive
archive = mlcs.CandidateArchive.from_bi_fid_DoE(high_x, low_x, high_y, low_y)
#make mf-model using archive
mfbo = mlcs.MultiFidelityBO(func, archive)
time_since_high_eval = 0
while budget > 0:
#select next fidelity to evaluate:
#sample error grid
EG = create_subsampling_error_grid(archive, num_reps=num_reps, func=func)
tau = calc_tau_from_EG(EG, cost_ratio)
#compare \tau with current count t to select fidelity, must be >= 1
fidelity = 'high' if 1 <= tau <= time_since_high_eval else 'low'
#predict best place to evaluate:
if fidelity == 'high':
#best predicted low-fid only datapoint for high-fid (to maintain hierarchical model)
candidates = select_high_fid_only_candidates(archive)
candidate_predictions = [
(cand, mfbo.models['high'].predict(cand.reshape(1, -1)))
for cand in candidates
]
x = min(candidate_predictions, key=itemgetter(1))[0].ravel()
time_since_high_eval = 0
budget -= 1
else: # elif fidelity == 'low':
#simple optimization for low-fid
x = minimize(
lambda x: mfbo.models['high'].predict(x.reshape(1, -1)),
x0=np.random.uniform(func.l_bound, func.u_bound).reshape(-1, ),
bounds=func.bounds.T,
).x
N_RAND_SAMPLES = 100
while x in archive: # resample to ensure a new candidate is added to the archive
# print(f'Existing candidate {x} ...')
random_candidates = scale_to_function(func, np.random.rand(N_RAND_SAMPLES, func.ndim))
fitnesses = mfbo.models['high'].predict(random_candidates)
x = random_candidates[np.argmin(fitnesses)]
# print(f'... replaced by {x}')
time_since_high_eval += 1
budget -= cost_ratio
#evaluate best place
y = func[fidelity](x.reshape(1, -1))[0]
archive.addcandidate(x, y, fidelity=fidelity)
entries.append(Entry(budget, time_since_high_eval, tau, fidelity, x, y))
#update model
mfbo.retrain()
return mfbo, pd.DataFrame.from_records(entries, columns=Entry._fields), archive
@timing
def fixed_ratio_multifid_bo(func, budget, cost_ratio, doe_n_high, doe_n_low, num_reps=50):
np.random.seed(20160501)
if doe_n_high + cost_ratio*doe_n_low >= budget:
raise ValueError('Budget should not be exhausted after DoE')
Entry = namedtuple('Entry', 'budget time_since_high_eval tau fidelity candidate fitness')
entries = []
tau = 1 / cost_ratio
#make mf-DoE
high_x, low_x = mlcs.bi_fidelity_doe(func.ndim, doe_n_high, doe_n_low)
high_x, low_x = scale_to_function(func, [high_x, low_x])
high_y, low_y = func.high(high_x), \
func.low(low_x)
#subtract mf-DoE from budget
budget -= (doe_n_high + doe_n_low*cost_ratio)
#create archive
archive = mlcs.CandidateArchive.from_multi_fidelity_function(func, ndim=func.ndim)
archive.addcandidates(low_x, low_y, fidelity='low')
archive.addcandidates(high_x, high_y, fidelity='high')
#make mf-model using archive
mfbo = mlcs.MultiFidelityBO(func, archive)
time_since_high_eval = 0
while budget > 0:
#select next fidelity to evaluate:
#compare \tau with current count t to select fidelity, must be >= 1
fidelity = 'high' if 1 <= tau <= time_since_high_eval else 'low'
#predict best place to evaluate:
if fidelity == 'high':
#best predicted low-fid only datapoint for high-fid (to maintain hierarchical model)
all_low = {
tuple(candidate)
for candidate in archive.getcandidates(fidelity='low').candidates
}
all_high = {
tuple(candidate)
for candidate in archive.getcandidates(fidelity='high').candidates
}
selected_candidates = all_low - all_high
candidates = [np.array(cand).reshape(1, -1) for cand in selected_candidates] # only consider candidates that are not yet evaluated in high-fidelity
candidate_predictions = [
(cand, mfbo.models['high'].predict(cand.reshape(1, -1)))
for cand in candidates
]
x = min(candidate_predictions, key=itemgetter(1))[0].ravel()
time_since_high_eval = 0
budget -= 1
else: # elif fidelity == 'low':
#simple optimization for low-fid
x = minimize(
lambda x: mfbo.models['high'].predict(x.reshape(1, -1)),
x0=np.random.uniform(func.l_bound, func.u_bound).reshape(-1, ),
bounds=func.bounds.T,
).x
N_RAND_SAMPLES = 100
while x in archive: # resample to ensure a new candidate is added to the archive
print(f'Existing candidate {x} ...')
random_candidates = scale_to_function(func, np.random.rand(N_RAND_SAMPLES, func.ndim))
fitnesses = mfbo.models['high'].predict(random_candidates)
x = random_candidates[np.argmin(fitnesses)]
print(f'... replaced by {x}')
time_since_high_eval += 1
budget -= cost_ratio
#evaluate best place
y = func[fidelity](x.reshape(1, -1))[0]
archive.addcandidate(x, y, fidelity=fidelity)
entries.append(Entry(budget, time_since_high_eval, tau, fidelity, x, y))
#update model
mfbo.retrain()
return mfbo, | pd.DataFrame.from_records(entries, columns=Entry._fields) | pandas.DataFrame.from_records |
# imports
#region
import os
import pyreadstat
import pandas as pd
import numpy as np
from statsmodels.stats.weightstats import DescrStatsW
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
import statsmodels.formula.api as smf
import seaborn as sns
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from libs.utils import *
from libs.plots import *
from libs.extensions import *
plt.ioff()
#endregion
# load new EDGAR v5.0 data ---
root = 'D:\\projects\\fakta-o-klimatu\\work\\111-emise-svet-srovnani\\data'
edgar_files = ['CH4', 'CO2_excl_short-cycle_org_C', 'CO2_org_short-cycle_C', 'N2O']
ef = edgar_files[0]
edgar_df = None
for ef in edgar_files:
logger(ef)
ey = 2018 if ef == 'CO2_excl_short-cycle_org_C' else 2015
frame = pd.read_excel(f'{root}\\edgar_v5.0\\v50_{ef}_1970_{ey}.xls', sheet_name='TOTALS BY COUNTRY',
header=9)
frame = frame[['ISO_A3'] + list(range(1970, ey + 1))].rename(columns={'ISO_A3': 'code'}).set_index('code')
frame.columns = frame.columns.rename('year')
frame = frame.unstack().rename(f'edgar50_{ef}').reset_index()
frame = frame[~frame['code'].isin(['SEA', 'AIR'])]
if edgar_df is None:
edgar_df = frame
else:
edgar_df = pd.merge(edgar_df, frame, how='outer')
edgar_df.to_csv(root + '\\edgar_v5.0.csv', index=False)
edgar_df.show()
data = edgar_df.copy()
# find sensible GDP vs population vs CO2eq (or CO2) data vs time ?
root = 'D:\\projects\\fakta-o-klimatu\\work\\111-emise-svet-srovnani\\data'
df = pd.read_csv(root + '\\data_all.csv')
df.show_csv()
df.query('code == "CZE"').show_csv()
df = pd.merge(df, edgar_df, how='left', on=['code', 'year'])
df.to_csv(f'{root}\\data_all_w_edgar50.csv', index=False)
df = pd.read_csv(f'{root}\\data_all_w_edgar50.csv')
df['edgar432_co2'] = df['edgar432_CO2_excl_short-cycle_org_C']
df['edgar432_co2_w_short'] = df['edgar432_co2'] + df['edgar432_CO2_org_short-cycle_C']
# actually, these are old sensitivities!
df['edgar432_co2eq'] = df['edgar432_CO2_excl_short-cycle_org_C'] + 25 * df['edgar432_CH4'] + 298 * df['edgar432_N2O']
df['edgar432_co2eq_w_short'] = df['edgar432_co2eq'] + df['edgar432_CO2_org_short-cycle_C']
df['edgar50_co2'] = df['edgar50_CO2_excl_short-cycle_org_C']
df['edgar50_co2_w_short'] = df['edgar50_co2'] + df['edgar50_CO2_org_short-cycle_C']
# I am using the new sensitivities here, 28 and 265
df['edgar50_co2eq'] = df['edgar50_CO2_excl_short-cycle_org_C'] + 28 * df['edgar50_CH4'] + 265 * df['edgar50_N2O']
df['edgar50_co2eq_w_short'] = df['edgar50_co2eq'] + df['edgar50_CO2_org_short-cycle_C']
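# Note on the global-warming-potential factors applied above (GWP100 values):
# the "old sensitivities" 25 (CH4) and 298 (N2O) come from IPCC AR4, while the
# edgar50 aggregation uses the AR5 values 28 and 265. Illustratively, with all
# gas columns in the same mass unit:
#   co2eq = co2_excl_short_cycle + 28 * ch4 + 265 * n2o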
data = df[['code', 'year', 'SP.POP.TOTL', 'NY.GDP.MKTP.PP.KD', 'edgar50_co2eq']] \
.rename(columns={'year': 'year_data', 'SP.POP.TOTL': 'pop', 'NY.GDP.MKTP.PP.KD': 'gdp_ppp',
'edgar50_co2eq': 'co2eq'})
data
sns.lineplot(x='year_data', y='co2eq', data=data, units='code', estimator=None).show()
sns.lineplot(x='year_data', y='pop', data=data, units='code', estimator=None).show()
sns.lineplot(x='year_data', y='gdp_ppp', data=data, units='code', estimator=None).show()
vars = ['pop', 'gdp_ppp', 'co2eq']
data = data.dropna(subset=vars)
codes = pd.DataFrame({'code': np.sort(data['code'].unique())})
codes['year'] = np.int_(2012)
data['year_data'] = np.int_(data['year_data'])
res = pd.merge_asof(codes, data.sort_values('year_data'), by='code', left_on='year', right_on='year_data')
res = pd.merge(res, countries[['code', 'en_short', 'en_region', 'cz_short', 'cz_region', 'en_category', 'cz_category',
'cz_cat_desc']])
df.dtypes
data
countries = pd.read_csv('D:\\projects\\fakta-o-klimatu\\work\\emission-intensity\\countries.csv')
countries.show()
data.show()
no_years = data.groupby('code')['year_data'].count().rename('count').reset_index()
max_pop = data.groupby('code')['pop'].max().reset_index()
pop_years = pd.merge(no_years, max_pop)
pop_years['pop'].sum() # 7_248_361_589
pop_years[pop_years['count'] < 26]['pop'].sum() # 139_046_348
pop_years[pop_years['count'] == 26]['pop'].sum() # 7_109_315_241
pop_years[pop_years['count'] == 23]['pop']
countries.dtypes
countries = pd.merge(countries, pop_years)
countries.final_region.drop_duplicates()
data
regions = pd.merge(data, countries[countries['count'] == 26][['code', 'final_region']])
# regions.final_region.drop_duplicates()
regions.loc[regions.final_region == 'Evropská unie', 'final_region'] = 'Evropa'
regions.loc[regions.final_region == 'Spojené státy americké', 'final_region'] = 'Severní Amerika'
world = regions.drop(columns=['code', 'final_region']).groupby(['year_data']).sum().reset_index()
cze = regions[regions.code == 'CZE'].copy()
cze['final_region'] = 'Česká republika'
regions = pd.concat([regions, cze])
regions = regions.drop(columns=['code']).groupby(['final_region', 'year_data']).sum().reset_index()
# regions.show()
regions['ghg_per_cap'] = 1_000 * regions['co2eq'] / regions['pop'] # t CO2eq / capita
regions['ghg_per_gdp'] = 1_000_000 * regions['co2eq'] / regions['gdp_ppp'] # kg CO2eq / $
regions['gdp_per_cap'] = regions['gdp_ppp'] / regions['pop']
regions['co2eq'] = regions['co2eq'] / 1_000_000  # Gt CO2eq
regions['gdp_ppp'] = regions['gdp_ppp'] / 1_000_000  # million $
regions['pop'] = regions['pop'] / 1_000_000_000  # billion people
world['ghg_per_cap'] = 1_000 * world['co2eq'] / world['pop'] # t CO2eq / capita
world['ghg_per_gdp'] = 1_000_000 * world['co2eq'] / world['gdp_ppp'] # kg CO2eq / $
world['gdp_per_cap'] = world['gdp_ppp'] / world['pop']
world['co2eq'] = world['co2eq'] / 1_000_000  # Gt CO2eq
world['gdp_ppp'] = world['gdp_ppp'] / 1_000_000  # million $
world['pop'] = world['pop'] / 1_000_000_000  # billion people
world['final_region'] = 'Svět'
titles = {
'ghg_per_cap': 't CO2eq / person',
'ghg_per_gdp': 'kg CO2eq / $',
'gdp_per_cap': '$ / person',
'pop': 'population (billion)',
'gdp_ppp': 'GDP (million $)',
'co2eq': 'Gt CO2eq'
}
plt.rcParams['figure.figsize'] = 12, 7
figs = []
for x in ['ghg_per_cap', 'ghg_per_gdp', 'gdp_per_cap', 'pop', 'gdp_ppp', 'co2eq']:
fig, ax = plt.subplots()
sns.lineplot('year_data', x, data=regions, hue='final_region', marker='o')
ax.set_title(titles[x] + ' (regions)')
legend = plt.legend()
legend.get_frame().set_facecolor('none')
figs.append(fig)
# Chart(figs, cols=2, title='All regions').show()
all_chart = Chart(figs, cols=2, title='All regions')
plt.rcParams['figure.figsize'] = 8, 5
figs = []
for x in ['ghg_per_cap', 'ghg_per_gdp', 'gdp_per_cap', 'pop', 'gdp_ppp', 'co2eq']:
fig, ax = plt.subplots()
sns.lineplot('year_data', x, data=world, marker='o')
ax.set_title(titles[x] + ' (world)')
figs.append(fig)
# Chart(figs, cols=3, title='World').show()
world_chart = Chart(figs, cols=3, title='World')
plt.rcParams['figure.figsize'] = 8, 5
charts = []
for r, rdf in regions.groupby('final_region'):
figs = []
for x in ['ghg_per_cap', 'ghg_per_gdp', 'gdp_per_cap', 'pop', 'gdp_ppp', 'co2eq']:
fig, ax = plt.subplots()
sns.lineplot('year_data', x, data=rdf, marker='o')
ax.set_title(titles[x] + f' ({r})')
figs.append(fig)
charts.append(Chart(figs, cols=3, title=r))
regions_chart = Selector(charts, title='Per region')
# regions_chart.show()
rep = Selector([all_chart, world_chart, regions_chart], 'Emissions intensity (2015 update)')
rep.show()
# again, CO2 to 2018 only! ---
data = df[['code', 'year', 'SP.POP.TOTL', 'NY.GDP.MKTP.PP.KD', 'edgar50_co2']] \
.rename(columns={'year': 'year_data', 'SP.POP.TOTL': 'pop', 'NY.GDP.MKTP.PP.KD': 'gdp_ppp',
'edgar50_co2': 'co2'})
vars = ['pop', 'gdp_ppp', 'co2']
data = data.dropna(subset=vars)
data['year_data'] = np.int_(data['year_data'])
countries = pd.read_csv('D:\\projects\\fakta-o-klimatu\\work\\emission-intensity\\countries.csv')
no_years = data.groupby('code')['year_data'].count().rename('count').reset_index()
max_pop = data.groupby('code')['pop'].max().reset_index()
pop_years = pd.merge(no_years, max_pop)
pop_years['pop'].sum() # 7_502_176_200
pop_years[pop_years['count'] < 29]['pop'].sum() # 225_276_087
pop_years[pop_years['count'] == 29]['pop'].sum() # 7_276_900_113
countries = | pd.merge(countries, pop_years) | pandas.merge |
from datetime import datetime, timedelta
import unittest
from pandas.core.datetools import (
bday, BDay, BQuarterEnd, BMonthEnd, BYearEnd, MonthEnd,
DateOffset, Week, YearBegin, YearEnd, Hour, Minute, Second,
format, ole2datetime, to_datetime, normalize_date,
getOffset, getOffsetName, inferTimeRule, hasOffsetName)
from nose.tools import assert_raises
####
## Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
assert_raises(Exception, ole2datetime, 60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s) == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
#####
### DateOffset Tests
#####
class TestDateOffset(object):
def setUp(self):
self.d = datetime(2008, 1, 2)
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert(DateOffset(months=2).copy() == DateOffset(months=2))
class TestBusinessDay(unittest.TestCase):
def setUp(self):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_repr(self):
assert repr(self.offset) == '<1 BusinessDay>'
assert repr(self.offset2) == '<2 BusinessDays>'
expected = '<1 BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
self.assertEqual(self.offset2, self.offset2)
def test_mul(self):
pass
def test_hash(self):
self.assertEqual(hash(self.offset2), hash(self.offset2))
def testCall(self):
self.assertEqual(self.offset2(self.d), datetime(2008, 1, 3))
def testRAdd(self):
self.assertEqual(self.d + self.offset2, self.offset2 + self.d)
def testSub(self):
off = self.offset2
self.assertRaises(Exception, off.__sub__, self.d)
self.assertEqual(2 * off - off, off)
self.assertEqual(self.d - self.offset2, self.d + BDay(-2))
def testRSub(self):
self.assertEqual(self.d - self.offset2, (-self.offset2).apply(self.d))
def testMult1(self):
self.assertEqual(self.d + 10*self.offset, self.d + BDay(10))
def testMult2(self):
self.assertEqual(self.d + (-5*BDay(-10)),
self.d + BDay(50))
def testRollback1(self):
self.assertEqual(BDay(10).rollback(self.d), self.d)
def testRollback2(self):
self.assertEqual(BDay(10).rollback(datetime(2008, 1, 5)), datetime(2008, 1, 4))
def testRollforward1(self):
self.assertEqual(BDay(10).rollforward(self.d), self.d)
def testRollforward2(self):
self.assertEqual(BDay(10).rollforward(datetime(2008, 1, 5)), datetime(2008, 1, 7))
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def test_apply(self):
tests = []
tests.append((bday,
{datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2*bday,
{datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
tests.append((-bday,
{datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
tests.append((-2*bday,
{datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
tests.append((BDay(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_apply_corner(self):
self.assertRaises(Exception, BDay().apply, BMonthEnd())
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected
class TestWeek(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, Week, weekday=7)
self.assertRaises(Exception, Week, weekday=-1)
def test_isAnchored(self):
self.assert_(Week(weekday=0).isAnchored())
self.assert_(not Week().isAnchored())
self.assert_(not Week(2, weekday=2).isAnchored())
self.assert_(not Week(2).isAnchored())
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
tests.append((Week(-2, weekday=1), # n=0 -> roll forward. Mon
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.iteritems():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(Week(weekday=0), datetime(2008, 1, 1), False),
(Week(weekday=0), datetime(2008, 1, 2), False),
(Week(weekday=0), datetime(2008, 1, 3), False),
(Week(weekday=0), datetime(2008, 1, 4), False),
(Week(weekday=0), datetime(2008, 1, 5), False),
(Week(weekday=0), datetime(2008, 1, 6), False),
(Week(weekday=0), datetime(2008, 1, 7), True),
(Week(weekday=1), datetime(2008, 1, 1), True),
(Week(weekday=1), datetime(2008, 1, 2), False),
(Week(weekday=1), datetime(2008, 1, 3), False),
(Week(weekday=1), datetime(2008, 1, 4), False),
(Week(weekday=1), datetime(2008, 1, 5), False),
(Week(weekday=1), datetime(2008, 1, 6), False),
(Week(weekday=1), datetime(2008, 1, 7), False),
(Week(weekday=2), datetime(2008, 1, 1), False),
(Week(weekday=2), datetime(2008, 1, 2), True),
(Week(weekday=2), datetime(2008, 1, 3), False),
(Week(weekday=2), datetime(2008, 1, 4), False),
(Week(weekday=2), datetime(2008, 1, 5), False),
(Week(weekday=2), datetime(2008, 1, 6), False),
(Week(weekday=2), datetime(2008, 1, 7), False),
(Week(weekday=3), datetime(2008, 1, 1), False),
(Week(weekday=3), datetime(2008, 1, 2), False),
(Week(weekday=3), datetime(2008, 1, 3), True),
(Week(weekday=3), datetime(2008, 1, 4), False),
(Week(weekday=3), datetime(2008, 1, 5), False),
(Week(weekday=3), datetime(2008, 1, 6), False),
(Week(weekday=3), datetime(2008, 1, 7), False),
(Week(weekday=4), datetime(2008, 1, 1), False),
(Week(weekday=4), datetime(2008, 1, 2), False),
(Week(weekday=4), datetime(2008, 1, 3), False),
(Week(weekday=4), datetime(2008, 1, 4), True),
(Week(weekday=4), datetime(2008, 1, 5), False),
(Week(weekday=4), datetime(2008, 1, 6), False),
(Week(weekday=4), datetime(2008, 1, 7), False),
(Week(weekday=5), datetime(2008, 1, 1), False),
(Week(weekday=5), datetime(2008, 1, 2), False),
(Week(weekday=5), datetime(2008, 1, 3), False),
(Week(weekday=5), datetime(2008, 1, 4), False),
(Week(weekday=5), datetime(2008, 1, 5), True),
(Week(weekday=5), datetime(2008, 1, 6), False),
(Week(weekday=5), datetime(2008, 1, 7), False),
(Week(weekday=6), datetime(2008, 1, 1), False),
(Week(weekday=6), datetime(2008, 1, 2), False),
(Week(weekday=6), datetime(2008, 1, 3), False),
(Week(weekday=6), datetime(2008, 1, 4), False),
(Week(weekday=6), datetime(2008, 1, 5), False),
(Week(weekday=6), datetime(2008, 1, 6), True),
(Week(weekday=6), datetime(2008, 1, 7), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((BMonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((BMonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.items():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestMonthEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((MonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((MonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
for dateOffset, cases in tests:
for baseDate, expected in cases.items():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [(MonthEnd(), datetime(2007, 12, 31), True),
(MonthEnd(), datetime(2008, 1, 1), False)]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBQuarterEnd(unittest.TestCase):
def test_corner(self):
self.assertRaises(Exception, BQuarterEnd, startingMonth=4)
self.assertRaises(Exception, BQuarterEnd, startingMonth=-1)
def test_isAnchored(self):
self.assert_(BQuarterEnd(startingMonth=1).isAnchored())
self.assert_(BQuarterEnd().isAnchored())
self.assert_(not BQuarterEnd(2, startingMonth=1).isAnchored())
def test_offset(self):
tests = []
tests.append((BQuarterEnd(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31),}))
tests.append((BQuarterEnd(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 30),
datetime(2008, 3, 15): datetime(2008, 5, 30),
datetime(2008, 3, 31): datetime(2008, 5, 30),
datetime(2008, 4, 15): datetime(2008, 5, 30),
datetime(2008, 4, 30): datetime(2008, 5, 30),}))
tests.append((BQuarterEnd(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30),}))
tests.append((BQuarterEnd(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),}))
tests.append((BQuarterEnd(startingMonth=1, n=2),
{datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.items():
assertEq(dateOffset, baseDate, expected)
# corner
offset = BQuarterEnd(n=-1, startingMonth=1)
self.assertEqual(datetime(2010, 1, 31) + offset, datetime(2010, 1, 29))
def test_onOffset(self):
tests = [(BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestYearBegin(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((YearBegin(),
{datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1),}))
tests.append((YearBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1),}))
tests.append((YearBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 1),
datetime(2006, 12, 30): datetime(2006, 1, 1),
datetime(2007, 1, 1): datetime(2006, 1, 1),}))
tests.append((YearBegin(-2),
{datetime(2007, 1, 1): datetime(2005, 1, 1),
datetime(2008, 6, 30): datetime(2007, 1, 1),
datetime(2008, 12, 31): datetime(2007, 1, 1),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.items():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [
(YearBegin(), datetime(2007, 1, 3), False),
(YearBegin(), datetime(2008, 1, 1), True),
(YearBegin(), datetime(2006, 12, 31), False),
(YearBegin(), datetime(2006, 1, 2), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBYearEndLagged(unittest.TestCase):
def test_bad_month_fail(self):
self.assertRaises(Exception, BYearEnd, month=13)
self.assertRaises(Exception, BYearEnd, month=0)
def test_offset(self):
tests = []
tests.append((BYearEnd(month=6),
{datetime(2008, 1, 1): datetime(2008, 6, 30),
datetime(2007, 6, 30): datetime(2008, 6, 30)},
))
tests.append((BYearEnd(n=-1, month=6),
{datetime(2008, 1, 1): datetime(2007, 6, 29),
datetime(2007, 6, 30): datetime(2007, 6, 29)},
))
for dateOffset, cases in tests:
for baseDate, expected in cases.items():
self.assertEqual(baseDate + dateOffset, expected)
def test_roll(self):
offset = BYearEnd(month=6)
date = datetime(2009, 11, 30)
self.assertEqual(offset.rollforward(date), datetime(2010, 6, 30))
self.assertEqual(offset.rollback(date), datetime(2009, 6, 30))
def test_onOffset(self):
tests = [
(BYearEnd(month=2), datetime(2007, 2, 28), True),
(BYearEnd(month=6), datetime(2007, 6, 30), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestBYearEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((BYearEnd(),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2006, 12, 29),
datetime(2005, 12, 31): datetime(2006, 12, 29),}))
tests.append((BYearEnd(0),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 29),}))
tests.append((BYearEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29),}))
tests.append((BYearEnd(-2),
{datetime(2007, 1, 1): datetime(2005, 12, 30),
datetime(2008, 6, 30): datetime(2006, 12, 29),
datetime(2008, 12, 31): datetime(2006, 12, 29),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.items():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [
(BYearEnd(), datetime(2007, 12, 31), True),
(BYearEnd(), datetime(2008, 1, 1), False),
(BYearEnd(), datetime(2006, 12, 31), False),
(BYearEnd(), datetime(2006, 12, 29), True),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
class TestYearEnd(unittest.TestCase):
def test_offset(self):
tests = []
tests.append((YearEnd(),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 31),}))
tests.append((YearEnd(0),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31),}))
tests.append((YearEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 31),
datetime(2006, 12, 30): datetime(2005, 12, 31),
datetime(2007, 1, 1): datetime(2006, 12, 31),}))
tests.append((YearEnd(-2),
{datetime(2007, 1, 1): datetime(2005, 12, 31),
datetime(2008, 6, 30): datetime(2006, 12, 31),
datetime(2008, 12, 31): datetime(2006, 12, 31),}))
for dateOffset, cases in tests:
for baseDate, expected in cases.items():
assertEq(dateOffset, baseDate, expected)
def test_onOffset(self):
tests = [
(YearEnd(), datetime(2007, 12, 31), True),
(YearEnd(), datetime(2008, 1, 1), False),
(YearEnd(), datetime(2006, 12, 31), True),
(YearEnd(), datetime(2006, 12, 29), False),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def testOnOffset():
tests = [#(QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
#(QuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
#(QuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
#(QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 30), False),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
#(QuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
#(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 29), False),
#(QuarterEnd(1, startingMonth=3), datetime(2008, 6, 30), True),
]
for offset, date, expected in tests:
assertOnOffset(offset, date, expected)
def assertEq(dateOffset, baseDate, expected):
actual = dateOffset + baseDate
assert actual == expected
def test_Hour():
assertEq(Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 1))
assertEq(Hour(-1), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
assertEq(2 * Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 2))
assertEq(-1 * Hour(), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
assert (Hour(3) + Hour(2)) == Hour(5)
assert (Hour(3) - Hour(2)) == Hour()
import os
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, roc_curve, roc_auc_score, confusion_matrix
from sklearn.feature_selection import SelectKBest, chi2
import numpy as np
"""Data is reformatted to add a win/loss class, and columns are renamed so that each performance
characteristic has its own single column. There is now a separate row for winners and losers."""
def get_all_data():
"""
:return: concatenated DataFrame of all CSVs since 1985
:rtype: DataFrame
"""
path = 'tennis_atp_1985>'
# collect the yearly CSVs and concatenate once (DataFrame.append is deprecated)
frames = []
for file in os.listdir(path):
    file_path = os.path.join(path, file)
    frames.append(pd.read_csv(file_path))
all_data = pd.concat(frames)
return all_data
def data_clean(data):
"""
Filters out unnecessary features from the data set containing matches since 1985
:param data: data set compiled in get_all_data
:type data: dataFrame
:return clean:
:rtype clean: dataFrame
"""
# select all features of winning participants
winners = data.filter(['winner_name', 'winner_hand', 'winner_ht', 'winner_age', 'w_ace', 'w_df',
'w_svpt', 'w_1stIn', 'w_1stWon', 'w_2ndWon', 'w_SvGms', 'w_bpSaved', 'w_bpFaced'])
winners['won'] = 1
# select all features of losing participants
losers = data.filter(['loser_name', 'loser_hand', 'loser_ht', 'loser_age', 'l_ace', 'l_df',
'l_svpt', 'l_1stIn', 'l_1stWon', 'l_2ndWon', 'l_SvGms', 'l_bpSaved', 'l_bpFaced'])
losers['won'] = 0
winners.rename(columns={'winner_name': 'name', 'winner_hand': 'hand', 'winner_ht': 'ht', 'winner_age': 'age',
'w_ace': 'ace', 'w_df': 'df', 'w_svpt': 'svpt', 'w_1stIn': '1stIn', 'w_1stWon':
'1stWon', 'w_2ndWon': '2ndWon', 'w_SvGms': 'svGms', 'w_bpSaved': 'bpSaved', 'w_bpFaced':
'bpFaced'}, inplace=True)
losers.rename(columns={'loser_name': 'name', 'loser_hand': 'hand', 'loser_ht': 'ht', 'loser_age': 'age', 'l_ace':
'ace', 'l_df': 'df', 'l_svpt': 'svpt', 'l_1stIn': '1stIn', 'l_1stWon': '1stWon',
'l_2ndWon': '2ndWon', 'l_SvGms': 'svGms', 'l_bpSaved': 'bpSaved', 'l_bpFaced': 'bpFaced'},
inplace=True)
clean = pd.concat([winners, losers], axis=0)
clean['serving_bp_won'] = clean['bpSaved'] / clean['bpFaced']
clean['serving_bp_lost'] = 1 - clean['serving_bp_won']
clean['returning_bp_won'] = clean['bpSaved'] / clean['bpFaced']
clean['returning_bp_lost'] = 1 - clean['returning_bp_won']
# Null values are safely dropped; these rows are matches where one of these categories was 0 (e.g. no break points faced)
clean.dropna(inplace=True)
print(clean.isnull().values.any())
# one-hot encoded dummy variable for hand of the participant
clean = pd.get_dummies(clean, prefix='hand', columns=['hand'])
return clean
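# A minimal sketch of how the two helpers above fit together (illustrative only; the
# split parameters below are assumptions, not taken from the original script):
if __name__ == '__main__':
    matches = data_clean(get_all_data())
    X = matches.drop(['name', 'won'], axis=1)
    y = matches['won']
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
    print(X_train.shape, X_test.shape)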
#!/usr/bin/env python
# coding: utf-8
# Following a similar recipe to lewis' R script (https://www.kaggle.com/cartographic/bosch-production-line-performance/bish-bash-xgboost), I sample the data to select features before running on the full set, in order to stay within kaggle's memory limits. Here I add in the train_date data too.
#
# Please feel free to fork and improve.
# In[1]:
import numpy as np
import pandas as pd
from xgboost import XGBClassifier
from sklearn.metrics import matthews_corrcoef, roc_auc_score
from sklearn.cross_validation import cross_val_score, StratifiedKFold
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
# I'm limited by RAM here and taking the first N rows is likely to be
# a bad idea for the date data since it is ordered.
# Sample the data in a roundabout way:
date_chunks = pd.read_csv("../input/train_date.csv", index_col=0, chunksize=100000, dtype=np.float32)
num_chunks = pd.read_csv("../input/train_numeric.csv", index_col=0,
usecols=list(range(969)), chunksize=100000, dtype=np.float32)
X = pd.concat([pd.concat([dchunk, nchunk], axis=1).sample(frac=0.05)
for dchunk, nchunk in zip(date_chunks, num_chunks)])
y = pd.read_csv("../input/train_numeric.csv", index_col=0, usecols=[0,969], dtype=np.float32)
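# Rough sketch of the feature-selection step described in the header: fit a classifier
# on the sample and keep only features with non-zero importance (the classifier settings
# below are assumptions, not taken from the original notebook):
# y = y.values.ravel()
# clf = XGBClassifier(base_score=0.005)
# clf.fit(X, y)
# important_indices = np.where(clf.feature_importances_ > 0)[0]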
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2020 azai/Rgveda/GolemQuant
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import datetime
import time
import numpy as np
import pandas as pd
import pymongo
try:
import QUANTAXIS as QA
from QUANTAXIS.QAUtil import (
QASETTING,
DATABASE,
QA_util_date_stamp,
QA_util_date_valid,
QA_util_log_info,
QA_util_to_json_from_pandas,
QA_util_dict_remove_key,
QA_util_code_tolist,
)
from QUANTAXIS.QAUtil.QAParameter import ORDER_DIRECTION
from QUANTAXIS.QAData.QADataStruct import (
QA_DataStruct_Index_min,
QA_DataStruct_Index_day,
QA_DataStruct_Stock_day,
QA_DataStruct_Stock_min
)
from QUANTAXIS.QAUtil.QADate_Adv import (
QA_util_timestamp_to_str,
QA_util_datetime_to_Unix_timestamp,
QA_util_print_timestamp
)
except:
print('PLEASE run "pip install QUANTAXIS" before call GolemQ.GQFetch.portfolio modules')
pass
from GolemQ.GQUtil.parameter import (
AKA,
INDICATOR_FIELD as FLD,
TREND_STATUS as ST,
FEATURES as FTR,)
def GQSignal_fetch_position_singal_day(start,
end,
frequence='day',
market_type=QA.MARKET_TYPE.STOCK_CN,
portfolio='myportfolio',
getting_trigger=True,
format='numpy',
ui_log=None,
ui_progress=None):
"""
Fetch the daily indicator data for stocks with a specific buy signal.
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
start = str(start)[0:10]
end = str(end)[0:10]
#code= [code] if isinstance(code,str) else code
client = QASETTING.client[AKA.SYSTEM_NAME]
# Write to both the wide and the long table at the same time, to simplify later queries
#coll_day = client.get_collection(
# 'indices_{}'.format(datetime.date.today()))
try:
if (market_type == QA.MARKET_TYPE.STOCK_CN):
#coll_indices = client.stock_cn_indices_min
coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.INDEX_CN):
#coll_indices = client.index_cn_indices_min
coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUND_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('fund_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUTURE_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('future_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY):
#coll_indices = client.cryptocurrency_indices_min
coll_indices = client.get_collection('cryptocurrency_indices_{}'.format(portfolio))
else:
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
except Exception as e:
QA_util_log_info(e)
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
if QA_util_date_valid(end):
if (getting_trigger):
# Query by buy trigger "signal"
cursor = coll_indices.find({
ST.TRIGGER_R5: {
'$gt': 0
},
"date_stamp":
{
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)
}
},
{"_id": 0},
batch_size=10000)
else:
# Query by "holding status"
cursor = coll_indices.find({
ST.POSITION_R5: {
'$gt': 0
},
"date_stamp":
{
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)
}
},
{"_id": 0},
batch_size=10000)
#res=[QA_util_dict_remove_key(data, '_id') for data in cursor]
res = pd.DataFrame([item for item in cursor])
#print(len(res), start, end)
try:
res = res.assign(date=pd.to_datetime(res.date)).drop_duplicates((['date',
'code'])).set_index(['date',
'code'],
drop=False)
except:
res = None
if (res is not None):
try:
codelist = QA.QA_fetch_stock_name(res[AKA.CODE].tolist())
res['name'] = res.apply(lambda x:codelist.at[x.get(AKA.CODE), 'name'], axis=1)
except:
res['name'] = res['code']
if format in ['P', 'p', 'pandas', 'pd']:
return res
elif format in ['json', 'dict']:
return QA_util_to_json_from_pandas(res)
# Other supported output data formats
elif format in ['n', 'N', 'numpy']:
return np.asarray(res)
elif format in ['list', 'l', 'L']:
return np.asarray(res).tolist()
else:
print("QA Error GQSignal_fetch_position_singal_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" " % format)
return None
else:
QA_util_log_info('QA Error GQSignal_fetch_position_singal_day data parameter start=%s end=%s is not right' % (start,
end))
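# Example call (illustrative only; the date range and portfolio name are assumptions):
# signals = GQSignal_fetch_position_singal_day('2021-01-01', '2021-06-30',
#                                              market_type=QA.MARKET_TYPE.STOCK_CN,
#                                              portfolio='myportfolio',
#                                              getting_trigger=True,
#                                              format='pd')
# format='pd' returns a DataFrame indexed by (date, code); 'numpy' / 'list' return plain arrays.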
def GQSignal_fetch_mainfest_singal_day(start,
end,
frequence='day',
market_type=QA.MARKET_TYPE.STOCK_CN,
portfolio='myportfolio',
getting_trigger=True,
format='numpy',
ui_log=None,
ui_progress=None):
"""
Fetch the daily indicator data for stocks with a main-uptrend (主升浪) buy signal.
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
start = str(start)[0:10]
end = str(end)[0:10]
#code= [code] if isinstance(code,str) else code
client = QASETTING.client[AKA.SYSTEM_NAME]
# Write to both the wide and the long table at the same time, to simplify later queries
#coll_day = client.get_collection(
# 'indices_{}'.format(datetime.date.today()))
try:
if (market_type == QA.MARKET_TYPE.STOCK_CN):
#coll_indices = client.stock_cn_indices_min
coll_indices = client.get_collection('stock_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.INDEX_CN):
#coll_indices = client.index_cn_indices_min
coll_indices = client.get_collection('index_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUND_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('fund_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.FUTURE_CN):
#coll_indices = client.future_cn_indices_min
coll_indices = client.get_collection('future_cn_indices_{}'.format(portfolio))
elif (market_type == QA.MARKET_TYPE.CRYPTOCURRENCY):
#coll_indices = client.cryptocurrency_indices_min
coll_indices = client.get_collection('cryptocurrency_indices_{}'.format(portfolio))
else:
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
except Exception as e:
QA_util_log_info(e)
QA_util_log_info('WTF IS THIS! \n ', ui_log=ui_log)
return False
if QA_util_date_valid(end):
if (getting_trigger):
# Query by main-uptrend buy "signal"
cursor = coll_indices.find({
'$and': [{ '$or': [{
FLD.BOOTSTRAP_COMBO_TIMING_LAG:{
'$gt':0
}
},
{
FLD.BOOTSTRAP_GROUND_ZERO_MINOR_TIMING_LAG:{
'$gt':0
}
}]
},
#{ FLD.BOOTSTRAP_COMBO_RETURNS:{
# '$gt':0.00618
# }
# },
{ '$or': [{ FLD.BOOTSTRAP_COMBO_RETURNS:{
'$gt':-0.0927
}
}, { FLD.BOOTSTRAP_COMBO_MINOR_RETURNS:{
'$gt':-0.0927
}
}]},
{ '$or': [{ ST.TRIGGER_R5:{'$gt':0}}, { ST.TRIGGER_RPS:{'$gt':0}}]},
{ "date_stamp":
{
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)
}},]
},
{"_id": 0},
batch_size=10000)
else:
# Query by "holding status"
cursor = coll_indices.find({
ST.POSITION_R5: {
'$gt': 0
},
"date_stamp":
{
"$lte": QA_util_date_stamp(end),
"$gte": QA_util_date_stamp(start)
}
},
{"_id": 0},
batch_size=10000)
#res=[QA_util_dict_remove_key(data, '_id') for data in cursor]
res = pd.DataFrame([item for item in cursor])
#print(len(res), start, end)
try:
res = res.assign(date=pd.to_datetime(res.date)).drop_duplicates(['date', 'code']).set_index(['date', 'code'], drop=False)
except:
res = None
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
self.assertRaises(KeyError, index.get_loc, '1/1/2000')
from functools import wraps
import numpy as np
import datetime as dt
import pandas as pd
from pandas.api.types import is_numeric_dtype, is_categorical, infer_dtype, is_object_dtype, is_string_dtype
from sklearn.decomposition import NMF, TruncatedSVD
from sklearn.feature_extraction.text import HashingVectorizer, TfidfTransformer
from sklearn.pipeline import make_pipeline
#TODO - create a simple class to dummify date columns
def dummify_date_cols(df):
if 'giadmd' in df.columns:
df['giadmd'] = pd.to_datetime(df['giadmd'], errors='coerce')
df['giadmd_year'] = df['giadmd'].dt.year.astype('Int64').astype('object')
df['giadmd_month'] = df['giadmd'].dt.month.astype('Int64').astype('object')
df = df.drop('giadmd', axis=1)
if 'girefs' in df.columns:
df['girefs'] = pd.to_datetime(df['girefs'], errors='coerce')
df['girefs_year'] = df['girefs'].dt.year.astype('Int64').astype('object')
df['girefs_month'] = df['girefs'].dt.month.astype('Int64').astype('object')
df = df.drop('girefs', axis=1)
if 'gidscd' in df.columns:
df['gidscd'] = pd.to_datetime(df['gidscd'], errors='coerce')
df['gidscd_year'] = df['gidscd'].dt.year.astype('Int64').astype('object')
df['gidscd_month'] = df['gidscd'].dt.month.astype('Int64').astype('object')
df = df.drop('gidscd', axis=1)
print("Shape after dummify:", df.shape)
return df
def format_missings(df):
for column in df.columns:
if is_numeric_dtype(df[column]):
fill_value = df[column].mean()
df[column] = df[column].fillna(fill_value, downcast=False)
elif is_object_dtype(df[column]) or is_string_dtype(df[column]):
df[column] = df[column].fillna('MISSING', downcast=False)
print("Shape after format_missing:", df.shape)
return df
def remove_features_with_missing_values(df, na_thres):
return df.loc[:, df.isna().mean() < na_thres]
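# Illustrative usage sketch (the toy frame and threshold below are
# hypothetical, not from the original pipeline):
#
#   toy = pd.DataFrame({'age': [30.0, None, 50.0], 'state': ['NY', None, 'CA']})
#   toy = remove_features_with_missing_values(toy, na_thres=0.5)
#   toy = format_missings(toy)  # numeric NaN -> column mean, object NaN -> 'MISSING'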
def clean_floats(x):
if | pd.isnull(x) | pandas.isnull |
import pytest
import sys
import numpy as np
import swan_vis as swan
import networkx as nx
import math
import pandas as pd
###########################################################################
###################### Related to data analysis ############################
###########################################################################
class TestSGAnalysis(object):
# tests find_ir_genes, find_es_genes, get_die_genes, get_die_gene_table, test_gene
# TODO - add update_ids() call before running both find_ir/es_genes to make sure
# the subgraph logic works
# TODO - check to make sure this works with both + and - strands, because the
# loc_df locations no longer have strand associated with them after get_ordered_id_map
# test get_die_gene_table - gene that meets rc
def test_get_die_table_6(self):
columns = ['tid', 'gid', 'cond1', 'cond2', 'total_counts', 'cond1_counts', 'cond2_counts']
data = [[1, 1, .5, .5, 20, 10, 10],
[2, 1, .5, .5, 20, 10, 10]]
df = pd.DataFrame(data=data, columns=columns)
conditions = ['cond1', 'cond2']
df.set_index('tid', inplace=True)
df = swan.get_die_gene_table(df, conditions, rc=15)
columns = ['tid', 'gid', 'cond1', 'cond2', 'total_counts', 'cond1_counts', 'cond2_counts', 'dpi']
data = [[1, 1, .5, .5, 20, 10, 10, 0],
[2, 1, .5, .5, 20, 10, 10, 0]]
ctrl = pd.DataFrame(data=data, columns=columns)
ctrl.set_index('tid', inplace=True)
print(df)
print(ctrl)
print(ctrl == df)
assert (ctrl == df).all(axis=0).all()
# test get_die_gene_table - gene with 11 > n isoforms > 1
def test_get_die_table_5(self):
columns = ['tid', 'gid', 'cond1', 'cond2', 'total_counts', 'cond1_counts', 'cond2_counts']
data = [[1, 1, .5, .5, 20, 10, 10],
[2, 1, .5, .5, 20, 10, 10]]
df = pd.DataFrame(data=data, columns=columns)
conditions = ['cond1', 'cond2']
df.set_index('tid', inplace=True)
df = swan.get_die_gene_table(df, conditions, rc=0)
columns = ['tid', 'gid', 'cond1', 'cond2', 'total_counts', 'cond1_counts', 'cond2_counts', 'dpi']
data = [[1, 1, .5, .5, 20, 10, 10, 0],
[2, 1, .5, .5, 20, 10, 10, 0]]
ctrl = pd.DataFrame(data=data, columns=columns)
ctrl.set_index('tid', inplace=True)
print(df)
print(ctrl)
print(ctrl == df)
assert (ctrl == df).all(axis=0).all()
# test get_die_gene_table - gene with no expressed isoforms
def test_get_die_table_4(self):
columns = ['tid', 'gid', 'cond1', 'cond2', 'total_counts', 'cond1_counts', 'cond2_counts']
data = [[1, 1, 0, 0, 0, 0, 0],
[2, 1, 0, 0, 0, 0, 0]]
df = pd.DataFrame(data=data, columns=columns)
conditions = ['cond1', 'cond2']
df.set_index('tid', inplace=True)
df = swan.get_die_gene_table(df, conditions, rc=0)
assert df == None
# test get_die_gene_table - gene doesn't have enough reads (rc thresh)
def test_get_die_table_3(self):
columns = ['tid', 'gid', 'cond1', 'cond2', 'total_counts', 'cond1_counts', 'cond2_counts']
data = [[1, 1, .5, .5, 20, 10, 10],
[2, 1, .5, .5, 20, 10, 10]]
df = pd.DataFrame(data=data, columns=columns)
conditions = ['cond1', 'cond2']
df.set_index('tid', inplace=True)
df = swan.get_die_gene_table(df, conditions, rc=30)
assert df == None
# test get_die_gene_table - limit to genes with >1 iso
def test_get_die_table_2(self):
columns = ['tid', 'gid', 'cond1', 'cond2', 'total_counts', 'cond1_counts', 'cond2_counts', 'dpi']
data = [1, 1, 1, 1, 30, 15, 15, 0]
df = pd.DataFrame(data=[data], columns=columns)
conditions = ['cond1', 'cond2']
df.set_index('tid', inplace=True)
df = swan.get_die_gene_table(df, conditions, rc=0)
assert df == None
# test get_die_gene_table_1 - limit to isoforms with > 0 exp in at least
# one condition, aggregate last n-11 isos
def test_get_die_table_1(self):
conditions = ['cond1', 'cond2']
df = pd.read_csv('files/test_pi_1.tsv', sep='\t')
df.set_index('tid', inplace=True)
df = swan.get_die_gene_table(df, conditions, rc=0)
df.tid = df.tid.astype('str')
df.set_index('tid', inplace=True)
ctrl = pd.read_csv('files/test_pi_1_reference.tsv', sep='\t')
ctrl.tid = ctrl.tid.astype('str')
ctrl.set_index('tid', inplace=True)
print(ctrl)
print(df)
ctrl = ctrl[df.columns]
ctrl.dpi = ctrl.dpi.astype('float').round()
df.dpi = df.dpi.astype('float').round()
print(ctrl == df)
assert (ctrl == df).all(axis=0).all()
# still need a better way to test this
# test test_gene - 2 up 2 down
def test_test_gene_3(self):
conditions = ['cond1', 'cond2']
df = pd.read_csv('files/test_pi_1.tsv', sep='\t')
df.set_index('tid', inplace=True)
df = swan.get_die_gene_table(df, conditions, rc=0)
ctrl_gene_dpi = 0.41
pval, gene_dpi = swan.test_gene(df, conditions)
gene_dpi = np.round(gene_dpi, decimals=2)
ctrl_gene_dpi = np.round(ctrl_gene_dpi, decimals=2)
print(gene_dpi)
print(ctrl_gene_dpi)
assert gene_dpi == ctrl_gene_dpi
# test test_gene - <2 up, <2 down
def test_test_gene_2(self):
conditions = ['cond1', 'cond2']
df = pd.read_csv('files/test_pi_1.tsv', sep='\t')
df.set_index('tid', inplace=True)
df = swan.get_die_gene_table(df, conditions, rc=0)
ctrl_gene_dpi = 0.22
tids = [3, 1]
df = df.loc[df.tid.isin(tids)]
pval, gene_dpi = swan.test_gene(df, conditions)
assert gene_dpi == ctrl_gene_dpi
# test test_gene - all 0
def test_test_gene_1(self):
columns = ['tid', 'gid', 'cond1', 'cond2', 'total_counts', 'cond1_counts', 'cond2_counts']
data = [[1, 1, .5, .5, 20, 10, 10],
[2, 1, .5, .5, 20, 10, 10]]
df = pd.DataFrame(data=data, columns=columns)
conditions = ['cond1', 'cond2']
df.set_index('tid', inplace=True)
df = swan.get_die_gene_table(df, conditions, rc=0)
ctrl_gene_dpi = 0
pval, gene_dpi = swan.test_gene(df, conditions)
assert gene_dpi == ctrl_gene_dpi
# test get_die_genes - obs col doesn't exist
def test_get_die_genes_5(self):
sg = get_die_test_sg()
print(sg.adata.obs)
obs_col = 'condition'
obs_conditions = ['PB65_B017', 'PB65_B018']
id_col = 'tid'
with pytest.raises(Exception) as e:
test = sg.get_die_genes(obs_col=obs_col, obs_conditions=obs_conditions, rc_thresh=1)
assert 'Metadata column' in str(e.value)
# tests get_die_genes - b/w dataset conditions
def test_get_die_genes_4(self):
sg = get_die_test_sg()
obs_col = 'dataset'
obs_conditions = ['PB65_B017', 'PB65_B018']
id_col = 'tid'
test = sg.get_die_genes(obs_col=obs_col, obs_conditions=obs_conditions, rc_thresh=1)
# don't test p vals cause that's tough
test.drop(['p_val', 'adj_p_val'], axis=1, inplace=True)
ctrl = pd.read_csv('files/chr11_and_Tcf3_PB65_B017_B018_dpi.tsv', sep='\t')
test.dpi = test.dpi.round().astype('float')
ctrl.dpi = ctrl.dpi.round().astype('float')
print(test)
print(ctrl)
print(test == ctrl)
assert test.equals(ctrl)
# tests get_die_genes - b/w non dataset conditions
def test_get_die_genes_3(self):
sg = get_die_test_sg()
obs_col = 'cluster'
id_col = 'tid'
test = sg.get_die_genes(obs_col=obs_col, rc_thresh=1)
# don't test p vals cause that's tough
test.drop(['p_val', 'adj_p_val'], axis=1, inplace=True)
ctrl = pd.read_csv('files/chr11_and_Tcf3_cluster_dpi.tsv', sep='\t')
test.dpi = test.dpi.round()
ctrl.dpi = ctrl.dpi.round()
print(ctrl)
print(test)
print(ctrl == test)
assert ctrl.equals(test)
# tests get_die_genes - obs_col has more than 2 values and obs_conditions
# not provided
def test_get_die_genes_2(self):
sg = get_die_test_sg()
obs_col = 'dataset'
id_col = 'tid'
with pytest.raises(Exception) as e:
test = sg.get_die_genes(obs_col=obs_col, rc_thresh=1)
assert 'Must provide obs_conditions' in str(e.value)
# tests get_die_genes - obs_col has more than 2 values and obs_conditions
# does not have 2 values
def test_get_die_genes_1(self):
sg = get_die_test_sg()
obs_col = 'dataset'
id_col = 'tid'
obs_conditions = ['D12']
with pytest.raises(Exception) as e:
test = sg.get_die_genes(obs_col=obs_col, obs_conditions=obs_conditions, rc_thresh=1)
assert 'exactly 2 values' in str(e.value)
# tests find_es_genes - requires edges to be in order
def test_find_es_genes(self):
sg = swan.SwanGraph()
sg.annotation = True
# t_df
data = [[0, [0,1,2,3,4], True, 'g1', 't1'],
[1, [0,5,4], False, 'g1', 't2']]
cols = ['vertex_id', 'path', 'annotation', 'gid', 'tid']
sg.t_df = pd.DataFrame(data=data, columns=cols)
# edge
data = [[0, 'exon', True, 0, 1],
[1, 'intron', True, 1, 2],
[2, 'exon', True, 2, 3],
[3, 'intron', True, 3, 4],
[4, 'exon', True, 4, 5],
[5, 'intron', False, 1, 4]]
cols = ['edge_id', 'edge_type', 'annotation', 'v1', 'v2']
sg.edge_df = pd.DataFrame(data=data, columns=cols)
# loc
data = [0,1,2,3,4,5]
cols = ['vertex_id']
sg.loc_df = pd.DataFrame(data=data, columns=cols)
ctrl_gids = ['g1']
ctrl_tids = ['t2']
ctrl_edges = [5]
sg.get_loc_path()
sg.create_graph_from_dfs()
gids, tids, edges = sg.find_es_genes()
print('edges')
print('control')
print(ctrl_edges)
print('test')
print(edges)
assert set(ctrl_edges) == set(edges)
print('transcripts')
print('control')
print(ctrl_tids)
print('test')
print(tids)
assert set(ctrl_tids) == set(tids)
print('genes')
print('control')
print(ctrl_gids)
print('test')
print(gids)
assert set(ctrl_gids) == set(gids)
# tests find_ir_genes - requires edges to be in order...
# also requires the swangraph
def test_find_ir_genes(self):
sg = swan.SwanGraph()
sg.annotation = True
# t_df
data = [[0, [0,1,2], True, 'g1', 't1'],
[1, [3], False, 'g1', 't2']]
cols = ['vertex_id', 'path', 'annotation', 'gid', 'tid']
sg.t_df = pd.DataFrame(data=data, columns=cols)
# edge
data = [[0, 'exon', True, 0, 1],
[1, 'intron', True, 1, 2],
[2, 'exon', True, 2, 3],
[3, 'exon', False, 0, 3]]
cols = ['edge_id', 'edge_type', 'annotation', 'v1', 'v2']
sg.edge_df = | pd.DataFrame(data=data, columns=cols) | pandas.DataFrame |
from contextlib import nullcontext
import copy
import numpy as np
import pytest
from pandas._libs.missing import is_matching_na
from pandas.core.dtypes.common import is_float
from pandas import (
Index,
MultiIndex,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize(
"arr, idx",
[
([1, 2, 3, 4], [0, 2, 1, 3]),
([1, np.nan, 3, np.nan], [0, 2, 1, 3]),
(
[1, np.nan, 3, np.nan],
MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c"), (3, "c")]),
),
],
)
def test_equals(arr, idx):
s1 = Series(arr, index=idx)
s2 = s1.copy()
assert s1.equals(s2)
s1[1] = 9
assert not s1.equals(s2)
@pytest.mark.parametrize(
"val", [1, 1.1, 1 + 1j, True, "abc", [1, 2], (1, 2), {1, 2}, {"a": 1}, None]
)
def test_equals_list_array(val):
# GH20676 Verify equals operator for list of Numpy arrays
arr = np.array([1, 2])
s1 = Series([arr, arr])
s2 = s1.copy()
assert s1.equals(s2)
s1[1] = val
cm = (
tm.assert_produces_warning(FutureWarning, check_stacklevel=False)
if isinstance(val, str)
else nullcontext()
)
with cm:
assert not s1.equals(s2)
def test_equals_false_negative():
# GH8437 Verify false negative behavior of equals function for dtype object
arr = [False, np.nan]
s1 = Series(arr)
s2 = s1.copy()
s3 = Series(index=range(2), dtype=object)
s4 = s3.copy()
s5 = s3.copy()
s6 = s3.copy()
s3[:-1] = s4[:-1] = s5[0] = s6[0] = False
assert s1.equals(s1)
assert s1.equals(s2)
assert s1.equals(s3)
assert s1.equals(s4)
assert s1.equals(s5)
assert s5.equals(s6)
def test_equals_matching_nas():
# matching but not identical NAs
left = Series([np.datetime64("NaT")], dtype=object)
right = Series([np.datetime64("NaT")], dtype=object)
assert left.equals(right)
assert Index(left).equals(Index(right))
assert left.array.equals(right.array)
left = Series([np.timedelta64("NaT")], dtype=object)
right = Series([np.timedelta64("NaT")], dtype=object)
assert left.equals(right)
assert Index(left).equals(Index(right))
assert left.array.equals(right.array)
left = Series([np.float64("NaN")], dtype=object)
right = Series([np.float64("NaN")], dtype=object)
assert left.equals(right)
assert Index(left, dtype=left.dtype).equals(Index(right, dtype=right.dtype))
assert left.array.equals(right.array)
def test_equals_mismatched_nas(nulls_fixture, nulls_fixture2):
# GH#39650
left = nulls_fixture
right = nulls_fixture2
if hasattr(right, "copy"):
right = right.copy()
else:
right = copy.copy(right)
ser = Series([left], dtype=object)
ser2 = Series([right], dtype=object)
if is_matching_na(left, right):
assert ser.equals(ser2)
elif (left is None and is_float(right)) or (right is None and is_float(left)):
assert ser.equals(ser2)
else:
assert not ser.equals(ser2)
def test_equals_none_vs_nan():
# GH#39650
ser = Series([1, None], dtype=object)
ser2 = | Series([1, np.nan], dtype=object) | pandas.Series |
import matplotlib.pyplot as plt
import skimage.color
import time
import pytesseract
import pandas as pd
from shapely.geometry import box
import os
from map_extractor.utils import check_rgb_image
from map_extractor.PolygonGroup import PolygonGroup
import numpy as np
from shapely.geometry import Polygon
def apply_tesseract(img) :
'''
Recognize countries' names on a map with Tesseract.
Parameters
img (np.ndarray) : an RGB image of the map, with countries' names appearing
Returns
pd.DataFrame : The OCR results, a dataframe with columns 'text' and 'bbox' (text's bounding box)
'''
check_rgb_image(img)
tmp_filename = str(time.time()) + '.png'
# Save the image as grayscale (black and white) before running Tesseract on it
plt.imsave(tmp_filename, skimage.color.rgb2gray(img), cmap='gray')
d = pytesseract.image_to_data(tmp_filename, config="--oem 3 --psm 11",lang='eng', output_type=pytesseract.Output.DICT)
os.remove(tmp_filename)
#Now text and bounding boxes are extracted
boxes = []
bboxes = pd.DataFrame(columns=['bbox'])
# Filter insignificant results from text recognition (e.g., '@àé' won't pass the filter)
df = filter_recognition_results( | pd.DataFrame(d) | pandas.DataFrame |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from datetime import datetime
import os
from ..utils.config import picture_path
from ..metrics.metrics_ import gini
COLOR = ['darkorange', 'cornflowerblue']
PATH = os.getcwd() + "/picture/modelling/"
from pathlib import Path
Path(PATH).mkdir(parents=True, exist_ok=True)
# from model import Model
class Plot:
def __init__(self, **kwargs):
# self.__init__(Model)
try:
self.name = kwargs['name']
except KeyError:
pass
try:
self.model = kwargs['model']
except KeyError:
pass
self.feature_importance = None
try:
self.feature_name = kwargs['feature_name']
except KeyError:
self.feature_name = None
self.args = kwargs
self.max_feat = 50
self.color = COLOR
self.path = PATH
def get_parameters(self):
if self.name == 'lgb_model':
self.feature_importance = self.model.feature_importance()
self.feature_name = self.model.feature_name()
else:
feat_name_key = [key for key in self.args.keys() if 'feat' in key and 'name' in key]
assert len(feat_name_key) == 1
self.feature_name = self.args[feat_name_key[0]]
if self.name == 'lgb_classifier' or self.name == 'lgb':
self.feature_importance = self.model.feature_importances_
elif self.name == 'RF':
self.feature_importance = self.model.feature_importances_
def simple_plot(self):
self.get_parameters()
dfx = pd.Series(self.feature_importance, index=self.feature_name)
dfx = dfx.sort_values(ascending=False)
if len(dfx) > self.max_feat:
dfx = dfx.iloc[:self.max_feat]
dfx.plot.bar(figsize=(12, 4), color=self.color)
plt.title('feat importance')
plt.show()
plt.savefig(self.path + 'feat_importance', bbox_inches='tight', dpi=1200)
def plot_lgb(self):
if 'lgb' in self.args.keys():
lgb = self.args['lgb']
ax = lgb.plot_importance(self.model, figsize=(6, 8), \
importance_type='gain', \
max_num_features=40,
height=.8,
color=self.color,
grid=False,
)
plt.sca(ax)
plt.xticks([], [])
plt.title('lgb model gain importance')
plt.show()
plt.savefig(self.path + 'feat_importance_lgb', bbox_inches='tight', dpi=1200)
else:
pass
def get_path(self):
if os.path.isdir(picture_path):
now = datetime.now()
current_time = now.strftime("%d %m %y %H %M") # append this timestamp to the file name to make it unique
path = picture_path + current_time + '_picture.png'
return path
else:
return None
def plot_metric(self):
if 'lgb' in self.args.keys():
lgb = self.args['lgb']
ax = lgb.plot_metric(self.model, figsize=(6, 8))
plt.savefig(self.path + 'metric', bbox_inches='tight', dpi=1200)
plt.show()
else:
pass
def plot_metric_and_importance(self):
if 'lgb' in self.args.keys():
lgb = self.args['lgb']
fig, ax = plt.subplots(2, 1)
fig.subplots_adjust(hspace=.2)
fig.set_figheight(6)
fig.set_figwidth(14)
lgb.plot_metric(self.model, ax=ax[0])
booster = self.model.booster_ # for a classifier we must access the underlying booster_ instance
dfx = pd.DataFrame(index=booster.feature_name())
dfx['gain'] = booster.feature_importance('gain')
dfx['gain'] = dfx['gain'] / dfx.gain.max()
dfx['split'] = booster.feature_importance('split')
dfx['split'] = dfx['split'] / dfx.split.max()
dfx = dfx.sort_values('gain', ascending=False).iloc[:self.max_feat]
dfx.plot.bar(width=0.9, ax=ax[1], color=COLOR)
plt.subplots_adjust(left=None, bottom=.5, right=None, top=None, wspace=None, hspace=None)
plt.savefig(self.path + 'feat_importance lgb', bbox_inches='tight', dpi=1200)
plt.show()
else:
print('nothing to plot')
pass
def plot_booster_lgb(self):
booster = self.model.booster_ # for a classifier we must access the underlying booster_ instance
dfx = pd.DataFrame(index=booster.feature_name())
dfx['gain'] = booster.feature_importance('gain')
dfx['gain'] = dfx['gain'] / dfx.gain.max()
dfx['split'] = booster.feature_importance('split')
dfx['split'] = dfx['split'] / dfx.split.max()
dfx = dfx.sort_values('split', ascending=False).iloc[:self.max_feat]
dfx.plot.bar(width=0.9, figsize=(12, 3))
plt.show()
plt.savefig(self.path + 'feat_importance lgb 2', bbox_inches='tight', dpi=1200)
def plot_rf_or_lr(self):
if self.name.strip().lower() == 'lr':
feature_importance = abs(self.model.coef_[0])
elif self.name.strip().lower() == 'rf':
feature_importance = self.model.feature_importances_
else:
return self
plt.figure(figsize=(13, 4))
df = pd.Series(feature_importance, index=self.feature_name).sort_values(ascending=False).iloc[:self.max_feat]
plt.bar(range(len(df)), df, color=self.color)
plt.xticks(range(len(df)), df.index, rotation=90)
plt.title('Feature Importance Of %s Model' % (self.name.upper()), fontsize=16)
plt.subplots_adjust(left=None, bottom=.5, right=None, top=None, wspace=None, hspace=None)
plt.savefig(self.path + 'feat_importance rf or lr', bbox_inches='tight', dpi=1200)
plt.show()
def plotKfold(self, models, cv, X, y):
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import auc
from sklearn.metrics import plot_roc_curve, roc_curve
from sklearn.model_selection import StratifiedKFold
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 10000)
fig, ax = plt.subplots()
fig.set_figheight(8)
fig.set_figwidth(8)
index = 0
for model, (index_train, index_test) in zip(models, cv.split(X, y)):
train, test = X.loc[index_train], X.loc[index_test]
y_train, y_test = y.loc[index_train], y.loc[index_test]
prediction = model.predict_proba(test)
fpr, tpr, t = roc_curve(y_test, prediction[:, 1])
auc_value = auc(fpr, tpr)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(auc_value)
plt.plot(fpr, tpr, lw=2, alpha=0.3, label='ROC fold %d (AUC = %0.2f)' % (index, auc_value))
index += 1
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
title="AUC by Kfold on Test set")
ax.legend(loc="lower right")
ax.set_xlabel('false positive rate')
ax.set_ylabel('true positive rate')
plt.savefig(self.path + 'auc kfold', bbox_inches='tight', dpi=1200)
plt.show()
# plt.savefig('kfold.png', dpi=1200, bbox_inches='tight')
def plot_importance_Kfold(models):
to_plot = pd.DataFrame(index=models[0].booster_.feature_name())
for index, model in enumerate(models):
to_plot['%s_gain' % index] = model.booster_.feature_importance('gain')
to_plot['%s_split' % index] = model.booster_.feature_importance('split')
about_gains = [col for col in to_plot.columns if '_gain' in col]
about_split = [col for col in to_plot.columns if '_split' in col]
to_plot[about_gains] = to_plot[about_gains] / to_plot[about_gains].max().max()
to_plot[about_split] = to_plot[about_split] / to_plot[about_split].max().max()
to_plot['gain'] = to_plot[about_gains].mean(axis=1)
to_plot['split'] = to_plot[about_split].mean(axis=1)
to_plot['gain_std'] = to_plot[about_gains].std(axis=1)
to_plot['split_std'] = to_plot[about_split].std(axis=1)
to_plot = to_plot.sort_values('gain', ascending=False)
total = len(to_plot)
max_len = min(45, len(to_plot))
height = 0.45
x1 = to_plot.index[:max_len]
x = np.arange(max_len)
gain = to_plot.gain.iloc[:max_len]
gain_err = to_plot.iloc[:max_len].gain_std
split = to_plot.split.iloc[:max_len]
split_err = to_plot.iloc[:max_len].split_std
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(wspace=.5)
plt.barh(x - height, gain, xerr=gain_err, height=height, color=COLOR[0], align='center',
error_kw=dict(ecolor='gray', lw=1, capsize=1.5, capthick=.5))
plt.barh(x, split, height=height, xerr=split_err, color=COLOR[1], align='center',
error_kw=dict(ecolor='gray', lw=1, capsize=1.5, capthick=.5))
plt.yticks(x, x1, rotation=0)
plt.legend(('gain', 'split'))
plt.title('MOST %s IMPORTANT FEATURES, FEATURES BY TOTAL =%s' % (max_len, total))
plt.savefig(PATH + 'feat_importance kfold', bbox_inches='tight', dpi=1200)
# plt.savefig('most_importance.png', dpi=1200, bbox_inches='tight')
plt.show()
def plot_aucKfold(models):
this_df = pd.DataFrame()
for index, model in enumerate(models):
this_df['%s_train' % index] = model.evals_result_['training']['auc']
this_df['%s_test' % index] = model.evals_result_['valid_1']['auc']
all_about_train = [col for col in this_df.columns if '_train' in col]
all_about_test = [col for col in this_df.columns if '_test' in col]
train_mean = this_df[all_about_train].mean(axis=1)
train_std = this_df[all_about_train].std(axis=1)
test_mean = this_df[all_about_test].mean(axis=1)
test_std = this_df[all_about_test].std(axis=1)
x = np.arange(len(train_mean))
plt.plot(x, train_mean)
plt.fill_between(x, train_mean - train_std, train_mean + train_std, color='gray', alpha=.2)
plt.plot(x, test_mean)
plt.fill_between(x, test_mean - test_std, test_mean + test_std, color='gray', alpha=.2)
plt.legend(['train', 'valid', 'confidence zone'])
plt.title('Train and Valid auc during training')
plt.ylabel('auc')
plt.xlabel('epoch')
plt.savefig(PATH + 'auc kfold', bbox_inches='tight', dpi=1200)
plt.show()
def plot_train_test(acc_train, acc_test, name, legend=('train', 'test')):
plt.plot(acc_train)
plt.plot(acc_test)
plt.title(name)
plt.ylabel(name)
plt.xlabel('epoch')
plt.legend(legend, loc='upper left')
plt.savefig(PATH + 'acc train test', bbox_inches='tight', dpi=1200)
plt.show()
def plot_precision_recall_curve(model, X_test, y_test):
# from sklearn.metrics import average_precision_score
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import plot_precision_recall_curve
# import matplotlib.pyplot as plt
from numpy import argmax
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve
y_score = model.predict_proba(X_test)
if len(y_score.shape) > 1:
y_score = y_score[:, 1]
precision, recall, thresholds = precision_recall_curve(y_test, y_score)
funct = np.vectorize(lambda x, y: -1 if (x <= 0 or y <= 0) else 2 * x * y / (x + y))
fscore = funct(precision, recall)
# print('fscore contains non numeric=',np.isnan(fscore).any())
# print('fscore contains inf =',np.isinf(fscore).any())
# ix = argmax(fscore)
max_value = np.max(fscore)
# print('max value = %.3f'%max_value)
ix = np.where(fscore == max_value)[0][0]
# print('Best Threshold=%f, F-Score=%.3f'%(thresholds[ix], fscore[ix]))
plt.plot(recall, precision, marker='.', label='model')
plt.plot(recall[ix], precision[ix], marker='o', color='red', label='Best')
# axis labels
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.legend()
plt.grid()
plt.title('best threshold=%.4f. Recall=%.2f, Precision = %.2f' % (thresholds[ix], recall[ix],
precision[ix]))
# show the plot
plt.savefig(PATH + 'precision recall', bbox_inches='tight', dpi=1200)
plt.show()
def __gini__(train, test, y_train, y_test, is_plot=True):
max_len = 30
gini_trains = []
gini_tests = []
for col in train.columns:
gini_train = np.abs(gini(y_train, train[col]))
gini_test = np.abs(gini(y_test, test[col]))
gini_trains.append(gini_train)
gini_tests.append(gini_test)
dfxx = pd.DataFrame(index=train.columns)
dfxx['train'] = gini_trains
dfxx['test'] = gini_tests
dfxx = dfxx.sort_values('train', ascending=False)
if not is_plot:
return dfxx
is_drop = False
total_features = len(dfxx)
if len(dfxx) > max_len:
dfxx = dfxx.iloc[:max_len]
is_drop = True
ax = dfxx.plot.barh(width=.9, rot=0, figsize=(10, 10), color=['darkorange', 'cornflowerblue'])
rects = ax.patches
# For each bar: Place a label
for rect in rects:
# Get X and Y placement of label from rect.
x_value = rect.get_width()
y_value = rect.get_y() + rect.get_height() / 2
# Number of points between bar and label. Change to your liking.
space = 5
# Vertical alignment for positive values
ha = 'left'
# If value of bar is negative: Place label left of bar
if x_value < 0:
# Invert space to place label to the left
space *= -1
# Horizontally align label at right
ha = 'right'
# Use X value as label and format number with one decimal place
label = '%.2f' % x_value
# Create annotation
plt.annotate(
label, # Use `label` as label
(x_value, y_value), # Place label at end of the bar
xytext=(space, 0), # Horizontally shift label by `space`
textcoords="offset points", # Interpret `xytext` as offset in points
va='center', # Vertically center label
ha=ha) # Horizontally align label differently for
# positive and negative values.
if is_drop:
plt.title('Top %s features by gini on train and test, total features=%s' % (max_len, total_features))
else:
plt.title('Gini by features on train and test')
plt.savefig(PATH + 'gini', bbox_inches='tight', dpi=1200)
plt.show()
def __giniByUNiqueData__(data, target_name='label', is_plot=True):
max_len = 30
ginis = []
cols = []
for col in data.columns:
if col != target_name:
gini_ = np.abs(gini(data[target_name], data[col]))
ginis.append(gini_)
cols.append(col)
# print('%s=%.3f'%(col, gini_))
dfxx = | pd.Series(ginis, index=cols) | pandas.Series |
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import PrecisionRecallDisplay
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
#from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import make_scorer
import matplotlib.pyplot as plt
import matplotlib.cm
from sklearn import metrics
from sklearn.metrics import roc_curve
import seaborn as sns
import pandas as pd
models = {}
def get_best_model(models,abs_score=False):
if abs_score:
best_model = max(models, key=lambda k: abs(models[k]['metric_score']))
else:
best_model = max(models, key=lambda k: models[k]['metric_score'])
return best_model, models[best_model]
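# Illustrative sketch of the `models` structure this helper expects
# (names and scores are hypothetical):
#
#   models = {
#       'xgb': {'model': xgb_clf, 'metric_score': 0.71},
#       'lgbm': {'model': lgbm_clf, 'metric_score': 0.74},
#   }
#   best_name, best_entry = get_best_model(models)  # -> ('lgbm', {...})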
def plot_custom_confusion_matrix(cm,labels,confusion_matrix_values_format=None):
cmap = matplotlib.cm.get_cmap('Blues')
disp = ConfusionMatrixDisplay(confusion_matrix=cm,display_labels=labels)
return disp.plot(cmap=cmap,values_format=confusion_matrix_values_format)
def generate_classification_report(y_true,y_pred,labels,confusion_matrix_normalize=None,confusion_matrix_values_format=None):
selected_metric = None
acc = accuracy_score(y_true, y_pred)
print("Accuracy: %.2f%%" % (acc * 100.0))
prec_mic = precision_score(y_true, y_pred,average='micro')
print("Precision (micro): %.2f%%" % (prec_mic * 100.0))
prec_mac = precision_score(y_true, y_pred,average='macro')
print("Precision (macro): %.2f%%" % (prec_mac * 100.0))
prec_wei = precision_score(y_true, y_pred,average='weighted')
print("Precision (weighted): %.2f%%" % (prec_wei * 100.0))
rec_mic = recall_score(y_true, y_pred,average='micro')
print("Recall (micro): %.2f%%" % (rec_mic * 100.0))
rec_mac = recall_score(y_true, y_pred,average='macro')
print("Recall (macro): %.2f%%" % (rec_mac * 100.0))
rec_wei = recall_score(y_true, y_pred,average='weighted')
print("Recall (weighted): %.2f%%" % (rec_wei * 100.0))
f1_mic = f1_score(y_true, y_pred,average='micro')
print("F1 (micro): %.2f%%" % (f1_mic * 100.0))
f1_mac = f1_score(y_true, y_pred,average='macro')
print("F1 (macro): %.2f%%" % (f1_mac * 100.0))
f1_wei = f1_score(y_true, y_pred,average='weighted')
print("F1 (weighted): %.2f%%" % (f1_wei * 100.0))
f1_bin = f1_score(y_true, y_pred,average='binary')
print("F1 (binary): %.2f%%" % (f1_bin * 100.0))
f2_bin = fbeta_score(y_true, y_pred,average='binary',beta=2)
print("F2 (binary): %.2f%%" % (f2_bin * 100.0))
fd5_bin = fbeta_score(y_true, y_pred,average='binary',beta=0.5,)
print("F1/2 (binary): %.2f%%" % (fd5_bin * 100.0))
mcc_result = matthews_corrcoef(y_true, y_pred)
print("MCC: %.4f%%" % (mcc_result))
selected_metric = fd5_bin
print()
print()
cm = confusion_matrix(y_true, y_pred, normalize=confusion_matrix_normalize)
print(cm)
print()
print()
print(classification_report(y_true, y_pred))
# saving classification report to dataframe as well
output_classification_report = classification_report(y_true, y_pred, output_dict=True)
output_classification_report = | pd.DataFrame(output_classification_report) | pandas.DataFrame |
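# Hypothetical call sketch for the helper above (labels and arrays are
# placeholders, not project data):
#
#   y_true = [0, 1, 1, 0]
#   y_pred = [0, 1, 0, 0]
#   generate_classification_report(y_true, y_pred, labels=[0, 1])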
import collections
import logging
import pandas as pd
import sklearn.linear_model as slm
import core.artificial_signal_generators as sig_gen
import core.config as cconfig
import core.dataflow as dtf
import core.dataframe_modeler as dfmod
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class TestDataFrameModeler(hut.TestCase):
def test_dump_json1(self) -> None:
df = pd.DataFrame(
{"col0": [1, 2, 3], "col1": [4, 5, 6]},
index=pd.date_range("2010-01-01", periods=3),
)
oos_start = pd.Timestamp("2010-01-01")
info = collections.OrderedDict({"df_info": dtf.get_df_info_as_string(df)})
df_modeler = dfmod.DataFrameModeler(df, oos_start=oos_start, info=info)
output = df_modeler.dump_json()
self.check_string(output)
def test_load_json1(self) -> None:
"""
Test by dumping json and loading it again.
"""
df = pd.DataFrame(
{"col0": [1, 2, 3], "col1": [4, 5, 6]},
index=pd.date_range("2010-01-01", periods=3),
)
oos_start = pd.Timestamp("2010-01-01")
info = collections.OrderedDict({"df_info": dtf.get_df_info_as_string(df)})
df_modeler = dfmod.DataFrameModeler(df, oos_start=oos_start, info=info)
json_str = df_modeler.dump_json()
df_modeler_loaded = dfmod.DataFrameModeler.load_json(json_str)
pd.testing.assert_frame_equal(df_modeler.df, df_modeler_loaded.df)
self.assertEqual(df_modeler.oos_start, df_modeler_loaded.oos_start)
self.assertDictEqual(df_modeler.info, df_modeler_loaded.info)
def test_load_json2(self) -> None:
"""
Test by dumping json and loading it again with `oos_start=None`.
"""
df = pd.DataFrame(
{"col0": [1, 2, 3], "col1": [4, 5, 6]},
index=pd.date_range("2010-01-01", periods=3),
)
oos_start = None
info = collections.OrderedDict({"df_info": dtf.get_df_info_as_string(df)})
df_modeler = dfmod.DataFrameModeler(df, oos_start=oos_start, info=info)
json_str = df_modeler.dump_json()
df_modeler_loaded = dfmod.DataFrameModeler.load_json(json_str)
pd.testing.assert_frame_equal(df_modeler.df, df_modeler_loaded.df)
self.assertEqual(df_modeler.oos_start, df_modeler_loaded.oos_start)
self.assertDictEqual(df_modeler.info, df_modeler_loaded.info)
def test_load_json3(self) -> None:
"""
Test by dumping json and loading it again with `info=None`.
"""
df = pd.DataFrame(
{"col0": [1, 2, 3], "col1": [4, 5, 6]},
index= | pd.date_range("2010-01-01", periods=3) | pandas.date_range |
import numpy as np
import os
from sklearn.datasets import make_blobs
import sys
import requests, zipfile, io
import pandas
__datasets = ['Adult', 'Bank', 'Synthetic', 'Synthetic-unequal', 'CensusII']
def dataset_names():
return __datasets
def read_dataset(name, data_dir,args):
data = []
sex_num = []
K = []
if name not in __datasets:
raise KeyError("Dataset not implemented:",name)
elif name == 'Synthetic':
n_samples = 400
centers = [(1, 1), (2.1, 1), (1, 5), (2.1, 5)]
data, sex_num = make_blobs(n_samples=n_samples, n_features=2, cluster_std=0.1,
centers=centers, shuffle=False, random_state=1)
index = n_samples//2
sex_num[0:index] = 0
sex_num[index:n_samples] = 1
K = 2
elif name == 'Synthetic-unequal':
n_samples = 400
sample_list = [150,150,50,50]
centers = [(1, 1), (2.1, 1), (1, 3.5), (2.1, 3.5)]
data, sex_num = make_blobs(n_samples=sample_list, n_features=2, cluster_std=0.13,
centers=centers, shuffle=False, random_state=1)
index = sample_list[0]+sample_list[1]
sex_num[0:index] = 0
sex_num[index:] = 1
K = 2
elif name == 'Adult':
_path = 'adult.data'
data_path = os.path.join(data_dir,_path)
race_is_sensitive_attribute = 0
if race_is_sensitive_attribute==1:
m = 5
else:
m = 2
# n = 25000
K = 10
if (not os.path.exists(data_path)):
print('Adult data set does not exist in current folder --- Have to download it')
r = requests.get('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', allow_redirects=True)
if r.status_code == requests.codes.ok:
print('Download successful')
else:
print('Could not download Adult data set - please download it manually')
sys.exit()
open(data_path, 'wb').write(r.content)
df = pandas.read_csv(data_path, sep=',',header=None)
# df = df[:n]
n = df.shape[0]
sens_attr = 9
sex = df[sens_attr]
sens_attributes = list(set(sex.astype(str).values)) # =[' Male', ' Female']
print(sens_attributes)
df = df.drop(columns=[sens_attr])
sex_num = np.zeros(n, dtype=int)
sex_num[sex.astype(str).values == sens_attributes[1]] = 1
#dropping non-numerical features and normalizing data
cont_types = np.where(df.dtypes=='int')[0] # =[0,2,4,9,10,11]
df = df.iloc[:,cont_types]
data = np.array(df.values, dtype=float)
data = data[:,[0,1,2,3,5]]
#Scale data
# data = scale(data, axis = 0)
elif name == 'Bank':
# n= 6000
# K = 4
K = 10
_path = 'bank-additional-full.csv' # Big dataset with 41108 samples
# _path = 'bank.csv' # most approaches use this small version with 4521 samples
data_path = os.path.join(data_dir,_path)
if (not os.path.exists(data_path)):
print('Bank dataset does not exist in current folder --- Have to download it')
r = requests.get('https://archive.ics.uci.edu/ml/machine-learning-databases/00222/bank-additional.zip', allow_redirects=True)
# r = requests.get('https://archive.ics.uci.edu/ml/machine-learning-databases/00222/bank.zip', allow_redirects=True)
if r.status_code == requests.codes.ok:
print('Download successful')
else:
print('Could not download Bank data set - please download it manually')
sys.exit()
z = zipfile.ZipFile(io.BytesIO(r.content))
# z.extract('bank-additional/bank-additional-full.csv','./data')
open(data_path, 'wb').write(z.read('bank-additional/bank-additional-full.csv'))
# open(data_path, 'wb').write(z.read('bank.csv'))
df = | pandas.read_csv(data_path,sep=';') | pandas.read_csv |
# This script performs the statistical analysis for the pollution growth paper
# Importing required modules
import pandas as pd
import numpy as np
import statsmodels.api as stats
from ToTeX import restab
# Reading in the data
data = pd.read_csv('C:/Users/User/Documents/Data/Pollution/pollution_data_kp.csv')
# Prepping data for pollution regression
# Data sets for ndividual pollutants
co2_data = data[['ln_co2', 'ln_co2_lag', 'ln_sk', 'ln_n5', 'ln_co2_intensity_rate', 'Country', 'Year']].dropna()#, 'ln_co2_intensity_lag']].dropna()
ch4_data = data[['ln_ch4', 'ln_ch4_lag3', 'ln_sk', 'ln_n5', 'ln_ch4_intensity_rate', 'Country', 'Year']].dropna()#, 'ln_ch4_intensity_lag3']].dropna()
nox_data = data[['ln_nox', 'ln_nox_lag', 'ln_sk', 'ln_n5', 'ln_nox_intensity_rate', 'Country', 'Year']].dropna()#, 'ln_nox_intensity_lag']].dropna()
ghg_data = data[['ln_ghg', 'ln_ghg_lag', 'ln_sk', 'ln_n5', 'ln_ghg_intensity_rate', 'Country', 'Year']].dropna()#, 'ln_ghg_intensity_lag']].dropna()
# Creating dummy variables for each pollutant
co2_national_dummies = pd.get_dummies(co2_data['Country'])
co2_year_dummies = pd.get_dummies(co2_data['Year'])
ch4_national_dummies = pd.get_dummies(ch4_data['Country'])
ch4_year_dummies = pd.get_dummies(ch4_data['Year'])
nox_national_dummies = pd.get_dummies(nox_data['Country'])
nox_year_dummies = pd.get_dummies(nox_data['Year'])
ghg_national_dummies = pd.get_dummies(ghg_data['Country'])
ghg_year_dummies = pd.get_dummies(ghg_data['Year'])
# Replacing Country and Year with fixed effects
co2_data = pd.concat([co2_data, co2_national_dummies, co2_year_dummies], axis = 1)
ch4_data = pd.concat([ch4_data, ch4_national_dummies, ch4_year_dummies], axis = 1)
nox_data = pd.concat([nox_data, nox_national_dummies, nox_year_dummies], axis = 1)
ghg_data = pd.concat([ghg_data, ghg_national_dummies, ghg_year_dummies], axis = 1)
co2_data = co2_data.drop(['Country', 'Year', 1993, 'United States'], axis = 1)
ch4_data = ch4_data.drop(['Country', 'Year', 1993, 'United States'], axis = 1)
nox_data = nox_data.drop(['Country', 'Year', 1993, 'United States'], axis = 1)
ghg_data = ghg_data.drop(['Country', 'Year', 1993, 'United States'], axis = 1)
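# Editorial note: dropping one country ('United States') and one year (1993)
# from each dummy set makes them the reference categories and avoids perfect
# collinearity among the fixed effects. A roughly equivalent shortcut (with an
# alphabetical reference category instead) would be:
#
#   co2_national_dummies = pd.get_dummies(co2_data['Country'], drop_first=True)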
# Create the Y and X matrices
CO2 = co2_data['ln_co2']
CH4 = ch4_data['ln_ch4']
NOX = nox_data['ln_nox']
GHG = ghg_data['ln_ghg']
X_CO2 = co2_data.drop(['ln_co2'], axis = 1)
X_CH4 = ch4_data.drop(['ln_ch4'], axis = 1)
X_NOX = nox_data.drop(['ln_nox'], axis = 1)
X_GHG = ghg_data.drop(['ln_ghg'], axis = 1)
# Running pollution regressions
co2_mod = stats.OLS(CO2, X_CO2)
ch4_mod = stats.OLS(CH4, X_CH4)
nox_mod = stats.OLS(NOX, X_NOX)
ghg_mod = stats.OLS(GHG, X_GHG)
models = [co2_mod, ch4_mod, nox_mod, ghg_mod]
names = ['CO2', 'CH4', 'NOx', 'GHG']
res_list = []
for mod in models:
res = mod.fit(cov_type = 'HC1')
res_list.append(res)
print(res.summary())
file = open('C:/Users/User/Documents/Data/Pollution/' + names[models.index(mod)] + '_kp.txt', 'w')
file.write(res.summary().as_text())
file.close()
restab(res_list, 'C:/Users/User/Documents/Data/Pollution/restab_kp.txt')
# Next we run gdp models to check coefficients
gdp_data = data[['ln_Income', 'ln_Income_lag', 'ln_sk', 'ln_n5', 'Country', 'Year']].dropna()
gdp_national_dummies = pd.get_dummies(gdp_data['Country'])
gdp_year_dummies = | pd.get_dummies(gdp_data['Year']) | pandas.get_dummies |
import inspect
import functools
import os
import warnings
warnings.filterwarnings('ignore', message='numpy.dtype size changed')
warnings.filterwarnings('ignore', message='regionprops and image moments')
warnings.filterwarnings('ignore', message='non-tuple sequence for multi')
warnings.filterwarnings('ignore', message='precision loss when converting')
import numpy as np
import pandas as pd
import skimage.io
import skimage.morphology
import ops.annotate
import ops.features
import ops.process
import ops.io
import ops.in_situ
import ops.utils
from .process import Align
class Snake():
"""Container class for methods that act directly on data (names start with
underscore) and methods that act on arguments from snakemake (e.g., filenames
provided instead of image and table data). The snakemake methods (no underscore)
are automatically loaded by `Snake.load_methods`.
"""
# ALIGNMENT AND SEGMENTATION
@staticmethod
def _align_SBS(data, method='DAPI', upsample_factor=2, window=2, cutoff=1,
align_channels=slice(1, None), keep_trailing=False, n=1):
"""Rigid alignment of sequencing cycles and channels.
Parameters
----------
data : numpy array
Image data, expected dimensions of (CYCLE, CHANNEL, I, J).
method : {'DAPI','SBS_mean'}, default 'DAPI'
Method for aligning 'data' across cycles. 'DAPI' uses cross-correlation between subsequent cycles
of DAPI images, assumes sequencing channels are aligned to DAPI images. 'SBS_mean' uses the
mean background signal from the SBS channels to determine image offsets between cycles of imaging,
again using cross-correlation.
upsample_factor : int, default 2
Subpixel alignment is done if `upsample_factor` is greater than one (can be slow).
Parameter passed to skimage.feature.register_translation.
window : int, default 2
A centered subset of data is used if `window` is greater than one. The size of the removed border is
int((x/2.) * (1 - 1/float(window))).
cutoff : float, default 1
Threshold for removing extreme values from SBS channels when using method='SBS_mean'. Channels are normalized
to the 70th percentile, and normalized values greater than `cutoff` are replaced by `cutoff`.
align_channels : slice object or None, default slice(1,None)
If not None, aligns channels (defined by the passed slice object) to each other within each cycle. If
None, does not align channels within cycles. Useful in particular for cases where images for all stage
positions are acquired for one SBS channel at a time, i.e., acquisition order of channels(positions).
keep_trailing : boolean, default False
If True, keeps only the minimum number of trailing channels across cycles. E.g., if one cycle contains 6 channels,
but all others have 5, only uses trailing 5 channels for alignment.
n : int, default 1
The first SBS channel in `data`.
Returns
-------
aligned : numpy array
Aligned image data, same dimensions as `data` unless `data` contained different numbers of channels between cycles
and keep_trailing=True.
"""
data = np.array(data)
if keep_trailing:
valid_channels = min([len(x) for x in data])
data = np.array([x[-valid_channels:] for x in data])
assert data.ndim == 4, 'Input data must have dimensions CYCLE, CHANNEL, I, J'
# align SBS channels for each cycle
aligned = data.copy()
if align_channels is not None:
align_it = lambda x: Align.align_within_cycle(
x, window=window, upsample_factor=upsample_factor)
aligned[:, align_channels] = np.array(
[align_it(x) for x in aligned[:, align_channels]])
if method == 'DAPI':
# align cycles using the DAPI channel
aligned = Align.align_between_cycles(aligned, channel_index=0,
window=window, upsample_factor=upsample_factor)
elif method == 'SBS_mean':
# calculate cycle offsets using the average of SBS channels
target = Align.apply_window(aligned[:, n:], window=window).max(axis=1)
normed = Align.normalize_by_percentile(target)
normed[normed > cutoff] = cutoff
offsets = Align.calculate_offsets(normed, upsample_factor=upsample_factor)
# apply cycle offsets to each channel
for channel in range(aligned.shape[1]):
aligned[:, channel] = Align.apply_offsets(aligned[:, channel], offsets)
return aligned
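# Usage sketch (editorial; file list and array shape are hypothetical):
#
#   data = np.stack([skimage.io.imread(f) for f in cycle_files])  # (CYCLE, CHANNEL, I, J)
#   aligned = Snake._align_SBS(data, method='DAPI', upsample_factor=2)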
@staticmethod
def _align_by_DAPI(data_1, data_2, channel_index=0, upsample_factor=2,
autoscale=True):
"""Align the second image to the first, using the channel at position
`channel_index`. The first channel is usually DAPI.
Parameters
----------
data_1 : numpy array
Image data to align to, expected dimensions of (CHANNEL, I, J).
data_2 : numpy array
Image data to align, expected dimensions of (CHANNEL, I, J).
channel_index : int, default 0
DAPI channel index
upsample_factor : int, default 2
Subpixel alignment is done if `upsample_factor` is greater than one (can be slow).
Parameter passed to skimage.feature.register_translation.
autoscale : bool, default True
Automatically scale `data_2` prior to alignment. Offsets are applied to
the unscaled image so no resolution is lost.
Returns
-------
aligned : numpy array
`data_2` with calculated offsets applied to all channels.
"""
images = [data_1[channel_index], data_2[channel_index]]
if autoscale:
images[1] = ops.utils.match_size(images[1], images[0])
_, offset = ops.process.Align.calculate_offsets(images, upsample_factor=upsample_factor)
if autoscale:
offset *= data_2.shape[-1] / data_1.shape[-1]
offsets = [offset] * len(data_2)
aligned = ops.process.Align.apply_offsets(data_2, offsets)
return aligned
@staticmethod
def _segment_nuclei(data, threshold, area_min, area_max, smooth=1.35, radius=15):
"""Find nuclei from DAPI. Uses local mean filtering to find cell foreground from aligned
but unfiltered data, then filters identified regions by mean intensity threshold and area ranges.
Parameters
----------
data : numpy array
Image data, expected dimensions of (CHANNEL, I, J) with the DAPI channel in channel index 0.
Can also be a single-channel DAPI image of dimensions (I,J).
threshold : float
Foreground regions with mean DAPI intensity greater than `threshold` are labeled
as nuclei.
area_min, area_max : floats
After individual nuclei are segmented from foreground using watershed algorithm, nuclei with
`area_min` < area < `area_max` are retained.
smooth : float, default 1.35
Size of gaussian kernel used to smooth the distance map to foreground prior to watershedding.
radius : float, default 15
Radius of disk used in local mean thresholding to identify foreground.
Returns
-------
nuclei : numpy array, dtype uint16
Labeled segmentation mask of nuclei, dimensions are same as trailing two dimensions of `data`.
"""
if isinstance(data, list):
dapi = data[0]
elif data.ndim == 3:
dapi = data[0]
else:
dapi = data
kwargs = dict(threshold=lambda x: threshold,
area_min=area_min, area_max=area_max,
smooth=smooth, radius=radius)
# skimage precision warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
nuclei = ops.process.find_nuclei(dapi, **kwargs)
return nuclei.astype(np.uint16)
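# Usage sketch (editorial; the threshold and area bounds are placeholders that
# depend on magnification and staining):
#
#   nuclei = Snake._segment_nuclei(aligned[0], threshold=2500, area_min=40, area_max=400)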
@staticmethod
def _segment_nuclei_stack(dapi, threshold, area_min, area_max, smooth=1.35, radius=15):
"""Find nuclei from a nuclear stain (e.g., DAPI). Expects data to have shape (I, J)
(segments one image) or (N, I, J) (segments a series of DAPI images).
"""
kwargs = dict(threshold=lambda x: threshold,
area_min=area_min, area_max=area_max,
smooth=smooth, radius=radius)
find_nuclei = ops.utils.applyIJ(ops.process.find_nuclei)
# skimage precision warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
nuclei = find_nuclei(dapi, **kwargs)
return nuclei.astype(np.uint16)
@staticmethod
def _segment_cells(data, nuclei, threshold):
"""Segment cells from aligned data. Matches cell labels to nuclei labels.
Note that labels can be skipped, for example if cells are touching the
image boundary.
Parameters
----------
data : numpy array
Image data to use for cell boundary segmentation, expected dimensions of (CYCLE, CHANNEL, I, J),
(CHANNEL, I, J), or (I,J). Takes minimum intensity over cycles, followed by mean intensity over
channels if both are present. If channels are present, but not cycles, takes median over channels.
nuclei : numpy array, dtype uint16
Labeled segmentation mask of nuclei, dimensions are same as trailing two dimensions of `data`. Uses
nuclei as seeds for watershedding and matches cell labels to nuclei labels.
threshold : float
Threshold used on `data` after reduction to 2 dimensions to identify cell boundaries.
Returns
-------
cells : numpy array, dtype uint16
Labeled segmentation mask of cell boundaries, dimensions are same as trailing dimensions of `data`.
Labels match `nuclei` labels.
"""
if data.ndim == 4:
# no DAPI, min over cycles, mean over channels
mask = data[:, 1:].min(axis=0).mean(axis=0)
elif data.ndim == 3:
mask = np.median(data[1:], axis=0)
elif data.ndim == 2:
mask = data
else:
raise ValueError
mask = mask > threshold
try:
# skimage precision warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
cells = ops.process.find_cells(nuclei, mask)
except ValueError:
print('segment_cells error -- no cells')
cells = nuclei
return cells
@staticmethod
def _segment_cell_2019(data, nuclei_threshold, nuclei_area_min,
nuclei_area_max, cell_threshold):
"""Combine morphological segmentation of nuclei and cells to have the same
interface as _segment_cellpose.
"""
nuclei = Snake._segment_nuclei(data[0], nuclei_threshold, nuclei_area_min, nuclei_area_max)
cells = Snake._segment_cells(data, nuclei, cell_threshold)
return nuclei, cells
@staticmethod
def _segment_cellpose(data, dapi_index, cyto_index, diameter):
from ops.cellpose import segment_cellpose, segment_cellpose_rgb
# return segment_cellpose(data[dapi_index], data[cyto_index],
# nuclei_diameter=diameter, cell_diameter=diameter)
rgb = Snake._prepare_cellpose(data, dapi_index, cyto_index)
nuclei, cells = segment_cellpose_rgb(rgb, diameter)
return nuclei, cells
@staticmethod
def _prepare_cellpose(data, dapi_index, cyto_index, logscale=True):
"""Export three-channel RGB image for use with cellpose GUI (e.g., to select
cell diameter). Nuclei are exported to blue (cellpose channel=3), cytoplasm to
green (cellpose channel=2).
Unfortunately the cellpose GUI sometimes has issues loading tif files, so this
exports to PNG, which has limited dynamic range. Cellpose performs internal
scaling based on 10th and 90th percentiles of the input.
"""
from ops.cellpose import image_log_scale
from skimage import img_as_ubyte
dapi = data[dapi_index]
cyto = data[cyto_index]
blank = np.zeros_like(dapi)
if logscale:
cyto = image_log_scale(cyto)
cyto /= cyto.max() # for ubyte conversion
dapi_upper = np.percentile(dapi, 99.5)
dapi = dapi / dapi_upper
dapi[dapi > 1] = 1
red, green, blue = img_as_ubyte(blank), img_as_ubyte(cyto), img_as_ubyte(dapi)
return np.array([red, green, blue]).transpose([1, 2, 0])
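# Editorial sketch of writing the returned RGB out for the cellpose GUI
# (indices and file name are illustrative):
#
#   rgb = Snake._prepare_cellpose(data, dapi_index=0, cyto_index=4)
#   skimage.io.imsave('cellpose_input.png', rgb)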
# IN SITU
@staticmethod
def _transform_log(data, sigma=1, skip_index=None):
"""Apply Laplacian-of-Gaussian filter from scipy.ndimage.
Parameters
----------
data : numpy array
Aligned SBS image data, expected dimensions of (CYCLE, CHANNEL, I, J).
sigma : float, default 1
size of gaussian kernel used in Laplacian-of-Gaussian filter
skip_index : None or int, default None
If an int, skips transforming a channel (e.g., DAPI with `skip_index=0`).
Returns
-------
loged : numpy array
LoG-ed `data`
"""
data = np.array(data)
loged = ops.process.log_ndi(data, sigma=sigma)
if skip_index is not None:
loged[..., skip_index, :, :] = data[..., skip_index, :, :]
return loged
@staticmethod
def _compute_std(data, remove_index=None):
"""Use standard deviation over cycles, followed by mean across channels
to estimate sequencing read locations. If only 1 cycle is present, takes
standard deviation across channels.
Parameters
----------
data : numpy array
LoG-ed SBS image data, expected dimensions of (CYCLE, CHANNEL, I, J).
remove_index : None or int, default None
Index of `data` to remove from subsequent analysis, generally any non-SBS channels (e.g., DAPI)
Returns
-------
consensus : numpy array
Standard deviation score for each pixel, dimensions of (I,J).
"""
if remove_index is not None:
data = remove_channels(data, remove_index)
# for 1-cycle experiments
if len(data.shape)==3:
data = data[:,None,...]
# leading_dims = tuple(range(0, data.ndim - 2))
# consensus = np.std(data, axis=leading_dims)
consensus = np.std(data, axis=0).mean(axis=0)
return consensus
@staticmethod
def _find_peaks(data, width=5, remove_index=None):
"""Find local maxima and label by difference to next-highest neighboring
pixel. Conventionally this is used to estimate SBS read locations by inputting
the standard deviation score as returned by Snake.compute_std().
Parameters
----------
data : numpy array
2D image data
width : int, default 5
Neighborhood size for finding local maxima.
remove_index : None or int, default None
Index of `data` to remove from subsequent analysis, generally any non-SBS channels (e.g., DAPI)
Returns
-------
peaks : numpy array
Local maxima scores, dimensions same as `data`. At a maximum, the value is max - min in the defined
neighborhood, elsewhere zero.
"""
if remove_index is not None:
data = remove_channels(data, remove_index)
if data.ndim == 2:
data = [data]
peaks = [ops.process.find_peaks(x, n=width)
if x.max() > 0 else x
for x in data]
peaks = np.array(peaks).squeeze()
return peaks
@staticmethod
def _max_filter(data, width, remove_index=None):
"""Apply a maximum filter in a window of `width`. Conventionally operates on Laplacian-of-Gaussian
filtered SBS data, dilating sequencing channels to compensate for single-pixel alignment error.
Parameters
----------
data : numpy array
Image data, expected dimensions of (..., I, J) with up to 4 total dimenions.
width : int
Neighborhood size for max filtering
remove_index : None or int, default None
Index of `data` to remove from subsequent analysis, generally any non-SBS channels (e.g., DAPI)
Returns
-------
maxed : numpy array
Maxed `data` with preserved dimensions.
"""
import scipy.ndimage.filters
if data.ndim == 2:
data = data[None, None]
if data.ndim == 3:
data = data[None]
if remove_index is not None:
data = remove_channels(data, remove_index)
maxed = scipy.ndimage.filters.maximum_filter(data, size=(1, 1, width, width))
return maxed
@staticmethod
def _extract_bases(maxed, peaks, cells, threshold_peaks, wildcards, bases='GTAC'):
"""Find the signal intensity from `maxed` at each point in `peaks` above
`threshold_peaks`. Output is labeled by `wildcards` (e.g., well and tile) and
label at that position in integer mask `cells`.
Parameters
----------
maxed : numpy array
Base intensity at each point, output of Snake.max_filter(), expected dimenions
of (CYCLE, CHANNEL, I, J).
peaks : numpy array
Peaks/local maxima score for each pixel, output of Snake.find_peaks().
cells : numpy array, dtype uint16
Labeled segmentation mask of cell boundaries for labeling reads.
threshold_reads : float
Threshold for `peaks` for identifying candidate sequencing reads.
wildcards : dict
Metadata to include in output table, e.g., well, tile, etc. In Snakemake, use wildcards
object.
bases : string, default 'GTAC'
Order of bases corresponding to the order of acquired SBS channels in `maxed`.
Returns
-------
df_bases : pandas DataFrame
Table of all candidate sequencing reads with intensity of each base for every cycle,
(I,J) position of read, and metadata from `wildcards`.
"""
if maxed.ndim == 3:
maxed = maxed[None]
if len(bases) != maxed.shape[1]:
error = 'Sequencing {0} bases {1} but maxed data had shape {2}'
raise ValueError(error.format(len(bases), bases, maxed.shape))
# "cycle 0" is reserved for phenotyping
cycles = list(range(1, maxed.shape[0] + 1))
bases = list(bases)
values, labels, positions = (
ops.in_situ.extract_base_intensity(maxed, peaks, cells, threshold_peaks))
df_bases = ops.in_situ.format_bases(values, labels, positions, cycles, bases)
for k,v in sorted(wildcards.items()):
df_bases[k] = v
return df_bases
@staticmethod
def _call_reads(df_bases, peaks=None, correction_only_in_cells=True):
"""Call reads by compensating for channel cross-talk and calling the base
with highest corrected intensity for each cycle. This "median correction"
is performed independently for each tile.
Parameters
----------
df_bases : pandas DataFrame
Table of base intensity for all candidate reads, output of Snake.extract_bases()
peaks : None or numpy array, default None
Peaks/local maxima score for each pixel (output of Snake.find_peaks()) to be included
in the df_reads table for downstream QC or other analysis. If None, does not include
peaks scores in returned df_reads table.
correction_only_in_cells : boolean, default True
If true, restricts median correction/compensation step to account only for reads that
are within a cell, as defined by the cell segmentation mask passed into
Snake.extract_bases(). Often identified spots outside of cells are not true sequencing
reads.
Returns
-------
df_reads : pandas DataFrame
Table of all reads with base calls resulting from SBS compensation and related metadata.
"""
if df_bases is None:
return
if correction_only_in_cells:
if len(df_bases.query('cell > 0')) == 0:
return
cycles = len(set(df_bases['cycle']))
channels = len(set(df_bases['channel']))
df_reads = (df_bases
.pipe(ops.in_situ.clean_up_bases)
.pipe(ops.in_situ.do_median_call, cycles, channels=channels,
correction_only_in_cells=correction_only_in_cells)
)
if peaks is not None:
i, j = df_reads[['i', 'j']].values.T
df_reads['peak'] = peaks[i, j]
return df_reads
@staticmethod
def _call_cells(df_reads, df_pool=None, q_min=0):
"""Call the most-common barcode reads for each cell. If df_pool is supplied,
prioritizes reads mapping to expected sequences.
Parameters
----------
df_reads : pandas DataFrame
Table of all called reads, output of Snake.call_reads()
df_pool : None or pandas DataFrame, default None
Table of designed barcode sequences for mapping reads to expected barcodes. Expected
columns are 'sgRNA', 'gene_symbol', and 'gene_id'.
q_min : float in the range [0,1)
Minimum quality score for read inclusion in the cell calling process.
Returns
-------
df_cells : pandas DataFrame
Table of all cells containing sequencing reads, listing top two most common barcode
sequences. If df_pool is supplied, prioritizes reads mapping to expected sequences.
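Examples
--------
Hedged sketch; `df_reads` and `df_pool` are assumed to follow the column
layout described above, and the quality cutoff is illustrative::

    df_cells = Snake._call_cells(df_reads, df_pool=df_pool, q_min=0.1)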
"""
if df_reads is None:
return
if df_pool is None:
return (df_reads
.query('Q_min >= @q_min')
.pipe(ops.in_situ.call_cells))
else:
prefix_length = len(df_reads.iloc[0].barcode) # get the number of completed SBS cycles
df_pool[PREFIX] = df_pool.apply(lambda x: x.sgRNA[:prefix_length],axis=1)
return (df_reads
.query('Q_min >= @q_min')
.pipe(ops.in_situ.call_cells_mapping,df_pool))
# PHENOTYPE FEATURE EXTRACTION
@staticmethod
def _annotate_SBS(log, df_reads):
# convert reads to a stack of integer-encoded bases
cycles, channels, height, width = log.shape
base_labels = ops.annotate.annotate_bases(df_reads, width=3, shape=(height, width))
annotated = np.zeros((cycles, channels + 1, height, width),
dtype=np.uint16)
annotated[:, :channels] = log
annotated[:, channels] = base_labels
return annotated
@staticmethod
def _annotate_segment(data, nuclei, cells):
"""Show outlines of nuclei and cells on sequencing data.
"""
from ops.annotate import outline_mask
if data.ndim == 3:
data = data[None]
cycles, channels, height, width = data.shape
annotated = np.zeros((cycles, channels + 1, height, width),
dtype=np.uint16)
mask = ( (outline_mask(nuclei, direction='inner') > 0)
+ (outline_mask(cells, direction='inner') > 0))
annotated[:, :channels] = data
annotated[:, channels] = mask
return np.squeeze(annotated)
@staticmethod
def _annotate_SBS_extra(log, peaks, df_reads, barcode_table, sbs_cycles,
shape=(1024, 1024)):
barcode_to_prefix = lambda x: ''.join(x[c - 1] for c in sbs_cycles)
barcodes = [barcode_to_prefix(x) for x in barcode_table['barcode']]
df_reads['mapped'] = df_reads['barcode'].isin(barcodes)
# convert reads to a stack of integer-encoded bases
plus = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
xcross = [[1, 0, 1],
[0, 1, 0],
[1, 0, 1]]
notch = [[1, 1, 1],
[1, 1, 1],
[1, 1, 0]]
notch2 = [[1, 1, 1],
[1, 1, 1],
[0, 1, 0]]
top_right = [[0, 0, 0],
[0, 0, 0],
[1, 0, 0]]
f = ops.annotate.annotate_bases
base_labels = f(df_reads.query('mapped'), selem=notch)
base_labels += f(df_reads.query('~mapped'), selem=plus)
# Q_min converted to 30 point integer scale
Q_min = ops.annotate.annotate_points(df_reads, 'Q_min', selem=top_right)
Q_30 = (Q_min * 30).astype(int)
# a "donut" around each peak indicating the peak intensity
peaks_donut = skimage.morphology.dilation(peaks, selem=np.ones((3, 3)))
peaks_donut[peaks > 0] = 0
# nibble some more
peaks_donut[base_labels.sum(axis=0) > 0] = 0
peaks_donut[Q_30 > 0] = 0
cycles, channels, height, width = log.shape
annotated = np.zeros((cycles,
channels + 2,
# channels + 3,
height, width), dtype=np.uint16)
annotated[:, :channels] = log
annotated[:, channels] = base_labels
annotated[:, channels + 1] = peaks_donut
# annotated[:, channels + 2] = Q_30
return annotated[:, 1:]
@staticmethod
def _extract_features(data, labels, wildcards, features=None):
"""Extracts features in dictionary and combines with generic region
features.
Parameters
----------
data : numpy array
Image data of expected dimensions (CHANNEL, I, J)
labels : numpy array
Labeled segmentation mask defining objects to extract features from, dimensions matching
trailing (I,J) dimensions of `data`.
wildcards : dict
Metadata to include in output table, e.g., well, tile, etc. In Snakemake, use wildcards
object.
features : None or dict of 'key':function, default None
Features to extract from `data` within `labels` and their defining function calls on a
skimage regionprops object. E.g., features={'max_intensity':lambda r: r.intensity_image[r.image].max()}.
Many pre-defined feature functions and dictionaries are available in the features.py module.
Returns
-------
df : pandas DataFrame
Table of all labeled regions in `labels` and their corresponding `features` measurements from
`data`.
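Examples
--------
Hedged sketch of a custom feature definition (the feature name is
illustrative; the lambda mirrors the one quoted above)::

    features = {'max_intensity': lambda r: r.intensity_image[r.image].max()}
    df = Snake._extract_features(data, labels, {'well': 'A1', 'tile': 5}, features)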
"""
from ops.process import feature_table
from ops.features import features_basic
features = features.copy() if features else dict()
features.update(features_basic)
df = feature_table(data, labels, features)
for k,v in sorted(wildcards.items()):
df[k] = v
return df
@staticmethod
def _extract_named_features(data, labels, feature_names, wildcards):
"""Extracts features in dictionary and combines with generic region
features.
"""
features = ops.features.make_feature_dict(feature_names)
return Snake._extract_features(data, labels, wildcards, features)
@staticmethod
def _extract_named_cell_nucleus_features(
data, cells, nuclei, cell_features, nucleus_features, wildcards,
autoscale=True, join='inner'):
"""Extract named features for cell and nucleus labels and join the results.
:param autoscale: scale the cell and nuclei mask dimensions to the data
"""
if autoscale:
cells = ops.utils.match_size(cells, data[0])
nuclei = ops.utils.match_size(nuclei, data[0])
assert 'label' in cell_features and 'label' in nucleus_features
df_phenotype = pd.concat([
Snake._extract_named_features(data, cells, cell_features, {})
.set_index('label').rename(columns=lambda x: x + '_cell'),
Snake._extract_named_features(data, nuclei, nucleus_features, {})
.set_index('label').rename(columns=lambda x: x + '_nucleus'),
], join=join, axis=1).reset_index().rename(columns={'label': 'cell'})
for k,v in sorted(wildcards.items()):
df_phenotype[k] = v
return df_phenotype
@staticmethod
def _extract_phenotype_FR(data_phenotype, nuclei, wildcards):
"""Features for frameshift reporter phenotyped in DAPI, HA channels.
"""
from ops.features import features_frameshift
return (Snake._extract_features(data_phenotype, nuclei, wildcards, features_frameshift)
.rename(columns={'label': 'cell'}))
@staticmethod
def _extract_phenotype_FR_myc(data_phenotype, nuclei, wildcards):
"""Features for frameshift reporter phenotyped in DAPI, HA, myc channels.
"""
from ops.features import features_frameshift_myc
return (Snake._extract_features(data_phenotype, nuclei, wildcards, features_frameshift_myc)
.rename(columns={'label': 'cell'}))
@staticmethod
def _extract_phenotype_translocation(data_phenotype, nuclei, cells, wildcards):
if (nuclei.max() == 0) or (cells.max() == 0):
return
import ops.features
features_n = ops.features.features_translocation_nuclear
features_c = ops.features.features_translocation_cell
features_n = {k + '_nuclear': v for k,v in features_n.items()}
features_c = {k + '_cell': v for k,v in features_c.items()}
df_n = (Snake._extract_features(data_phenotype, nuclei, wildcards, features_n)
.rename(columns={'area': 'area_nuclear'}))
df_c = (Snake._extract_features(data_phenotype, cells, wildcards, features_c)
.drop(['i', 'j'], axis=1).rename(columns={'area': 'area_cell'}))
# inner join discards nuclei without corresponding cells
df = (pd.concat([df_n.set_index('label'), df_c.set_index('label')], axis=1, join='inner')
.reset_index())
return (df
.rename(columns={'label': 'cell'}))
@staticmethod
def _extract_phenotype_translocation_live(data, nuclei, wildcards):
def _extract_phenotype_translocation_simple(data, nuclei, wildcards):
import ops.features
features = ops.features.features_translocation_nuclear_simple
return (Snake._extract_features(data, nuclei, wildcards, features)
.rename(columns={'label': 'cell'}))
extract = _extract_phenotype_translocation_simple
arr = []
for i, (frame, nuclei_frame) in enumerate(zip(data, nuclei)):
arr += [extract(frame, nuclei_frame, wildcards).assign(frame=i)]
return pd.concat(arr)
@staticmethod
def _extract_phenotype_translocation_ring(data_phenotype, nuclei, wildcards, width=3):
selem = np.ones((width, width))
perimeter = skimage.morphology.dilation(nuclei, selem)
perimeter[nuclei > 0] = 0
inside = skimage.morphology.erosion(nuclei, selem)
inner_ring = nuclei.copy()
inner_ring[inside > 0] = 0
return (Snake._extract_phenotype_translocation(data_phenotype, inner_ring, perimeter, wildcards)
.rename(columns={'label': 'cell'}))
@staticmethod
def _extract_phenotype_minimal(data_phenotype, nuclei, wildcards):
return (Snake._extract_features(data_phenotype, nuclei, wildcards, dict())
.rename(columns={'label': 'cell'}))
@staticmethod
def _extract_phenotype_geom(labels, wildcards):
from ops.features import features_geom
return Snake._extract_features(labels, labels, wildcards, features_geom)
@staticmethod
def _analyze_single(data, alignment_ref, cells, peaks,
threshold_peaks, wildcards, channel_ix=1):
if alignment_ref.ndim == 3:
alignment_ref = alignment_ref[0]
data = np.array([[alignment_ref, alignment_ref],
data[[0, channel_ix]]])
aligned = ops.process.Align.align_between_cycles(data, 0, window=2)
loged = Snake._transform_log(aligned[1, 1])
maxed = Snake._max_filter(loged, width=3)
return (Snake._extract_bases(maxed, peaks, cells, bases=['-'],
threshold_peaks=threshold_peaks, wildcards=wildcards))
@staticmethod
def _track_live_nuclei(nuclei, tolerance_per_frame=5):
# if there are no nuclei, we will have problems
count = nuclei.max(axis=(-2, -1))
if (count == 0).any():
error = 'no nuclei detected in frames: {}'
print(error.format(np.where(count == 0)))
return np.zeros_like(nuclei)
import ops.timelapse
# nuclei coordinates
arr = []
for i, nuclei_frame in enumerate(nuclei):
extract = Snake._extract_phenotype_minimal
arr += [extract(nuclei_frame, nuclei_frame, {'frame': i})]
df_nuclei = pd.concat(arr)
# track nuclei
motion_threshold = len(nuclei) * tolerance_per_frame
G = (df_nuclei
.rename(columns={'cell': 'label'})
.pipe(ops.timelapse.initialize_graph)
)
cost, path = ops.timelapse.analyze_graph(G)
relabel = ops.timelapse.filter_paths(cost, path,
threshold=motion_threshold)
nuclei_tracked = ops.timelapse.relabel_nuclei(nuclei, relabel)
return nuclei_tracked
# SNAKEMAKE
@staticmethod
def _merge_sbs_phenotype(sbs_tables, phenotype_tables, barcode_table, sbs_cycles,
join='outer'):
"""Combine sequencing and phenotype tables with one row per cell, using key
(well, tile, cell). The cell column labels must be the same in both tables (e.g., both
tables generated from the same cell or nuclei segmentation). The default method of joining
(outer) preserves cells present in only the sequencing table or phenotype table (with null
values for missing data).
The barcode table is then joined using its `barcode` column to the most abundant
(`cell_barcode_0`) and second-most abundant (`cell_barcode_1`) barcodes for each cell.
The substring (prefix) of `barcode` used for joining is determined by the `sbs_cycles`
index. Duplicate prefixes are marked in the `duplicate_prefix_0` and `duplicate_prefix_1`
columns (e.g., if insufficient sequencing is available to disambiguate two barcodes).
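Examples
--------
Hedged sketch; `df_sbs`, `df_phenotype`, and `df_barcodes` are assumed to
follow the structure described above::

    df_merged = Snake._merge_sbs_phenotype(
        [df_sbs], [df_phenotype], df_barcodes,
        sbs_cycles=[1, 2, 3, 4, 5], join='outer')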
"""
if isinstance(sbs_tables, pd.DataFrame):
sbs_tables = [sbs_tables]
if isinstance(phenotype_tables, pd.DataFrame):
phenotype_tables = [phenotype_tables]
cols = ['well', 'tile', 'cell']
df_sbs = pd.concat(sbs_tables).set_index(cols)
df_phenotype = pd.concat(phenotype_tables).set_index(cols)
df_combined = pd.concat([df_sbs, df_phenotype], join=join, axis=1).reset_index()
barcode_to_prefix = lambda x: ''.join(x[c - 1] for c in sbs_cycles)
df_barcodes = (barcode_table
.assign(prefix=lambda x: x['barcode'].apply(barcode_to_prefix))
.assign(duplicate_prefix=lambda x: x['prefix'].duplicated(keep=False))
)
if 'barcode' in df_barcodes and 'sgRNA' in df_barcodes:
df_barcodes = df_barcodes.drop('barcode', axis=1)
barcode_info = df_barcodes.set_index('prefix')
return (df_combined
.join(barcode_info, on='cell_barcode_0')
.join(barcode_info.rename(columns=lambda x: x + '_1'),
on='cell_barcode_1')
)
@staticmethod
def _summarize_paramsearch_segmentation(data,segmentations):
summary = np.stack([data[0],np.median(data[1:], axis=0)]+segmentations)
return summary
@staticmethod
def _summarize_paramsearch_reads(barcode_table,reads_tables,cells,sbs_cycles,figure_output):
import matplotlib
import seaborn as sns
matplotlib.use('Agg')
barcode_to_prefix = lambda x: ''.join(x[c - 1] for c in sbs_cycles)
barcodes = (barcode_table.assign(prefix=lambda x:
x['barcode'].apply(barcode_to_prefix))
['prefix']
.pipe(set)
)
n_cells = [(len(np.unique(labels))-1) for labels in cells]
df_reads = pd.concat(reads_tables)
df_reads = pd.concat([df.assign(total_cells=cell_count)
for cell_count,(_,df) in zip(n_cells,df_reads.groupby(['well','tile'],sort=False))]
)
df_reads['mapped'] = df_reads['barcode'].isin(barcodes)
def summarize(df):
return pd.Series({'mapped_reads':df['mapped'].value_counts()[True],
'mapped_reads_within_cells':df.query('cell!=0')['mapped'].value_counts()[True],
'mapping_rate':df['mapped'].value_counts(normalize=True)[True],
'mapping_rate_within_cells':df.query('cell!=0')['mapped'].value_counts(normalize=True)[True],
'average_reads_per_cell':df.query('cell!=0').pipe(len)/df.iloc[0]['total_cells'],
'average_mapped_reads_per_cell':df.query('(cell!=0)&(mapped)').pipe(len)/df.iloc[0]['total_cells'],
'cells_with_reads':df.query('(cell!=0)')['cell'].nunique(),
'cells_with_mapped_reads':df.query('(cell!=0)&(mapped)')['cell'].nunique()})
df_summary = df_reads.groupby(['well','tile','THRESHOLD_READS']).apply(summarize).reset_index()
# plot
fig, axes = matplotlib.pyplot.subplots(2,1,figsize=(7,10),sharex=True)
axes_right = [ax.twinx() for ax in axes]
sns.lineplot(data=df_summary,x='THRESHOLD_READS',y='mapping_rate',color='steelblue',ax=axes[0])
sns.lineplot(data=df_summary,x='THRESHOLD_READS',y='mapped_reads',color='coral',ax=axes_right[0])
sns.lineplot(data=df_summary,x='THRESHOLD_READS',y='mapping_rate_within_cells',color='steelblue',ax=axes[0])
sns.lineplot(data=df_summary,x='THRESHOLD_READS',y='mapped_reads_within_cells',color='coral',ax=axes_right[0])
axes[0].set_ylabel('Mapping rate',fontsize=16)
axes_right[0].set_ylabel('Number of\nmapped reads',fontsize=16)
axes[0].set_title('Read mapping',fontsize=18)
sns.lineplot(data=df_summary,x='THRESHOLD_READS',y='average_reads_per_cell',color='steelblue',ax=axes[1])
sns.lineplot(data=df_summary,x='THRESHOLD_READS',y='average_mapped_reads_per_cell',color='steelblue',ax=axes[1])
sns.lineplot(data=df_summary,x='THRESHOLD_READS',y='cells_with_reads',color='coral',ax=axes_right[1])
sns.lineplot(data=df_summary,x='THRESHOLD_READS',y='cells_with_mapped_reads',color='coral',ax=axes_right[1])
axes[1].set_ylabel('Mean reads per cell',fontsize=16)
axes_right[1].set_ylabel('Number of cells',fontsize=16)
axes[1].set_title('Read mapping per cell',fontsize=18)
[ax.get_lines()[1].set_linestyle('--') for ax in list(axes)+list(axes_right)]
axes[0].legend(handles=axes[0].get_lines()+axes_right[0].get_lines(),
labels=['mapping rate,\nall reads','mapping rate,\nwithin cells','all mapped reads','mapped reads\nwithin cells'],loc=7)
axes[1].legend(handles=axes[1].get_lines()+axes_right[1].get_lines(),
labels=['mean\nreads per cell','mean mapped\nreads per cell','cells with reads','cells with\nmapped reads'],loc=1)
axes[1].set_xlabel('THRESHOLD_READS',fontsize=16)
axes[1].set_xticks(df_summary['THRESHOLD_READS'].unique()[::2])
[ax.tick_params(axis='y',colors='steelblue') for ax in axes]
[ax.tick_params(axis='y',colors='coral') for ax in axes_right]
matplotlib.pyplot.savefig(figure_output,dpi=300,bbox_inches='tight')
return df_summary
@staticmethod
def add_method(class_, name, f):
f = staticmethod(f)
exec('%s.%s = f' % (class_, name))
@staticmethod
def load_methods():
methods = inspect.getmembers(Snake)
for name, f in methods:
if name not in ('__doc__', '__module__') and name.startswith('_'):
Snake.add_method('Snake', name[1:], Snake.call_from_snakemake(f))
@staticmethod
def call_from_snakemake(f):
"""Turn a function that acts on a mix of image data, table data and other
arguments and may return image or table data into a function that acts on
filenames for image and table data, plus other arguments.
If output filename is provided, saves return value of function.
Supported input and output filetypes are .pkl, .csv, and .tif.
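Example (hedged sketch; file names are illustrative). After `Snake.load_methods()`
runs, the wrapped version of `_max_filter` is exposed as `Snake.max_filter`::

    Snake.max_filter(data='aligned.tif', width=3, output=['maxed.tif'])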
"""
def g(**kwargs):
# split keyword arguments into input (needed for function)
# and output (needed to save result)
input_kwargs, output_kwargs = restrict_kwargs(kwargs, f)
# load arguments provided as filenames
input_kwargs = {k: load_arg(v) for k,v in input_kwargs.items()}
results = f(**input_kwargs)
if 'output' in output_kwargs:
outputs = output_kwargs['output']
if len(outputs) == 1:
results = [results]
if len(outputs) != len(results):
error = '{0} output filenames provided for {1} results'
raise ValueError(error.format(len(outputs), len(results)))
for output, result in zip(outputs, results):
save_output(output, result, **output_kwargs)
return functools.update_wrapper(g, f)
Snake.load_methods()
def remove_channels(data, remove_index):
"""Remove channel or list of channels from array of shape (..., CHANNELS, I, J).
"""
channels_mask = np.ones(data.shape[-3], dtype=bool)
channels_mask[remove_index] = False
data = data[..., channels_mask, :, :]
return data
# IO
def load_arg(x):
"""Try loading data from `x` if it is a filename or list of filenames.
Otherwise just return `x`.
"""
one_file = load_file
many_files = lambda x: [load_file(f) for f in x]
for f in one_file, many_files:
try:
return f(x)
except (pd.errors.EmptyDataError, TypeError, IOError) as e:
if isinstance(e, (TypeError, IOError)):
# wasn't a file, probably a string arg
pass
elif isinstance(e, pd.errors.EmptyDataError):
# failed to load file
return None
pass
else:
return x
def save_output(filename, data, **kwargs):
"""Saves `data` to `filename`. Guesses the save function based on the
file extension. Saving as .tif passes on kwargs (luts, ...) from input.
"""
filename = str(filename)
if data is None:
# need to save dummy output to satisfy Snakemake
with open(filename, 'w') as fh:
pass
return
if filename.endswith('.tif'):
return save_tif(filename, data, **kwargs)
elif filename.endswith('.pkl'):
return save_pkl(filename, data)
elif filename.endswith('.csv'):
return save_csv(filename, data)
elif filename.endswith('.png'):
return save_png(filename, data)
else:
raise ValueError('not a recognized filetype: ' + filename)
def load_csv(filename):
df = pd.read_csv(filename)
if len(df) == 0:
return None
return df
def load_pkl(filename):
df = pd.read_pickle(filename)
if len(df) == 0:
    return None
return df
def load_tif(filename):
return ops.io.read_stack(filename)
def save_csv(filename, df):
df.to_csv(filename, index=None)
def save_pkl(filename, df):
df.to_pickle(filename)
def save_tif(filename, data_, **kwargs):
kwargs, _ = restrict_kwargs(kwargs, ops.io.save_stack)
# `data` can be an argument name for both the Snake method and `save_stack`
# overwrite with `data_`
kwargs['data'] = data_
ops.io.save_stack(filename, **kwargs)
def save_png(filename, data_):
skimage.io.imsave(filename, data_)
def restrict_kwargs(kwargs, f):
"""Partition `kwargs` into two dictionaries based on overlap with default
arguments of function `f`.
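Example (hedged sketch; `arr` stands for any image array)::

    keep, discard = restrict_kwargs(
        {'data': arr, 'width': 3, 'output': ['out.tif']}, Snake._max_filter)
    # keep    -> {'data': arr, 'width': 3}
    # discard -> {'output': ['out.tif']}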
"""
f_kwargs = set(get_kwarg_defaults(f).keys()) | set(get_arg_names(f))
keep, discard = {}, {}
for key in kwargs.keys():
if key in f_kwargs:
keep[key] = kwargs[key]
else:
discard[key] = kwargs[key]
return keep, discard
def load_file(filename):
"""Attempt to load file, raising an error if the file is not found or
the file extension is not recognized.
"""
if not isinstance(filename, str):
raise TypeError
if not os.path.isfile(filename):
raise IOError(2, 'Not a file: {0}'.format(filename))
if filename.endswith('.tif'):
return load_tif(filename)
elif filename.endswith('.pkl'):
return load_pkl(filename)
elif filename.endswith('.csv'):
return load_csv(filename)
else:
raise IOError(filename)
def get_arg_names(f):
"""List of regular and keyword argument names from function definition.
"""
argspec = inspect.getfullargspec(f)
if argspec.defaults is None:
return argspec.args
n = len(argspec.defaults)
return argspec.args[:-n]
def get_kwarg_defaults(f):
"""Get the kwarg defaults as a dictionary.
"""
argspec = inspect.getfullargspec(f)
if argspec.defaults is None:
defaults = {}
else:
defaults = {k: v for k,v in zip(argspec.args[::-1], argspec.defaults[::-1])}
return defaults
def load_well_tile_list(filename, include='all'):
"""Read and format a table of acquired wells and tiles for snakemake.
Parameters
----------
filename : str, path object, or file-like object
File path to table of acquired wells and tiles.
include : str or list of lists, default "all"
If "all", keeps all wells and tiles defined in the supplied table. If any
other str, this is used as a query of the well-tile table to restrict
which sites are analyzed. If a list of [well,tile] pair lists, restricts
analysis to this defined set of fields-of-view.
Returns
-------
wells : np.ndarray
Array of included wells, should be zipped with `tiles`.
tiles : np.ndarray
Array of included tiles, should be zipped with `wells`.
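Examples
--------
Hedged sketch; the file name and query string are illustrative, and the table
is assumed to have 'well' and 'tile' columns::

    WELLS, TILES = load_well_tile_list('input/wells_tiles.csv',
                                       include='well == "A1"')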
"""
if filename.endswith('pkl'):
df_wells_tiles = pd.read_pickle(filename)
elif filename.endswith('csv'):
df_wells_tiles = pd.read_csv(filename)
# NOTE: the original tail of this function was truncated; the lines below are a
# minimal, assumed completion based on the docstring (assumes the table has
# 'well' and 'tile' columns).
if include == 'all':
    df_keep = df_wells_tiles
elif isinstance(include, str):
    df_keep = df_wells_tiles.query(include)
else:
    keep_pairs = {tuple(pair) for pair in include}
    df_keep = df_wells_tiles[
        df_wells_tiles[['well', 'tile']].apply(tuple, axis=1).isin(keep_pairs)]
return df_keep['well'].values, df_keep['tile'].values
"""
sklearn columntransformer
"""
import pandas as pd
import numpy as np
from shapash.utils.category_encoder_backend import inv_transform_ordinal
from shapash.utils.category_encoder_backend import inv_transform_ce
from shapash.utils.category_encoder_backend import supported_category_encoder
from shapash.utils.category_encoder_backend import dummies_category_encoder
from shapash.utils.category_encoder_backend import category_encoder_binary
from shapash.utils.category_encoder_backend import transform_ordinal, get_col_mapping_ce
from shapash.utils.model_synoptic import simple_tree_model_sklearn, catboost_model,\
linear_model, svm_model, xgboost_model, lightgbm_model, dict_model_feature
from shapash.utils.model import extract_features_model
columntransformer = "<class 'sklearn.compose._column_transformer.ColumnTransformer'>"
sklearn_onehot = "<class 'sklearn.preprocessing._encoders.OneHotEncoder'>"
sklearn_ordinal = "<class 'sklearn.preprocessing._encoders.OrdinalEncoder'>"
sklearn_standardscaler = "<class 'sklearn.preprocessing._data.StandardScaler'>"
sklearn_quantiletransformer = "<class 'sklearn.preprocessing._data.QuantileTransformer'>"
sklearn_powertransformer = "<class 'sklearn.preprocessing._data.PowerTransformer'>"
sklearn_model = linear_model + svm_model + simple_tree_model_sklearn
other_model = xgboost_model + catboost_model + lightgbm_model
dummies_sklearn = (sklearn_onehot)
no_dummies_sklearn = (sklearn_ordinal,
sklearn_standardscaler,
sklearn_quantiletransformer,
sklearn_powertransformer)
supported_sklearn = (sklearn_onehot,
sklearn_ordinal,
sklearn_standardscaler,
sklearn_quantiletransformer,
sklearn_powertransformer)
def inv_transform_ct(x_in, encoding):
"""
Inverse transform when using a ColumnsTransformer.
As ColumnsTransformer output hstack the result of transformers, if the TOP-preprocessed data are re-ordered
after the ColumnTransformer the inverse transform must return false result.
We successively inverse the transformers with columns position. That's why inverse colnames
are prefixed by the transformers names.
Parameters
----------
x_in : pandas.DataFrame
Prediction set.
encoding : list
The list must contain a single ColumnsTransformer and an optional list of dict.
Returns
-------
pandas.Dataframe
The reversed transformation for the given list of encoding.
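Examples
--------
Hedged sketch; `ct` is an already fitted sklearn ColumnTransformer and
`x_encoded` is the corresponding preprocessed pandas DataFrame::

    x_original = inv_transform_ct(x_encoded, ct)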
"""
if str(type(encoding)) == columntransformer:
# We use the inverse transform from the encoding method, based on column positions
init = 0
rst = pd.DataFrame()
for enc in encoding.transformers_:
name_encoding = enc[0]
ct_encoding = enc[1]
col_encoding = enc[2]
# For Scikit encoding we use the associated inverse transform method
if str(type(ct_encoding)) in supported_sklearn:
frame, init = inv_transform_sklearn_in_ct(x_in,
init,
name_encoding,
col_encoding,
ct_encoding)
# For category encoding we use the mapping
elif str(type(ct_encoding)) in supported_category_encoder:
frame, init = inv_transform_ce_in_ct(x_in,
init,
name_encoding,
col_encoding,
ct_encoding)
# columns not encoded
elif name_encoding == 'remainder':
if ct_encoding == 'passthrough':
nb_col = len(col_encoding)
frame = x_in.iloc[:, init:init + nb_col]
else:
frame = pd.DataFrame()
else:
raise Exception(f'{ct_encoding} is not supported yet.')
rst = pd.concat([rst, frame], axis=1)
elif str(type(encoding)) == "<class 'list'>":
rst = inv_transform_ordinal(x_in, encoding)
else:
raise Exception(f"{encoding.__class__.__name__} not supported, no inverse done.")
return rst
def inv_transform_ce_in_ct(x_in, init, name_encoding, col_encoding, ct_encoding):
"""
Inverse transform when using category_encoder in ColumnsTransformer preprocessing.
Parameters
----------
x_in : pandas.DataFrame
Data processed.
init : int
Column index giving the first column to look at.
name_encoding : str
Name of the encoding given by the user.
col_encoding : list
Names of the processed features.
ct_encoding : category_encoder
Type of encoding.
Returns
-------
frame : pandas.Dataframe
The reversed transformation for the given list of encoding.
init : int
Index of the last column used to make the transformation.
"""
colname_output = [name_encoding + '_' + val for val in col_encoding]
colname_input = ct_encoding.get_feature_names()
nb_col = len(colname_input)
x_to_inverse = x_in.iloc[:, init:init + nb_col].copy()
x_to_inverse.columns = colname_input
frame = inv_transform_ce(x_to_inverse, ct_encoding)
frame.columns = colname_output
init += nb_col
return frame, init
def inv_transform_sklearn_in_ct(x_in, init, name_encoding, col_encoding, ct_encoding):
"""
Inverse transform when using sklearn in ColumnsTransformer preprocessing.
Parameters
----------
x_in : pandas.DataFrame
Data processed.
init : int
Column index giving the first column to look at.
name_encoding : str
Name of the encoding given by the user.
col_encoding : list
Names of the processed features.
ct_encoding : sklearn, category_encoder
Type of encoding.
Returns
-------
frame : pandas.Dataframe
The reversed transformation for the given list of encoding.
init : int
Index of the last column used to make the transformation.
"""
colname_output = [name_encoding + '_' + val for val in col_encoding]
if str(type(ct_encoding)) in dummies_sklearn:
colname_input = ct_encoding.get_feature_names(col_encoding)
nb_col = len(colname_input)
else:
nb_col = len(colname_output)
x_inverse = ct_encoding.inverse_transform(x_in.iloc[:, init:init + nb_col])
frame = | pd.DataFrame(x_inverse, columns=colname_output, index=x_in.index) | pandas.DataFrame |
"""Code to provide tex-tables for models of choice.
``produce_reg_df(model, name, data)`` returns a dataframe of regression results
and takes three arguments:
model -- A string of the regression to be run, e.g.:
'expectation ~ covariate_a + covariate_b'
name -- A string to label the model
data -- The dataframe to work with
``tex_models(model_list, filename)`` writes a texfile for a set of models and
takes two arguments:
model_list -- A list of dataframes as returned by the function
``produce_reg_df()``
filename -- The name of the tex-file to write, e.g., 'test.tex'
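Example (hedged sketch; the model strings, labels, and the `panel` dataframe
are illustrative):
    m1 = produce_reg_df('expectation ~ covariate_a + covariate_b',
                        'OLS baseline', panel)
    m2 = produce_reg_df('expectation ~ covariate_a', 'Probit', panel,
                        reg_type='probit')
    tex_models([m1, m2], 'table_models.tex')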
"""
import pandas as pd
import statsmodels.api as sm
import numpy as np
import re
from statsmodels.iolib.summary2 import summary_params
from patsy import dmatrices
from scipy import stats
def produce_reg_df(model, model_name, panel, reg_type='ols'):
y, x = dmatrices(model, panel)
if reg_type == 'ols':
results = sm.OLS(y, x).fit()
estimates = summary_params(results)[['Coef.', 'Std.Err.', 'P>|t|']]
'''
White’s (1980) heteroskedasticity robust standard errors. Defined as
sqrt(diag(X.T X)^(-1)X.T diag(e_i^(2)) X(X.T X)^(-1) where e_i = resid[i]
HC0_se is a property. It is not evaluated until it is called. When it is
called the RegressionResults instance will then have another attribute
cov_HC0, which is the full heteroskedasticity consistent covariance matrix
and also het_scale, which is in this case just resid**2. HCCM matrices are
only appropriate for OLS.
Note: Delete the following two lines for 'regular' standard errors.
'''
estimates['Std.Err.'] = results.HC0_se
estimates['P>|t|'] = stats.t.sf(
np.abs(estimates['Coef.'] / estimates['Std.Err.']), results.nobs - 1) * 2
elif reg_type == 'probit':
model = sm.Probit(y, x)
results = model.fit()
margeffs = results.get_margeff()
estimates = pd.DataFrame(
[margeffs.margeff, margeffs.margeff_se, margeffs.pvalues],
index=['Coef.', 'Std.Err.', 'P>|t|'],
columns=model.exog_names[1:]).T
estimates = estimates.apply(
lambda x: ['{0:0.3f}'.format(i) for i in x])
estimates['Std.Err.'] = estimates['Std.Err.'].apply(
lambda x: '(' + str(x) + ')')
for i in range(len(estimates)):
estimates['Coef.'].iloc[i] = str(estimates['Coef.'].iloc[i]) + (
(float(estimates['P>|t|'].iloc[i]) <= 0.01) * '_3stars' +
(0.01 < float(estimates['P>|t|'].iloc[i]) <= 0.05) * '_2stars' +
(0.05 < float(estimates['P>|t|'].iloc[i]) <= 0.10) * '_1star' +
(0.1 < float(estimates['P>|t|'].iloc[i])) * ''
)
estimates['P>|t|'] = estimates['P>|t|'].apply(lambda x: '')
# Instead of inserting lines, just replace pvalues by linespace.
estimates = estimates.rename(columns={
'P>|t|': 'addlinespace'}
)
stacked_estimates = pd.DataFrame(
estimates.stack(), columns=[model_name])
if reg_type == 'ols':
stacked_model_stats = pd.DataFrame(
[results.nobs, results.rsquared_adj],
index=['Observations', 'R2'],
columns=[model_name])
elif reg_type == 'probit':
stacked_model_stats = pd.DataFrame(
[results.nobs, results.prsquared],
index=['Observations', 'R2'],
columns=[model_name])
stacked_model = stacked_estimates.append(stacked_model_stats)
return stacked_model
def tex_models(model_list, filename):
'''Write a tex-file of regression results for a list of model dataframes.'''
if len(model_list) > 1:
try:
merged_models = model_list[0].join(
model_list[1:],
how='outer'
)
index_order = []
for m in model_list:
for v in m.index:
if v not in index_order and v not in {'Observations', 'R2'}:
index_order.append(v)
index_order.append('Observations')
index_order.append('R2')
merged_models = merged_models.reindex(index_order)
except ValueError as e:
print('Models need different labels.', e)
raise
else:
merged_models = model_list[0]
merged_models.loc['R2'] = merged_models.loc['R2'].apply(
lambda x: str(np.round(100 * x, 1))
)
merged_models.loc['Observations'] = merged_models.loc['Observations'].apply(
lambda x: format(int(x), ',d')
)
merged_models = merged_models.fillna('')
merged_models.index = pd.Series(merged_models.index).apply(lambda x: str(x))
with pd.option_context("max_colwidth", 1000):
merged_tex = merged_models.to_latex(header=True, escape=False)
merged_tex = re.sub('\'', '', merged_tex)
merged_tex = re.sub('\_3stars', '\sym{***}', merged_tex)
merged_tex = re.sub('\_2stars', '\sym{**}', merged_tex)
merged_tex = re.sub('\_1star', '\sym{*}', merged_tex)
merged_tex = re.sub(r'\\begin{tabular}{.*}', '', merged_tex)
merged_tex = re.sub(r'\\end{tabular}', '', merged_tex)
merged_tex = re.sub(r'\\toprule', '', merged_tex)
merged_tex = re.sub(r'\\bottomrule', '', merged_tex)
merged_tex = re.sub(r' \\\\', r' \\tabularnewline', merged_tex)
merged_tex = re.sub('\(.* Std\.Err\.\)', '', merged_tex)
merged_tex = re.sub('\(.*addlinespace\)', r'\\addlinespace', merged_tex)
merged_tex = re.sub('addlinespace.*?tabularnewline', 'addlinespace', merged_tex)
merged_tex = re.sub('\\\_', ' ', merged_tex)
merged_tex = re.sub(', Coef.\)', '', merged_tex)
merged_tex = re.sub('\n\(', '\n', merged_tex)
merged_tex = re.sub(':leq:', r'$\leq$', merged_tex)
merged_tex = re.sub(':g:', r'$>$', merged_tex)
merged_tex = re.sub(':l:', r'$<$', merged_tex)
merged_tex = re.sub(':in:', r'$\in$', merged_tex)
merged_tex = re.sub(':infty:', r'$\infty$', merged_tex)
merged_tex = re.sub(':times:', r'$\\times$', merged_tex)
merged_tex = re.sub(':euro:', r'\\euro', merged_tex)
merged_tex = re.sub(':text:', r'\\text', merged_tex)
merged_tex = re.sub(':dol:', r'$', merged_tex)
merged_tex = re.sub(':bs:', r'\\', merged_tex)
merged_tex = re.sub(
'No.{} of Observations', '\midrule\n' + r'\\addlinespace' + '\nObservations',
merged_tex
)
merged_tex = re.sub('R2', r'Adj. (pseudo) R$^2$ (\%)', merged_tex)
with open(filename, 'w') as tex_file:
tex_file.write('\\begin{tabular}{l' + '{}'.format('c' * len(model_list)) + '}\n')
tex_file.write('\\toprule\n')
tex_file.write('\\addlinespace\n')
tex_file.write(merged_tex)
tex_file.write('\\addlinespace\n')
tex_file.write('\\bottomrule\n')
tex_file.write('\\end{tabular}\n')
def probit_average_partial_effect_table(probit_model, panel, indicator_dict={}):
"""Return table of average partial effects for *probit_model* (in patsy form),
estimated using data in *panel*.
For each binary variable in model, calculate APE as the difference between average
predicted probability with variable set to 1 and average predicted probability with
variable set to 0.
For each continuous variable in model, calculate APE as difference in predicted
probabilities if each value of variable is increased by 1 standard deviation.
If evaluated for binary variable, checks for possible linked indicator variables.
Calculates APE as difference between index where only *variable* is 1 among linked
indicators and index where all linked indicators and *variable* are 0. *indicator_dict*
hands dictionary of linked variables to function.
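Example (hedged sketch; the formula and the linked-indicator mapping are
illustrative):
    table = probit_average_partial_effect_table(
        'employed ~ treatment_a + treatment_b + age', panel,
        indicator_dict={'treatment_a': ['treatment_b']})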
"""
y, x = dmatrices(probit_model, panel)
model = sm.Probit(y, x)
fitted_model = model.fit()
table = '\\begin{tabular}{lr}\n \\tabularnewline \\toprule\n'
table += '& Average Partial Effect \\tabularnewline \n'
table += ' \\midrule\n'
for i in fitted_model.model.exog_names[1:]:
probit_data = pd.DataFrame(
fitted_model.model.exog, columns=fitted_model.model.exog_names)
# Check if variable is binary:
# NOTE: the statement below is completed minimally so it parses; the original
# tail of this function was truncated.
binary = probit_data[i].apply(
    lambda x: (x in [0, 1, 0., 1.]) or pd.isnull(x)).all()
from pydp.algorithms import laplacian as dp
import numpy as np
import pandas as pd
import time
import os
import psutil
from utils import *
epsilon = pd.read_pickle('~/publication/files/epsilon.pkl')
library_name = 'pydp'
def openmind_pydp_real_dataset(dataset_folder_path, attribute, query_name, number_of_experiments):
# adult dataset
if attribute == 'age':
df_adult = pd.read_csv(dataset_folder_path+"adult.data", sep=',', header=None)
df = df_adult.iloc[:,0]
maximum = 100.0
minimum = 0.0
i = 1
if attribute == 'hrs':
df_adult = pd.read_csv(dataset_folder_path+"adult.data", sep=',', header=None)
df_hrs = df_adult.iloc[:,12]
df = np.clip(df_hrs, a_max=80, a_min=None)
maximum = max(df)
minimum = 0.0
i = 2
# education dataset
if attribute == 'absences':
df1 = pd.read_csv(dataset_folder_path+"student-mat.csv", sep=";")
df2 = pd.read_csv(dataset_folder_path+"student-por.csv", sep=";")
frames = [df1, df2]
df_education = pd.concat(frames)
df = df_education.absences
maximum = 93.0
minimum = 0.0
i = 3
if attribute == 'grade':
df1 = | pd.read_csv(dataset_folder_path+"student-mat.csv", sep=";") | pandas.read_csv |
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.enums import range_dt
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
mask = pd.DataFrame([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]), columns=['a', 'b', 'c'])
ts = pd.Series([1., 2., 3., 2., 1.], index=mask.index)
price = pd.DataFrame({
'open': [10, 11, 12, 11, 10],
'high': [11, 12, 13, 12, 11],
'low': [9, 10, 11, 10, 9],
'close': [11, 12, 11, 10, 9]
})
group_by = pd.Index(['g1', 'g1', 'g2'])
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# accessors.py ############# #
class TestAccessors:
def test_indexing(self):
assert mask.vbt.signals['a'].total() == mask['a'].vbt.signals.total()
def test_freq(self):
assert mask.vbt.signals.wrapper.freq == day_dt
assert mask['a'].vbt.signals.wrapper.freq == day_dt
assert mask.vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert mask['a'].vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([False, True]).vbt.signals.wrapper.freq is None
assert pd.Series([False, True]).vbt.signals(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(mask['a'].vbt.signals.fshift(test_n), mask['a'].shift(test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.fshift(test_n).values,
generic_nb.fshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.fshift(test_n), mask.shift(test_n, fill_value=False))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.bshift(test_n),
mask['a'].shift(-test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.bshift(test_n).values,
generic_nb.bshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.bshift(test_n), mask.shift(-test_n, fill_value=False))
def test_empty(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'),
pd.Series(np.full(5, False), index=np.arange(10, 15), name='a')
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']),
pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c'])
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty_like(mask['a']),
pd.Series(np.full(mask['a'].shape, False), index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty_like(mask),
pd.DataFrame(np.full(mask.shape, False), index=mask.index, columns=mask.columns)
)
def test_generate(self):
@njit
def choice_func_nb(from_i, to_i, col, n):
if col == 0:
return np.arange(from_i, to_i)
elif col == 1:
return np.full(1, from_i)
else:
return np.full(1, to_i - n)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate(5, choice_func_nb, 1, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate((5, 2), choice_func_nb, 1)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[True, False, False],
[True, False, False],
[True, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate(
(5, 3), choice_func_nb, 1, pick_first=True, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_both(self):
@njit
def entry_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
@njit
def exit_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
en, ex = pd.Series.vbt.signals.generate_both(
5, entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, True, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=1, exit_wait=0)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.Series.vbt.signals.generate_both(
(5,), entry_func_nb, (temp_int,), exit_func_nb, (temp_int,),
index=mask['a'].index, name=mask['a'].name, entry_wait=0, exit_wait=1)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, True, True, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
@njit
def entry_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
@njit
def exit_func2_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
if from_i + 1 < to_i:
temp_int[1] = from_i + 1
return temp_int[:2]
return temp_int[:1]
en, ex = pd.DataFrame.vbt.signals.generate_both(
(5, 3), entry_func2_nb, (temp_int,), exit_func2_nb, (temp_int,),
entry_pick_first=False, exit_pick_first=False,
index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[True, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_exits(self):
@njit
def choice_func_nb(from_i, to_i, col, temp_int):
temp_int[0] = from_i
return temp_int[:1]
temp_int = np.empty((mask.shape[0],), dtype=np.int_)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func_nb, temp_int, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
@njit
def choice_func2_nb(from_i, to_i, col, temp_int):
for i in range(from_i, to_i):
temp_int[i - from_i] = i
return temp_int[:to_i - from_i]
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_exits(choice_func2_nb, temp_int, until_next=False, pick_first=False),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[True, True, False],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
mask2 = pd.Series([True, True, True, True, True], index=mask.index)
pd.testing.assert_series_equal(
mask2.vbt.signals.generate_exits(choice_func_nb, temp_int, until_next=False, skip_until_exit=True),
pd.Series(
np.array([False, True, False, True, False]),
index=mask.index
)
)
def test_clean(self):
entries = pd.DataFrame([
[True, False, True],
[True, False, False],
[True, True, True],
[False, True, False],
[False, True, True]
], index=mask.index, columns=mask.columns)
exits = pd.Series([True, False, True, False, True], index=mask.index)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries),
pd.DataFrame(
np.array([
[True, False, True],
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
entries.vbt.signals.clean(exits, entry_first=False)[1],
pd.DataFrame(
np.array([
[False, True, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[0],
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.clean(entries, exits)[1],
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.clean(entries, entries, entries)
def test_generate_random(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, n=3, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([False, True, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), n=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=3, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[True, True, True],
[True, True, False],
[False, True, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, False, True],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
pd.Series.vbt.signals.generate_random(
5, prob=0.5, seed=seed, index=mask['a'].index, name=mask['a'].name),
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
with pytest.raises(Exception):
_ = pd.Series.vbt.signals.generate_random((5, 2), prob=3)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=0.5, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[True, True, True],
[False, True, False],
[False, False, False],
[False, False, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, True, True],
[False, False, True],
[False, False, True],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.generate_random(
(5, 3), prob=[0., 0.5, 1.], pick_first=True, seed=seed, index=mask.index, columns=mask.columns),
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_random_both(self):
# n
en, ex = pd.Series.vbt.signals.generate_random_both(
5, n=2, seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, True, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=2, seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[True, True, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, True, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), n=[0, 1, 2], seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, False, True],
[False, True, False],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[False, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((2, 3), n=2, seed=seed, entry_wait=1, exit_wait=0)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True]
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((3, 3), n=2, seed=seed, entry_wait=0, exit_wait=1)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
])
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both((7, 3), n=2, seed=seed, entry_wait=2, exit_wait=2)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False]
])
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[True, True, True]
])
)
)
n = 10
a = np.full(n * 2, 0.)
for i in range(10000):
en, ex = pd.Series.vbt.signals.generate_random_both(1000, n, entry_wait=2, exit_wait=2)
_a = np.empty((n * 2,), dtype=np.int_)
_a[0::2] = np.flatnonzero(en)
_a[1::2] = np.flatnonzero(ex)
a += _a
greater = a > 10000000 / (2 * n + 1) * np.arange(0, 2 * n)
less = a < 10000000 / (2 * n + 1) * np.arange(2, 2 * n + 2)
assert np.all(greater & less)
# probs
en, ex = pd.Series.vbt.signals.generate_random_both(
5, entry_prob=0.5, exit_prob=1., seed=seed, index=mask['a'].index, name=mask['a'].name)
pd.testing.assert_series_equal(
en,
pd.Series(
np.array([True, False, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_series_equal(
ex,
pd.Series(
np.array([False, True, False, False, False]),
index=mask['a'].index,
name=mask['a'].name
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=0.5, exit_prob=1., seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=[0., 0.5, 1.], exit_prob=[0., 0.5, 1.],
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, True, True],
[False, False, False],
[False, False, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., exit_wait=0,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=False, exit_pick_first=True,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
en, ex = pd.DataFrame.vbt.signals.generate_random_both(
(5, 3), entry_prob=1., exit_prob=1., entry_pick_first=True, exit_pick_first=False,
seed=seed, index=mask.index, columns=mask.columns)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(
np.array([
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(
np.array([
[False, False, False],
[True, True, True],
[True, True, True],
[True, True, True],
[True, True, True]
]),
index=mask.index,
columns=mask.columns
)
)
# none
with pytest.raises(Exception):
pd.DataFrame.vbt.signals.generate_random((5, 3))
def test_generate_random_exits(self):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(seed=seed),
pd.Series(
np.array([False, False, True, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[True, True, False],
[False, False, False],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(seed=seed, wait=0),
pd.DataFrame(
np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_series_equal(
mask['a'].vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.Series(
np.array([False, True, False, False, True]),
index=mask['a'].index,
name=mask['a'].name
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=[0., 0.5, 1.], seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., wait=0, seed=seed),
pd.DataFrame(
np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_random_exits(prob=1., until_next=False, seed=seed),
pd.DataFrame(
np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
def test_generate_stop_exits(self):
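# Exercises generate_stop_exits on a simple price series: stop-loss (negative stop),
# trailing stops, take-profit (positive stop), per-column stop arrays,
# entry_wait/exit_wait, chained entry/exit generation, and until_next/pick_first.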
e = pd.Series([True, False, False, False, False, False])
t = pd.Series([2, 3, 4, 3, 2, 1]).astype(np.float64)
# stop loss
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits(t.vbt.tile(3), [np.nan, -0.5, -1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, False],
[False, True, False]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# take profit
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1),
pd.Series(np.array([False, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True),
pd.Series(np.array([False, False, False, True, False, False]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, pick_first=False),
pd.Series(np.array([False, False, False, True, True, True]))
)
pd.testing.assert_frame_equal(
e.vbt.signals.generate_stop_exits((4 - t).vbt.tile(3), [np.nan, 0.5, 1.], trailing=True, pick_first=False),
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True],
[False, True, True]
]))
)
pd.testing.assert_series_equal(
e.vbt.signals.generate_stop_exits(4 - t, 0.1, trailing=True, exit_wait=3),
pd.Series(np.array([False, False, False, False, True, False]))
)
# chain
e = pd.Series([True, True, True, True, True, True])
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, True]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, entry_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, False, True]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
en, ex = e.vbt.signals.generate_stop_exits(t, -0.1, trailing=True, exit_wait=2, chain=True)
pd.testing.assert_series_equal(
en,
pd.Series(np.array([True, False, False, False, True, False]))
)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, False, False, True, False, False]))
)
# until_next and pick_first
e2 = pd.Series([True, True, True, True, True, True])
t2 = pd.Series([6, 5, 4, 3, 2, 1]).astype(np.float64)
ex = e2.vbt.signals.generate_stop_exits(t2, -0.1, until_next=False, pick_first=False)
pd.testing.assert_series_equal(
ex,
pd.Series(np.array([False, True, True, True, True, True]))
)
def test_generate_ohlc_stop_exits(self):
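# Negative sl_stop/tp_stop must raise; on a plain price series the results should
# match generate_stop_exits, including the reverse=True cases.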
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=-0.1)
with pytest.raises(Exception):
_ = mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=-0.1)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, 0.1, trailing=True),
mask.vbt.signals.generate_ohlc_stop_exits(ts, sl_stop=0.1, sl_trail=True, reverse=True)
)
pd.testing.assert_frame_equal(
mask.vbt.signals.generate_stop_exits(ts, -0.1),
mask.vbt.signals.generate_ohlc_stop_exits(ts, tp_stop=0.1, reverse=True)
)
def _test_ohlc_stop_exits(**kwargs):
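# Helper: runs generate_ohlc_stop_exits on the OHLC columns of `price` and returns
# the result (an (entries, exits) tuple when chain=True, otherwise the exit mask)
# together with the stop_price/stop_type frames written into out_dict.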
out_dict = {'stop_price': np.nan, 'stop_type': -1}
result = mask.vbt.signals.generate_ohlc_stop_exits(
price['open'], price['high'], price['low'], price['close'],
out_dict=out_dict, **kwargs
)
if isinstance(result, tuple):
_, ex = result
else:
ex = result
return result, out_dict['stop_price'], out_dict['stop_type']
ex, stop_price, stop_type = _test_ohlc_stop_exits()
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, 0],
[0, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, True, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 11.7, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, 1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, False],
[False, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=[np.nan, 0.1, 0.2], sl_trail=True, tp_stop=[np.nan, 0.1, 0.2])
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[False, False, False],
[False, True, False],
[False, False, False],
[False, False, True]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 9.6]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, -1],
[-1, -1, 1]
]), index=mask.index, columns=mask.columns)
)
ex, stop_price, stop_type = _test_ohlc_stop_exits(sl_stop=0.1, sl_trail=True, tp_stop=0.1, exit_wait=0)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[True, False, False],
[False, False, False],
[False, True, False],
[False, False, True],
[True, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[9.0, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 11.7],
[10.8, 9.0, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[1, -1, -1],
[-1, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, 1, -1]
]), index=mask.index, columns=mask.columns)
)
(en, ex), stop_price, stop_type = _test_ohlc_stop_exits(
sl_stop=0.1, sl_trail=True, tp_stop=0.1, chain=True)
pd.testing.assert_frame_equal(
en,
pd.DataFrame(np.array([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
ex,
pd.DataFrame(np.array([
[False, False, False],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_price,
pd.DataFrame(np.array([
[np.nan, np.nan, np.nan],
[11.0, np.nan, np.nan],
[np.nan, 12.1, np.nan],
[np.nan, np.nan, 10.8],
[9.9, np.nan, np.nan]
]), index=mask.index, columns=mask.columns)
)
pd.testing.assert_frame_equal(
stop_type,
pd.DataFrame(np.array([
[-1, -1, -1],
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 1],
[1, -1, -1]
]), index=mask.index, columns=mask.columns)
)
def test_between_ranges(self):
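# between_ranges records the gap between each signal and the next one in the same
# column; with `other`/`from_other` it measures distances between two different masks.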
ranges = mask.vbt.signals.between_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 3, 1), (1, 1, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask.vbt.wrapper
mask2 = pd.DataFrame([
[True, True, True],
[True, True, True],
[False, False, False],
[False, False, False],
[False, False, False]
], index=mask.index, columns=mask.columns)
other_mask = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[False, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_ranges(other=other_mask)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 0, 1, 1), (1, 0, 1, 1, 1), (2, 1, 0, 2, 1),
(3, 1, 1, 2, 1), (4, 2, 0, 3, 1), (5, 2, 1, 3, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
ranges = mask2.vbt.signals.between_ranges(other=other_mask, from_other=True)
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 1, 1), (1, 0, 1, 2, 1), (2, 1, 1, 2, 1),
(3, 1, 1, 3, 1), (4, 2, 1, 3, 1), (5, 2, 1, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_partition_ranges(self):
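# partition_ranges records each run of consecutive True values, left open (status 0)
# if the run is still going at the last row.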
mask2 = pd.DataFrame([
[False, False, False],
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 0, 4, 4, 0), (2, 1, 2, 4, 1), (3, 2, 3, 4, 0)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_between_partition_ranges(self):
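# between_partition_ranges records the gap from the end of one True-partition to the
# start of the next one in the same column.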
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
ranges = mask2.vbt.signals.between_partition_ranges()
record_arrays_close(
ranges.values,
np.array([
(0, 0, 1, 3, 1), (1, 1, 2, 4, 1)
], dtype=range_dt)
)
assert ranges.wrapper == mask2.vbt.wrapper
def test_pos_rank(self):
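# pos_rank numbers each True within its partition (0-based, -1 where False);
# after_false, allow_gaps and reset_by change where the counting restarts.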
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.pos_rank(),
pd.Series([-1, 0, 1, -1, 0], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 2, 2],
[2, -1, 3]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask['a'], allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 1, -1],
[-1, 0, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.pos_rank(reset_by=mask, allow_gaps=True),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 1],
[1, 0, -1],
[-1, 1, 0],
[0, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
def test_partition_pos_rank(self):
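# partition_pos_rank numbers the partition each True belongs to (0-based, -1 where
# False); after_false and reset_by restart the partition count.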
pd.testing.assert_series_equal(
(~mask['a']).vbt.signals.partition_pos_rank(),
pd.Series([-1, 0, 0, -1, 1], index=mask['a'].index, name=mask['a'].name)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 1, 1],
[1, -1, 1]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(after_false=True),
pd.DataFrame(
np.array([
[-1, -1, -1],
[0, -1, -1],
[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask['a']),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 1, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.partition_pos_rank(reset_by=mask),
pd.DataFrame(
np.array([
[-1, 0, 0],
[0, -1, 0],
[0, 0, -1],
[-1, 0, 0],
[0, -1, 0]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_fns(self):
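# first/nth/from_nth reduce the rank info to boolean masks: the first signal of each
# partition, only the n-th signal, or everything from the n-th signal onward.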
pd.testing.assert_frame_equal(
(~mask).vbt.signals.first(),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(1),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, True],
[True, False, False],
[False, True, False],
[False, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.nth(2),
pd.DataFrame(
np.array([
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False],
[False, False, False]
]),
index=mask.index,
columns=mask.columns
)
)
pd.testing.assert_frame_equal(
(~mask).vbt.signals.from_nth(0),
pd.DataFrame(
np.array([
[False, True, True],
[True, False, True],
[True, True, False],
[False, True, True],
[True, False, True]
]),
index=mask.index,
columns=mask.columns
)
)
def test_pos_rank_mapped(self):
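# pos_rank_mapped returns the within-partition ranks as a mapped array
# (values plus column and row coordinates).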
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 1, 0, 0, 1, 0, 0, 1])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_partition_pos_rank_mapped(self):
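# partition_pos_rank_mapped does the same, but maps each signal to the index of its partition.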
mask2 = pd.DataFrame([
[True, False, False],
[True, True, False],
[False, True, True],
[True, False, True],
[False, True, False]
], index=mask.index, columns=mask.columns)
mapped = mask2.vbt.signals.partition_pos_rank_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 0, 1, 0, 0, 1, 0, 0])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 1, 3, 1, 2, 4, 2, 3])
)
assert mapped.wrapper == mask2.vbt.wrapper
def test_nth_index(self):
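# nth_index picks the timestamp of the n-th signal per column (negative n counts from
# the end, missing values become NaT); group_by pools the columns of each group.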
assert mask['a'].vbt.signals.nth_index(0) == pd.Timestamp('2020-01-01 00:00:00')
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1),
pd.Series([
pd.Timestamp('2020-01-04 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-2),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-02 00:00:00'),
np.nan
], index=mask.columns, name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(0, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
pd.testing.assert_series_equal(
mask.vbt.signals.nth_index(-1, group_by=group_by),
pd.Series([
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timestamp('2020-01-03 00:00:00')
], index=['g1', 'g2'], name='nth_index', dtype='datetime64[ns]')
)
def test_norm_avg_index(self):
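# norm_avg_index maps signal positions to [-1, 1] (first row -1, last row +1) and
# averages them per column or group.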
assert mask['a'].vbt.signals.norm_avg_index() == -0.25
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(),
pd.Series([-0.25, 0.25, 0.0], index=mask.columns, name='norm_avg_index')
)
pd.testing.assert_series_equal(
mask.vbt.signals.norm_avg_index(group_by=group_by),
pd.Series([0.0, 0.0], index=['g1', 'g2'], name='norm_avg_index')
)
def test_index_mapped(self):
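# index_mapped exposes the row index of every signal as a mapped array.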
mapped = mask.vbt.signals.index_mapped()
np.testing.assert_array_equal(
mapped.values,
np.array([0, 3, 1, 4, 2])
)
np.testing.assert_array_equal(
mapped.col_arr,
np.array([0, 0, 1, 1, 2])
)
np.testing.assert_array_equal(
mapped.idx_arr,
np.array([0, 3, 1, 4, 2])
)
assert mapped.wrapper == mask.vbt.wrapper
def test_total(self):
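# total counts signals per column (or per group).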
assert mask['a'].vbt.signals.total() == 2
pd.testing.assert_series_equal(
mask.vbt.signals.total(),
pd.Series([2, 2, 1], index=mask.columns, name='total')
)
pd.testing.assert_series_equal(
mask.vbt.signals.total(group_by=group_by),
pd.Series([4, 1], index=['g1', 'g2'], name='total')
)
def test_rate(self):
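# rate is the fraction of rows that are True (total divided by the number of elements).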
assert mask['a'].vbt.signals.rate() == 0.4
pd.testing.assert_series_equal(
mask.vbt.signals.rate(),
pd.Series([0.4, 0.4, 0.2], index=mask.columns, name='rate')
)
pd.testing.assert_series_equal(
mask.vbt.signals.rate(group_by=group_by),
pd.Series([0.4, 0.2], index=['g1', 'g2'], name='rate')
)
def test_total_partitions(self):
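# total_partitions counts runs of consecutive True values per column or group.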
assert mask['a'].vbt.signals.total_partitions() == 2
pd.testing.assert_series_equal(
mask.vbt.signals.total_partitions(),
pd.Series([2, 2, 1], index=mask.columns, name='total_partitions')
)
pd.testing.assert_series_equal(
mask.vbt.signals.total_partitions(group_by=group_by),
pd.Series([4, 1], index=['g1', 'g2'], name='total_partitions')
)
def test_partition_rate(self):
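# partition_rate relates partitions to signals; every signal in `mask` is isolated,
# so the rate is 1.0 everywhere.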
assert mask['a'].vbt.signals.partition_rate() == 1.0
pd.testing.assert_series_equal(
mask.vbt.signals.partition_rate(),
pd.Series([1.0, 1.0, 1.0], index=mask.columns, name='partition_rate')
)
pd.testing.assert_series_equal(
mask.vbt.signals.partition_rate(group_by=group_by),
pd.Series([1.0, 1.0], index=['g1', 'g2'], name='partition_rate')
)
def test_stats(self):
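# stats assembles the metrics above into one Series: averaged over all columns by
# default (agg_func_mean), or for a single column, optionally without converting
# durations when to_timedelta=False.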
stats_index = pd.Index([
'Start', 'End', 'Period', 'Total', 'Rate [%]', 'First Index',
'Last Index', 'Norm Avg Index [-1, 1]', 'Distance: Min',
'Distance: Max', 'Distance: Mean', 'Distance: Std', 'Total Partitions',
'Partition Rate [%]', 'Partition Length: Min', 'Partition Length: Max',
'Partition Length: Mean', 'Partition Length: Std',
'Partition Distance: Min', 'Partition Distance: Max',
'Partition Distance: Mean', 'Partition Distance: Std'
], dtype='object')
pd.testing.assert_series_equal(
mask.vbt.signals.stats(),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
1.6666666666666667,
33.333333333333336,
pd.Timestamp('2020-01-02 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
0.0,
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan,
1.6666666666666667,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='a'),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'),
2,
40.0,
pd.Timestamp('2020-01-01 00:00:00'),
pd.Timestamp('2020-01-04 00:00:00'),
-0.25,
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan,
2,
100.0,
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('1 days 00:00:00'),
pd.Timedelta('0 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
pd.Timedelta('3 days 00:00:00'),
np.nan
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mask.vbt.signals.stats(column='a', settings=dict(to_timedelta=False)),
pd.Series([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'), 5, 2, 40.0,
# with to_timedelta=False the duration metrics stay as plain day counts,
# matching the column 'a' stats above
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-04 00:00:00'), -0.25,
3.0, 3.0, 3.0, np.nan, 2, 100.0, 1.0, 1.0, 1.0, 0.0, 3.0, 3.0, 3.0, np.nan
],
index=stats_index,
name='a'
)
)