prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars)
---|---|---|
from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Series,
)
import pandas._testing as tm
from pandas.core.indexes.timedeltas import timedelta_range
def test_asfreq_bug():
df = DataFrame(data=[1, 3], index=[timedelta(), timedelta(minutes=3)])
result = df.resample("1T").asfreq()
expected = DataFrame(
data=[1, np.nan, np.nan, 3],
index=timedelta_range("0 day", periods=4, freq="1T"),
)
tm.assert_frame_equal(result, expected)
def test_resample_with_nat():
# GH 13223
index = pd.to_timedelta(["0s", pd.NaT, "2s"])
result = DataFrame({"value": [2, 3, 5]}, index).resample("1s").mean()
expected = DataFrame(
{"value": [2.5, np.nan, 5.0]},
index=timedelta_range("0 day", periods=3, freq="1S"),
)
tm.assert_frame_equal(result, expected)
def test_resample_as_freq_with_subperiod():
# GH 13022
index = timedelta_range("00:00:00", "00:10:00", freq="5T")
df = DataFrame(data={"value": [1, 5, 10]}, index=index)
result = df.resample("2T").asfreq()
expected_data = {"value": [1, np.nan, np.nan, np.nan, np.nan, 10]}
expected = DataFrame(
data=expected_data, index=timedelta_range("00:00:00", "00:10:00", freq="2T")
)
tm.assert_frame_equal(result, expected)
def test_resample_with_timedeltas():
expected = DataFrame({"A": np.arange(1480)})
expected = expected.groupby(expected.index // 30).sum()
expected.index = timedelta_range("0 days", freq="30T", periods=50)
df = DataFrame(
{"A": np.arange(1480)}, index=pd.to_timedelta(np.arange(1480), unit="T")
)
result = df.resample("30T").sum()
tm.assert_frame_equal(result, expected)
s = df["A"]
result = s.resample("30T").sum()
tm.assert_series_equal(result, expected["A"])
def test_resample_single_period_timedelta():
s = Series(list(range(5)), index=timedelta_range("1 day", freq="s", periods=5))
result = s.resample("2s").sum()
expected = Series([1, 5, 4], index=timedelta_range("1 day", freq="2s", periods=3))
tm.assert_series_equal(result, expected)
def test_resample_timedelta_idempotency():
# GH 12072
index = timedelta_range("0", periods=9, freq="10L")
series = Series(range(9), index=index)
result = series.resample("10L").mean()
expected = series.astype(float)
tm.assert_series_equal(result, expected)
def test_resample_offset_with_timedeltaindex():
# GH 10530 & 31809
rng = timedelta_range(start="0s", periods=25, freq="s")
ts = Series(np.random.randn(len(rng)), index=rng)
with_base = ts.resample("2s", offset="5s").mean()
without_base = ts.resample("2s").mean()
exp_without_base = timedelta_range(start="0s", end="25s", freq="2s")
exp_with_base = timedelta_range(start="5s", end="29s", freq="2s")
tm.assert_index_equal(without_base.index, exp_without_base)
tm.assert_index_equal(with_base.index, exp_with_base)
def test_resample_categorical_data_with_timedeltaindex():
# GH #12169
df = DataFrame({"Group_obj": "A"}, index=pd.to_timedelta(list(range(20)), unit="s"))
df["Group"] = df["Group_obj"].astype("category")
result = df.resample("10s").agg(lambda x: (x.value_counts().index[0]))
expected = DataFrame(
{"Group_obj": ["A", "A"], "Group": ["A", "A"]},
index=pd.TimedeltaIndex([0, 10], unit="s", freq="10s"),
)
expected = expected.reindex(["Group_obj", "Group"], axis=1)
expected["Group"] = expected["Group_obj"]
tm.assert_frame_equal(result, expected)
def test_resample_timedelta_values():
# GH 13119
# check that timedelta dtype is preserved when NaT values are
# introduced by the resampling
times = timedelta_range("1 day", "6 day", freq="4D")
df = DataFrame({"time": times}, index=times)
times2 = timedelta_range("1 day", "6 day", freq="2D")
exp = Series(times2, index=times2, name="time")
exp.iloc[1] = pd.NaT
res = df.resample("2D").first()["time"]
tm.assert_series_equal(res, exp)
res = df["time"].resample("2D").first()
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize(
"start, end, freq, resample_freq",
[
("8H", "21h59min50s", "10S", "3H"), # GH 30353 example
("3H", "22H", "1H", "5H"),
("527D", "5006D", "3D", "10D"),
("1D", "10D", "1D", "2D"), # GH 13022 example
# tests that worked before GH 33498:
("8H", "21h59min50s", "10S", "2H"),
("0H", "21h59min50s", "10S", "3H"),
("10D", "85D", "D", "2D"),
],
)
def test_resample_timedelta_edge_case(start, end, freq, resample_freq):
# GH 33498
# check that the timedelta bins do not contain an extra bin
idx = timedelta_range(start=start, end=end, freq=freq)
s = Series(np.arange(len(idx)), index=idx)
result = s.resample(resample_freq).min()
expected_index = timedelta_range(freq=resample_freq, start=start, end=end)
tm.assert_index_equal(result.index, expected_index)
assert result.index.freq == expected_index.freq
assert not np.isnan(result[-1])
@pytest.mark.parametrize("duplicates", [True, False])
def test_resample_with_timedelta_yields_no_empty_groups(duplicates):
# GH 10603
df = DataFrame(
np.random.normal(size=(10000, 4)),
index=timedelta_range(start="0s", periods=10000, freq="3906250n"),
)
if duplicates:
# case with non-unique columns
df.columns = ["A", "B", "A", "C"]
result = df.loc["1s":, :].resample("3s").apply(lambda x: len(x))
expected = DataFrame(
[[768] * 4] * 12 + [[528] * 4],
index=
|
timedelta_range(start="1s", periods=13, freq="3s")
|
pandas.core.indexes.timedeltas.timedelta_range
|
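For quick reference, a minimal self-contained sketch of `pandas.timedelta_range`, the API named in this row; the start/periods/freq values are illustrative assumptions, not taken from the row above.

```python
import pandas as pd

# Build a fixed-frequency TimedeltaIndex: 13 bins, 3 seconds apart, starting at 1s,
# the kind of index the test above uses as its expected resample result.
idx = pd.timedelta_range(start="1s", periods=13, freq="3s")
print(len(idx), idx[0], idx[-1])  # 13 entries, from 1s to 37s
```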
import matplotlib
matplotlib.use('Agg')
import vcf, argparse, sys
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from scipy.stats import chisquare
from collections import defaultdict
def parse_args():
"""
Description:
function 'parse_args' parses arguments from the command line and returns an argparse
Namespace containing the arguments and their values. Options not supplied on the
command line default to None.
"""
parser = argparse.ArgumentParser('Input both a VUS and pathogenic concordant bgzip compressed and tabix indexed vcf.')
parser.add_argument('-i', '--inVUSvcf', type=str,
help='Input brcaexchange-VUS/sample-genotype concordant vcf filepath.')
parser.add_argument('-j', '--inPATHvcf', type=str,
help='Input brcaexchange-pathogenic/sample-genotype concordant vcf filepath.')
parser.add_argument('-s', '--inSITESvcf', type=str,
help='Input site vcf containing site-specific annotation information.')
parser.add_argument('-o', '--outReport', type=str,
help='Output report filename.')
parser.add_argument('-v', '--outVariants', type=str,
help='Output apparent-benign VUS variants list filename.')
options = parser.parse_args()
return options
# Shamelessly borrowed from http://specminor.org/2017/01/08/performing-chi-squared-gof-python.html
# Because I need to calculate chi-square p-values at greater resolution than what's provided
# by scipy.stats.chisquare
def gf(x):
#Play with these values to adjust the error of the approximation.
upper_bound=100.0
resolution=1000000.0
step=upper_bound/resolution
val=0
rolling_sum=0
while val<=upper_bound:
val+=step
rolling_sum+=step*(val**(x-1)*2.7182818284590452353602874713526624977**(-val))
return rolling_sum
def ilgf(s,z):
val=0
for k in range(0,100):
val+=(((-1)**k)*z**(s+k))/(math.factorial(k)*(s+k))
return val
def chisquarecdf(x,k):
return 1-ilgf(k/2,x/2)/gf(k/2)
def chisquare_custom(observed_values,expected_values):
test_statistic=0
for observed, expected in zip(observed_values, expected_values):
test_statistic+=(abs(float(observed)-float(expected))-0.5)**2/float(expected)
df=1
print("test_statistic: {}".format(test_statistic))
return test_statistic, chisquarecdf(test_statistic,df)
def main(args):
options = parse_args()
if options.inSITESvcf:
sitesvcf_stream = open(options.inSITESvcf, 'rb')
sitesvcf_reader = vcf.Reader(sitesvcf_stream)
## Isolate samples by the 2 different phased PATHOGENIC het calls
with open(options.inPATHvcf, 'rb') as inPATHvcf_file:
vcf_reader_pathogenic = vcf.Reader(inPATHvcf_file)
brca_pathogenic_left_het_sample_list = defaultdict(list)
brca_pathogenic_right_het_sample_list = defaultdict(list)
brca_pathogenic_hom_sample_list = defaultdict(list)
for record in vcf_reader_pathogenic:
allele_freq = ""
try:
allele_freq = record.INFO['AF'][0]
except KeyError:
try:
allele_freq = float(record.INFO['AC'][0])/float(record.INFO['AN'])
except KeyError:
for site_record in sitesvcf_reader.fetch(record.CHROM, record.start, record.end):
if site_record.REF == record.REF and site_record.ALT[0] == record.ALT[0]:
allele_freq = site_record.INFO['AF'][0]
break
print('path site_record: {}_{}_{}_{}_{}_{}_{}'.format(site_record.CHROM, site_record.POS, site_record.REF, site_record.ALT[0], site_record.INFO['AF'], site_record.INFO['AF'][0], allele_freq))
print('path source_record: {}_{}_{}_{}'.format(record.CHROM,record.POS,record.REF,record.ALT[0]))
if str(float(allele_freq)) == "0.0":
print("Error: variant as allele frequency of 0")
print("Bad variant: {}_{}_{}_{}_{}".format(record.CHROM,record.POS,record.REF,record.ALT[0],allele_freq))
continue
variant_record = "{}_{}_{}_{}_{}".format(record.CHROM,record.POS,record.REF,record.ALT[0],allele_freq)
print(variant_record)
print('')
for sample in record.samples:
if sample['GT'] == '1|0':
brca_pathogenic_left_het_sample_list[sample.sample].append(variant_record)
elif sample['GT'] == '0|1':
brca_pathogenic_right_het_sample_list[sample.sample].append(variant_record)
elif sample['GT'] == '1|1':
brca_pathogenic_hom_sample_list[sample.sample].append(variant_record)
## Isolate samples by the 2 different phased VUS het calls
VUS_hom_HWE_stats = defaultdict(list)
with open(options.inVUSvcf, 'rb') as inVUSvcf_file:
vcf_reader_vus = vcf.Reader(inVUSvcf_file)
brca_vus_left_het_sample_list = defaultdict(list)
brca_vus_right_het_sample_list = defaultdict(list)
brca_vus_hom_sample_list = defaultdict(list)
for record in vcf_reader_vus:
allele_freq = ""
try:
allele_freq = record.INFO['AF'][0]
except KeyError:
try:
allele_freq = float(record.INFO['AC'][0])/float(record.INFO['AN'])
except KeyError:
for site_record in sitesvcf_reader.fetch(record.CHROM, record.start, record.end):
if site_record.REF == record.REF and site_record.ALT[0] == record.ALT[0]:
allele_freq = site_record.INFO['AF'][0]
break
print('vus site_record: {}_{}_{}_{}_{}_{}_{}'.format(site_record.CHROM, site_record.POS, site_record.REF, site_record.ALT[0], site_record.INFO['AF'], site_record.INFO['AF'][0], allele_freq))
print('vus source_record: {}_{}_{}_{}'.format(record.CHROM,record.POS,record.REF,record.ALT[0]))
if str(float(allele_freq)) == "0.0":
print("Error: variant as allele frequency of 0")
print("Bad variant: {}_{}_{}_{}_{}".format(record.CHROM,record.POS,record.REF,record.ALT[0],allele_freq))
continue
variant_record = "{}_{}_{}_{}_{}".format(record.CHROM,record.POS,record.REF,record.ALT[0],allele_freq)
HWE_obs_genotype_freq = list()
HWE_exp_genotype_freq = list()
HWE_obs_genotype_freq.append(0)
HWE_obs_genotype_freq.append(0)
HWE_obs_genotype_freq.append(0)
print(variant_record)
print('')
for sample in record.samples:
if sample['GT'] == '0|0':
HWE_obs_genotype_freq[0] += 1
elif sample['GT'] == '1|0':
HWE_obs_genotype_freq[1] += 1
brca_vus_left_het_sample_list[sample.sample].append(variant_record)
elif sample['GT'] == '0|1':
HWE_obs_genotype_freq[1] += 1
brca_vus_right_het_sample_list[sample.sample].append(variant_record)
elif sample['GT'] == '1|1':
HWE_obs_genotype_freq[2] += 1
brca_vus_hom_sample_list[sample.sample].append(variant_record)
# Hardy Weinberg Calculation
if HWE_obs_genotype_freq[2] > 0:
q_allele_freq = float(allele_freq)
p_allele_freq = 1.0 - float(allele_freq)
HWE_exp_genotype_freq.append((p_allele_freq * p_allele_freq)*len(record.samples))
HWE_exp_genotype_freq.append((2.0 * p_allele_freq * q_allele_freq)*len(record.samples))
HWE_exp_genotype_freq.append((q_allele_freq * q_allele_freq)*len(record.samples))
chisquare_value = chisquare(HWE_obs_genotype_freq,HWE_exp_genotype_freq,ddof=1)
#levene_haldane_hwe_calc = hl.eval(hl.hardy_weinberg_test(HWE_obs_genotype_freq[0],HWE_obs_genotype_freq[1],HWE_obs_genotype_freq[2]))
VUS_hom_HWE_stats[variant_record] = [HWE_obs_genotype_freq, HWE_exp_genotype_freq, p_allele_freq, q_allele_freq, chisquare_value[0], chisquare_value[1]]
if options.inSITESvcf:
sitesvcf_stream.close()
## Output to hom var VUS Hardy-Weinberg Equilibrium report
hwe_report_filename = "hom_vus_hwe_{}.txt".format(options.outReport)
hwe_chi_square_stats = list()
hwe_chi_square_pvalues = list()
minor_allele_freqs = list()
#hwe_hl_dist_pvalues = list()
with open(hwe_report_filename, 'w') as hwe_report_file:
hwe_report_file.write("variant_record\thwe_obs_(0/0,0/1,1/1)\thwe_exp_(0/0,0/1,1/1)\tp_freq\tq_freq\tscipy_chi_square_stat\tscipy_chi_square_p_value\n")
for variant_record in VUS_hom_HWE_stats.keys():
hwe_stats = VUS_hom_HWE_stats[variant_record]
hwe_chi_square_stats.append(hwe_stats[4])
hwe_chi_square_pvalues.append(hwe_stats[5])
minor_allele_freqs.append(hwe_stats[3])
#hwe_hl_dist_pvalues.append(hwe_stats[7])
hwe_report_file.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(variant_record, hwe_stats[0], hwe_stats[1], hwe_stats[2], hwe_stats[3], hwe_stats[4], hwe_stats[5]))
# Build histogram plots and save to .png files
print("hwe_chi_square_stats: {}".format(hwe_chi_square_stats))
fig1, ax1 = plt.subplots()
hwe_chi_square_stats =
|
pd.Series(hwe_chi_square_stats)
|
pandas.Series
|
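A minimal sketch of `pandas.Series`, the API named above, wrapping a plain list of statistics the way the completion does before plotting; the numbers are made up.

```python
import pandas as pd

# Wrapping a Python list in a Series unlocks vectorized methods such as
# .describe() and .hist() for the downstream histogram plot.
hwe_chi_square_stats = pd.Series([0.8, 1.2, 3.4, 2.1])
print(hwe_chi_square_stats.describe())
```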
"""Read transaction logs from AceMoney
An investment Action from AceMoney has fields
"Date", "Action", "Symbol", "Account", "Dividend", "Price",
"Quantity", "Commission", "Total", "Comment"
"""
import argparse
from datetime import datetime
import logging
import os
import pickle
from typing import Dict
import pandas as pd
import config
from config import conf
from portdash import portfolio as port
from portdash import create_app
from portdash.apis import quotes
log = logging.getLogger(__name__)
def record_inv_action(portfolio: pd.DataFrame, action: pd.Series) -> pd.DataFrame:
"""Mark the results of an AceMoney investment transaction in a portfolio
`portfolio`: pd.DataFrame
Initialized as per `init_portfolio`
`action`: pd.Series
A row from the all-investment-transactions AceMoney export
"""
port.init_symbol(portfolio, symbol=action["Symbol"])
if not pd.isnull(action["Dividend"]) and action["Dividend"] != 0:
portfolio.loc[action.Date :, f"{action.Symbol}_dist"] += action["Dividend"]
portfolio.loc[action.Date :, f"_total_dist"] += action.Dividend
if not pd.isnull(action["Quantity"]) and action["Quantity"] != 0:
sign = -1 if action["Action"] in ["Sell", "Remove Shares"] else 1
portfolio.loc[action.Date :, f"{action.Symbol}"] += sign * action["Quantity"]
if not pd.isnull(action["Total"]) and action["Total"] != 0:
sign = -1 if action["Action"] == "Buy" else 1
portfolio.loc[action["Date"] :, "cash"] += sign * action["Total"]
if action["Action"] == "Add Shares" and "__contribution__" in action["Comment"]:
price = quotes.get_price(action.Symbol, [action.Date]).values[0]
log.debug(
f"Contribution of {action.Quantity} shares of "
f"{action.Symbol} @ {price} per share."
)
portfolio.loc[action.Date :, "contributions"] += price * action["Quantity"]
if action["Action"] == "Add Shares" and "__dividend__" in action["Comment"]:
value = (
quotes.get_price(action.Symbol, [action.Date]).values[0]
* action["Quantity"]
)
log.debug(
f"Dividend of {action.Quantity} shares of {action.Symbol} "
f"of {action.Date} is ${value}."
)
portfolio.loc[action.Date :, f"{action.Symbol}_dist"] += value
portfolio.loc[action.Date :, f"_total_dist"] += value
return portfolio
def record_trans(portfolio: pd.DataFrame, transactions: pd.DataFrame) -> pd.DataFrame:
"""Insert the contents of an AceMoney account export into a portfolio"""
# A "Category" with an "@" should be accounted for by
# investment transactions
transactions = transactions[
~transactions["Category"].str.contains("@").fillna(False)
]
# A "Category" which starts with "Dividend" is an investment transaction
transactions = transactions[
~transactions["Category"].str.startswith("Dividend").fillna(False)
]
for i, row in transactions.iterrows():
record_acct_action(portfolio, row)
return portfolio
def record_acct_action(portfolio: pd.DataFrame, action: pd.DataFrame) -> pd.DataFrame:
"""Mark the results of an AceMoney account transaction in a portfolio
`portfolio`: pd.DataFrame
Initialized as per `init_portfolio`
`action`: pd.Series
A row from the single account AceMoney export
"""
if action["Date"] <= portfolio.index.max():
# Ignore transactions which might come after the end of the
# period under consideration.
# Assume that the transaction dates are always greater than
# the minimum date (this should be handled in initialization).
dep, wd = action["Deposit"], action["Withdrawal"] # Alias for convenience
is_internal = (
action["Category"] in ["Other Income:Interest", "Mail and Paper Work"]
) or (pd.isnull(action["Category"]) and pd.isnull(action["Payee"]))
if not pd.isnull(wd) and wd != 0:
portfolio.loc[action["Date"] :, "cash"] -= wd
if not is_internal:
portfolio.loc[action["Date"] :, "withdrawals"] += wd
if not pd.isnull(dep) and dep != 0:
portfolio.loc[action["Date"] :, "cash"] += dep
if not is_internal:
portfolio.loc[action["Date"] :, "contributions"] += dep
if action["Category"] == "Other Income:Interest":
portfolio.loc[action["Date"] :, "_total_dist"] += dep
return portfolio
def read_investment_transactions(fname: str = None) -> pd.DataFrame:
"""Read a CSV of investment transactions written by AceMoney"""
if not fname:
fname = conf("investment_transactions")
inv = pd.read_csv(
fname,
dtype={
"Dividend": float,
"Price": float,
"Total": float,
"Commission": float,
"Quantity": float,
},
parse_dates=[0],
thousands=",",
)
log.info(f'Ignore transactions in {conf("skip_accounts")} accounts.')
inv = inv.drop(inv[inv.Account.isin(conf("skip_accounts"))].index)
return inv
def read_portfolio_transactions(
acct_fnames: Dict[str, str] = None, ignore_future: bool = True
) -> Dict[str, pd.DataFrame]:
"""Read a CSV of account transactions written by AceMoney"""
if not acct_fnames:
acct_fnames = conf("account_transactions")
trans = {
acct_name:
|
pd.read_csv(fname, parse_dates=[0], thousands=",")
|
pandas.read_csv
|
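A minimal sketch of `pandas.read_csv` with the `parse_dates` and `thousands` options used in this row; the inline CSV is an invented stand-in for an AceMoney export.

```python
import io
import pandas as pd

# parse_dates=[0] turns the first column into datetimes; thousands="," lets
# quoted values like "1,000.00" parse as floats.
csv_text = 'Date,Deposit,Withdrawal\n2020-01-02,"1,000.00",\n2020-02-03,,"250.50"\n'
df = pd.read_csv(io.StringIO(csv_text), parse_dates=[0], thousands=",")
print(df.dtypes)
```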
import os
import geopandas as gpd
import pandas as pd
import numpy as np
import sys
sys.path.append('../')
from load_paths import load_box_paths
from datetime import date
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.dates as mdates
import seaborn as sns
import matplotlib.colors as colors
mpl.rcParams['pdf.fonttype'] = 42
datapath, projectpath, wdir,exe_dir, git_dir = load_box_paths()
scen_dir = os.path.join(projectpath, 'NU_civis_outputs', '20200429')
csv_dir = os.path.join(scen_dir, 'csv')
plot_dir = os.path.join(scen_dir, 'plots')
datapath = os.path.join(datapath, 'covid_IDPH')
shp_path = os.path.join(datapath, 'shapefiles')
if __name__ == '__main__' :
channel = 'Number of Covid-19 infections'
ems_shp = gpd.read_file(os.path.join(shp_path, 'EMS_Regions', 'EMS_Regions.shp'))
ems_pop = pd.read_csv(os.path.join(datapath, 'EMS Population', 'EMS_population_from_RTI.csv'))
ems_shp['REGION'] = ems_shp['REGION'].astype(int)
dates = [date(2020,m,1) for m in range(4, 10)]
ems_fnames = [x for x in os.listdir(csv_dir) if 'ems' in x]
for fname in ems_fnames :
il_fname = fname.replace('ems', 'illinois')
adf = pd.read_csv(os.path.join(csv_dir, fname))
adf['Date'] = pd.to_datetime(adf['Date'])
adf = adf[adf['Date'].isin(dates)]
adf = pd.merge(left=adf, right=ems_pop, left_on='ems', right_on='EMS', how='left')
adf['prevalence'] = adf['Number of Covid-19 infections']/adf['population']
fig = plt.figure(figsize=(10,12))
fig.subplots_adjust(top=0.95)
vmin, vmax = 0, np.max(adf['prevalence'])
norm = colors.Normalize(vmin=vmin,
vmax=vmax)
for di, (d, ddf) in enumerate(adf.groupby('Date')) :
ax = fig.add_subplot(3,2,di+1)
ds_shp =
|
pd.merge(left=ems_shp, right=ddf, left_on='REGION', right_on='ems')
|
pandas.merge
|
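A minimal sketch of `pandas.merge` mirroring the completion's join of shapefile regions onto model output; plain DataFrames with invented values stand in for the GeoDataFrame.

```python
import pandas as pd

# Join region geometry onto per-region prevalence, matching REGION to ems.
ems_shp = pd.DataFrame({"REGION": [1, 2, 3], "geometry": ["poly1", "poly2", "poly3"]})
ddf = pd.DataFrame({"ems": [1, 2, 3], "prevalence": [0.010, 0.020, 0.015]})
ds_shp = pd.merge(left=ems_shp, right=ddf, left_on="REGION", right_on="ems")
print(ds_shp)
```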
#!/usr/bin/env python
# coding: utf-8
# # Libraries
# In[17]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy import stats
# In[18]:
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
# # Dataset
# In[19]:
df_municipios_2015 = pd.read_csv('../../data/bcggammachallenge/municipios/municipios20150101.csv')
df_municipios_2016 = pd.read_csv('../../data/bcggammachallenge/municipios/municipios20160101.csv')
df_municipios_2017 = pd.read_csv('../../data/bcggammachallenge/municipios/municipios20170101.csv')
# In[20]:
df = pd.concat([df_municipios_2015, df_municipios_2016, df_municipios_2017])
# In[21]:
df.head()
# In[22]:
df_ideb_ini = pd.read_csv('../../data/bcggammachallenge/ideb/ideb_municipios_anosiniciais2005_2017.csv',sep = ',',encoding='latin-1')
# In[23]:
df_ideb_ini.columns
# In[24]:
df_ideb_ini[['Cod_Municipio_Completo', 'Ideb2017']].head()
# In[25]:
df_ideb_ini = df_ideb_ini.rename(columns={'Cod_Municipio_Completo': 'cod_municipio'})
df_ideb_ini_2015 = df_ideb_ini.copy()
df_ideb_ini_2017 = df_ideb_ini.copy()
# In[26]:
df_ideb_ini_2015 = df_ideb_ini_2015[['cod_municipio', 'Ideb2015']]
df_ideb_ini_2017 = df_ideb_ini_2017[['cod_municipio', 'Ideb2017']]
# In[27]:
df_ideb_ini_2015.head()
# In[28]:
df_ideb_ini_2017.head()
# In[32]:
df_ideb_ini_2015['cod_municipio'] = df_ideb_ini_2015.cod_municipio.astype(float)
df_ideb_ini_2017['cod_municipio'] = df_ideb_ini_2017.cod_municipio.astype(float)
# In[33]:
df_result_2015 = pd.merge(df_municipios_2015, df_ideb_ini_2015, how='inner', on='cod_municipio')
df_result_2017 = pd.merge(df_municipios_2017, df_ideb_ini_2017, how='inner', on='cod_municipio')
# In[36]:
df_result_2015 = df_result_2015.rename(columns={'Ideb2015': 'ideb'})
df_result_2017 = df_result_2017.rename(columns={'Ideb2017': 'ideb'})
# In[52]:
df_result_2015.sort_values(by=['ideb'], ascending=False).head(8)
# In[59]:
df_result_2017.sort_values(by=['ideb'], ascending=False).head(8)
# In[57]:
print(df_result_2015[df_result_2015['ideb'] != '-']['ideb'].max())
print(df_result_2015[df_result_2015['ideb'] != '-']['ideb'].min())
# In[58]:
print(df_result_2017[df_result_2017['ideb'] != '-']['ideb'].max())
print(df_result_2017[df_result_2017['ideb'] != '-']['ideb'].min())
# ## Linear correlations between all numeric variables and Ideb
# In[131]:
df_result_2015['ideb'] = df_result_2015['ideb'].replace('-',0)
df_result_2017['ideb'] = df_result_2017['ideb'].replace('-',0)
# In[132]:
df_result_2015['ideb'] = pd.to_numeric(df_result_2015['ideb'])
df_result_2017['ideb'] =
|
pd.to_numeric(df_result_2017['ideb'])
|
pandas.to_numeric
|
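A minimal sketch of `pandas.to_numeric` as used in this row, converting string Ideb scores (with the '-' placeholder already replaced by 0) into floats; the values are invented.

```python
import pandas as pd

# Object-dtype strings become a numeric dtype; errors="coerce" could be used
# instead if unparseable placeholders might remain.
ideb = pd.Series(["5.4", "6.1", "0"])
print(pd.to_numeric(ideb))
```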
from os import makedirs, path
from typing import Union
import pandas as pd
from .filetype import FileType
class DataReader(object):
def __init__(self):
"""
Stores all dataframes and provides methods to feed data into the dataframes.
"""
self.bus_lines = pd.DataFrame(columns=['id', 'name', 'color', 'card_only', 'category'])
self.bus_line_shapes = pd.DataFrame(columns=['id', 'bus_line_id', 'latitude', 'longitude'])
self.bus_stops = pd.DataFrame(columns=['number', 'name', 'type', 'latitude', 'longitude'])
self.itineraries = pd.DataFrame(columns=['id', 'bus_line_id', 'direction'])
self.itinerary_stops = pd.DataFrame(columns=['itinerary_id', 'sequence_number', 'stop_number'])
self.bus_lines_schedule_tables = pd.DataFrame(columns=['table_id', 'bus_line_id', 'bus_stop_id', 'day_type',
'time', 'adaptive'])
self.vehicles_schedule_tables = pd.DataFrame(columns=['table_id', 'bus_line_id', 'bus_stop_id', 'vehicle_id',
'time'])
self.itinerary_stops_extra = pd.DataFrame(columns=['itinerary_id', 'itinerary_name', 'bus_line_id',
'itinerary_stop_id', 'stop_name', 'stop_name_short',
'stop_name_abbr', 'bus_stop_id', 'sequence_number', 'type',
'special_stop'])
self.itinerary_distances = pd.DataFrame(columns=['itinerary_stop_id', 'itinerary_next_stop_id', 'distance_m'])
self.companies = pd.DataFrame(columns=['id', 'name'])
self.itinerary_stops_companies = pd.DataFrame(columns=['itinerary_stop_id', 'company_id'])
self.vehicle_log = pd.DataFrame(columns=['timestamp', 'vehicle_id', 'bus_line_id', 'latitude', 'longitude'])
self.points_of_interest = pd.DataFrame(columns=['name', 'description', 'category', 'latitude', 'longitude'])
def feed_data(self, file: Union[bytes, str], data_type: FileType):
"""
Feeds data into the reader's internal dataframes.
:param file: File which contains the data.
If a *bytes* object is provided, the object will be interpreted as the actual decompressed content of the file.
Alternatively, if a *str* object is provided, the object will be interpreted as the path to a file in the user's
operating system. Supports the same compression types supported by pandas.
:param data_type: Type of data. See :class:`FileType` for available types
"""
# User provided raw binary data or file path (both are supported by pandas)
if isinstance(file, bytes) or isinstance(file, str):
# pd.read_json can take a long time. Therefore, we only read the file if the data_type parameter is valid.
if data_type == FileType.LINHAS:
file_data = pd.read_json(file)
self._feed_linhas_json(file_data)
elif data_type == FileType.POIS:
file_data = pd.read_json(file)
self._feed_pois_json(file_data)
elif data_type == FileType.PONTOS_LINHA:
file_data = pd.read_json(file)
self._feed_pontos_linha_json(file_data)
elif data_type == FileType.SHAPE_LINHA:
file_data = pd.read_json(file)
self._feed_shape_linha_json(file_data)
elif data_type == FileType.TABELA_LINHA:
file_data = pd.read_json(file)
self._feed_tabela_linha_json(file_data)
elif data_type == FileType.TABELA_VEICULO:
file_data = pd.read_json(file)
self._feed_tabela_veiculo_json(file_data)
elif data_type == FileType.TRECHOS_ITINERARIOS:
file_data =
|
pd.read_json(file)
|
pandas.read_json
|
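A minimal sketch of `pandas.read_json`, the API named above; a small JSON string wrapped in `StringIO` stands in for the decompressed file content that `feed_data` receives, and the field names are hypothetical.

```python
import io
import pandas as pd

# read_json accepts a path, URL, or file-like object; each top-level record
# becomes one row of the resulting DataFrame.
payload = '[{"COD": "001", "NOME": "Linha A"}, {"COD": "002", "NOME": "Linha B"}]'
file_data = pd.read_json(io.StringIO(payload))
print(file_data)
```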
import streamlit as st
import pandas as pd
import requests
import os
from dotenv import load_dotenv
from nomics import Nomics
import json
import plotly
import yfinance as yf
import matplotlib.pyplot as plt
from PIL import Image
from fbprophet import Prophet
import hvplot as hv
import hvplot.pandas
import datetime as dt
from babel.numbers import format_currency
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from pandas.tseries.offsets import DateOffset
from sklearn.metrics import classification_report
from sklearn.ensemble import AdaBoostClassifier
import numpy as np
from tensorflow import keras
import plotly.express as px
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
# 2 PERFORM EXPLORATORY DATA ANALYSIS AND VISUALIZATION
# Function to normalize stock prices based on their initial price
def normalize(df):
x = df.copy()
for i in x.columns[1:]:
x[i] = x[i]/x[i][0]
return x
# Function to plot interactive plots using Plotly Express
print("Function to plot interactive plots using Plotly Express")
def interactive_plot(df, title):
fig = px.line(title = title)
for i in df.columns[1:]:
fig.add_scatter(x = df['Date'], y = df[i], name = i)
fig.show()
# Function to concatenate the date, stock price, and volume in one dataframe
def individual_stock(price_df, vol_df, name):
return pd.DataFrame({'Date': price_df['Date'], 'Close': price_df[name], 'Volume': vol_df[name]})
# Load .env environment variables
load_dotenv()
## Page expands to full width
st.set_page_config(layout='wide')
image = Image.open('images/crypto_image.jpg')
st.image(image,width = 600)
# Header for main and sidebar
st.title( "Crypto Signal Provider Web App")
st.markdown("""This app displays top 10 cryptocurrencies by market cap.""")
st.caption("NOTE: USDT & USDC are stablecoins pegged to the Dollar.")
st.sidebar.title("Crypto Signal Settings")
# Get nomics api key
nomics_api_key = os.getenv("NOMICS_API_KEY")
nomics_url = "https://api.nomics.com/v1/prices?key=" + nomics_api_key
nomics_currency_url = ("https://api.nomics.com/v1/currencies/ticker?key=" + nomics_api_key + "&interval=1d,30d&per-page=10&page=1")
# Read API in json
nomics_df = pd.read_json(nomics_currency_url)
# Create an empty DataFrame for top cryptocurrencies by market cap
top_cryptos_df = pd.DataFrame()
# Get rank, crytocurrency, price, price_date, market cap
top_cryptos_df = nomics_df[['rank', 'logo_url', 'name', 'currency', 'price', 'price_date', 'market_cap']]
# This code gives us the sidebar on streamlit for the different dashboards
option = st.sidebar.selectbox("Dashboards", ('Top 10 Cryptocurrencies by Market Cap', 'Time-Series Forecasting - FB Prophet', "LSTM Model", 'Keras Model', 'Machine Learning Classifier - AdaBoost', 'Support Vector Machines', 'Logistic Regression'))
# Rename column labels
columns=['Rank', 'Logo', 'Currency', 'Symbol', 'Price (USD)', 'Price Date', 'Market Cap']
top_cryptos_df.columns=columns
# Set rank as index
top_cryptos_df.set_index('Rank', inplace=True)
# Convert text data type to numerical data type
top_cryptos_df['Market Cap'] = top_cryptos_df['Market Cap'].astype('int')
# Convert Timestamp to date only
top_cryptos_df['Price Date']=pd.to_datetime(top_cryptos_df['Price Date']).dt.date
# Replace nomics ticker symbol with yfinance ticker symbol
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("LUNA","LUNA1")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("FTXTOKEN","FTT")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("UNI","UNI1")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("AXS2","AXS")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("SAND2","SAND")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("HARMONY","ONE1")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("HELIUM","HNT")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("GRT","GRT1")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("IOT","MIOTA")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("BLOCKSTACK","STX")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("FLOW2","FLOW")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("BITTORRENT","BTT")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("AMP2","AMP")
top_cryptos_df.loc[:,"Symbol"] = top_cryptos_df.loc[:,"Symbol"].str.replace("HOT","HOT1")
# Format Market Cap with commas to separate thousands
top_cryptos_df["Market Cap"] = top_cryptos_df.apply(lambda x: "{:,}".format(x['Market Cap']), axis=1)
# Formatting Price (USD) to currency
top_cryptos_df["Price (USD)"] = top_cryptos_df["Price (USD)"].apply(lambda x: format_currency(x, currency="USD", locale="en_US"))
# Convert your links to html tags
def path_to_image_html(Logo):
return '<img src="'+ Logo +'" width=30 >'
# Pulls list of cryptocurrencies from nomics and concatenates to work with Yahoo Finance
coin = top_cryptos_df['Symbol'] + "-USD"
# Creates a dropdown list of cryptocurrencies based on the top 10 list
dropdown = st.sidebar.multiselect("Select 1 coin to analyze", coin, default=['SOL-USD'])
# Create start date for analysis
start = st.sidebar.date_input('Start Date', value = pd.to_datetime('2020-01-01'))
# Create end date for analysis
end = st.sidebar.date_input('End Date', value = pd.to_datetime('today'))
# This option gives users the ability to view the current top 10 cryptocurrencies
if option == 'Top 10 Cryptocurrencies by Market Cap':
# Displays image in dataframe
top_cryptos_df.Logo = path_to_image_html(top_cryptos_df.Logo)
st.write(top_cryptos_df.to_html(escape=False), unsafe_allow_html=True)
st.text("")
# Line charts are created based on dropdown selection
if len(dropdown) > 0:
coin_choice = dropdown[0]
coin_list = yf.download(coin_choice,start,end)
coin_list['Ticker'] = coin_choice
coin_list.index=pd.to_datetime(coin_list.index).date
# Displays dataframe of selected cryptocurrency
st.subheader(f"Selected Crypto: {dropdown}")
st.dataframe(coin_list)
st.text("")
# Display coin_list into a chart
st.subheader(f'Selected Crypto Over Time: {dropdown}')
st.line_chart(coin_list['Adj Close'])
# This option gives users the ability to use FB Prophet
if option == 'Time-Series Forecasting - FB Prophet':
st.subheader("Time-Series Forecasting - FB Prophet")
# Line charts are created based on dropdown selection
if len(dropdown) > 0:
coin_choice = dropdown[0]
coin_list = yf.download(coin_choice,start,end)
coin_list['Ticker'] = coin_choice
# Reset the index so the date information is no longer the index
coin_list_df = coin_list.reset_index().filter(['Date','Adj Close'])
# Label the columns ds and y so that the syntax is recognized by Prophet
coin_list_df.columns = ['ds','y']
# Drop NaN values form the coin_list_df DataFrame
coin_list_df = coin_list_df.dropna()
# Call the Prophet function and store as an object
model_coin_trends = Prophet()
# Fit the time-series model
model_coin_trends.fit(coin_list_df)
# Create a future DataFrame to hold predictions
# Make the prediction go out as far as the user-selected number of days (default 30)
periods = st.number_input("Enter number of prediction days", 30)
future_coin_trends = model_coin_trends.make_future_dataframe(periods=int(periods), freq='D')
# Make the predictions for the trend data using the future_coin_trends DataFrame
forecast_coin_trends = model_coin_trends.predict(future_coin_trends)
# Plot the Prophet predictions for the Coin trends data
st.markdown(f"Predictions Based on {dropdown} Trends Data")
st.pyplot(model_coin_trends.plot(forecast_coin_trends));
# Set the index in the forecast_coin_trends DataFrame to the ds datetime column
forecast_coin_trends = forecast_coin_trends.set_index('ds')
# View only the yhat,yhat_lower and yhat_upper columns in the DataFrame
forecast_coin_trends_df = forecast_coin_trends[['yhat', 'yhat_lower', 'yhat_upper']]
# From the forecast_coin_trends_df DataFrame, rename columns
coin_columns=['Most Likely (Average) Forecast', 'Worst Case Prediction', 'Best Case Prediction']
forecast_coin_trends_df.columns=coin_columns
forecast_coin_trends_df.index=
|
pd.to_datetime(forecast_coin_trends_df.index)
|
pandas.to_datetime
|
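A minimal sketch of `pandas.to_datetime` converting a string index into a `DatetimeIndex`, as the completion does for the Prophet forecast frame; the dates and column are illustrative.

```python
import pandas as pd

# Replace an object index of date strings with a proper DatetimeIndex.
forecast = pd.DataFrame({"yhat": [1.00, 1.05]}, index=["2022-01-01", "2022-01-02"])
forecast.index = pd.to_datetime(forecast.index)
print(forecast.index)
```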
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 22 16:26:42 2018
@author: weiss
2018
4.22
1. Added the front-month contract function this_contract()
2. Adjusted some syntax to work with contract keys
3. Removed some redundant logic
4.23
1. Fixed an update bug
4.30
1. Fixed missing front-month contract data
5.7
1. Corrected the definition of the front-month contract
5.8
1. Corrected the data-merging logic
5.10
1. Fixed accidental deletions and errors in the data-merging code
2. Simplified and optimized parts of the code structure
5.11-5.12
1. Added the contract_indic option (None, 'this', 'this&next') and the related logic
2. Improved the logical structure of the code
5.18
1. Added a CFFEX crawler function and the corresponding signal function
7.16
1. Added backward-update support to the CFFEX crawler function
2. Added CFFEX top-5 and top-10 signals and improved the output format
"""
import time as t
import datetime
import pandas as pd
import numpy as np
import os
import warnings
warnings.filterwarnings("ignore")
try:
from urllib.request import urlopen, Request
from urllib.error import HTTPError
except ImportError:
from urllib2 import urlopen, Request
from urllib2 import HTTPError
import xml.etree.ElementTree as ET
#from WindPy import *
#w.start()
class oir(object):
def __init__(self,homePath, updatebegin = 20100101, endDate = \
int(t.strftime('%Y%m%d',t.localtime(t.time()))) ,params = [5,10,20]):
self.homePath = homePath + '/'
self.tradeDateList = pd.read_csv(self.homePath +'tradeDateList.csv')
self.params = params
self.suffix = '.h5'
self.beginDate = updatebegin
self.time = t.time()
if t.localtime(self.time).tm_hour < 15:
self.workDate = int(t.strftime('%Y%m%d',\
t.localtime(self.time - 24 * 60 *60)))
else:
self.workDate = int(t.strftime('%Y%m%d',t.localtime(self.time)))
# Determine the dates that need updating
if endDate < self.workDate:
self.workDate = endDate
def this_contract(self,windSymbol):
symbol = windSymbol.split('.')[0]
def change_spot(y_month):
weekday = datetime.datetime.strptime(y_month+'-01', "%Y-%m-%d").weekday()
if weekday <= 5:
return (14 + 5 - weekday)
else:
return (14 + 6)
def this_month(date):
day = np.int32(str(date)[6:8])
if day >= change_spot(str(date)[:4]+'-'+str(date)[4:6]):
month = np.int32(str(date)[4:6])
if month == 12:
return str(np.int32(str(date)[2:4])+1)+'01'
else:
return str(date)[2:4]+"%02d"%(month%12+1)
else:
return str(date)[2:6]
self.tradeDateList[symbol+'_contract'] = \
self.tradeDateList['tradeDate'].apply(lambda x : symbol+this_month(x))
self.tradeDateList.to_csv(self.homePath +'tradeDateList.csv', index=None)
def next_contract(self,windSymbol):
symbol = windSymbol.split('.')[0]
def _next(contract):
month = np.int32(contract[-2:])
if month%3 == 0:
return '0'
else:
return contract[:-2]+"%02d"%(month+(3-month%3))
self.tradeDateList[symbol+'_next'] = \
self.tradeDateList[symbol+'_contract'].apply(lambda x : _next(x))
self.tradeDateList.to_csv(self.homePath +'tradeDateList.csv', index=None)
def updateDataFromWind(self,windSymbol,contract_indic=None):
symbol = windSymbol.split('.')[0]
colNames = ['tradeDate','ranks','member_name','long_position',
'long_position_increase','short_position',
'short_position_increase','volume']
colNamesFinal = ['tradeDate','ranks','member_name','long_position',
'long_position_increase','short_position',
'short_position_increase','net_position',
'net_position_increase','volume','updatingTime']
colNamesCon = ['tradeDate','member_name','long_position',
'long_position_increase','short_position',
'short_position_increase','net_position',
'net_position_increase','volume','updatingTime']
# Function to fetch contract open-interest ranking (futureoir) data
def getFutureoirByDate(beginDate,endDate,windSymbol,windCode,position):
if windCode:
data = w.wset("futureoir","startdate="+beginDate+";enddate="+
endDate+";varity="+windSymbol+";wind_code=" +
windCode + ";order_by=" + position +
";ranks=all;field=date,ranks,member_name,long_position,long_position_increase,short_position,short_position_increase,vol")
else:
data = w.wset("futureoir","startdate="+beginDate+";enddate="+
endDate+";varity="+windSymbol+ ";order_by=" + position +
";ranks=all;field=date,ranks,member_name,long_position,long_position_increase,short_position,short_position_increase,vol")
if len(data.Data) == 0:
return pd.DataFrame([])
dataout = pd.DataFrame()
try:
for i in range(len(colNames)):
dataout[colNames[i]] = data.Data[i]
except:
print(windSymbol + " cannot get data on " + date + ' !')
return pd.DataFrame([])
dataout['tradeDate'] = dataout['tradeDate'].astype(str)
dataout['tradeDate'] = pd.to_datetime(dataout['tradeDate'],\
format='%Y-%m-%d',errors='ignore')
dataout['net_position'] = dataout['long_position'] -\
dataout['short_position']
dataout['net_position_increase'] = \
dataout['long_position_increase'] \
- dataout['short_position_increase']
return dataout
dateList = pd.DataFrame()
dateList['tradeDate'] = self.tradeDateList['tradeDate'].astype(str)
if contract_indic == 'this' or contract_indic == 'this&next':
self.this_contract(windSymbol)
dateList[symbol+'_contract'] = self.tradeDateList[symbol+'_contract']\
+'.'+ windSymbol.split('.')[1]
else:
dateList[symbol+'_contract'] = [None]*len(dateList)
for position in ['long','short']:
endDate = str(self.workDate)
# If data already exists, update from the day after the last update
status = 0
data = pd.DataFrame()
if os.path.exists(self.homePath + 'rank' + self.suffix):
try:
lastData = pd.read_hdf(self.homePath + 'rank' \
+ self.suffix, position +'_'+ windSymbol)
if len(lastData) == 0:
continue
lastDate = str(lastData['tradeDate'].iloc[-1])
lastDate = lastDate[0:4] + lastDate[5:7] + lastDate[8:10]
beginDate = dateList[dateList['tradeDate'] > lastDate]\
['tradeDate'].iloc[0]
beginDate = str(beginDate)
if beginDate > endDate:
continue
print(windSymbol+ '_' +position+ ', begin:' + beginDate +\
',end:' + endDate + ' updating...')
data = lastData
except:
status = 1
# No existing data
else:
status = 1
if status == 1:
beginDate = str(self.beginDate)
print(windSymbol+ '_' +position+', begin:'+\
beginDate+' getting...')
tempDateList = dateList[dateList['tradeDate'] >= beginDate]
tempDateList = tempDateList[tempDateList['tradeDate'] <=\
endDate].reset_index(drop=True)
for i in range(len(tempDateList)):
date = tempDateList['tradeDate'][i]
contract = tempDateList[symbol+'_contract'][i]
print(date)
if data.empty:
data = getFutureoirByDate(date,date,windSymbol,\
contract,position)
else:
temdata = getFutureoirByDate(date,date,windSymbol,\
contract,position)
data = pd.concat([data,temdata])
data = data.reset_index(drop=True)
data['updatingTime'] = t.strftime('%Y-%m-%d %H:%M:%S')
data = data[colNamesFinal]
data.to_hdf(self.homePath + 'rank'+self.suffix, position + '_' +\
windSymbol)
def x_or_y(df):
c = df.columns
choise = np.sign((df[c[0]]-df[c[1]]).apply(np.sign)+1/2)
result = pd.DataFrame()
result[c[0][:-2]] = (df[c[0]]*(1+choise)+df[c[1]]*(1-choise))/2
if len(c)>2:
result[c[2][:-2]] = (df[c[2]]*(1+choise)+df[c[3]]*(1-choise))/2
return result
# Generate continuous data
print('continuous data merging...')
long_p = pd.read_hdf(self.homePath + 'rank'+self.suffix, \
'long_' + windSymbol)
short_p = pd.read_hdf(self.homePath + 'rank'+self.suffix, \
'short_' + windSymbol)
con_position = pd.merge(long_p.drop(['ranks','updatingTime'],axis = 1)\
.fillna(0),short_p.drop(['ranks','updatingTime'],\
axis = 1).fillna(0),on=['member_name','tradeDate'],\
how = 'outer').fillna(0)
con_position = con_position.sort_values(\
by=['tradeDate','long_position_x'],ascending = [True,False])
con_p = pd.DataFrame(data = [],\
index = range(len(con_position)),columns = colNamesCon)
con_position = con_position.reset_index()
for z in ['long_position','short_position','net_position']:
print(z +' merging...')
p_df = con_position[[z+'_x',z+'_y',z+'_increase_x',z+'_increase_y']]
con_p[[z,z+'_increase']] = x_or_y(p_df)
p_df = con_position[['volume_x','volume_y']]
print('volume merging...')
con_p['volume'] = x_or_y(p_df)
con_p['tradeDate'] = con_position['tradeDate']
con_p['member_name'] = con_position['member_name']
con_p['updatingTime'] = t.strftime('%Y-%m-%d %H:%M:%S')
con_p=con_p[colNamesCon]
con_p.to_hdf(self.homePath+'rank'+self.suffix,windSymbol)
if contract_indic == 'this&next':
self.next_contract(windSymbol)
dateList[symbol+'_next'] = self.tradeDateList[symbol+'_next']\
+'.'+ windSymbol.split('.')[1]
for position in ['long','short']:
endDate = str(self.workDate)
# If data already exists, update from the day after the last update
status = 0
data = pd.DataFrame()
if os.path.exists(self.homePath + 'rank' + self.suffix):
try:
lastData = pd.read_hdf(self.homePath + 'rank' \
+ self.suffix, position +'_'+ windSymbol+'_next')
if len(lastData) == 0:
continue
lastDate = str(lastData['tradeDate'].iloc[-1])
lastDate = lastDate[0:4] + lastDate[5:7] + lastDate[8:10]
beginDate = dateList[dateList['tradeDate'] > lastDate]\
['tradeDate'].iloc[0]
beginDate = str(beginDate)
if beginDate > endDate:
continue
print(windSymbol+'_next'+ '_' +position+ ', begin:' +\
beginDate +',end:' + endDate + ' updating...')
data = lastData
except:
status = 1
# No existing data
else:
status = 1
if status == 1:
beginDate = str(self.beginDate)
print(windSymbol+'_next'+ '_' +position+', begin:'+\
beginDate+' getting...')
tempDateList = dateList[dateList['tradeDate'] >= beginDate]
tempDateList = tempDateList[tempDateList['tradeDate'] <=\
endDate].reset_index(drop=True)
for i in range(len(tempDateList)):
date = tempDateList['tradeDate'][i]
contract = tempDateList[symbol+'_next'][i]
if len(contract)>6:
print(date)
if data.empty:
data = getFutureoirByDate(date,date,windSymbol,\
contract,position)
else:
temdata = getFutureoirByDate(date,date,windSymbol,\
contract,position)
data = pd.concat([data,temdata])
data = data.reset_index(drop=True)
data['updatingTime'] = t.strftime('%Y-%m-%d %H:%M:%S')
data = data[colNamesFinal]
data.to_hdf(self.homePath + 'rank'+self.suffix, position + '_' +\
windSymbol+'_next')
# Generate continuous data
print('continuous data merging...')
long_p = pd.read_hdf(self.homePath + 'rank'+self.suffix, \
'long_' + windSymbol+'_next')
short_p = pd.read_hdf(self.homePath + 'rank'+self.suffix, \
'short_' + windSymbol+'_next')
con_position = pd.merge(long_p.drop(['ranks','updatingTime'],axis = 1)\
.fillna(0),short_p.drop(['ranks','updatingTime'],\
axis = 1).fillna(0),on=['member_name','tradeDate'],\
how = 'outer').fillna(0)
con_position = con_position.sort_values(\
by=['tradeDate','long_position_x'],ascending = [True,False])
con_p = pd.DataFrame(data = [],\
index = range(len(con_position)),columns = colNamesCon)
con_position = con_position.reset_index()
for z in ['long_position','short_position','net_position']:
print(z +'_next merging...')
p_df = con_position[[z+'_x',z+'_y',z+'_increase_x',z+'_increase_y']]
con_p[[z,z+'_increase']] = x_or_y(p_df)
p_df = con_position[['volume_x','volume_y']]
print('volume_next merging...')
con_p['volume'] = x_or_y(p_df)
con_p['tradeDate'] = con_position['tradeDate']
con_p['member_name'] = con_position['member_name']
con_p['updatingTime'] = t.strftime('%Y-%m-%d %H:%M:%S')
con_p=con_p[colNamesCon]
con_p.to_hdf(self.homePath+'rank'+self.suffix,windSymbol+'_next')
print (symbol + " futureoir source data update complete!")
return
def getSignal(self,windSymbol,contract_indic=None):
con_position = pd.read_hdf(self.homePath+'rank'+self.suffix,windSymbol)
# The params argument must stay at the default [5,10,20]; other values will break the code below
sum_position = pd.DataFrame(data = [],index = range(len(con_position)),\
columns = ['tradeDate']+['long_position_increase_5']+\
['long_position_increase_10']+['long_position_increase_20']+\
['short_position_increase_5']+['short_position_increase_10']+\
['short_position_increase_20'])
# Generate ranking data
j = 0
for i in range(len(con_position)):
if i == 0 or (con_position['tradeDate'][i] != \
con_position['tradeDate'][i-1]):
sum_position['tradeDate'][j] = con_position['tradeDate'][i]
for tem_i in range(len(self.params)):
sum_position['long_position_increase_'+str(self.params[tem_i])][j] = \
con_position['long_position_increase'][i+len(self.params)-1-tem_i]
sum_position['short_position_increase_'+str(self.params[tem_i])][j] = \
con_position['short_position_increase'][i+len(self.params)-1-tem_i]
j = j + 1
sum_position = sum_position.iloc[0:j]
if contract_indic == 'this&next':
con_position_next =
|
pd.read_hdf(self.homePath+'rank'+self.suffix,windSymbol+'_next')
|
pandas.read_hdf
|
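A minimal round-trip sketch of `pandas.read_hdf` as used in this row; it assumes the optional PyTables dependency is installed, and the file path, key, and values are hypothetical.

```python
import pandas as pd

# Write a small frame to an HDF5 store under a key, then read it back.
df = pd.DataFrame({"tradeDate": ["2018-05-18"], "long_position": [1200]})
df.to_hdf("rank.h5", "long_IF")        # key passed positionally, as in the source above
same = pd.read_hdf("rank.h5", "long_IF")
print(same)
```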
import json
import multiprocessing
import os
from itertools import repeat, product
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from exprimo import set_log_dir, log, PLOT_STYLE
from exprimo.optimize import optimize_with_config
sns.set(style=PLOT_STYLE)
LOG_DIR = os.path.expanduser('~/logs/e3_optimizer-comparison')
set_log_dir(LOG_DIR)
run_config = 'pipelined' # (1, 0, 0, 1)
NETWORK = ('resnet50', 'alexnet', 'inception')[run_config[0] if isinstance(run_config[0], int) else 0]
BATCHES = (1, 10)[run_config[1] if isinstance(run_config[1], int) else 0]
PIPELINE_BATCHES = (1, 2, 4)[run_config[2] if isinstance(run_config[2], int) else 0]
MEMORY_LIMITED = bool(run_config[3] if len(run_config) > 3 and isinstance(run_config[3], int) else 0)
REPEATS = 50
OPTIMIZERS = ('hc', 'sa', 'ga', 'me')
OPTIMIZER_NAMES = {
'hc': 'Hill Climbing',
'sa': 'Simulated Annealing',
'ga': 'Genetic Algorithm',
'me': 'MAP-elites',
}
NETWORK_NAMES = {
'resnet50': 'ResNet-50',
'alexnet': 'AlexNet',
'inception': 'Inception V3'
}
cmap = sns.cubehelix_palette(5, start=.5, rot=-.75, reverse=True)
def test_optimizer(c, r, log_dir):
c['log_dir'] = log_dir + f'/{r:03}'
_, t = optimize_with_config(config=c, verbose=False, set_log_dir=True)
return t
def run_optimizer_test(n_threads=-1):
if n_threads == -1:
n_threads = multiprocessing.cpu_count()
for optimizer in tqdm(OPTIMIZERS):
# log(f'Testing optimizer {optimizer}')
run_name = f'e3_{optimizer}-{NETWORK}{"-pipeline" if PIPELINE_BATCHES > 1 else ""}' \
f'{"-limited" if MEMORY_LIMITED else ""}'
config_path = f'configs/experiments/e3/{run_name}.json'
score_path = os.path.join(LOG_DIR, f'{run_name}_scores.csv')
with open(score_path, 'w') as f:
f.write('run, time\n')
with open(config_path) as f:
config = json.load(f)
config['optimizer_args']['verbose'] = False
config['optimizer_args']['batches'] = BATCHES
config['optimizer_args']['pipeline_batches'] = PIPELINE_BATCHES
log_dir = config['log_dir']
threaded_optimizer = config['optimizer'] in ('ga', 'genetic_algorithm', 'map-elites', 'map_elites')
if n_threads == 1 or threaded_optimizer:
for r in tqdm(range(REPEATS)):
time = test_optimizer(config, r, log_dir)
with open(score_path, 'a') as f:
f.write(f'{r},{time}\n')
else:
worker_pool = multiprocessing.Pool(n_threads)
times = worker_pool.starmap(test_optimizer, zip(repeat(config), (r for r in range(REPEATS)),
repeat(log_dir)))
worker_pool.close()
with open(score_path, 'a') as f:
for r, t in enumerate(times):
f.write(f'{r},{t}\n')
set_log_dir(LOG_DIR)
def plot_results():
all_results = pd.DataFrame()
# CREATE PLOT OF RESULTS
for optimizer in OPTIMIZERS:
run_name = f'e3_{optimizer}-{NETWORK}{"-pipeline" if PIPELINE_BATCHES > 1 else ""}' \
f'{"-limited" if MEMORY_LIMITED else ""}'
score_path = os.path.join(LOG_DIR, f'{run_name}_scores.csv')
scores = pd.read_csv(score_path, index_col=0, squeeze=True)
scores /= PIPELINE_BATCHES
all_results[OPTIMIZER_NAMES[optimizer]] = scores
plt.figure(figsize=(8, 8))
chart = sns.barplot(data=all_results, palette=cmap)
chart.set_xticklabels(
chart.get_xticklabels(),
rotation=45,
horizontalalignment='right',
)
plt.ylabel('Batch execution time (ms)')
plt.xlabel('Optimization algorithm')
plt.tight_layout()
plt.savefig(os.path.join(LOG_DIR, 'score_comparison.pdf'))
plt.show()
plt.close()
def plot_result_all_networks(test_type='normal'):
all_results = pd.DataFrame()
# CREATE PLOT OF RESULTS
for network in ('alexnet', 'resnet50', 'inception'):
for optimizer in OPTIMIZERS:
run_name = f'e3_{optimizer}-{network}{"-pipeline" if test_type == "pipelined" else ""}' \
f'{"-limited" if test_type == "limited" else ""}'
score_path = os.path.join(LOG_DIR, f'{run_name}_scores.csv')
scores =
|
pd.read_csv(score_path, index_col=0, squeeze=True)
|
pandas.read_csv
|
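A minimal sketch of `pandas.read_csv` for the per-run score files above; note that `squeeze=True` was removed in pandas 2.0, so `.squeeze("columns")` is the modern equivalent. The inline CSV is invented.

```python
import io
import pandas as pd

# Read the run/time table with the run number as the index, then squeeze the
# single remaining column down to a Series.
csv_text = "run,time\n0,123.4\n1,118.9\n"
scores = pd.read_csv(io.StringIO(csv_text), index_col=0).squeeze("columns")
print(scores)
```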
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: all
# notebook_metadata_filter: all,-language_info
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + trusted=true
import pandas as pd
import re
import matplotlib.pyplot as plt
import numpy as np
import ast
from lib.functions_data import *
# + trusted=true
import sys
from pathlib import Path
import os
cwd = os.getcwd()
parent = str(Path(cwd).parents[0])
sys.path.append(parent)
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -
# To avoid pulling the full dataset down each time we re-run the notebook, a CSV of the cut-down dataset is saved for easier reloading.
# + trusted=true
#Checking for the cut-down extract of the full dataset and creating it if it doesn't exist:
try:
dec = pd.read_csv(parent + '/data/dec_euctr_extract.csv').drop('Unnamed: 0', axis=1)
except FileNotFoundError:
cols = ['eudract_number_with_country', 'date_of_competent_authority_decision',
'clinical_trial_type', 'national_competent_authority', 'eudract_number',
'date_on_which_this_record_was_first_entered_in_the_eudract_data',
'trial_status', 'date_of_the_global_end_of_the_trial', 'trial_results']
#You can use this URL if you want to download the full raw data
data_link = 'https://www.dropbox.com/s/4qt0msiipyn7crm/euctr_euctr_dump-2020-12-03-095517.csv.zip?dl=1'
dec = pd.read_csv(data_link, compression='zip', low_memory=False, usecols=cols)
dec.to_csv(parent + '/data/dec_euctr_extract.csv')
#This is additional data we collect from the results page we need for certain analyses
results_info = pd.read_csv(parent + '/data/euctr_data_quality_results_scrape_dec_2020.csv')
results_info['trial_start_date'] = pd.to_datetime(results_info.trial_start_date)
# + trusted=true
#Quick look at the spread of trial statuses on the EUCTR
dec.trial_status.value_counts(dropna=False)
# -
# The "date_of_competent_authority_decision" field has 2 nonsensical year values in which the correct value can reasonably be derived from context. We fix those below:
#
# https://www.clinicaltrialsregister.eu/ctr-search/trial/2009-016759-22/DK
#
# https://www.clinicaltrialsregister.eu/ctr-search/trial/2006-006947-30/FR
# + trusted=true
ind = dec[dec.date_of_competent_authority_decision.notnull() &
dec.date_of_competent_authority_decision.str.contains('210')].index
ind = ind.to_list()[0]
ind_2 = dec[dec.date_of_competent_authority_decision.notnull() &
dec.date_of_competent_authority_decision.str.contains('2077')].index
ind_2 = ind_2.to_list()[0]
dec.at[ind, 'date_of_competent_authority_decision'] = '2010-06-18'
dec.at[ind_2, 'date_of_competent_authority_decision'] = '2007-04-05'
# + trusted=true
#get rid of all protocols from non EU/EEA countries
dec_filt = dec[dec.clinical_trial_type != 'Outside EU/EEA'].reset_index(drop=True)
#lets see how many that is:
print(len(dec) - len(dec_filt))
# + trusted=true
dec_ctas = dec[['eudract_number', 'eudract_number_with_country']].groupby('eudract_number').count()['eudract_number_with_country']
print(f'There are {len(dec_ctas)} registered trials and {dec_ctas.sum()} CTAs including non-EU/EEA CTAs')
# + trusted=true
decf_ctas = dec_filt[['eudract_number', 'eudract_number_with_country']].groupby('eudract_number').count()['eudract_number_with_country']
print(f'There are {len(decf_ctas)} registered trials and {decf_ctas.sum()} CTAs excluding non-EU/EEA CTAs')
# + trusted=true
#Making dates into dates and adding a column of just the "Year" for relevant dates
dec_filt['date_on_which_this_record_was_first_entered_in_the_eudract_data'] = pd.to_datetime(dec_filt['date_on_which_this_record_was_first_entered_in_the_eudract_data'])
dec_filt['entered_year'] = dec_filt['date_on_which_this_record_was_first_entered_in_the_eudract_data'].dt.year
dec_filt['date_of_competent_authority_decision'] = pd.to_datetime(dec_filt['date_of_competent_authority_decision'])
dec_filt['approved_year'] = dec_filt['date_of_competent_authority_decision'].dt.year
# + trusted=true
#Creating a copy of the original dataset we can mess with and
#renaming columns to better variable names
analysis_df = dec_filt.copy()
analysis_df.columns = ['eudract_number_country',
'approved_date',
'clinical_trial_type',
'nca',
'eudract_number',
'date_entered',
'trial_status',
'completion_date',
'trial_results',
'entered_year',
'approved_year']
#And update the NCA names to the more accurate recent names
analysis_df['nca'] = analysis_df['nca'].replace(nca_name_mapping)
# + trusted=true
#Table 1
analysis_df[['nca', 'eudract_number_country']].groupby('nca').count()
# + trusted=true
#You can reproduce the data on the earliest registered protocol for each country by running this cell
#with the appropriate country abbreviation. For example, to get the date for Italy:
print(earliest_record_check(analysis_df, 'Italy - AIFA'))
#Uncomment this to get the date for all countries at once
#for abrev in country_abrevs.keys():
# print(f'Country: {abrev}\nEarliest record date: {earliest_record_check(dec_filt, abrev)}')
# + trusted=true
#lastly this is helpful to have the country names in various orders
ordered_countries_original = list(dec_filt.national_competent_authority.value_counts().index)
ordered_countries_new = list(analysis_df.nca.value_counts().index)
# -
# # Registrations Over Time
# + trusted=true
reg_df = analysis_df[['eudract_number', 'nca', 'date_entered', 'entered_year', 'approved_date', 'approved_year']].reset_index(drop=True)
reg_df.head()
# + trusted=true
#Data for Overall Trend in Registrations
grouped_overall = reg_df[['eudract_number']].groupby([reg_df.entered_year]).count()
earliest_entered = reg_df[['eudract_number', 'date_entered']].groupby('eudract_number', as_index=False).min()
earliest_entered['year'] = earliest_entered.date_entered.dt.year
unique_trials = earliest_entered[['eudract_number', 'year']].groupby('year').count()
# + trusted=true
fig, ax = plt.subplots(figsize = (12,6), dpi=400)
grouped_overall[(grouped_overall.index > 2004) & (grouped_overall.index < 2020)].plot(ax=ax, legend=False, lw=2,
marker='.', markersize=12)
unique_trials[(unique_trials.index > 2004) & (unique_trials.index < 2020)].plot(ax=ax, legend=False, grid=True,
lw=2, marker='^', markersize=10)
ax.legend(['Total CTAs', 'Unique Trials'], bbox_to_anchor = (1, 1))
ax.set_xticks(range(2005, 2020))
ax.set_yticks(range(0,7500, 500))
plt.xlabel('CTA Entry Year', labelpad=10)
plt.ylabel('Records Entered')
plt.title('Trend in new CTA and Trial Registration on the EUCTR', pad=10)
#fig.savefig(parent + '/data/Figures/fig_s1.jpg', bbox_inches='tight', dpi=400)
fig.show()
# -
# Now we're interested in breaking the data down a bit further. Here we will break it down into quarters and years for more detailed analysis. We graph the years for which we have full EUCTR data (2005-2019).
# + trusted=true
grouped = reg_df[['eudract_number']].groupby([reg_df.nca, pd.PeriodIndex(reg_df.date_entered, freq='Q')]).count()
get_index = reg_df[['eudract_number']].groupby(pd.PeriodIndex(reg_df.date_entered, freq='Q')).count()
quarters = list(get_index.index)
# + trusted=true
grouped_2 = reg_df[['eudract_number']].groupby([reg_df.nca, pd.PeriodIndex(reg_df.date_entered, freq='Y')]).count()
get_index = reg_df[['eudract_number']].groupby(pd.PeriodIndex(reg_df.date_entered, freq='Y')).count()
years = list(get_index.index)
# + trusted=true
grouped_year = reg_df[['eudract_number']].groupby([reg_df.nca, reg_df.entered_year]).count()
grouped_year_2 = reg_df[['eudract_number']].groupby([reg_df.nca, reg_df.approved_year]).count()
# + trusted=true
fig, axes = plt.subplots(figsize = (20, 16), nrows=7, ncols=4, dpi=400)
#fig.suptitle("Cumulative trial registrations by NCA", y=1.02, fontsize=23)
fig.tight_layout()
pd.set_option('mode.chained_assignment', None)
for x, y in enumerate(fig.axes):
country = grouped.loc[ordered_countries_new[x]]
first_reporting_quarter = country[country.eudract_number > 0].index.min()
adjusted_data = zero_out_dict(country.to_dict()['eudract_number'], quarters)
data = pd.DataFrame({'eudract_number': adjusted_data})
x_ticks = data.index
#Get rid of leading zeros
data['eudract_number'] = np.where(data.index < first_reporting_quarter, np.nan, data.eudract_number)
consolidated = data[(data.index.year > 2004) & (data.index.year < 2020) & data.eudract_number.notnull()]
leading_zero_check = True
i=0
while leading_zero_check:
if consolidated.eudract_number[i] == 0:
consolidated.at[consolidated.index[i], 'eudract_number'] = np.nan
i+=1
else:
leading_zero_check = False
consolidated = consolidated[consolidated.eudract_number.notnull()]
cumulative = consolidated.cumsum()
# Plotting the country trend
cumulative.plot(ax=y, lw=4, sharex='col',legend=False)
#Plotting the reference line
cumulative.loc[[cumulative.index[0], cumulative.index[-1]]].plot(ax=y, legend=False, lw=2, style='--')
y.set_title(ordered_countries_new[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(zorder=0)
y.set_xlabel('')
y.set_xlim(x_ticks[0], x_ticks[-1])
pd.set_option('mode.chained_assignment', 'warn')
fig.text(-0.015, 0.5, 'Cumulative Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.02, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.legend(['Cumulative Count of New CTA Registrations', 'Stable Trend Line'],
loc='upper center', ncol=5, bbox_to_anchor = (-1.2, -.55), fontsize=15)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_1.jpg', bbox_inches='tight', dpi=400)
#fig.savefig(parent + '/data/Figures/fig_1.eps', bbox_inches='tight', dpi=400)
# + trusted=true
#Reduced Figure
fig, axes = plt.subplots(figsize = (20, 3), nrows=1, ncols=5, dpi=400)
#fig.suptitle("Cumulative trial registrations by NCA", y=1.02, fontsize=23)
fig.tight_layout()
included_countries = ['UK - MHRA', 'France - ANSM', 'Norway - NoMA', 'Romania - ANMDM', 'Italy - AIFA']
pd.set_option('mode.chained_assignment', None)
for x, y in enumerate(fig.axes):
country = grouped.loc[included_countries[x]]
first_reporting_quarter = country[country.eudract_number > 0].index.min()
adjusted_data = zero_out_dict(country.to_dict()['eudract_number'], quarters)
data = pd.DataFrame({'eudract_number': adjusted_data})
x_ticks = data.index
#Get rid of leading zeros
data['eudract_number'] = np.where(data.index < first_reporting_quarter, np.nan, data.eudract_number)
consolidated = data[(data.index.year > 2004) & (data.index.year < 2020) & data.eudract_number.notnull()]
leading_zero_check = True
i=0
while leading_zero_check:
if consolidated.eudract_number[i] == 0:
consolidated.at[consolidated.index[i], 'eudract_number'] = np.nan
i+=1
else:
leading_zero_check = False
consolidated = consolidated[consolidated.eudract_number.notnull()]
cumulative = consolidated.cumsum()
# Plotting the country trend
cumulative.plot(ax=y, lw=4, sharex='col',legend=False)
#Plotting the reference line
cumulative.loc[[cumulative.index[0], cumulative.index[-1]]].plot(ax=y, legend=False, lw=2, style='--')
y.set_title(included_countries[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(zorder=0)
y.set_xlabel('')
y.set_xlim(x_ticks[0], x_ticks[-1])
pd.set_option('mode.chained_assignment', 'warn')
fig.text(-0.015, 0.5, 'Cumulative Trial Count', ha='center', va='center', rotation='vertical', fontsize=15)
fig.text(.5, -0.04, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.legend(['Cumulative Count of Protocol Registrations', 'Stable Trend Line'],
loc='upper center', ncol=5, bbox_to_anchor = (-1.75, -.3), fontsize=15)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_1.jpg', bbox_inches='tight', dpi=400)
#fig.savefig(parent + '/data/Figures/fig_1.eps', bbox_inches='tight', dpi=400)
# + trusted=true
fig, axes = plt.subplots(figsize = (20, 16), nrows=7, ncols=4, dpi=400)
fig.suptitle("Trends in trial registrations by NCA by Year", y=1.02, fontsize=23)
fig.tight_layout()
pd.set_option('mode.chained_assignment', None)
for x, y in enumerate(fig.axes):
country = grouped_2.loc[ordered_countries_new[x]]
first_reporting_year = country[country.eudract_number > 0].index.min()
adjusted_data = zero_out_dict(country.to_dict()['eudract_number'], years)
data = pd.DataFrame({'eudract_number': adjusted_data})
x_ticks = data.index
#Get rid of leading zeros
data['eudract_number'] = np.where(data.index < first_reporting_year, np.nan, data.eudract_number)
consolidated = data[(data.index.year > 2004) & (data.index.year < 2020) & data.eudract_number.notnull()]
leading_zero_check = True
i=0
while leading_zero_check:
if consolidated.eudract_number[i] == 0:
consolidated.at[consolidated.index[i], 'eudract_number'] = np.nan
i+=1
else:
leading_zero_check = False
consolidated = consolidated[consolidated.eudract_number.notnull()]
consolidated.plot(ax=y, lw=2, sharex='col',legend=False)
#if ordered_countries_original[x] == 'Slovenia - JAZMP':
# y.set_yticks(range(0,16,3))
y.set_title(ordered_countries_new[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(zorder=0)
y.set_xlabel('')
y.set_xlim(x_ticks[0], x_ticks[-1])
y.set_ylim(ymin=0)
pd.set_option('mode.chained_assignment', 'warn')
fig.text(-0.015, 0.5, 'Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.02, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_s2.jpg', bbox_inches='tight', dpi=400)
# + trusted=true
fig, axes = plt.subplots(figsize = (20, 3), nrows=1, ncols=5, dpi=400)
#fig.suptitle("Trends in trial registrations by NCA by Year", y=1.02, fontsize=23)
fig.tight_layout()
included_countries = ['UK - MHRA', 'France - ANSM', 'Norway - NoMA', 'Romania - ANMDM', 'Italy - AIFA']
pd.set_option('mode.chained_assignment', None)
for x, y in enumerate(fig.axes):
country = grouped_2.loc[included_countries[x]]
first_reporting_year = country[country.eudract_number > 0].index.min()
adjusted_data = zero_out_dict(country.to_dict()['eudract_number'], years)
data = pd.DataFrame({'eudract_number': adjusted_data})
x_ticks = data.index
#Get rid of leading zeros
data['eudract_number'] = np.where(data.index < first_reporting_year, np.nan, data.eudract_number)
consolidated = data[(data.index.year > 2004) & (data.index.year < 2020) & data.eudract_number.notnull()]
leading_zero_check = True
i=0
while leading_zero_check:
if consolidated.eudract_number[i] == 0:
consolidated.at[consolidated.index[i], 'eudract_number'] = np.nan
i+=1
else:
leading_zero_check = False
consolidated = consolidated[consolidated.eudract_number.notnull()]
consolidated.plot(ax=y, lw=2, sharex='col',legend=False)
#if ordered_countries_original[x] == 'Slovenia - JAZMP':
# y.set_yticks(range(0,16,3))
y.set_title(included_countries[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(zorder=0)
y.set_xlabel('')
y.set_xlim(x_ticks[0], x_ticks[-1])
y.set_ylim(ymin=0)
pd.set_option('mode.chained_assignment', 'warn')
fig.text(-0.015, 0.5, 'Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.02, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_s2.jpg', bbox_inches='tight', dpi=400)
# -
# For comparison, here are the raw trends in new registrations by quarter.
# + trusted=true
fig, axes = plt.subplots(figsize = (20, 16), nrows=7, ncols=4, dpi=400)
fig.suptitle("Trends in trial registrations by NCA by Quarter", y=1.02, fontsize=23)
fig.tight_layout()
pd.set_option('mode.chained_assignment', None)
for x, y in enumerate(fig.axes):
country = grouped.loc[ordered_countries_new[x]]
first_reporting_quarter = country[country.eudract_number > 0].index.min()
adjusted_data = zero_out_dict(country.to_dict()['eudract_number'], quarters)
data = pd.DataFrame({'eudract_number': adjusted_data})
x_ticks = data.index
#Get rid of leading zeros
data['eudract_number'] = np.where(data.index < first_reporting_quarter, np.nan, data.eudract_number)
consolidated = data[(data.index.year > 2004) & (data.index.year < 2020) & data.eudract_number.notnull()]
leading_zero_check = True
i=0
while leading_zero_check:
if consolidated.eudract_number[i] == 0:
consolidated.at[consolidated.index[i], 'eudract_number'] = np.nan
i+=1
else:
leading_zero_check = False
consolidated = consolidated[consolidated.eudract_number.notnull()]
consolidated.plot(ax=y, lw=2, sharex='col',legend=False)
if ordered_countries_original[x] == 'Slovenia - JAZMP':
y.set_yticks(range(0,16,3))
y.set_title(ordered_countries_new[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(zorder=0)
y.set_xlabel('')
y.set_xlim(x_ticks[0], x_ticks[-1])
pd.set_option('mode.chained_assignment', 'warn')
fig.text(-0.015, 0.5, 'Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.02, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_s2.jpg', bbox_inches='tight', dpi=400)
# -
# Lastly, we can sense-check these dates by comparing the year the CTA was entered to the year the NCA gave approval. When we graph them on top of each other, we can see that the overall trends align very well, though approvals are slightly less susceptible to large jumps.
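# + trusted=true
#A quick tabular version of the same sense check (an illustrative sketch, not part of
#the original analysis): yearly CTA counts by entry date and by approval date side by side.
sense_check = pd.concat(
    [reg_df.groupby('entered_year').eudract_number.count().rename('entered'),
     reg_df.groupby('approved_year').eudract_number.count().rename('approved')],
    axis=1)
sense_check.head()
# -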
# + trusted=true
grouped_year = reg_df[['eudract_number']].groupby([reg_df.nca, reg_df.entered_year]).count()
grouped_year_2 = reg_df[['eudract_number']].groupby([reg_df.nca, reg_df.approved_year]).count()
# -
# Here is the trend by year, not quarter, but we do not include this graph in the paper as it is duplicated in the next graph.
# + trusted=true
fig, axes = plt.subplots(figsize = (20, 16), nrows=7, ncols=4, dpi=300)
fig.suptitle("Trends in trial registrations by NCA", y=1.02, fontsize=23)
fig.tight_layout()
pd.set_option('mode.chained_assignment', None)
for x, y in enumerate(fig.axes):
country = grouped_year.loc[ordered_countries_new[x]]
first_reporting_quarter = country[country.eudract_number > 0].index.min()
adjusted_data = zero_out_dict(country.to_dict()['eudract_number'], range(2004, 2020))
data = pd.DataFrame({'eudract_number': adjusted_data})
x_ticks = data.index
#Get rid of leading zeros
data['eudract_number'] = np.where(data.index < first_reporting_quarter, np.nan, data.eudract_number)
consolidated = data[(data.index > 2004) & (data.index < 2020) & data.eudract_number.notnull()]
leading_zero_check = True
i=0
while leading_zero_check:
if consolidated.eudract_number.values[i] == 0:
consolidated.at[consolidated.index[i], 'eudract_number'] = np.nan
i+=1
else:
leading_zero_check = False
consolidated = consolidated[consolidated.eudract_number.notnull()]
consolidated.plot(ax=y, lw=2, sharex='col',legend=False)
y.set_title(ordered_countries_new[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(zorder=0)
y.set_xlabel('')
y.set_xlim(x_ticks[0], x_ticks[-1])
pd.set_option('mode.chained_assignment', 'warn')
fig.text(-0.015, 0.5, 'Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.02, 'Record Entry Year', ha='center', va='center', fontsize=20)
plt.legend(['First Entered Date', 'NCA Approval Date'],
loc='upper center', ncol=5, bbox_to_anchor = (-1.2, -.5), fontsize=15)
plt.show()
# + trusted=true
fig, axes = plt.subplots(figsize = (20, 16), nrows=7, ncols=4, dpi=400)
fig.suptitle("Trends in trial registrations by NCA", y=1.02, fontsize=23)
fig.tight_layout()
pd.set_option('mode.chained_assignment', None)
for x, y in enumerate(fig.axes):
country = grouped_year.loc[ordered_countries_new[x]]
country_2 = grouped_year_2.loc[ordered_countries_new[x]]
first_reporting_quarter = country[country.eudract_number > 0].index.min()
adjusted_data = zero_out_dict(country.to_dict()['eudract_number'], range(2004, 2020))
adjusted_data_2 = zero_out_dict(country_2.to_dict()['eudract_number'], range(2004, 2020))
data = pd.DataFrame({'eudract_number': adjusted_data})
data_2 = pd.DataFrame({'eudract_number': adjusted_data_2})
x_ticks = data.index
#Get rid of leading zeros
data['eudract_number'] = np.where(data.index < first_reporting_quarter, np.nan, data.eudract_number)
data_2['eudract_number'] = np.where(data_2.index < first_reporting_quarter, np.nan, data_2.eudract_number)
consolidated = data[(data.index > 2004) & (data.index < 2020) & data.eudract_number.notnull()]
consolidated_2 = data_2[(data_2.index > 2004) & (data_2.index < 2020) & data_2.eudract_number.notnull()]
    leading_zero_check = True
    i=0
    while leading_zero_check:
        if consolidated.eudract_number.values[i] == 0:
            consolidated.at[consolidated.index[i], 'eudract_number'] = np.nan
            i+=1
        else:
            leading_zero_check = False
    #Reset the flag and counter so the second loop actually strips leading zeros from
    #the approval-year series (otherwise it never runs because the flag is already False)
    leading_zero_check = True
    i=0
    while leading_zero_check:
        if consolidated_2.eudract_number.values[i] == 0:
            consolidated_2.at[consolidated_2.index[i], 'eudract_number'] = np.nan
            i+=1
        else:
            leading_zero_check = False
consolidated = consolidated[consolidated.eudract_number.notnull()]
consolidated_2 = consolidated_2[consolidated_2.eudract_number.notnull()]
consolidated.plot(ax=y, lw=2, sharex='col',legend=False)
consolidated_2.plot(ax=y, lw=2, sharex='col',legend=False)
y.set_title(ordered_countries_new[x], pad=6, fontsize=16)
y.set_axisbelow(True)
y.grid(zorder=0)
y.set_xlabel('')
y.set_xlim(x_ticks[0], x_ticks[-1])
fig.text(-0.015, 0.5, 'Trial Count', ha='center', va='center', rotation='vertical', fontsize=20)
fig.text(.5, -0.02, 'Record Entry Year', ha='center', va='center', fontsize=20)
pd.set_option('mode.chained_assignment', 'warn')
plt.legend(['First Entered Date', 'NCA Approval Date'],
loc='upper center', ncol=5, bbox_to_anchor = (-1.2, -.5), fontsize=15)
plt.show()
#fig.savefig(parent + '/data/Figures/fig_s3.jpg', bbox_inches='tight', dpi=400)
# -
# # Cross-checking countries listed in results with public CTAs
# + trusted=true
results_info_filt = results_info[results_info.recruitment_countries.notnull()].reset_index(drop=True)
# + trusted=true
protocols = results_info_filt.trial_countries.to_list()
results_countries = results_info_filt.recruitment_countries.to_list()
start_date = results_info_filt.trial_start_date.to_list()
trial_ids = results_info_filt.trial_id.to_list()
zipped_cats = zip(trial_ids, protocols, results_countries, start_date)
results_list = compare_enrollment_registration(zipped_cats)
missing_protocols = pd.DataFrame(results_list)
missing_protocols['total_missing'] = missing_protocols.unaccounted.apply(len)
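# + trusted=true
#For reference: compare_enrollment_registration is defined earlier in the notebook. Based
#on how results_list is used below, each element is assumed to be a dict with (at least)
#an 'accounted' key (countries with a matching public CTA) and an 'unaccounted' key
#(countries listed in the results but without a public CTA).
# -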
# + trusted=true
acct = missing_protocols.accounted.to_list()
unacct = missing_protocols.unaccounted.to_list()
# + trusted=true
accounted_count = {}
unaccounted_count = {}
for ac, un in zip(acct, unacct):
if ac:
for a in ac:
accounted_count[a] = accounted_count.get(a, 0) + 1
if un:
for u in un:
unaccounted_count[u] = unaccounted_count.get(u, 0) + 1
# + trusted=true
accounted_series = pd.Series(accounted_count)
unaccounted_series = pd.Series(unaccounted_count)
import abc
import logging
from abc import ABC
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
class ChartIndicatorException(Exception):
pass
class PlottingExeception(ChartIndicatorException):
pass
class TraceCandlesException(ChartIndicatorException):
pass
class ErrorImplementingIndicator(ChartIndicatorException):
pass
log = logging.getLogger("candlestick-chart-indicator")
class CandlestickChartIndicator(ABC):
"""
Base class responsible for the implementation of candlestick graphics, and their data.
detail:
This class implements a "Chain of Responsibility" design pattern.
https://en.wikipedia.org/wiki/Chain-of-responsibility_pattern.
"""
@abc.abstractmethod
    def indicate(self):
pass
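# A hedged usage sketch (not part of the original module): the indicators form a
# chain by threading the same trace list through each .indicate() call, and the
# accumulated traces can then be handed to a plotly Figure. `data_frame` is
# assumed to be a DataFrame with a 'close' column.
def _example_indicator_chain(data_frame):
    data = []
    data = MA().indicate(data_frame, data, days=21)
    data = EMA().indicate(data_frame, data, days=21)
    return go.Figure(data=data)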
class MA(CandlestickChartIndicator):
"""
    Class responsible for implementing a simple Moving Average that smooths
    out price fluctuations, helping to identify trends.
"""
def indicate(self, data_frame, data=[], **kwargs):
try:
ma = data_frame['close'].rolling(window=kwargs.get("days", 21)).mean()
            trace_avg = go.Scatter(x=ma.index, y=ma, name='MA', line=dict(color='#BEBECF'), opacity=0.8)
data.append(trace_avg)
except (ErrorImplementingIndicator, TypeError) as e:
log.warning(f"Error implementing 'ma' indicator: {e}")
finally:
return data
class EMA(CandlestickChartIndicator):
"""
Class responsible for implementing an exponential moving average
    EMA_today = Price_today * K + EMA_yesterday * (1 - K), where K = 2 / (N + 1)
"""
def indicate(self, data_frame, data=[], **kwargs):
try:
k = (2 / (kwargs.get("days", 21) + 1))
ma = data_frame['close'].rolling(window=kwargs.get("days", 21)).mean()
ema_data = pd.DataFrame(index=ma.index)
ema_data['PRICE'] = data_frame['close']
ema_data['MA'] = ma
ema_data['EMA'] = np.NaN
ema_data['EMA'][0] = ema_data['MA'][1]
for i in range(1, len(ema_data)):
ema_data['EMA'][i] = (ema_data['PRICE'][i] * k) + ((1-k) * ema_data['EMA'][i-1])
trace_ema = go.Scatter(
                x=ema_data.index, y=ema_data['EMA'], name='EMA', line=dict(color='#17BECF'), opacity=0.8)
data.append(trace_ema)
except (ErrorImplementingIndicator, TypeError) as e:
log.warning(f"Error implementing 'ema' indicator: {e}")
finally:
return data
class CrossingMovingAvarege(CandlestickChartIndicator):
"""
Class responsible for implementing the crossing of moving averages that consists of indicating
buying and selling an asset whenever the averages cross.
detail:
        This indicator consists of 2 sets of simple moving averages: one known as the
        short average and another known as the long average. Whenever the short crosses
        below the long we sell, and whenever the long crosses above the short we buy.
"""
def indicate(self, data_frame, data=[], **kwargs):
try:
short_rolling = data_frame['close'].rolling(window=kwargs.get("short_rolling", 9)).mean()
long_rolling = data_frame['close'].rolling(window=kwargs.get("long_rolling", 21)).mean()
trace_short_rolling = go.Scatter(
x=short_rolling.index, y=short_rolling, name='SHORT', line=dict(color='#17BECF'), opacity=0.5)
trace_long_rolling = go.Scatter(
x=long_rolling.index, y=long_rolling, name='LONG', line=dict(color='#17becf'), opacity=0.5)
data.append(trace_short_rolling)
data.append(trace_long_rolling)
except (ErrorImplementingIndicator, TypeError) as e:
log.warning(f"Error implementing 'crossing moving avarege' indicator: {e}")
finally:
return data
class MACD(CandlestickChartIndicator):
"""
Class responsible for implementing a MACD -> Convergence - Divergence
of the moving average, which uses 3 exponential moving averages.
"""
    def indicate(self, data_frame, data=[], **kwargs):
try:
high_average = data_frame['max'].rolling(window=kwargs.get("high", 8)).mean()
low_average = data_frame['min'].rolling(window=kwargs.get("low", 8)).mean()
hilo_high = pd.DataFrame(index=data_frame.index)
            hilo_low = pd.DataFrame(index=data_frame.index)
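            # NOTE: the original snippet is truncated at this point; the lines below are a
            # hedged completion following the pattern of the other indicator classes. The
            # trace construction (columns and names) is an assumption, not the original code.
            hilo_high['max'] = high_average
            hilo_low['min'] = low_average
            trace_high = go.Scatter(
                x=hilo_high.index, y=hilo_high['max'], name='HILO HIGH', line=dict(color='#17BECF'), opacity=0.5)
            trace_low = go.Scatter(
                x=hilo_low.index, y=hilo_low['min'], name='HILO LOW', line=dict(color='#B22222'), opacity=0.5)
            data.append(trace_high)
            data.append(trace_low)
        except (ErrorImplementingIndicator, TypeError) as e:
            log.warning(f"Error implementing 'macd' indicator: {e}")
        finally:
            return data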
import time
import argparse
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from pathlib import Path
import context
from mhealth.utils.plotter_helper import save_figure
from mhealth.utils.commons import create_progress_bar
# Used if command-line option --parameters is not provided.
DEFAULT_PARAMETERS = ["Temperatur", "Herzfrequenz", "Atemfrequenz"]
# Data sources included in HF-AF_25052021.csv.
VALIDATION_DATA_SOURCES = ["WELCHALLYN_MONITOR", "PHILIPS_GATEWAY"]
# Half-ranges relevant for the validation: x +/- delta
DELTAS = {
"Atemfrequenz": 3, # ±3bpm
"Herzfrequenz": 10, # ±10bpm
"Temperatur": 0.5 # ±0.5°C
}
# Half-range for the timestamp delta, in minutes.
DELTA_TS = 2.5 # ±2.5min
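# A minimal sketch (not part of the original script) of how the half-ranges above are
# intended to be used: a device reading matches a reference reading if both the value
# difference and the timestamp difference fall within the respective tolerances.
def is_within_tolerance(parameter, device_value, reference_value, device_ts, reference_ts):
    value_ok = abs(device_value - reference_value) <= DELTAS[parameter]
    time_ok = abs((device_ts - reference_ts).total_seconds()) / 60.0 <= DELTA_TS
    return value_ok and time_ok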
# Devices are identified by the bed number they are used with.
# In case of device breakdown (or other problems), some devices
# were replaced by a device of another room. The below lookup
# specifies which bed ids (devices) must be renamed, as well
# as the time range within which the lookup applies.
DEVICE_REPLACEMENT_LOOKUP = {
# Alias True From To
"2653F" : ("2655F", "2021-05-14 12:00:00+02:00", None),
"2652F" : ("2656FL", "2021-05-18 00:00:00+02:00", None),
"2661TL" : ("2661FL", "2021-05-20 00:00:00+02:00", None),
"2664T" : ("2664F", "2021-05-12 00:00:00+02:00", None),
"2665T" : ("2665F", None, "2021-05-19 10:30:00+02:00"),
}
# Expected value ranges per vital parameter.
VALUE_RANGES = {
"Atemfrequenz": [0, 35],
"Herzfrequenz": [30, 130],
"Temperatur": [35, 40],
}
BIN_WIDTHS = {
"Atemfrequenz": 0.5,
"Herzfrequenz": 1,
"Temperatur": 0.01,
}
BIN_WIDTHS_VALID = {
"Atemfrequenz": 1,
"Herzfrequenz": 2,
"Temperatur": 0.1,
}
def tic():
return time.time()
def toc(label, start):
diff = time.time()-start
print(label + (": %.3f" % diff))
def check_dir(path):
if not path.is_dir():
msg = "Requested folder does not exist: %s"
raise FileNotFoundError(msg % path)
def ensure_dir(path, exist_ok=True):
path = Path(path)
if not path.is_dir():
path.mkdir(parents=True, exist_ok=exist_ok)
return path.is_dir()
def apply_replacement_lookup(df):
print("Applying device replacements...")
def dt_to_str(dt):
return "--" if dt is None else dt.strftime("%m.%d.%y %H:%M")
for id_alias, replace_data in DEVICE_REPLACEMENT_LOOKUP.items():
id_true, repl_start, repl_stop = replace_data
repl_start = pd.to_datetime(repl_start)
        repl_stop = pd.to_datetime(repl_stop)
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import Timestamp
def create_dataframe(tuple_data):
"""Create pandas df from tuple data with a header."""
return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0])
### REUSABLE FIXTURES --------------------------------------------------------
@pytest.fixture()
def indices_3years():
"""Three indices over 3 years."""
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0, 100.0, 100.0),
(Timestamp('2012-02-01 00:00:00'), 101.239553643, 96.60525323799999, 97.776838217),
(Timestamp('2012-03-01 00:00:00'), 102.03030533, 101.450821724, 96.59101862),
(Timestamp('2012-04-01 00:00:00'), 104.432402661, 98.000263617, 94.491213369),
(Timestamp('2012-05-01 00:00:00'), 105.122830333, 95.946873831, 93.731891785),
(Timestamp('2012-06-01 00:00:00'), 103.976692567, 97.45914568100001, 90.131064035),
(Timestamp('2012-07-01 00:00:00'), 106.56768678200001, 94.788761174, 94.53487522),
(Timestamp('2012-08-01 00:00:00'), 106.652151036, 98.478217946, 92.56165627700001),
(Timestamp('2012-09-01 00:00:00'), 108.97290730799999, 99.986521241, 89.647230903),
(Timestamp('2012-10-01 00:00:00'), 106.20124385700001, 99.237117891, 92.27819603799999),
(Timestamp('2012-11-01 00:00:00'), 104.11913898700001, 100.993436318, 95.758970985),
(Timestamp('2012-12-01 00:00:00'), 107.76600978, 99.60424011299999, 95.697091336),
(Timestamp('2013-01-01 00:00:00'), 98.74350698299999, 100.357120656, 100.24073830200001),
(Timestamp('2013-02-01 00:00:00'), 100.46305431100001, 99.98213513200001, 99.499007278),
(Timestamp('2013-03-01 00:00:00'), 101.943121499, 102.034291064, 96.043392231),
(Timestamp('2013-04-01 00:00:00'), 99.358987741, 106.513055039, 97.332012817),
(Timestamp('2013-05-01 00:00:00'), 97.128074038, 106.132168479, 96.799806436),
(Timestamp('2013-06-01 00:00:00'), 94.42944162, 106.615734964, 93.72086654600001),
(Timestamp('2013-07-01 00:00:00'), 94.872365481, 103.069773446, 94.490515359),
(Timestamp('2013-08-01 00:00:00'), 98.239415397, 105.458081805, 93.57271149299999),
(Timestamp('2013-09-01 00:00:00'), 100.36774827100001, 106.144579258, 90.314524375),
(Timestamp('2013-10-01 00:00:00'), 100.660205114, 101.844838294, 88.35136848399999),
(Timestamp('2013-11-01 00:00:00'), 101.33948384799999, 100.592230114, 93.02874928899999),
(Timestamp('2013-12-01 00:00:00'), 101.74876982299999, 102.709038791, 93.38277933200001),
(Timestamp('2014-01-01 00:00:00'), 101.73439491, 99.579700011, 104.755837919),
(Timestamp('2014-02-01 00:00:00'), 100.247760523, 100.76732961, 100.197855834),
(Timestamp('2014-03-01 00:00:00'), 102.82080245600001, 99.763171909, 100.252537549),
(Timestamp('2014-04-01 00:00:00'), 104.469889684, 96.207920184, 98.719797067),
(Timestamp('2014-05-01 00:00:00'), 105.268899775, 99.357641836, 99.99786671),
(Timestamp('2014-06-01 00:00:00'), 107.41649204299999, 100.844974811, 96.463821506),
(Timestamp('2014-07-01 00:00:00'), 110.146087435, 102.01075029799999, 94.332755083),
(Timestamp('2014-08-01 00:00:00'), 109.17068484100001, 101.562418115, 91.15410351700001),
(Timestamp('2014-09-01 00:00:00'), 109.872892919, 101.471759564, 90.502291475),
(Timestamp('2014-10-01 00:00:00'), 108.508436998, 98.801947543, 93.97423224399999),
(Timestamp('2014-11-01 00:00:00'), 109.91248118, 97.730489099, 90.50638234200001),
(Timestamp('2014-12-01 00:00:00'), 111.19756703600001, 99.734704555, 90.470418612),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years_start_feb(weights_3years):
return weights_3years.shift(1, freq='MS')
@pytest.fixture()
def weight_shares_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 0.489537029, 0.21362007800000002, 0.29684289199999997),
(Timestamp('2013-01-01 00:00:00'), 0.535477885, 0.147572705, 0.31694941),
(Timestamp('2014-01-01 00:00:00'), 0.512055362, 0.1940439, 0.293900738),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_shares_start_feb(weight_shares_3years):
return weight_shares_3years.shift(1, freq='MS')
@pytest.fixture()
def indices_1year(indices_3years):
return indices_3years.loc['2012', :]
@pytest.fixture()
def weights_1year(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_6months(indices_3years):
return indices_3years.loc['2012-Jan':'2012-Jun', :]
@pytest.fixture()
def weights_6months(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_transposed(indices_3years):
return indices_3years.T
@pytest.fixture()
def weights_transposed(weights_3years):
return weights_3years.T
@pytest.fixture()
def indices_missing(indices_3years):
indices_missing = indices_3years.copy()
change_to_nans = [
('2012-06', 2),
('2012-12', 3),
('2013-10', 2),
('2014-07', 1),
]
for sl in change_to_nans:
indices_missing.loc[sl] = np.nan
return indices_missing
@pytest.fixture()
def indices_missing_transposed(indices_missing):
return indices_missing.T
### AGGREGATION FIXTURES -----------------------------------------------------
@pytest.fixture()
def aggregate_outcome_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.47443727),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 102.4399192),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.93374613),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 103.9199248),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
@pytest.fixture()
def aggregate_outcome_1year(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012']
@pytest.fixture()
def aggregate_outcome_6months(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012-Jan':'2012-Jun']
@pytest.fixture()
def aggregate_outcome_missing():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.75024119),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 105.2864531),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.08353503),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 97.38610996),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
### WEIGHTS FIXTURES ------------------------------------------------------
@pytest.fixture()
def reindex_weights_to_indices_outcome_start_jan():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-02-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-03-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-04-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-05-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-06-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-07-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-08-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-09-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-10-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-11-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2012-12-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-02-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-03-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-04-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-05-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-06-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-07-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-08-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-09-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-10-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-11-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2013-12-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-02-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-03-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
(Timestamp('2014-04-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
            (Timestamp('2014-05-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
import pandas as pd
import numpy as np
import os
from IPython import embed
file1 = pd.read_csv("./csvs/efficientnet-b3-model_512-_adam_aug_confidence.csv",header=None)
#variant using the CSV package
import csv
import pandas as pd
with open('test.csv', 'r') as f:
r = csv.reader(f, delimiter=',')
for row in r: #loop
for i in range(0, len(row)):
            if len(row) == 19: #I want all the rows across all the columns - 19 columns
print(row[i]+ ",")
# variant using the Pandas package
data = pd.read_csv('test.csv', delimiter=',', header=None, nrows=120)
print(data.head()) #head() - only the first 5 rows
#tuples
cuts = ("slim", "regular", "large")
cuts = cuts + ('extraslim','extralarge', 'traditional') #concatenating the tuple with several more elements
print(cuts)
print(cuts.count('slim'))
print(cuts.index('regular'))
#lists
listaMea = list(cuts)
print(listaMea)
element = listaMea.pop()
print(element)
print(listaMea)
lista2 = listaMea.copy()
print(lista2)
lista2.reverse()
print(lista2)
lista2.append('firisor de aur')
print(lista2)
lista3 = (2, 8, 9, 3, 3, 2)
lista3=list(lista3)
print(lista3)
lista3.sort()
print(lista3)
lista_sortata =sorted(lista3)
print(lista_sortata)
c1=5.63
c2=6.88
c3=4.8
c4=1.09
c5=108
c6=31.5
c7=43.41
c8=23.95
c9=36.33
c10=150
from math import *
# print(round(c1))
# print(round(c2))
# print(round(c3))
# print(round(c4))
# print(round(c5))
print(sqrt(c1))
print(sqrt(c2))
print(sqrt(c3))
def func1(): #definire
for i in range(0,5):
print(lista2[i])
func1()
import pandas as pd
# pd.set_option('display.width', 120) # set char width
#
# df = pd.read_csv('test.csv')
# df = pd.read_csv('test.csv')
# print(df.iloc[12], '\n', type(df.iloc[12])) # print row #13 ==> object of type Series
# print('-' * 50) # separator - beautifier
# print(df.iloc[[1, 3, 5]], '\n', type(df.iloc[[1, 3, 5]])) #list of integers # prints rows (list type),
# # ==> object of type DataFrame
#Example 10.a
import pandas as pd
df = pd.read_csv('test.csv')
# print(df.loc[(df['type']==3),['name']]) # materials of auxiliary type (3)
# def func2():
# for i in range(0,5):
# while len(lista3)!=0:
# lista3.pop()
# print(lista3)
# func2()
lista2.clear()
print(lista2)
lista2.insert(2,'bro<NAME>')
print(lista2)
import math
df = pd.read_csv('suppliers.csv', index_col="name")
# print('The average value of the months in which we sold is', df['month'].mean())
# print('The maximum value of the recorded days is', df['day'].max())
# print('The first recorded year of the outputs is', df['year'].min())
print(df)
df.dropna(inplace=True)
print(df)
print(df.loc[0, 'id'])
df.loc[0,'id'] = 1255
print(df.loc[0, 'id'])
#Dicts
dict = {"laptop":"Dell", "software":"Windows", "periferice":"kit mouse-tastatura RGB"}
print(dict)
dict["laptop"] = "Lenovo"
print(dict)
dict.popitem()
#removes from the end
print(dict)
x=dict.items()
#returns a list of tuples - key-value pairs
import matplotlib.pyplot as plt
print(x)
df1 = pd.DataFrame(
{
"1": "InterSport",
"2": "Taco",
"3": "0CCC",
"4": "PPP",
},
index=[0, 1, 2, 3],
)
df2 = pd.DataFrame(
{
"5": "eMAG",
"6": "AboutYou",
"7": "InterSport",
"8": "Taco",
},
index=[4, 5, 6, 7],
)
result = pd.concat([df1, df2], axis=1, join="inner")
print(result)
print(df['Price'])
df['Price'].plot(kind='hist')
plt.ylabel('Price')
plt.xlabel('Price min-qty')
plt.show()
from sklearn.preprocessing import MinMaxScaler
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder
pd.options.display.max_columns = 11
test = pd.read_csv('test.csv')
train = pd.read_csv('train.csv')
"""
Code Developed and Modified By : <NAME>
Git Link : https://github.com/unexh/
"""
#importing libraries
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import pandas as pd
import numpy as np
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from nltk.tokenize import word_tokenize
import sys
#Linking essentials
csvFileLink = r'NewZomato.csv'
backgroundImageLink = r'images\sizller-with-noodles22.jpg'
searchIconLink = r'images\icons8-search-50(2).png'
backGround2ImageLink= r'images\Sitting-near-table-darkened.jpg'
backButtonImageLink =r'images\icons8-back-50.png'
#a class to contain Restaurant Related Data
class RestaurantData:
#RestaurantData Attributes
selectedCityName=str("")
selectedLocalityName=str("")
selectedRestaurantName=str("")
    DataFieldWhole = pd.read_csv(csvFileLink,encoding='latin')
#! /usr/bin/env python3
import os
import sys
import json
import numpy as np
import pandas as pd
from glob import glob
from enum import Enum
from dateutil import tz
from datetime import datetime, timedelta
map_station = {
1:"Castello, <NAME>", 2:"Hotel Carlton", 3:"Via del Podestà", 4:"Corso di P.Reno / Via Ragno" ,
5:"Piazza Trento Trieste", 6:"Piazza Stazione"
}
if __name__ == '__main__':
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('-sh', '--show', action='store_true')
parser.add_argument('-c', '--cfg', help='config file', required=True)
parser.add_argument('-d', '--data', help='counters data csv', required=True)
parser.add_argument('-tt', '--time_ticks', help='set time spacing between ticks', type=int, default=300)
parser.add_argument('-tl', '--time_labels', help='set time spacing between ticks\' labels', type=int, default=3600)
args = parser.parse_args()
filein = args.data
base = filein[:filein.rfind('/')]
base_save = os.path.join(os.environ['WORKSPACE'], 'slides', 'work_lavoro', 'ferrara', 'data', 'compare_presence')
if not os.path.exists(base_save): os.mkdir(base_save)
fname = filein[filein.find('/')+1:filein.rfind('.')].split('_')
fine_freq = fname[-2]
fine_freq_s = int(fine_freq[:-1])
interp = fname[-1]
dt_ticks = args.time_ticks
if dt_ticks > fine_freq_s:
tus = dt_ticks // fine_freq_s
else:
tus = 1
dt_ticks = fine_freq_s
dt_lbls = args.time_labels
if dt_lbls > dt_ticks:
lus = dt_lbls // dt_ticks
else:
lus = 1
dt_lbls = dt_ticks
print(f'Data sampling {fine_freq_s}. Ticks sampling {dt_ticks} u {tus}. Labels sampling {dt_lbls} u {lus}')
with open(args.cfg) as f:
config = json.load(f)
base_start_date = config['base_start_date']
base_stop_date = config['base_stop_date']
first_start_date = config['first_start_date']
first_stop_date = config['first_stop_date']
second_start_date = config['second_start_date']
second_stop_date = config['second_stop_date']
build_df = config['build']
conf_name = args.cfg
conf_name = conf_name[:conf_name.rfind('.')]
b_start_date = f'{conf_name}_{base_start_date}_{base_stop_date}_wifi'
b_first_date = f'{conf_name}_{first_start_date}_{first_stop_date}_wifi'
b_second_date = f'{conf_name}_{second_start_date}_{second_stop_date}_wifi'
file_base_date = f'{b_start_date}/counters_{fine_freq}_lin.csv'
file_first_date = f'{b_first_date}/counters_{fine_freq}_lin.csv'
file_second_date = f'{b_second_date}/counters_{fine_freq}_lin.csv'
def box_centered_kernel(tot_len, box_len):
pad_len = tot_len - box_len
kern = np.concatenate([
np.zeros((pad_len // 2)),
np.ones((box_len)) / box_len,
np.zeros((pad_len - pad_len // 2))
])
return kern
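    # For example, box_centered_kernel(7, 3) yields [0, 0, 1/3, 1/3, 1/3, 0, 0]:
    # a centered moving-average (box) kernel zero-padded to the requested total length.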
def building(filein):
base = filein[:filein.rfind('/')]
stats = pd.read_csv(filein, sep=';', parse_dates=['time'], index_col='time')
stats.index = stats.index.time
tuplecol = [ tuple(c.replace('\'', '').replace('(', '').replace(')','').replace(' ','').split(',')) for c in stats.columns ]
stats.columns = tuplecol
stats = stats.stack()
stats.index = pd.MultiIndex.from_tuples([ (t, i[0], i[1]) for t, i in stats.index ], names=['time', 'station_id', 'date'])
stats = stats.reset_index()
cols = stats.columns.values
cols[-1] = 'cnt'
stats.columns = cols
stats.date = pd.to_datetime(stats.date)
        stats['datatime'] = stats.apply(lambda r: datetime.combine(r['date'], r['time']), 1)
stats.datatime = pd.to_datetime(stats.datatime)
stats.index = stats.datatime
stats = stats.sort_index()
stats = stats.drop(['time', 'date', 'datatime'], axis=1)
for sid, dfg in stats.groupby(['station_id']):
dfg_smooth = dfg.copy()
ma_size = 5
kern = box_centered_kernel(len(dfg_smooth), ma_size)
            smooth = pd.DataFrame([], columns=dfg_smooth.columns, index=dfg_smooth.index)
import json
import sys
import os
import io
import copy
from collections import OrderedDict
import warnings
from typing import Optional
import numpy as np
import tiledb
from tiledb import TileDBError
# from tiledb.tests.common import xprint
if sys.version_info >= (3, 3):
unicode_type = str
else:
unicode_type = unicode
unicode_dtype = np.dtype(unicode_type)
def check_dataframe_deps():
pd_error = """Pandas version >= 1.0 required for dataframe functionality.
Please `pip install pandas>=1.0` to proceed."""
pa_error = """PyArrow version >= 1.0 is suggested for dataframe functionality.
Please `pip install pyarrow>=1.0`."""
from distutils.version import LooseVersion
try:
import pandas as pd
except ImportError:
raise Exception(pd_error)
if LooseVersion(pd.__version__) < LooseVersion("1.0"):
raise Exception(pd_error)
try:
import pyarrow as pa
if LooseVersion(pa.__version__) < LooseVersion("1.0"):
warnings.warn(pa_error)
except ImportError:
warnings.warn(pa_error)
# Note: 'None' is used to indicate optionality for many of these options
# For example, if the `sparse` argument is unspecified we will default
# to False (dense) unless the input has string or heterogenous indexes.
TILEDB_KWARG_DEFAULTS = {
"ctx": None,
"sparse": None,
"index_dims": None,
"allows_duplicates": True,
"mode": "ingest",
"attr_filters": None,
"dim_filters": None,
"coords_filters": None,
"full_domain": False,
"tile": None,
"row_start_idx": None,
"fillna": None,
"column_types": None,
"capacity": None,
"date_spec": None,
"cell_order": "row-major",
"tile_order": "row-major",
"timestamp": None,
"debug": None,
}
def parse_tiledb_kwargs(kwargs):
parsed_args = dict(TILEDB_KWARG_DEFAULTS)
for key in TILEDB_KWARG_DEFAULTS.keys():
if key in kwargs:
parsed_args[key] = kwargs.pop(key)
return parsed_args
class ColumnInfo:
def __init__(self, dtype, repr: Optional[str] = None, nullable: bool = False):
self.dtype = dtype
self.repr = repr
self.nullable = nullable
def dtype_from_column(col):
import pandas as pd
col_dtype = col.dtype
if col_dtype in (
np.int32,
np.int64,
np.uint32,
np.uint64,
np.float,
np.double,
np.uint8,
):
return ColumnInfo(col_dtype)
if isinstance(
col_dtype,
(
pd.Int64Dtype,
pd.Int32Dtype,
pd.Int16Dtype,
pd.Int8Dtype,
pd.UInt64Dtype,
pd.UInt32Dtype,
pd.UInt16Dtype,
pd.UInt8Dtype,
),
):
return ColumnInfo(col_dtype.numpy_dtype, repr=str(col_dtype), nullable=True)
if isinstance(col_dtype, pd.BooleanDtype):
return ColumnInfo(np.uint8, repr=pd.BooleanDtype(), nullable=True)
# TODO this seems kind of brittle
if col_dtype.base == np.dtype("M8[ns]"):
if col_dtype == np.dtype("datetime64[ns]"):
return ColumnInfo(col_dtype)
elif hasattr(col_dtype, "tz"):
raise ValueError("datetime with tz not yet supported")
else:
raise ValueError(
"unsupported datetime subtype ({})".format(type(col_dtype))
)
# Pandas 1.0 has StringDtype extension type
if col_dtype.name == "string":
return ColumnInfo(unicode_dtype)
if col_dtype == "bool":
return ColumnInfo(np.uint8, repr=np.dtype("bool"))
if col_dtype == np.dtype("O"):
# Note: this does a full scan of the column... not sure what else to do here
# because Pandas allows mixed string column types (and actually has
# problems w/ allowing non-string types in object columns)
inferred_dtype = pd.api.types.infer_dtype(col)
if inferred_dtype == "bytes":
return ColumnInfo(np.bytes_)
elif inferred_dtype == "string":
# TODO we need to make sure this is actually convertible
return ColumnInfo(unicode_dtype)
elif inferred_dtype == "mixed":
raise ValueError(
"Column '{}' has mixed value dtype and cannot yet be stored as a TileDB attribute".format(
col.name
)
)
raise ValueError("Unhandled column type: '{}'".format(col_dtype))
# TODO make this a staticmethod on Attr?
def attrs_from_df(df, index_dims=None, filters=None, column_types=None, ctx=None):
attr_reprs = dict()
if ctx is None:
ctx = tiledb.default_ctx()
if column_types is None:
column_types = dict()
attrs = list()
for name, col in df.items():
if isinstance(filters, dict):
if name in filters:
attr_filters = filters[name]
else:
attr_filters = None
elif filters is not None:
attr_filters = filters
else:
attr_filters = tiledb.FilterList([tiledb.ZstdFilter(1, ctx=ctx)])
# ignore any column used as a dim/index
if index_dims and name in index_dims:
continue
if name in column_types:
spec_type = column_types[name]
# Handle ExtensionDtype
if hasattr(spec_type, "type"):
spec_type = spec_type.type
attr_info = ColumnInfo(spec_type)
else:
attr_info = dtype_from_column(col)
attrs.append(
tiledb.Attr(
name=name,
dtype=attr_info.dtype,
filters=attr_filters,
nullable=attr_info.nullable,
)
)
if attr_info.repr is not None:
attr_reprs[name] = attr_info.repr
return attrs, attr_reprs
def dim_info_for_column(ctx, df, col, tile=None, full_domain=False, index_dtype=None):
if isinstance(col, np.ndarray):
col_values = col
else:
col_values = col.values
if len(col_values) < 1:
raise ValueError(
"Empty column '{}' cannot be used for dimension!".format(col_name)
)
if index_dtype is not None:
dim_info = ColumnInfo(index_dtype)
elif col_values.dtype is np.dtype("O"):
col_val0_type = type(col_values[0])
if col_val0_type in (bytes, unicode_type):
# TODO... core only supports TILEDB_ASCII right now
dim_info = ColumnInfo(np.bytes_)
else:
raise TypeError(
"Unknown column type not yet supported ('{}')".format(col_val0_type)
)
else:
dim_info = dtype_from_column(col_values)
return dim_info
def dim_for_column(
ctx, name, dim_info, col, tile=None, full_domain=False, ndim=None, dim_filters=None
):
if isinstance(col, np.ndarray):
col_values = col
else:
col_values = col.values
if tile is None:
if ndim is None:
raise TileDBError("Unexpected Nonetype ndim")
if ndim == 1:
tile = 10000
elif ndim == 2:
tile = 1000
elif ndim == 3:
tile = 100
else:
tile = 10
dtype = dim_info.dtype
if full_domain:
if not dim_info.dtype in (np.bytes_, np.unicode_):
# Use the full type domain, deferring to the constructor
(dtype_min, dtype_max) = tiledb.libtiledb.dtype_range(dim_info.dtype)
dim_max = dtype_max
if dtype.kind == "M":
date_unit = np.datetime_data(dtype)[0]
dim_min = np.datetime64(dtype_min + 1, date_unit)
tile_max = np.iinfo(np.uint64).max - tile
# modular arithmetic gives misleading overflow warning.
with np.errstate(over="ignore"):
dtype_range = np.uint64(dtype_max) - np.uint64(dtype_min)
if np.abs(dtype_range) > tile_max:
dim_max = np.datetime64(dtype_max - tile, date_unit)
elif dtype is np.int64:
dim_min = dtype_min + 1
else:
dim_min = dtype_min
if dtype.kind != "M" and np.issubdtype(dtype, np.integer):
tile_max = np.iinfo(np.uint64).max - tile
if np.abs(np.uint64(dtype_max) - np.uint64(dtype_min)) > tile_max:
dim_max = dtype_max - tile
else:
dim_min, dim_max = (None, None)
else:
dim_min = np.min(col_values)
dim_max = np.max(col_values)
if not dim_info.dtype in (np.bytes_, np.unicode_):
if np.issubdtype(dtype, np.integer) or dtype.kind == "M":
dim_range = np.uint64(np.abs(np.uint64(dim_max) - np.uint64(dim_min)))
# we can't make a tile larger than the dimension range
if dim_range < tile:
tile = dim_range
if tile < 1:
tile = 1
elif np.issubdtype(dtype, np.float64):
dim_range = dim_max - dim_min
if dim_range < tile:
tile = np.ceil(dim_range)
dim = tiledb.Dim(
name=name,
domain=(dim_min, dim_max),
dtype=dim_info.dtype,
tile=tile,
filters=dim_filters,
)
return dim
def get_index_metadata(dataframe):
md = dict()
for index in dataframe.index.names:
index_md_name = index
if index == None:
index_md_name = "__tiledb_rows"
# Note: this may be expensive.
md[index_md_name] = dtype_from_column(
dataframe.index.get_level_values(index)
).dtype
return md
def create_dims(
ctx, dataframe, index_dims, tile=None, full_domain=False, sparse=None, filters=None
):
import pandas as pd
index = dataframe.index
index_dict = OrderedDict()
index_dtype = None
per_dim_tile = False
if tile is not None:
if isinstance(tile, dict):
per_dim_tile = True
# input check, can't do until after per_dim_tile
if (
per_dim_tile
and not all(map(lambda x: isinstance(x, (int, float)), tile.values()))
) or (per_dim_tile is False and not isinstance(tile, (int, float))):
raise ValueError(
"Invalid tile kwarg: expected int or dict of column names mapped to ints. "
"Got '{}'".format(tile)
)
if isinstance(index, pd.MultiIndex):
for name in index.names:
index_dict[name] = dataframe.index.get_level_values(name)
elif isinstance(index, (pd.Index, pd.RangeIndex, pd.Int64Index)):
if hasattr(index, "name") and index.name is not None:
name = index.name
else:
index_dtype = np.dtype("uint64")
name = "__tiledb_rows"
index_dict[name] = index.values
else:
raise ValueError("Unhandled index type {}".format(type(index)))
# create list of dim types
# we need to know all the types in order to validate before creating Dims
dim_types = list()
for idx, (name, values) in enumerate(index_dict.items()):
if per_dim_tile and name in tile:
dim_tile = tile[name]
elif per_dim_tile:
# in this case we fall back to the default
dim_tile = None
else:
# in this case we use a scalar (type-checked earlier)
dim_tile = tile
dim_types.append(
dim_info_for_column(
ctx,
dataframe,
values,
tile=dim_tile,
full_domain=full_domain,
index_dtype=index_dtype,
)
)
if any([d.dtype in (np.bytes_, np.unicode_) for d in dim_types]):
if sparse is False:
raise TileDBError("Cannot create dense array with string-typed dimensions")
elif sparse is None:
sparse = True
d0 = dim_types[0]
if not all(d0.dtype == d.dtype for d in dim_types[1:]):
if sparse is False:
raise TileDBError(
"Cannot create dense array with heterogeneous dimension data types"
)
elif sparse is None:
sparse = True
# Fall back to default dense type if unspecified and not inferred from dimension types
if sparse is None:
sparse = False
ndim = len(dim_types)
dims = list()
for idx, (name, values) in enumerate(index_dict.items()):
# get the FilterList, if any
if isinstance(filters, dict):
if name in filters:
dim_filters = filters[name]
else:
dim_filters = None
elif filters is not None:
dim_filters = filters
else:
dim_filters = None
if per_dim_tile and name in tile:
dim_tile = tile[name]
elif per_dim_tile:
# in this case we fall back to the default
dim_tile = None
else:
# in this case we use a scalar (type-checked earlier)
dim_tile = tile
dims.append(
dim_for_column(
ctx,
name,
dim_types[idx],
values,
tile=dim_tile,
full_domain=full_domain,
ndim=ndim,
dim_filters=dim_filters,
)
)
if index_dims:
for name in index_dims:
if per_dim_tile and name in tile:
dim_tile = tile[name]
elif per_dim_tile:
# in this case we fall back to the default
dim_tile = None
else:
# in this case we use a scalar (type-checked earlier)
dim_tile = tile
# get the FilterList, if any
if isinstance(filters, dict) and name in filters:
dim_filters = filters[name]
elif filters is not None:
dim_filters = filters
else:
dim_filters = None
col = dataframe[name]
dims.append(
dim_for_column(
ctx,
dataframe,
col.values,
name,
tile=dim_tile,
dim_filters=dim_filters,
)
)
return dims, sparse
def write_array_metadata(array, attr_metadata=None, index_metadata=None):
"""
:param array: open, writable TileDB array
    :param attr_metadata: dict mapping attribute names to their dtype repr
    :param index_metadata: dict mapping index dimension names to their dtypes
:return:
"""
if attr_metadata:
attr_md_dict = {n: str(t) for n, t in attr_metadata.items()}
array.meta["__pandas_attribute_repr"] = json.dumps(attr_md_dict)
if index_metadata:
index_md_dict = {n: str(t) for n, t in index_metadata.items()}
array.meta["__pandas_index_dims"] = json.dumps(index_md_dict)
def dataframe_to_np_arrays(dataframe, fillna=None):
import pandas as pd
if not hasattr(pd, "StringDtype"):
raise Exception(
"Unexpectedly found pandas version < 1.0; please install >= 1.0 for dataframe functionality."
)
ret = dict()
nullmaps = dict()
for k, v in dataframe.to_dict(orient="series").items():
if pd.api.types.is_extension_array_dtype(v):
#
if fillna is not None and k in fillna:
# raise ValueError("Missing 'fillna' value for column '{}' with pandas extension dtype".format(k))
ret[k] = v.to_numpy(na_value=fillna[k])
else:
# use default 0/empty for the dtype
ret[k] = v.to_numpy(dtype=v.dtype.numpy_dtype, na_value=v.dtype.type())
nullmaps[k] = (~v.isna()).to_numpy(dtype="uint8")
else:
ret[k] = v.to_numpy()
return ret, nullmaps
def from_dataframe(uri, dataframe, **kwargs):
# deprecated in 0.6.3
warnings.warn(
"tiledb.from_dataframe is deprecated; please use .from_pandas",
DeprecationWarning,
)
from_pandas(uri, dataframe, **kwargs)
def from_pandas(uri, dataframe, **kwargs):
"""Create TileDB array at given URI from a Pandas dataframe
Supports most Pandas series types, including nullable integers and
bools.
:param uri: URI for new TileDB array
:param dataframe: pandas DataFrame
:param mode: Creation mode, one of 'ingest' (default), 'schema_only', 'append'
:Keyword Arguments: optional keyword arguments for TileDB conversion, see
``tiledb.from_csv`` for additional details.
:raises: :py:exc:`tiledb.TileDBError`
:return: None
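    **Example:**
    A minimal usage sketch (the URI and dataframe below are illustrative):
    >>> import pandas as pd
    >>> df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    >>> tiledb.from_pandas("my_dataframe_array", df, sparse=True)  # doctest: +SKIP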
"""
check_dataframe_deps()
import pandas as pd
if "tiledb_args" in kwargs:
tiledb_args = kwargs.pop("tiledb_args")
else:
tiledb_args = parse_tiledb_kwargs(kwargs)
ctx = tiledb_args.get("ctx", None)
tile_order = tiledb_args["tile_order"]
cell_order = tiledb_args["cell_order"]
allows_duplicates = tiledb_args.get("allows_duplicates", False)
sparse = tiledb_args["sparse"]
index_dims = tiledb_args.get("index_dims", None)
mode = tiledb_args.get("mode", "ingest")
attr_filters = tiledb_args.get("attr_filters", None)
dim_filters = tiledb_args.get("dim_filters", None)
coords_filters = tiledb_args.get("coords_filters", None)
full_domain = tiledb_args.get("full_domain", False)
capacity = tiledb_args.get("capacity", False)
tile = tiledb_args.get("tile", None)
nrows = tiledb_args.get("nrows", None)
row_start_idx = tiledb_args.get("row_start_idx", None)
fillna = tiledb_args.get("fillna", None)
date_spec = tiledb_args.get("date_spec", None)
column_types = tiledb_args.get("column_types", None)
if mode != "append" and tiledb.array_exists(uri):
raise TileDBError("Array URI '{}' already exists!".format(uri))
write = True
create_array = True
if mode is not None:
if mode == "schema_only":
write = False
elif mode == "append":
create_array = False
if not sparse and row_start_idx is None:
raise TileDBError(
"Cannot append to dense array without 'row_start_idx'"
)
elif mode != "ingest":
raise TileDBError("Invalid mode specified ('{}')".format(mode))
if sparse == False and (index_dims is None or "index_col" not in kwargs):
full_domain = True
if capacity is None:
capacity = 0 # this will use the libtiledb internal default
if ctx is None:
ctx = tiledb.default_ctx()
if create_array:
if nrows:
if full_domain is None:
full_domain = False
# create the domain and attributes
# if sparse==None then this function may return a default based on types
dims, sparse = create_dims(
ctx,
dataframe,
index_dims,
sparse=sparse,
tile=tile,
full_domain=full_domain,
filters=dim_filters,
)
domain = tiledb.Domain(*dims, ctx=ctx)
attrs, attr_metadata = attrs_from_df(
dataframe,
index_dims=index_dims,
filters=attr_filters,
column_types=column_types,
)
# don't set allows_duplicates=True for dense
allows_duplicates = allows_duplicates and sparse
# now create the ArraySchema
schema = tiledb.ArraySchema(
domain=domain,
attrs=attrs,
cell_order=cell_order,
tile_order=tile_order,
coords_filters=coords_filters,
allows_duplicates=allows_duplicates,
capacity=capacity,
sparse=sparse,
)
tiledb.Array.create(uri, schema, ctx=ctx)
tiledb_args["mode"] = "append"
# apply fill replacements for NA values if specified
if fillna is not None:
dataframe.fillna(fillna, inplace=True)
# apply custom datetime parsing to given {'column_name': format_spec} pairs
# format_spec should be provied using Python format codes:
# https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
if date_spec is not None:
if type(date_spec) is not dict:
raise TypeError(
"Expected 'date_spec' to be a dict, got {}".format(type(date_spec))
)
for name, spec in date_spec.items():
dataframe[name] = pd.to_datetime(dataframe[name], format=spec)
# write the metadata so we can reconstruct dataframe
if create_array:
index_metadata = get_index_metadata(dataframe)
with tiledb.open(uri, "w", ctx=ctx) as A:
write_array_metadata(A, attr_metadata, index_metadata)
if write:
write_dict, nullmaps = dataframe_to_np_arrays(dataframe, fillna=fillna)
if tiledb_args.get("debug", True):
print("`tiledb.read_pandas` writing '{}' rows".format(len(dataframe)))
timestamp = tiledb_args.get("timestamp", None)
try:
A = tiledb.open(uri, "w", timestamp=timestamp, ctx=ctx)
if A.schema.sparse:
coords = []
for k in range(A.schema.ndim):
dim_name = A.schema.domain.dim(k).name
if (
not create_array
and dim_name not in dataframe.index.names
and dim_name != "__tiledb_rows"
):
# this branch handles the situation where a user did not specify
# index_col and is using mode='append'. We would like to try writing
# with the columns corresponding to existing dimension name.
coords.append(write_dict.pop(dim_name))
else:
coords.append(dataframe.index.get_level_values(k))
# TODO ensure correct col/dim ordering
A._setitem_impl(tuple(coords), write_dict, nullmaps)
else:
if row_start_idx is None:
row_start_idx = 0
row_end_idx = row_start_idx + len(dataframe)
A._setitem_impl(slice(row_start_idx, row_end_idx), write_dict, nullmaps)
finally:
A.close()
def _tiledb_result_as_dataframe(readable_array, result_dict):
import pandas as pd
# TODO missing key in the rep map should only be a warning, return best-effort?
# TODO this should be generalized for round-tripping overloadable types
# for any array (e.g. np.uint8 <> bool)
repr_meta = None
index_dims = None
if "__pandas_attribute_repr" in readable_array.meta:
# backwards compatibility
repr_meta = json.loads(readable_array.meta["__pandas_attribute_repr"])
if "__pandas_index_dims" in readable_array.meta:
index_dims = json.loads(readable_array.meta["__pandas_index_dims"])
indexes = list()
rename_cols = dict()
for col_name, col_val in result_dict.items():
if repr_meta and col_name in repr_meta:
new_col = pd.Series(col_val, dtype=repr_meta[col_name])
result_dict[col_name] = new_col
elif index_dims and col_name in index_dims:
new_col = pd.Series(col_val, dtype=index_dims[col_name])
result_dict[col_name] = new_col
if col_name == "__tiledb_rows":
rename_cols["__tiledb_rows"] = None
indexes.append(None)
else:
indexes.append(col_name)
for col_key, col_name in rename_cols.items():
result_dict[col_name] = result_dict.pop(col_key)
df = pd.DataFrame.from_dict(result_dict)
if len(indexes) > 0:
df.set_index(indexes, inplace=True)
return df
def open_dataframe(uri, ctx=None):
"""Open TileDB array at given URI as a Pandas dataframe
If the array was saved using tiledb.from_dataframe, then columns
will be interpreted as non-primitive pandas or numpy types when
available.
:param uri:
:return: dataframe constructed from given TileDB array URI
**Example:**
>>> import tiledb
>>> df = tiledb.open_dataframe("iris.tldb")
>>> tiledb.object_type("iris.tldb")
'array'
"""
check_dataframe_deps()
if ctx is None:
ctx = tiledb.default_ctx()
# TODO support `distributed=True` option?
with tiledb.open(uri, ctx=ctx) as A:
nonempty = A.nonempty_domain()
data = A.multi_index.__getitem__(tuple(slice(s1, s2) for s1, s2 in nonempty))
new_df = _tiledb_result_as_dataframe(A, data)
return new_df
def _iterate_csvs_pandas(csv_list, pandas_args):
"""Iterate over a list of CSV files. Uses pandas.read_csv with pandas_args and returns
a list of dataframe(s) for each iteration, up to the specified 'chunksize' argument in
'pandas_args'
"""
import pandas as pd
assert "chunksize" in pandas_args
chunksize = pandas_args["chunksize"]
rows_read = 0
result_list = list()
file_iter = iter(csv_list)
next_file = next(file_iter, None)
while next_file is not None:
df_iter = pd.read_csv(next_file, **pandas_args)
df_iter.chunksize = chunksize - rows_read
df = next(df_iter, None)
while df is not None:
result_list.append(df)
rows_read += len(df)
df_iter.chunksize = chunksize - rows_read
if rows_read == chunksize:
yield result_list
# start over
rows_read = 0
df_iter.chunksize = chunksize
result_list = list()
df = next(df_iter, None)
next_file = next(file_iter, None)
if next_file is None and len(result_list) > 0:
yield result_list
def from_csv(uri, csv_file, **kwargs):
"""
Create TileDB array at given URI from a CSV file or list of files
:param uri: URI for new TileDB array
:param csv_file: input CSV file or list of CSV files.
Note: multi-file ingestion requires a `chunksize` argument. Files will
be read in batches of at least `chunksize` rows before writing to the
TileDB array.
:Keyword Arguments:
- Any ``pandas.read_csv`` supported keyword argument.
- TileDB-specific arguments:
* ``allows_duplicates``: Generated schema should allow duplicates
* ``cell_order``: Schema cell order
* ``tile_order``: Schema tile order
* ``mode``: (default ``ingest``), Ingestion mode: ``ingest``, ``schema_only``,
``append``
* ``full_domain``: Dimensions should be created with full range of the dtype
* ``attr_filters``: FilterList to apply to Attributes: FilterList or Dict[str -> FilterList]
for any attribute(s). Unspecified attributes will use default.
* ``dim_filters``: FilterList to apply to Dimensions: FilterList or Dict[str -> FilterList]
for any dimensions(s). Unspecified dimensions will use default.
* ``coords_filters``: FilterList to apply to all coordinates (Dimensions)
* ``sparse``: (default True) Create sparse schema
* ``tile``: Dimension tiling: accepts either an int that applies the tiling to all dimensions
or a dict("dim_name": int) to specifically assign tiling to a given dimension
* ``capacity``: Schema capacity.
* ``timestamp``: Write TileDB array at specific timestamp.
* ``row_start_idx``: Start index to start new write (for row-indexed ingestions).
* ``date_spec``: Dictionary of {``column_name``: format_spec} to apply to date/time
columns which are not correctly inferred by pandas 'parse_dates'.
Format must be specified using the Python format codes:
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
:return: None
**Example:**
>>> import tiledb
>>> tiledb.from_csv("iris.tldb", "iris.csv")
>>> tiledb.object_type("iris.tldb")
'array'
"""
check_dataframe_deps()
import pandas
if "tiledb_args" in kwargs:
        tiledb_args = kwargs.pop("tiledb_args")
else:
tiledb_args = parse_tiledb_kwargs(kwargs)
multi_file = False
debug = tiledb_args.get("debug", False)
pandas_args = copy.deepcopy(kwargs)
##########################################################################
# set up common arguments
##########################################################################
if isinstance(csv_file, str) and not os.path.isfile(csv_file):
# for non-local files, use TileDB VFS i/o
ctx = tiledb_args.get("ctx", tiledb.default_ctx())
vfs = tiledb.VFS(ctx=ctx)
csv_file = tiledb.FileIO(vfs, csv_file, mode="rb")
elif isinstance(csv_file, (list, tuple)):
# TODO may be useful to support a filter callback here
multi_file = True
mode = tiledb_args.get("mode", None)
if mode is not None:
# For schema_only mode we need to pass a max read count into
# pandas.read_csv
# Note that 'nrows' is a pandas arg!
if mode == "schema_only" and not "nrows" in kwargs:
pandas_args["nrows"] = 500
elif mode not in ["ingest", "append"]:
raise TileDBError("Invalid mode specified ('{}')".format(mode))
if mode != "append" and tiledb.array_exists(uri):
raise TileDBError("Array URI '{}' already exists!".format(uri))
# this is a pandas pass-through argument, do not pop!
chunksize = kwargs.get("chunksize", None)
if multi_file and not (chunksize or mode == "schema_only"):
raise TileDBError("Multiple input CSV files requires a 'chunksize' argument")
if multi_file:
input_csv_list = csv_file
else:
input_csv = csv_file
##########################################################################
# handle multi_file and chunked arguments
##########################################################################
# we need to use full-domain for multi or chunked reads, because we
# won't get a chance to see the full range during schema creation
if multi_file or chunksize is not None:
if not "nrows" in kwargs:
tiledb_args["full_domain"] = True
##########################################################################
# read path
##########################################################################
if multi_file:
array_created = False
if mode == "append":
array_created = True
rows_written = 0
# multi-file or chunked always writes to full domain
# TODO: allow specifying dimension range for schema creation
tiledb_args["full_domain"] = True
for df_list in _iterate_csvs_pandas(input_csv_list, pandas_args):
if df_list is None:
break
df = pandas.concat(df_list)
tiledb_args["row_start_idx"] = rows_written
from_pandas(uri, df, tiledb_args=tiledb_args, pandas_args=pandas_args)
rows_written += len(df)
if mode == "schema_only":
break
elif chunksize is not None:
rows_written = 0
# for chunked reads, we need to iterate over chunks
        df_iter = pandas.read_csv(input_csv, **pandas_args)
import os
from datetime import datetime
import numpy as np
import pandas as pd
import torch
from scipy.special import softmax
from sklearn.metrics import (
accuracy_score,
precision_recall_fscore_support,
r2_score,
roc_auc_score,
)
from torch.nn import CrossEntropyLoss, L1Loss, MSELoss, NLLLoss
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from roost.core import Normalizer, RobustL1Loss, RobustL2Loss, sampled_softmax
def init_model(
model_class,
model_name,
model_params,
run_id,
optim,
learning_rate,
weight_decay,
momentum,
device,
milestones=[],
gamma=0.3,
resume=None,
fine_tune=None,
transfer=None,
):
robust = model_params["robust"]
n_targets = model_params["n_targets"]
if fine_tune is not None:
print(f"Use material_nn and output_nn from '{fine_tune}' as a starting point")
checkpoint = torch.load(fine_tune, map_location=device)
        # update the task dict to the fine-tuning task
checkpoint["model_params"]["task_dict"] = model_params["task_dict"]
model = model_class(
**checkpoint["model_params"],
device=device,
)
model.to(device)
model.load_state_dict(checkpoint["state_dict"])
# model.trunk_nn.reset_parameters()
# for m in model.output_nns:
# m.reset_parameters()
assert model.model_params["robust"] == robust, (
"cannot fine-tune "
"between tasks with different numbers of outputs - use transfer "
"option instead"
)
assert model.model_params["n_targets"] == n_targets, (
"cannot fine-tune "
"between tasks with different numbers of outputs - use transfer "
"option instead"
)
elif transfer is not None:
print(
f"Use material_nn from '{transfer}' as a starting point and "
"train the output_nn from scratch"
)
checkpoint = torch.load(transfer, map_location=device)
model = model_class(device=device, **model_params)
model.to(device)
model_dict = model.state_dict()
pretrained_dict = {
k: v for k, v in checkpoint["state_dict"].items() if k in model_dict
}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
elif resume:
# TODO work out how to ensure that we are using the same optimizer
# when resuming such that the state dictionaries do not clash.
print(f"Resuming training from '{resume}'")
checkpoint = torch.load(resume, map_location=device)
model = model_class(
**checkpoint["model_params"],
device=device,
)
model.to(device)
model.load_state_dict(checkpoint["state_dict"])
model.epoch = checkpoint["epoch"]
        model.best_val_scores = checkpoint["best_val_scores"]
else:
model = model_class(device=device, **model_params)
model.to(device)
# Select Optimiser
if optim == "SGD":
optimizer = torch.optim.SGD(
model.parameters(),
lr=learning_rate,
weight_decay=weight_decay,
momentum=momentum,
)
elif optim == "Adam":
optimizer = torch.optim.Adam(
model.parameters(), lr=learning_rate, weight_decay=weight_decay
)
elif optim == "AdamW":
optimizer = torch.optim.AdamW(
model.parameters(), lr=learning_rate, weight_decay=weight_decay
)
else:
raise NameError("Only SGD, Adam or AdamW are allowed as --optim")
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=milestones, gamma=gamma
)
if resume:
        # NOTE: the user could change the optimizer when resuming, which would make the
        # saved optimizer state dict incompatible
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
print(f"Total Number of Trainable Parameters: {model.num_params:,}")
# TODO parallelise the code over multiple GPUs. Currently DataParallel
# crashes as subsets of the batch have different sizes due to the use of
# lists of lists rather the zero-padding.
# if (torch.cuda.device_count() > 1) and (device==torch.device("cuda")):
# print("The model will use", torch.cuda.device_count(), "GPUs!")
# model = nn.DataParallel(model)
model.to(device)
return model, optimizer, scheduler
def init_losses(task_dict, loss_dict, robust=False): # noqa: C901
criterion_dict = {}
for name, task in task_dict.items():
# Select Task and Loss Function
if task == "classification":
if loss_dict[name] != "CSE":
raise NameError("Only CSE loss allowed for classification tasks")
if robust:
criterion_dict[name] = (task, NLLLoss())
else:
criterion_dict[name] = (task, CrossEntropyLoss())
if task == "mask":
if loss_dict[name] != "Brier":
raise NameError("Only Brier loss allowed for masking tasks")
if robust:
criterion_dict[name] = (task, MSELoss())
else:
criterion_dict[name] = (task, MSELoss())
elif task == "dist":
if loss_dict[name] == "L1":
criterion_dict[name] = (task, L1Loss())
elif loss_dict[name] == "L2":
criterion_dict[name] = (task, MSELoss())
else:
raise NameError("Only L1 or L2 losses are allowed for regression tasks")
elif task == "regression":
if robust:
if loss_dict[name] == "L1":
criterion_dict[name] = (task, RobustL1Loss)
elif loss_dict[name] == "L2":
criterion_dict[name] = (task, RobustL2Loss)
else:
raise NameError(
"Only L1 or L2 losses are allowed for robust regression tasks"
)
else:
if loss_dict[name] == "L1":
criterion_dict[name] = (task, L1Loss())
elif loss_dict[name] == "L2":
criterion_dict[name] = (task, MSELoss())
else:
raise NameError(
"Only L1 or L2 losses are allowed for regression tasks"
)
return criterion_dict
def init_normalizers(task_dict, device, resume=False):
if resume:
checkpoint = torch.load(resume, map_location=device)
normalizer_dict = {}
for task, state_dict in checkpoint["normalizer_dict"].items():
normalizer_dict[task] = Normalizer.from_state_dict(state_dict)
return normalizer_dict
normalizer_dict = {}
for target, task in task_dict.items():
# Select Task and Loss Function
if task == "regression":
normalizer_dict[target] = Normalizer()
else:
normalizer_dict[target] = None
return normalizer_dict
def train_ensemble(
model_class,
model_name,
run_id,
ensemble_folds,
epochs,
train_set,
val_set,
log,
data_params,
setup_params,
restart_params,
model_params,
loss_dict,
patience=None,
):
"""
Train multiple models
"""
train_generator = DataLoader(train_set, **data_params)
if val_set is not None:
data_params.update({"batch_size": 16 * data_params["batch_size"]})
val_generator = DataLoader(val_set, **data_params)
else:
val_generator = None
for j in range(ensemble_folds):
# this allows us to run ensembles in parallel rather than in series
# by specifying the run-id arg.
if ensemble_folds == 1:
j = run_id
model, optimizer, scheduler = init_model(
model_class=model_class,
model_name=model_name,
model_params=model_params,
run_id=j,
**setup_params,
**restart_params,
)
criterion_dict = init_losses(model.task_dict, loss_dict, model_params["robust"])
normalizer_dict = init_normalizers(
model.task_dict, setup_params["device"], restart_params["resume"]
)
for target, normalizer in normalizer_dict.items():
if normalizer is not None:
sample_target = torch.Tensor(
train_set.dataset.df[target].iloc[train_set.indices].values
)
if not restart_params["resume"]:
normalizer.fit(sample_target)
print(
f"Dummy MAE: {torch.mean(torch.abs(sample_target-normalizer.mean)):.4f}"
)
if log:
writer = SummaryWriter(
log_dir=(
f"runs/{model_name}/{model_name}-r{j}_{datetime.now():%d-%m-%Y_%H-%M-%S}"
)
)
else:
writer = None
if (val_set is not None) and (model.best_val_scores is None):
print("Getting Validation Baseline")
with torch.no_grad():
v_metrics = model.evaluate(
generator=val_generator,
criterion_dict=criterion_dict,
optimizer=None,
normalizer_dict=normalizer_dict,
action="val",
)
val_score = {}
for name, task in model.task_dict.items():
if task == "regression":
val_score[name] = v_metrics[name]["MAE"]
print(
f"Validation Baseline - {name}: MAE {val_score[name]:.3f}"
)
elif task == "classification":
val_score[name] = v_metrics[name]["Acc"]
print(
f"Validation Baseline - {name}: Acc {val_score[name]:.3f}"
)
model.best_val_scores = val_score
model.fit(
train_generator=train_generator,
val_generator=val_generator,
optimizer=optimizer,
scheduler=scheduler,
epochs=epochs,
criterion_dict=criterion_dict,
normalizer_dict=normalizer_dict,
model_name=model_name,
run_id=j,
writer=writer,
patience=patience,
)
@torch.no_grad()
def results_multitask( # noqa: C901
model_class,
model_name,
run_id,
ensemble_folds,
test_set,
data_params,
robust,
task_dict,
device,
eval_type="checkpoint",
print_results=True,
save_results=True,
):
"""
take an ensemble of models and evaluate their performance on the test set
"""
assert print_results or save_results, (
"Evaluating Model pointless if both 'print_results' and "
"'save_results' are False."
)
print(
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"
"------------Evaluate model on Test Set------------\n"
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"
)
test_generator = DataLoader(test_set, **data_params)
results_dict = {n: {} for n in task_dict}
for name, task in task_dict.items():
if task == "regression":
results_dict[name]["pred"] = np.zeros((ensemble_folds, len(test_set)))
if robust:
results_dict[name]["ale"] = np.zeros((ensemble_folds, len(test_set)))
elif task == "classification":
results_dict[name]["logits"] = []
results_dict[name]["pre-logits"] = []
if robust:
results_dict[name]["pre-logits_ale"] = []
for j in range(ensemble_folds):
if ensemble_folds == 1:
resume = f"models/{model_name}/{eval_type}-r{run_id}.pth.tar"
print("Evaluating Model")
else:
resume = f"models/{model_name}/{eval_type}-r{j}.pth.tar"
print(f"Evaluating Model {j + 1}/{ensemble_folds}")
assert os.path.isfile(resume), f"no checkpoint found at '{resume}'"
checkpoint = torch.load(resume, map_location=device)
assert (
checkpoint["model_params"]["robust"] == robust
), f"robustness of checkpoint '{resume}' is not {robust}"
assert (
checkpoint["model_params"]["task_dict"] == task_dict
), f"task_dict of checkpoint '{resume}' does not match current task_dict"
model = model_class(**checkpoint["model_params"], device=device)
model.to(device)
model.load_state_dict(checkpoint["state_dict"])
normalizer_dict = {}
for task, state_dict in checkpoint["normalizer_dict"].items():
if state_dict is not None:
normalizer_dict[task] = Normalizer.from_state_dict(state_dict)
else:
normalizer_dict[task] = None
y_test, output, *ids = model.predict(generator=test_generator)
# TODO should output also be a dictionary?
for pred, target, (name, task) in zip(output, y_test, model.task_dict.items()):
if task == "regression":
if model.robust:
mean, log_std = pred.chunk(2, dim=1)
pred = normalizer_dict[name].denorm(mean.data.cpu())
ale_std = torch.exp(log_std).data.cpu() * normalizer_dict[name].std
results_dict[name]["ale"][j, :] = ale_std.view(-1).numpy()
else:
pred = normalizer_dict[name].denorm(pred.data.cpu())
results_dict[name]["pred"][j, :] = pred.view(-1).numpy()
elif task == "classification":
if model.robust:
mean, log_std = pred.chunk(2, dim=1)
logits = (
sampled_softmax(mean, log_std, samples=10).data.cpu().numpy()
)
pre_logits = mean.data.cpu().numpy()
pre_logits_std = torch.exp(log_std).data.cpu().numpy()
results_dict[name]["pre-logits_ale"].append(pre_logits_std)
else:
pre_logits = pred.data.cpu().numpy()
logits = softmax(pre_logits, axis=1)
results_dict[name]["pre-logits"].append(pre_logits)
results_dict[name]["logits"].append(logits)
results_dict[name]["target"] = target
# TODO cleaner way to get identifier names
if save_results:
save_results_dict(
dict(zip(test_generator.dataset.dataset.identifiers, ids)),
results_dict,
model_name,
)
if print_results:
for name, task in task_dict.items():
print(f"\nTask: '{name}' on Test Set")
if task == "regression":
print_metrics_regression(**results_dict[name])
elif task == "classification":
print_metrics_classification(**results_dict[name])
return results_dict
def print_metrics_regression(target, pred, **kwargs):
"""print out metrics for a regression task
Args:
target (ndarray(n_test)): targets for regression task
pred (ndarray(n_ensemble, n_test)): model predictions
kwargs: unused entries from the results dictionary
"""
ensemble_folds = pred.shape[0]
res = pred - target
mae = np.mean(np.abs(res), axis=1)
mse = np.mean(np.square(res), axis=1)
rmse = np.sqrt(mse)
r2 = r2_score(
np.repeat(target[:, np.newaxis], ensemble_folds, axis=1),
pred.T,
multioutput="raw_values",
)
r2_avg = np.mean(r2)
r2_std = np.std(r2)
mae_avg = np.mean(mae)
mae_std = np.std(mae) / np.sqrt(mae.shape[0])
rmse_avg = np.mean(rmse)
rmse_std = np.std(rmse) / np.sqrt(rmse.shape[0])
if ensemble_folds == 1:
print("Model Performance Metrics:")
print(f"R2 Score: {r2_avg:.4f} ")
print(f"MAE: {mae_avg:.4f}")
print(f"RMSE: {rmse_avg:.4f}")
else:
print("Model Performance Metrics:")
print(f"R2 Score: {r2_avg:.4f} +/- {r2_std:.4f}")
print(f"MAE: {mae_avg:.4f} +/- {mae_std:.4f}")
print(f"RMSE: {rmse_avg:.4f} +/- {rmse_std:.4f}")
# calculate metrics and errors with associated errors for ensembles
y_ens = np.mean(pred, axis=0)
mae_ens = np.abs(target - y_ens).mean()
mse_ens = np.square(target - y_ens).mean()
rmse_ens = np.sqrt(mse_ens)
r2_ens = r2_score(target, y_ens)
print("\nEnsemble Performance Metrics:")
print(f"R2 Score : {r2_ens:.4f} ")
print(f"MAE : {mae_ens:.4f}")
print(f"RMSE : {rmse_ens:.4f}")
def print_metrics_classification(target, logits, average="macro", **kwargs):
"""print out metrics for a classification task
Args:
target (ndarray(n_test)): categorical encoding of the tasks
        logits ([ndarray(n_test, n_targets)]): logits predicted by each ensemble member
kwargs: unused entries from the results dictionary
"""
acc = np.zeros(len(logits))
roc_auc = np.zeros(len(logits))
precision = np.zeros(len(logits))
recall = np.zeros(len(logits))
fscore = np.zeros(len(logits))
target_ohe = np.zeros_like(logits[0])
target_ohe[np.arange(target.size), target] = 1
for j, y_logit in enumerate(logits):
acc[j] = accuracy_score(target, np.argmax(y_logit, axis=1))
roc_auc[j] = roc_auc_score(target_ohe, y_logit, average=average)
precision[j], recall[j], fscore[j] = precision_recall_fscore_support(
target, np.argmax(logits[j], axis=1), average=average
)[:3]
if len(logits) == 1:
print("\nModel Performance Metrics:")
print(f"Accuracy : {acc[0]:.4f} ")
print(f"ROC-AUC : {roc_auc[0]:.4f}")
print(f"Weighted Precision : {precision[0]:.4f}")
print(f"Weighted Recall : {recall[0]:.4f}")
print(f"Weighted F-score : {fscore[0]:.4f}")
else:
acc_avg = np.mean(acc)
acc_std = np.std(acc) / np.sqrt(acc.shape[0])
roc_auc_avg = np.mean(roc_auc)
roc_auc_std = np.std(roc_auc) / np.sqrt(roc_auc.shape[0])
prec_avg = np.mean(precision)
prec_std = np.std(precision) / np.sqrt(precision.shape[0])
recall_avg = np.mean(recall)
recall_std = np.std(recall) / np.sqrt(recall.shape[0])
fscore_avg = np.mean(fscore)
fscore_std = np.std(fscore) / np.sqrt(fscore.shape[0])
print("\nModel Performance Metrics:")
print(f"Accuracy : {acc_avg:.4f} +/- {acc_std:.4f}")
print(f"ROC-AUC : {roc_auc_avg:.4f} +/- {roc_auc_std:.4f}")
print(f"Weighted Precision : {prec_avg:.4f} +/- {prec_std:.4f}")
print(f"Weighted Recall : {recall_avg:.4f} +/- {recall_std:.4f}")
print(f"Weighted F-score : {fscore_avg:.4f} +/- {fscore_std:.4f}")
# calculate metrics and errors with associated errors for ensembles
ens_logits = np.mean(logits, axis=0)
ens_acc = accuracy_score(target, np.argmax(ens_logits, axis=1))
ens_roc_auc = roc_auc_score(target_ohe, ens_logits, average=average)
ens_prec, ens_recall, ens_fscore = precision_recall_fscore_support(
target, np.argmax(ens_logits, axis=1), average=average
)[:3]
print("\nEnsemble Performance Metrics:")
print(f"Accuracy : {ens_acc:.4f} ")
print(f"ROC-AUC : {ens_roc_auc:.4f}")
print(f"Weighted Precision : {ens_prec:.4f}")
print(f"Weighted Recall : {ens_recall:.4f}")
print(f"Weighted F-score : {ens_fscore:.4f}")
def save_results_dict(ids, results_dict, model_name):
"""save the results to a file after model evaluation
Args:
        ids ({str: [str]}): dict mapping identifier names to lists of identifiers
        results_dict ({name: {col: data}}): nested dictionary of results
        model_name (str): name of the model, used when saving the results
"""
results = {}
for name in results_dict:
for col, data in results_dict[name].items():
            # NOTE we save pre_logits rather than logits due to the fact
            # that with the heteroscedastic setup we want to be able to
            # sample from the Gaussian-distributed pre_logits we parameterise.
if "pre-logits" in col:
for n_ens, y_pre_logit in enumerate(data):
results.update(
{
f"{name}_{col}_c{lab}_n{n_ens}": val.ravel()
for lab, val in enumerate(y_pre_logit.T)
}
)
elif "pred" in col:
preds = {
f"{name}_{col}_n{n_ens}": val.ravel()
for (n_ens, val) in enumerate(data)
}
results.update(preds)
elif "ale" in col: # elif so that pre-logit-ale doesn't trigger
results.update(
{
f"{name}_{col}_n{n_ens}": val.ravel()
for (n_ens, val) in enumerate(data)
}
)
elif col == "target":
results.update({f"{name}_{col}": data})
    df = pd.DataFrame({**ids, **results})
import luigi
import os
import pandas as pd
from db import extract
from db import sql
from forecast import util
import shutil
import luigi.contrib.hadoop
from sqlalchemy import create_engine
from pysandag.database import get_connection_string
from pysandag import database
from db import log
class EmpPopulation(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return None
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
db_connection_string = database.get_connection_string('model_config.yml', 'in_db')
sql_in_engine = create_engine(db_connection_string)
in_query = getattr(sql, 'max_run_id')
db_run_id = pd.read_sql(in_query, engine, index_col=None)
# db_run_id = log.new_run(name='emp_run_log', run_id=db_run_id['max'].iloc[0])
run_id = pd.Series([db_run_id['id'].iloc[0]])
run_id.to_hdf('temp/data.h5', 'run_id', mode='a')
tables = util.yaml_to_dict('model_config.yml', 'db_tables')
dem_sim_rates = extract.create_df('dem_sim_rates', 'dem_sim_rates_table',
rate_id=self.dem_id, index=None)
dem_sim_rates.to_hdf('temp/data.h5', 'dem_sim_rates', mode='a')
econ_sim_rates = extract.create_df('econ_sim_rates', 'econ_sim_rates_table',
rate_id=self.econ_id, index=None)
econ_sim_rates.to_hdf('temp/data.h5', 'econ_sim_rates', mode='a')
in_query = getattr(sql, 'inc_pop') % (tables['inc_pop_table'], run_id[0])
in_query2 = getattr(sql, 'inc_mil_hh_pop') % (tables['population_table'], dem_sim_rates.base_population_id[0])
pop = pd.read_sql(in_query, engine, index_col=['age', 'race_ethn', 'sex', 'mildep'])
pop_mil = pd.read_sql(in_query2, sql_in_engine, index_col=['age', 'race_ethn', 'sex', 'mildep'])
pop = pop.join(pop_mil)
pop['persons'] = (pop['persons'] - pop['mil_mildep'])
pop = pop.reset_index(drop=False)
pop['age_cat'] = ''
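        # Bin single-year ages into the age categories used by the rate tables;
        # ages outside the listed ranges keep the default '' label.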
pop.loc[pop['age'].isin(list(range(0, 5))), ['age_cat']] = '00_04'
pop.loc[pop['age'].isin(list(range(5, 10))), ['age_cat']] = '05_09'
pop.loc[pop['age'].isin(list(range(10, 15))), ['age_cat']] = '10_14'
pop.loc[pop['age'].isin(list(range(15, 18))), ['age_cat']] = '15_17'
pop.loc[pop['age'].isin(list(range(18, 20))), ['age_cat']] = '18_19'
pop.loc[pop['age'].isin(list(range(20, 21))), ['age_cat']] = '20_20'
pop.loc[pop['age'].isin(list(range(21, 22))), ['age_cat']] = '21_21'
pop.loc[pop['age'].isin(list(range(22, 25))), ['age_cat']] = '22_24'
pop.loc[pop['age'].isin(list(range(25, 30))), ['age_cat']] = '25_29'
pop.loc[pop['age'].isin(list(range(30, 35))), ['age_cat']] = '30_34'
pop.loc[pop['age'].isin(list(range(35, 40))), ['age_cat']] = '35_39'
pop.loc[pop['age'].isin(list(range(40, 45))), ['age_cat']] = '40_44'
pop.loc[pop['age'].isin(list(range(45, 50))), ['age_cat']] = '45_49'
pop.loc[pop['age'].isin(list(range(50, 55))), ['age_cat']] = '50_54'
pop.loc[pop['age'].isin(list(range(55, 60))), ['age_cat']] = '55_59'
pop.loc[pop['age'].isin(list(range(60, 62))), ['age_cat']] = '60_61'
pop.loc[pop['age'].isin(list(range(62, 65))), ['age_cat']] = '62_64'
pop.loc[pop['age'].isin(list(range(65, 67))), ['age_cat']] = '65_66'
pop.loc[pop['age'].isin(list(range(67, 70))), ['age_cat']] = '67_69'
pop.loc[pop['age'].isin(list(range(70, 75))), ['age_cat']] = '70_74'
pop.loc[pop['age'].isin(list(range(75, 80))), ['age_cat']] = '75_79'
pop.loc[pop['age'].isin(list(range(80, 85))), ['age_cat']] = '80_84'
pop.loc[pop['age'].isin(list(range(85, 103))), ['age_cat']] = '85_99'
pop = pd.DataFrame(pop['persons'].groupby([pop['yr'], pop['age_cat'], pop['sex'], pop['race_ethn']]).sum())
pop.to_hdf('temp/data.h5', 'pop', mode='a')
class MilPopulation(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return EmpPopulation(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
db_connection_string = database.get_connection_string('model_config.yml', 'in_db')
sql_in_engine = create_engine(db_connection_string)
in_query = getattr(sql, 'max_run_id')
db_run_id = pd.read_sql(in_query, engine, index_col=None)
run_id = pd.Series([db_run_id['id'].iloc[0]])
run_id.to_hdf('temp/data.h5', 'run_id', mode='a')
tables = util.yaml_to_dict('model_config.yml', 'db_tables')
dem_sim_rates = pd.read_hdf('temp/data.h5', 'dem_sim_rates')
in_query = getattr(sql, 'inc_mil_gc_pop') % (tables['inc_pop_table'], run_id[0])
in_query2 = getattr(sql, 'inc_mil_hh_pop') % (tables['population_table'], dem_sim_rates.base_population_id[0])
pop = pd.read_sql(in_query, engine, index_col=['age', 'race_ethn', 'sex'])
pop_mil = pd.read_sql(in_query2, sql_in_engine, index_col=['age', 'race_ethn', 'sex'])
pop_mil = pop_mil.loc[pop_mil['mildep'] == 'Y']
pop = pop.join(pop_mil)
pop.rename(columns={'persons': 'mil_gc_pop'}, inplace=True)
pop.rename(columns={'mil_mildep': 'mil_hh_pop'}, inplace=True)
pop = pop.reset_index(drop=False)
pop = pd.DataFrame(pop[['mil_gc_pop', 'mil_hh_pop']].groupby([pop['yr']]).sum())
pop.to_hdf('temp/data.h5', 'mil_pop', mode='a')
class LaborForceParticipationRates(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return EmpPopulation(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
lfpr = extract.create_df('lfp_rates', 'lfp_rates_table', rate_id=econ_sim_rates.lfpr_id[0], index=['yr', 'age_cat', 'sex', 'race_ethn'])
lfpr.to_hdf('temp/data.h5', 'lfpr', mode='a')
class LaborForce(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return LaborForceParticipationRates(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
pop = pd.read_hdf('temp/data.h5', 'pop')
lfpr = pd.read_hdf('temp/data.h5', 'lfpr')
labor_force = pop.join(lfpr)
labor_force['labor_force'] = (labor_force['persons'] * labor_force['lfpr']).round()
labor_force = labor_force.iloc[~labor_force.index.get_level_values('age_cat').isin(['00_04', '05_09', '10_14'])]
labor_force.to_hdf('temp/data.h5', 'labor_force', mode='a')
class CohortUrRate(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return LaborForce(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
cohort_ur = extract.create_df('cohort_ur', 'cohort_ur_table', rate_id=econ_sim_rates.ur1_id[0], index=['yr', 'age_cat', 'sex', 'race_ethn'])
cohort_ur.to_hdf('temp/data.h5', 'cohort_ur', mode='a')
yearly_ur = extract.create_df('yearly_ur', 'yearly_ur_table', rate_id=econ_sim_rates.ur2_id[0], index=['yr'])
yearly_ur.to_hdf('temp/data.h5', 'yearly_ur', mode='a')
class WorkForce(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return CohortUrRate(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
labor_force = pd.read_hdf('temp/data.h5', 'labor_force')
cohort_ur = pd.read_hdf('temp/data.h5', 'cohort_ur')
yearly_ur = pd.read_hdf('temp/data.h5', 'yearly_ur')
work_force = labor_force.join(cohort_ur)
work_force['unemployed'] = (work_force['labor_force'] * work_force['ur2']).round()
computed_ur = work_force.reset_index(drop=False)
computed_ur = pd.DataFrame(computed_ur[['labor_force', 'unemployed']].groupby([computed_ur['yr']]).sum())
computed_ur['computed_ur'] = (computed_ur['unemployed'] / computed_ur['labor_force'])
computed_ur = computed_ur.join(yearly_ur)
computed_ur['adjustment'] = (computed_ur['ur1'] / computed_ur['computed_ur'])
work_force = work_force.join(computed_ur['adjustment'])
work_force['unemployed'] = (work_force['unemployed'] * work_force['adjustment']).round()
work_force['work_force'] = (work_force['labor_force'] - work_force['unemployed'])
work_force.to_hdf('temp/data.h5', 'work_force', mode='a')
# Code to check if after adjustment ur matches target
'''
computed_ur = work_force.reset_index(drop=False)
computed_ur = pd.DataFrame(computed_ur[['labor_force', 'unemployed']].groupby([computed_ur['yr']]).sum())
computed_ur['computed_ur'] = (computed_ur['unemployed'] / computed_ur['labor_force'])
        print(computed_ur)
'''
class LocalWorkForce(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return WorkForce(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
out_commuting = extract.create_df('out_commuting', 'out_commuting_table', rate_id=econ_sim_rates.oc_id[0], index=['yr'])
work_force = pd.read_hdf('temp/data.h5', 'work_force')
work_force = work_force.reset_index(drop=False)
work_force = pd.DataFrame(work_force[['labor_force', 'unemployed', 'work_force']].groupby([work_force['yr']]).sum())
work_force = work_force.join(out_commuting)
work_force['work_force_outside'] = (work_force['work_force'] * work_force['wtlh_lh']).round()
work_force['work_force_local'] = (work_force['work_force'] - work_force['work_force_outside']).round()
work_force.to_hdf('temp/data.h5', 'work_force_local', mode='a')
class Jobs(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return LocalWorkForce(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
local_jobs = extract.create_df('local_jobs', 'local_jobs_table', rate_id=econ_sim_rates.lj_id[0], index=['yr'])
in_commuting = extract.create_df('in_commuting', 'in_commuting_table',rate_id=econ_sim_rates.ic_id[0], index=['yr'])
work_force_local = pd.read_hdf('temp/data.h5', 'work_force_local')
work_force_local = work_force_local.join(local_jobs)
work_force_local['jobs_local'] = (work_force_local['work_force_local'] * work_force_local['jlw']).round()
work_force_local = work_force_local.join(in_commuting)
work_force_local['jobs_total'] = (work_force_local['jobs_local'] * work_force_local['wh_whlh']).round()
work_force_local['jobs_external'] = (work_force_local['jobs_total'] - work_force_local['jobs_local']).round()
# pull information from here
work_force_local.to_hdf('temp/data.h5', 'jobs', mode='a')
class SectoralPay(luigi.Task):
econ_id = luigi.Parameter()
dem_id = luigi.Parameter()
def requires(self):
return Jobs(econ_id=self.econ_id, dem_id=self.dem_id)
def output(self):
return luigi.LocalTarget('temp/data.h5')
def run(self):
engine = create_engine(get_connection_string("model_config.yml", 'output_database'))
        econ_sim_rates = pd.read_hdf('temp/data.h5', 'econ_sim_rates')
# Packages
# Basic packages
import numpy as np
from scipy import integrate, stats, spatial
from scipy.special import expit, binom
import pandas as pd
import xlrd # help read excel files directly from source into pandas
import copy
import warnings
# Building parameter/computation graph
import inspect
from collections import OrderedDict
# OS/filesystem tools
import time
from datetime import datetime
import random
import string
import os
import shutil
import sys
import cloudpickle
# Distributed computing tools
import dask
import distributed
from dask.distributed import Client
from dask.distributed import as_completed
import itertools
# State Dimensions
# Health states (S, E and D are fixed to 1 dimension)
nI_symp = 2 # number of symptomatic infected states
nI = 2+nI_symp # number of total infected states (disease stages), the +2 are Exposed and I_nonsymptomatic
nR = 2 # number of recovery states (antibody development post-disease, IgM and IgG are two stages)
nHS = 2+nI+nR # number of total health states, the +2, S and D, are susceptible and dead
# Age groups (risk groups)
nAge = 9 # In accordance w Imperial #13 report (0-9, 10-19, ... 70-79, 80+)
# Isolation states
nIso = 4 # None/distancing, Case isolation, Hospitalised, Hospital staff
# Testing states
nTest = 4 # untested/negative, Virus positive, Antibody positive, Both positive
stateTensor = np.ones((nAge, nHS, nIso, nTest))
# Population (data from Imperial #13 ages.csv/UK)
agePopulationTotal = 1000.*np.array([8044.056,7642.473,8558.707,9295.024,8604.251,9173.465,7286.777,5830.635,3450.616])
#agePopulationTotal = 1000.*pd.read_csv("https://raw.githubusercontent.com/ImperialCollegeLondon/covid19model/master/data/ages.csv").iloc[3].values[2:]
# Currently: let's work with england population only instead of full UK, as NHS England + CHESS data is much clearer than other regions
agePopulationTotal *= 55.98/66.27 # (google england/uk population 2018, assuming age dist is similar)
agePopulationRatio = agePopulationTotal/np.sum(agePopulationTotal)
# Helper function to adjust average rates to age-aware rates
def adjustRatesByAge_KeepAverageRate(rate, ageRelativeAdjustment, agePopulationRatio=agePopulationRatio, maxOutRate=10):
"""This is a helper function and wont be picked up as a model parameter!"""
if rate == 0:
return np.zeros_like(ageRelativeAdjustment)
if rate >= maxOutRate:
warnings.warn("covidTesting::adjustRatesByAge_KeepAverageRate Input rate {} > maxOutRate {}, returning input rates".format(rate, maxOutRate))
return rate*np.ones_like(ageRelativeAdjustment)
out = np.zeros_like(ageRelativeAdjustment)
out[0] = maxOutRate+1 # just to start the while loop below
while np.sum(out>=maxOutRate)>0:
corrFactor = np.sum(agePopulationRatio/(1+ageRelativeAdjustment))
out = rate * (1+ageRelativeAdjustment) * corrFactor
if np.sum(out>=maxOutRate)>0:
warnings.warn("covidTesting::adjustRatesByAge_KeepAverageRate Adjusted rate larger than {} encountered, reducing ageAdjustment variance by 10%".format(maxOutRate))
tmp_mean = np.mean(ageRelativeAdjustment)
ageRelativeAdjustment = tmp_mean + np.sqrt(0.9)*(ageRelativeAdjustment-tmp_mean)
return out
# For calculations see data_cleaning_py.ipynb, calculations from NHS England dataset as per 05 Apr
relativeDeathRisk_given_COVID_by_age = np.array([-0.99742186, -0.99728639, -0.98158438, -0.9830432 , -0.82983414,
-0.84039294, 0.10768979, 0.38432409, 5.13754904])
#ageRelativeDiseaseSeverity = np.array([-0.8, -0.6, -0.3, -0.3, -0.1, 0.1, 0.35, 0.4, 0.5]) # FIXED (above) - this is a guess, find data and fix
#ageRelativeRecoverySpeed = np.array([0.2]*5+[-0.1, -0.2, -0.3, -0.5]) # TODO - this is a guess, find data and fix
ageRelativeRecoverySpeed = np.array([0.]*9) # For now we make it same for everyone, makes calculations easier
# For calculations see data_cleaning_py.ipynb, calculations from NHS England dataset as per 05 Apr
caseFatalityRatioHospital_given_COVID_by_age = np.array([0.00856164, 0.03768844, 0.02321319, 0.04282494, 0.07512237,
0.12550367, 0.167096 , 0.37953452, 0.45757006])
def trFunc_diseaseProgression(
# Basic parameters to adhere to
nonsymptomatic_ratio = 0.86,
# number of days between measurable events
infect_to_symptoms = 5.,
#symptom_to_death = 16.,
    symptom_to_recovery = 10., # 20.5, # unrealistically long for old people
symptom_to_hospitalisation = 5.76,
hospitalisation_to_recovery = 14.51,
IgG_formation = 15.,
# Age related parameters
# for now we'll assume that all hospitalised cases are known (overall 23% of hospitalised COVID patients die. 9% overall case fatality ratio)
caseFatalityRatioHospital_given_COVID_by_age = caseFatalityRatioHospital_given_COVID_by_age,
ageRelativeRecoverySpeed = ageRelativeRecoverySpeed,
# Unknown rates to estimate
nonsymp_to_recovery = 15.,
inverse_IS1_IS2 = 4.,
**kwargs
):
# Now we have all the information to build the age-aware multistage SIR model transition matrix
# The full transition tensor is a sparse map from the Age x HealthState x isolation state to HealthState,
# and thus is a 4th order tensor itself, representing a linear mapping
# from "number of people aged A in health state B and isolation state C to health state D.
trTensor_diseaseProgression = np.zeros((nAge, nHS, nIso, nHS))
# Use basic parameters to regularise inputs
E_IS1 = 1./infect_to_symptoms
    # The nonsymptomatic fraction (default 86%) fixes the ratio of the E->IN and E->IS1 rates
    E_IN = nonsymptomatic_ratio / (1. - nonsymptomatic_ratio) * E_IS1
# Nonsymptomatic recovery
IN_R1 = 1./nonsymp_to_recovery
IS1_IS2 = 1./inverse_IS1_IS2
IS2_R1 = 1./(symptom_to_recovery-inverse_IS1_IS2)
R1_R2 = 1./IgG_formation
# Disease progression matrix # TODO - calibrate (together with transmissionInfectionStage)
# rows: from-state, cols: to-state (non-symmetric!)
# - this represent excess deaths only, doesn't contain baseline deaths!
# Calculate all non-serious cases that do not end up in hospitals.
# Note that we only have reliable death data from hospitals (NHS England), so we do not model people dieing outside hospitals
diseaseProgBaseline = np.array([
# to: E, IN, IS1, IS2, R1, R2, D
[ 0 , E_IN, E_IS1, 0, 0, 0, 0 ], # from E
[ 0, 0, 0, 0, IN_R1, 0, 0 ], # from IN
[ 0 , 0, 0, IS1_IS2, 0, 0, 0 ], # from IS1
[ 0 , 0, 0, 0, IS2_R1, 0, 0 ], # from IS2
[ 0 , 0, 0, 0, 0, R1_R2, 0 ], # from R1
[ 0 , 0, 0, 0, 0, 0, 0 ], # from R2
[ 0 , 0, 0, 0, 0, 0, 0 ] # from D
])
ageAdjusted_diseaseProgBaseline = copy.deepcopy(np.repeat(diseaseProgBaseline[np.newaxis],nAge,axis=0))
# Modify all death and R1 rates:
for ii in range(ageAdjusted_diseaseProgBaseline.shape[1]):
# Adjust death rate by age dependent disease severity
ageAdjusted_diseaseProgBaseline[:,ii,-1] = adjustRatesByAge_KeepAverageRate(
ageAdjusted_diseaseProgBaseline[0,ii,-1],
ageRelativeAdjustment=relativeDeathRisk_given_COVID_by_age
)
# Adjust recovery rate by age dependent recovery speed
ageAdjusted_diseaseProgBaseline[:,ii,-3] = adjustRatesByAge_KeepAverageRate(
ageAdjusted_diseaseProgBaseline[0,ii,-3],
ageRelativeAdjustment=ageRelativeRecoverySpeed,
agePopulationRatio=agePopulationRatio
)
ageAdjusted_diseaseProgBaseline_Hospital = copy.deepcopy(ageAdjusted_diseaseProgBaseline)
# Calculate hospitalisation based rates, for which we do have data. Hospitalisation can end up with deaths
# Make sure that the ratio of recoveries in hospital honour the case fatality ratio appropriately
# IS2 -> death
ageAdjusted_diseaseProgBaseline_Hospital[:,3,-1] = (
# IS2 -> recovery
ageAdjusted_diseaseProgBaseline_Hospital[:,3,-3] * (
# multiply by cfr / (1-cfr) to get correct rate towards death
caseFatalityRatioHospital_given_COVID_by_age/(
1 - caseFatalityRatioHospital_given_COVID_by_age)
)
)
# TODO - time to death might be incorrect overall without an extra delay state, especially for young people
# Non-hospitalised disease progression
for i1 in [0,1,3]:
trTensor_diseaseProgression[:,1:,i1,1:] = ageAdjusted_diseaseProgBaseline
# hospitalised disease progression
trTensor_diseaseProgression[:,1:,2,1:] = ageAdjusted_diseaseProgBaseline_Hospital
return trTensor_diseaseProgression
# Larger data driver approaches, with age distribution, see data_cleaning_R.ipynb for details
ageHospitalisationRateBaseline = pd.read_csv('../data/clean_hosp-epis-stat-admi-summ-rep-2015-16-rep_table_6.csv', sep=',').iloc[:,-1].values
ageHospitalisationRecoveryRateBaseline = 1./pd.read_csv('../data/clean_10641_LoS_age_provider_suppressed.csv', sep=',').iloc[:,-1].values
# Calculate initial hospitalisation (occupancy), that will be used to initialise the model
initBaselineHospitalOccupancyEquilibriumAgeRatio = ageHospitalisationRateBaseline/(ageHospitalisationRateBaseline+ageHospitalisationRecoveryRateBaseline)
# Take into account the NHS work-force in hospitals that for our purposes count as "hospitalised S" population,
# also unaffected by quarantine measures
ageNhsClinicalStaffPopulationRatio = pd.read_csv('../data/clean_nhsclinicalstaff.csv', sep=',').iloc[:,-1].values
# Extra rate of hospitalisation due to COVID-19 infection stages
# TODO - find / estimate data on this (unfortunately true rates are hard to get due to many unknown cases)
# Symptom to hospitalisation is 5.76 days on average (Imperial #8)
infToHospitalExtra = np.array([1e-4, 1e-3, 2e-2, 1e-2])
# We do know at least how age affects these risks:
# For calculations see data_cleaning_py.ipynb, calculations from CHESS dataset as per 05 Apr
relativeAdmissionRisk_given_COVID_by_age = np.array([-0.94886625, -0.96332087, -0.86528671, -0.79828999, -0.61535305,
-0.35214767, 0.12567034, 0.85809052, 3.55950368])
riskOfAEAttandance_by_age = np.array([0.41261361, 0.31560648, 0.3843979 , 0.30475704, 0.26659415,
0.25203475, 0.24970244, 0.31549102, 0.65181376])
# Build the transition tensor from any non-hospitalised state to a hospitalised state
# (being in home quarantine is assumed to affect only the infection probability [below], not the hospitalisation probability)
# caseIsolationHospitalisationRateAdjustment = 1.
# This function takes as input the number of people in given age and health state, and in any non-hospitalised state
# and returns the number of people staying in the same age and health state,
# but now hospitalised (the rest of people remain in whatever state they were in)
def trFunc_HospitalAdmission(
ageHospitalisationRateBaseline = ageHospitalisationRateBaseline,
infToHospitalExtra = infToHospitalExtra,
ageRelativeExtraAdmissionRiskToCovid = relativeAdmissionRisk_given_COVID_by_age * riskOfAEAttandance_by_age,
**kwargs
):
# This tensor will pointwise multiply an nAge x nHS slice of the stateTensor
trTensor_HospitalAdmission = np.zeros((nAge, nHS))
ageAdjusted_infToHospitalExtra = copy.deepcopy(np.repeat(infToHospitalExtra[np.newaxis],nAge,axis=0))
for ii in range(ageAdjusted_infToHospitalExtra.shape[1]):
# Adjust death rate by age dependent disease severity
ageAdjusted_infToHospitalExtra[:,ii] = adjustRatesByAge_KeepAverageRate(
infToHospitalExtra[ii],
ageRelativeAdjustment=ageRelativeExtraAdmissionRiskToCovid
)
# Add baseline hospitalisation to all non-dead states
trTensor_HospitalAdmission[:,:-1] += np.expand_dims(ageHospitalisationRateBaseline,-1)
# Add COVID-caused hospitalisation to all infected states (TODO: This is summation of rates for independent processes, should be correct, but check)
trTensor_HospitalAdmission[:,1:(nI+1)] += ageAdjusted_infToHospitalExtra
return trTensor_HospitalAdmission
def trFunc_HospitalDischarge(
ageHospitalisationRecoveryRateBaseline = ageHospitalisationRecoveryRateBaseline,
dischargeDueToCovidRateMultiplier = 3.,
**kwargs
):
trTensor_HospitalDischarge = np.zeros((nAge, nHS))
# Baseline discharges apply to all non-symptomatic patients (TODO: take into account testing state!)
trTensor_HospitalDischarge[:, :3] += ageHospitalisationRecoveryRateBaseline[:,np.newaxis]
# No discharges for COVID symptomatic people from the hospital until they recover
# TODO - check with health experts if this is correct assumption; probably also depends on testing state
trTensor_HospitalDischarge[:, 3:5] = 0.
trTensor_HospitalDischarge[:, 5:7] = dischargeDueToCovidRateMultiplier * ageHospitalisationRecoveryRateBaseline[:,np.newaxis]
return trTensor_HospitalDischarge
ageSocialMixingBaseline = pd.read_csv('../data/socialcontactdata_UK_Mossong2008_social_contact_matrix.csv', sep=',').iloc[:,1:].values
ageSocialMixingBaseline = (ageSocialMixingBaseline+ageSocialMixingBaseline.T)/2.
ageSocialMixingDistancing = pd.read_csv('../data/socialcontactdata_UK_Mossong2008_social_contact_matrix_with_distancing.csv', sep=',').iloc[:,1:].values
ageSocialMixingDistancing = (ageSocialMixingDistancing+ageSocialMixingDistancing.T)/2.
ageSocialMixingIsolation = np.zeros_like(ageSocialMixingBaseline)
elevatedMixingRatioInHospital = 3.0
withinHospitalSocialMixing = elevatedMixingRatioInHospital * np.sum(np.dot(agePopulationRatio, ageSocialMixingBaseline))
transmissionInfectionStage = np.array([0.001, 0.1, 0.6, 0.5])
def trFunc_newInfections_Complete(
stateTensor,
policySocialDistancing, # True / False, no default because it's important to know which one we use at any moment!
policyImmunityPassports, # True / False, no default because it's important to know which one we use at any moment!
ageSocialMixingBaseline = ageSocialMixingBaseline,
ageSocialMixingDistancing = ageSocialMixingDistancing,
ageSocialMixingIsolation = ageSocialMixingIsolation,
withinHospitalSocialMixing = withinHospitalSocialMixing,
transmissionInfectionStage = transmissionInfectionStage,
**kwargs
):
ageIsoContractionRate = np.zeros((nAge, nIso, nTest))
# Add non-hospital infections
#--------------------------------
curNonIsolatedSocialMixing = ageSocialMixingDistancing if policySocialDistancing else ageSocialMixingBaseline
# Add baseline interactions only between non-isolated people
for k1 in [0,3]:
for k2 in [0,3]:
ageIsoContractionRate[:,k1,:] += np.expand_dims(
np.matmul(
curNonIsolatedSocialMixing,
np.einsum('ijl,j->i',
stateTensor[:,1:(nI+1),k2,:], transmissionInfectionStage) # all infected in non-isolation
),
axis=1
)
if policyImmunityPassports:
# If the immunity passports policy is on, everyone who tested antibody positive, can roam freely
# Therefore replace the interactions between people with testingState = 2 with ageSocialMixingBaseline
# we do this by using the distributive property of matrix multiplication, and adding extra interactions
# "ageSocialMixingBaseline"-"curNonIsolatedSocialMixing" with each other (this is zero if no social distancing!)
# TODO - this is a bit hacky?, but probably correct - double check though!
for k1 in [0,3]:
for k2 in [0,3]:
ageIsoContractionRate[:,k1,2:] += np.matmul(
ageSocialMixingBaseline-curNonIsolatedSocialMixing,
np.einsum('ijk,j->ik',
stateTensor[:,1:(nI+1),k2,2:], transmissionInfectionStage) # all infected in non-isolation
)
# Add isolation interactions only between isolated and non-isolated people
# non-isolated contracting it from isolated
for k1 in [0,3]:
ageIsoContractionRate[:,k1,:] += np.expand_dims(
np.matmul(
ageSocialMixingIsolation,
np.einsum('ijl,j->i',
stateTensor[:,1:(nI+1),1,:], transmissionInfectionStage) # all infected in isolation
),
axis=1
)
# isolated contracting it from non-isolated
for k1 in [0,3]:
ageIsoContractionRate[:,1,:] += np.expand_dims(
np.matmul(
ageSocialMixingIsolation,
np.einsum('ijl,j->i',
stateTensor[:,1:(nI+1),k1,:], transmissionInfectionStage) # all infected in non-hospital, non-isolation
),
axis = 1
)
# isolated cannot contracting it from another isolated
# Add in-hospital infections (of hospitalised patients, and staff)
#--------------------------------
# (TODO - within hospitals we probably want to take into effect the testing state;
# tested people are better isolated and there's less mixing)
ageIsoContractionRate[:,2:,:] += np.expand_dims(
withinHospitalSocialMixing *
np.einsum('ijkl,j->i',
stateTensor[:,1:(nI+1),2:,:], transmissionInfectionStage), # all infected in hospital (sick or working)
axis = (1,2))
return ageIsoContractionRate/np.sum(stateTensor) # Normalise the rate by total population
def trFunc_travelInfectionRate_ageAdjusted(
t, # Time (int, in days) within simulation
travelMaxTime = 200,
travelBaseRate = 5e-4, # How many people normally travel back to the country per day # TODO - get data
travelDecline_mean = 15.,
travelDecline_slope = 1.,
travelInfection_peak = 1e-1,
travelInfection_maxloc = 10.,
travelInfection_shape = 2.,
**kwargs
):
tmpTime = np.arange(travelMaxTime)
# nAge x T TODO get some realistic data on this
travelAgeRateByTime = travelBaseRate * np.outer(agePopulationRatio, 1-expit((tmpTime-travelDecline_mean)/travelDecline_slope))
# 1 x T TODO get some realistic data on this, maybe make it age weighted
travelContractionRateByTime = stats.gamma.pdf(tmpTime, a=travelInfection_shape, loc=0., scale=travelInfection_maxloc/(travelInfection_shape-1))
travelContractionRateByTime = travelInfection_peak*travelContractionRateByTime/np.max(travelContractionRateByTime)
if t >= travelAgeRateByTime.shape[-1]:
return np.zeros(travelAgeRateByTime.shape[0])
else:
return travelAgeRateByTime[:,int(t)] * travelContractionRateByTime[int(t)]
def inpFunc_testSpecifications(
PCR_FNR_I1_to_R2 = np.array([ 0.9, 0.4, 0.15, 0.35, 0.5, 0.8]),
PCR_FPR = 0.01,
antigen_FNR_I1_to_R2 = np.array([ 0.95, 0.6, 0.35, 0.45, 0.6, 0.9]),
antigen_FPR = 0.1,
antibody_FNR_I1_to_R2 = np.array([0.99, 0.85, 0.8, 0.65, 0.3, 0.05]),
antibody_FPR_S_to_I4 = np.array([0.05, 0.04, 0.03, 0.02, 0.01])
):
testSpecifications = pd.DataFrame(
columns=["Name"],#, "Infection stage"],#, "Sensitivity", "Specificity"],
data = (
["PCR"] * nHS +
["Antigen"] * (nHS) +
["Antibody"] * (nHS))
)
testSpecifications['OutputTestState'] = [1]*nHS + [1]*nHS + [2]*nHS # what information state does a pos test transition you to.
    testSpecifications['TruePosHealthState'] = [np.arange(1,nI+1)]*nHS + [np.arange(1,nI+1)]*nHS + [np.arange(nI+1,nI+nR+1)]*nHS # health states in which this test can return a true positive.
    # In some health states some people are true negatives and some are true positives! (No, makes little sense to use, just account for it in FPR? Only matters for test makers...)
# testSpecifications['AmbiguousPosHealthState'] = [np.arange(nI+1, nI+nR+1)]*nHS + [np.arange(nI+1, nI+nR+1)]*nHS + [np.arange(1, nI+1)]*nHS # what information state does a pos test transition you to.
testSpecifications['InputHealthState'] = list(np.tile(range(nHS),3))
# These numbers below are "defaults" illustrating the concept, but are modified by the inputs!!!
testSpecifications['FalseNegativeRate'] = [ # ratio of positive (infected / immune) people missed by the test
# For each health stage:
# S -> I1 (asymp) -> I2 (mild symp) -> I3 (symp, sick) -> I4 (symp, less sick) -> R1 / R2 (IgM, IgG avail) -> D
# PCR
0., 0.9, 0.4, 0.15, 0.35, 0.5, 0.8, 0.,
# Antigen
0., 0.95, 0.6, 0.35, 0.45, 0.6, 0.9, 0.,
# Antibody
0., 0.99, 0.85, 0.8, 0.65, 0.3, 0.05, 0.
]
testSpecifications.loc[1:6,'FalseNegativeRate'] = PCR_FNR_I1_to_R2
testSpecifications.loc[9:14,'FalseNegativeRate'] = antigen_FNR_I1_to_R2
testSpecifications.loc[17:22,'FalseNegativeRate'] = antibody_FNR_I1_to_R2
testSpecifications['FalsePositiveRate'] = [ # ratio of negative (non-infected or not immune) people deemed positive by the test
# PCR
0.01, 0.,0.,0.,0., 0.01, 0.01, 0.,
# Antigen
0.1, 0.,0.,0.,0., 0.1, 0.1, 0.,
# Antibody
0.05, 0.04, 0.03, 0.02, 0.01, 0., 0., 0.
]
testSpecifications.loc[0,'FalsePositiveRate'] = PCR_FPR
testSpecifications.loc[5:6,'FalsePositiveRate'] = PCR_FPR
testSpecifications.loc[8,'FalsePositiveRate'] = antigen_FPR
testSpecifications.loc[13:14,'FalsePositiveRate'] = antigen_FPR
testSpecifications.loc[16:20,'FalsePositiveRate'] = antibody_FPR_S_to_I4
name = testSpecifications['Name']
truePosHealthState = testSpecifications['TruePosHealthState']
testSpecifications.drop(['Name', 'TruePosHealthState'], inplace=True, axis=1)
testSpecifications = testSpecifications.to_numpy()
name = name.to_numpy()
truePosHealthState = truePosHealthState.to_numpy()
return testSpecifications, name, truePosHealthState
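# Quick inspection sketch (illustrative only, not part of the original pipeline): the returned
# array stacks nHS rows per test type in the order PCR, Antigen, Antibody; after the Name and
# TruePosHealthState columns are split off, its columns are OutputTestState, InputHealthState,
# FalseNegativeRate and FalsePositiveRate.
exampleSpecs, exampleNames, exampleTruePos = inpFunc_testSpecifications()
print(exampleNames[:nHS])   # all rows of the first block belong to the PCR test
print(exampleSpecs.shape)   # (3 * nHS, 4)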
def trFunc_testCapacity(
realTime, # time within simulation (day)
# PCR capacity - initial
testCapacity_pcr_phe_total = 1e4,
testCapacity_pcr_phe_inflexday = pd.to_datetime("2020-03-25", format="%Y-%m-%d"),
testCapacity_pcr_phe_inflexslope = 5.,
# PCR capacity - increased
testCapacity_pcr_country_total = 1e5,
testCapacity_pcr_country_inflexday = pd.to_datetime("2020-04-25", format="%Y-%m-%d"),
testCapacity_pcr_country_inflexslope = 10,
# Antibody / antigen capacity
testCapacity_antibody_country_firstday = pd.to_datetime("2020-04-25", format="%Y-%m-%d"),
testCapacity_antibody_country_total = 5e6,
testCapacity_antibody_country_inflexday = pd.to_datetime("2020-05-20", format="%Y-%m-%d"),
testCapacity_antibody_country_inflexslope = 20,
testCapacity_antigenratio_country = 0.7,
**kwargs
):
# Returns a dictionary with test names and the number of tests available on the given day (realTime)
outPCR = (
#phe phase
testCapacity_pcr_phe_total * expit((realTime-testCapacity_pcr_phe_inflexday).days/testCapacity_pcr_phe_inflexslope)
+
#whole country phase
testCapacity_pcr_country_total * expit((realTime-testCapacity_pcr_country_inflexday).days/testCapacity_pcr_country_inflexslope)
)
if realTime<testCapacity_antibody_country_firstday:
outAntiTotal = 0.
else:
outAntiTotal = (
testCapacity_antibody_country_total * expit((realTime-testCapacity_antibody_country_inflexday).days/testCapacity_antibody_country_inflexslope)
)
return {
"PCR": outPCR,
"Antigen": outAntiTotal*testCapacity_antigenratio_country,
"Antibody": outAntiTotal*(1-testCapacity_antigenratio_country)
}
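# Minimal usage sketch (assumes `expit` is imported from scipy.special, as the function above
# already requires): evaluating the capacity curves on a few dates illustrates the logistic
# ramp-up of PCR capacity and the later start of antigen/antibody testing.
for exampleDay in ["2020-03-01", "2020-04-01", "2020-05-01", "2020-06-01"]:
    print(exampleDay, trFunc_testCapacity(pd.to_datetime(exampleDay, format="%Y-%m-%d")))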
# To test the function, in runtests.jl
py_rTime =
|
pd.to_datetime("2020-05-25", format="%Y-%m-%d")
|
pandas.to_datetime
|
# source:
# https://stackoverflow.com/questions/6620471/fitting-empirical-distribution-to-theoretical-ones-with-scipy-python
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.stats as st
from tqdm import tqdm
mpl.rcParams["figure.figsize"] = (16.0, 12.0)
plt.style.use("ggplot")
# Create models from data
def best_fit_distribution(data, bins=200, ax=None):
"""Find the best fitting distribution to the data"""
# Get histogram of original data
y, x = np.histogram(data, bins=bins, density=True)
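# Convert the bin edges returned by np.histogram into bin centres so they align with the densities in y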
x = (x + np.roll(x, -1))[:-1] / 2.0
# Distributions to check
DISTRIBUTIONS = [
st.alpha,
st.anglit,
st.arcsine,
st.argus,
st.beta,
st.betaprime,
st.bradford,
st.burr,
st.burr12,
st.cauchy,
st.chi,
st.chi2,
st.cosine,
st.crystalball,
st.dgamma,
st.dweibull,
st.erlang,
st.expon,
st.exponnorm,
st.exponweib,
st.exponpow,
st.f,
st.fatiguelife,
st.fisk,
st.foldcauchy,
st.foldnorm,
st.genlogistic,
st.gennorm,
st.genpareto,
st.genexpon,
st.genextreme,
st.gausshyper,
st.gamma,
st.gengamma,
st.genhalflogistic,
st.geninvgauss,
st.gilbrat,
st.gompertz,
st.gumbel_r,
st.gumbel_l,
st.halfcauchy,
st.halflogistic,
st.halfnorm,
st.halfgennorm,
st.hypsecant,
st.invgamma,
st.invgauss,
st.invweibull,
st.johnsonsb,
st.johnsonsu,
st.kappa4,
st.kappa3,
st.ksone,
st.kstwo,
st.kstwobign,
st.laplace,
st.laplace_asymmetric,
st.levy,
st.levy_l,
# st.levy_stable, # unstable in v1.6.0
st.logistic,
st.loggamma,
st.loglaplace,
st.lognorm,
st.loguniform,
st.lomax,
st.maxwell,
st.mielke,
st.moyal,
st.nakagami,
st.ncx2,
st.ncf,
st.nct,
st.norm,
st.norminvgauss,
st.pareto,
st.pearson3,
st.powerlaw,
st.powerlognorm,
st.powernorm,
st.rdist,
st.rayleigh,
st.rice,
st.recipinvgauss,
st.semicircular,
st.skewnorm,
st.t,
st.trapezoid,
st.triang,
st.truncexpon,
st.truncnorm,
st.tukeylambda,
st.uniform,
# st.vonmises, # does not work in v1.6.0
st.vonmises_line,
st.wald,
st.weibull_min,
st.weibull_max,
st.wrapcauchy,
]
# Best holders
best_distribution = st.norm
best_params = (0.0, 1.0)
best_sse = np.inf
# Estimate distribution parameters from data
for distribution in tqdm(DISTRIBUTIONS):
# Try to fit the distribution
try:
# Ignore warnings from data that can't be fit
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
# fit dist to data
params = distribution.fit(data)
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF and error with fit in distribution
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
# if ax is passed, add to plot
try:
if ax:
|
pd.Series(pdf, x)
|
pandas.Series
|
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import requests
import io
from alphacast import Alphacast
from dotenv import dotenv_values
API_KEY = dotenv_values(".env").get("API_KEY")
alphacast = Alphacast(API_KEY)
months = ['12', '11', '10', '09', '08', '07', '06', '05', '04', '03', '02', '01']
for month in months:
print('Trying month number:', month)
url = 'https://www.indec.gob.ar//ftp/cuadros/economia/sh_oferta_demanda_{}_21.xls'.format(month)
try:
sheetnames = ['cuadro 1', 'cuadro 8']
dfParcial = pd.DataFrame([])
for sheet in sheetnames:
df = pd.read_excel(url, skiprows=3, sheet_name=sheet)
maxindex = df[df[df.columns[0]] == '(1) Datos provisorios.'].index[0]
df = df[0:maxindex]
df = df.set_index(df.columns[0])
df= df.T
df['Date'] = df.index
def replaceWithNan(x):
x= str(x)
if "Unnamed:" in x:
x=np.nan
return x
def replaceOthers(x):
x=str(x)
x=x.replace("(1)", '')
x=x.replace("(2)", '')
x=x.replace("(3)", '')
x=x.replace("(4)", '')
return x
df['Date'] = df['Date'].apply(lambda x: replaceWithNan(x))
df['Date'] = df['Date'].fillna(method='ffill')
df['Date'] = df['Date'].apply(lambda x: replaceOthers(x))
df = df.rename(columns = {df.columns[0]: 'quarter'})
df = df.dropna(axis=1, how='all')
df = df.dropna(axis=0, how='all', subset= df.columns[1:-1])
df = df.reset_index()
del df['index']
def replaceQuarter(x):
x=str(x)
if x == '1º trimestre':
x='01-01'
elif x == '2º trimestre':
x='04-01'
elif x == '3º trimestre':
x='07-01'
elif x == '4º trimestre':
x='10-01'
elif x == 'Total':
x=np.nan
return x
df['quarter'] = df['quarter'].apply(lambda x: replaceQuarter(x))
df = df.dropna(how='all', subset= df.columns[2:-1])
df = df.dropna(how='any', subset=['quarter'])
df['Date'] = df['Date'].apply(lambda x: x.replace(' ', ''))
df['Date'] = df['Date'] + '-' + df['quarter'].astype(str)
df['Date'] = df['Date'].apply(lambda x: x.replace('.0', ''))
del df['quarter']
df.reset_index()
# df.Date.values
#df['Date']=pd.to_datetime(df['Date'])
df['Date'] = pd.to_datetime(df['Date'], errors="coerce")
df = df.set_index('Date')
newCols=[]
for col in df.columns:
if col != 'Date':
if sheet == 'cuadro 1':
colname = "[P2004]-" + col
newCols += [colname]
elif sheet == 'cuadro 8':
colname = "[CP]-" + col
newCols += [colname]
df.columns = newCols
if sheet=='cuadro 1':
dfParcial = df
else:
dfParcial = dfParcial.merge(df, how='left', left_index=True, right_index=True)
print('We have a match in month number', month)
break
except Exception:
print('There are no updated data for month', month)
pass
dfParcial['[P2004]-Objetos valiosos'] = dfParcial['[P2004]-Objetos valiosos'].fillna(0)
# Replace the '///' placeholder and then fill missing values; using Series.replace instead of
# .str.replace avoids turning the non-string entries of this mixed-type column into NaN
dfParcial['[P2004]-Discrepancia estadística (4)'] = dfParcial['[P2004]-Discrepancia estadística (4)'].replace('///', '0').fillna(0)
months = ['12', '11', '10', '09', '08', '07', '06', '05', '04', '03', '02', '01']
for month in months:
print('Trying month number:', month)
url = 'https://www.indec.gob.ar//ftp/cuadros/economia/sh_oferta_demanda_desest_{}_21.xls'.format(month)
r = requests.get(url, allow_redirects=False, verify=False)
try:
with io.BytesIO(r.content) as dframe:
df = pd.read_excel(dframe, sheet_name='desestacionalizado n', skiprows=3)
df = df.rename(columns = {'Año': 'Date'})
df['Date'] = df['Date'].fillna(method='ffill')
df = df.rename(columns = {df.columns[1]: 'quarter'})
df = df.dropna(axis=1, how='all')
df = df.dropna(axis=0, how='any', subset= df.columns[1:-1])
def replaceQuarter(x):
x= str(x)
x= x.lower()
if x == 'i':
x='01-01'
elif x == 'ii':
x='04-01'
elif x == 'iii':
x='07-01'
elif x == 'iv':
x='10-01'
elif x == 'total':
x=np.nan
return x
df['quarter'] = df['quarter'].apply(lambda x: replaceQuarter(x))
df['Date'] = df['Date'].astype(str) + '-' + df['quarter'].astype(str)
del df['quarter']
#df['Date'] = df['Date'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d'))
df['Date'] =
|
pd.to_datetime(df['Date'], errors="coerce")
|
pandas.to_datetime
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp =
|
Index(exp_data)
|
pandas.Index
|
from datetime import datetime
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
date_range,
)
import pandas._testing as tm
class TestDataFrameReshape:
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self, using_array_manager):
# Gh#34708
df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
df2 = df[["x"]]
df2["y"] = df["y"]
if not using_array_manager:
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
key = ("<KEY>")
expected = unstacked[key]
result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = Series(["a", "b", "c", "a"], dtype="category")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{
"a": pd.Categorical(list("axa"), categories=list("abc")),
"b": pd.Categorical(list("bcx"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
# Fill with a non-category value raises a TypeError
msg = r"'fill_value=d' is not present in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value="c")
expected = DataFrame(
{
"a": pd.Categorical(list("aca"), categories=list("abc")),
"b": pd.Categorical(list("bcc"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"unstack_idx, expected_values, expected_index, expected_columns",
[
(
("A", "a"),
[[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
MultiIndex.from_tuples(
[(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
),
MultiIndex.from_tuples(
[("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
names=[None, ("A", "a")],
),
),
(
(("A", "a"), "B"),
[[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
Index([3, 4], name="C"),
MultiIndex.from_tuples(
[
("d", "a", 1),
("d", "a", 2),
("d", "b", 1),
("d", "b", 2),
("e", "a", 1),
("e", "a", 2),
("e", "b", 1),
("e", "b", 2),
],
names=[None, ("A", "a"), "B"],
),
),
],
)
def test_unstack_mixed_type_name_in_multiindex(
self, unstack_idx, expected_values, expected_index, expected_columns
):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
result = df.unstack(unstack_idx)
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
{
"state": ["IL", "MI", "NC"],
"index": ["a", "b", "c"],
"some_categories": Series(["a", "b", "c"]).astype("category"),
"A": np.random.rand(3),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": Series([1.0, 50.0, 100.0]).astype("float32"),
"F": Series([3.0, 4.0, 5.0]).astype("float64"),
"G": False,
"H": Series([1, 200, 923442], dtype="int8"),
}
)
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
tm.assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(["state", "index"])
unstack_and_compare(df1, "index")
df1 = df.set_index(["state", "some_categories"])
unstack_and_compare(df1, "some_categories")
df1 = df.set_index(["F", "C"])
unstack_and_compare(df1, "F")
df1 = df.set_index(["G", "B", "state"])
unstack_and_compare(df1, "B")
df1 = df.set_index(["E", "A"])
unstack_and_compare(df1, "E")
df1 = df.set_index(["state", "index"])
s = df1["A"]
unstack_and_compare(s, "index")
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
tm.assert_frame_equal(df.stack(level=[1, 2]), df.stack(level=1).stack(level=1))
tm.assert_frame_equal(
df.stack(level=[-2, -1]), df.stack(level=1).stack(level=1)
)
df_named = df.copy()
return_value = df_named.columns.set_names(range(3), inplace=True)
assert return_value is None
tm.assert_frame_equal(
df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ["exp", "animal", 1]
tm.assert_frame_equal(
df2.stack(level=["animal", 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=["exp", 1]), exp_hair_stacked, check_names=False
)
# When mixed types are passed and the ints are not level
# names, raise
msg = (
"level should contain all level names or all level numbers, not "
"a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ["exp", "animal", 0]
tm.assert_frame_equal(
df3.stack(level=["animal", 0]), animal_hair_stacked, check_names=False
)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=["exp", "animal"])
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
tm.assert_frame_equal(
df2.stack(level=[1, 2]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 1]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 2]), exp_hair_stacked, check_names=False
)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
tm.assert_frame_equal(
df3.stack(level=[0, 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 0]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 1]), exp_hair_stacked, check_names=False
)
def test_unstack_bool(self):
df = DataFrame(
[False, False],
index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),
columns=["col"],
)
rs = df.unstack()
xp = DataFrame(
np.array([[False, np.nan], [np.nan, False]], dtype=object),
index=["a", "b"],
columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),
)
tm.assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=["first", "second", "third"],
)
s = Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["first", "second"],
)
expected = DataFrame(
np.array(
[[np.nan, 0], [0, np.nan], [np.nan, 0], [0, np.nan]], dtype=np.float64
),
index=expected_mi,
columns=Index(["a", "b"], name="third"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_to_series(self, float_frame):
# check reversibility
data = float_frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
tm.assert_frame_equal(undo, float_frame)
# check NA handling
data = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
data.index = Index(["a", "b", "c"])
result = data.unstack()
midx = MultiIndex(
levels=[["x", "y"], ["a", "b", "c"]],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
)
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
tm.assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
tm.assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]]
df = DataFrame(rows, columns=list("ABCD"))
result = df.dtypes
expected = Series([np.dtype("int64")] * 4, index=list("ABCD"))
tm.assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(["A", "B"])
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("int64")] * 4,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# mixed
df2 = df.set_index(["A", "B"])
df2["C"] = 3.0
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("int64")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
df2["D"] = "foo"
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("object")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# GH7405
for c, d in (
(np.zeros(5), np.zeros(5)),
(np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")),
):
df = DataFrame(
{
"A": ["a"] * 5,
"C": c,
"D": d,
"B": date_range("2012-01-01", periods=5),
}
)
right = df.iloc[:3].copy(deep=True)
df = df.set_index(["A", "B"])
df["D"] = df["D"].astype("int64")
left = df.iloc[:3].unstack(0)
right = right.set_index(["A", "B"]).unstack(0)
right[("D", "a")] = right[("D", "a")].astype("int64")
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"])
df = DataFrame([1, 2], index=idx)
msg = "The name c1 occurs multiple times, use a level number"
with pytest.raises(ValueError, match=msg):
df.unstack("c1")
with pytest.raises(ValueError, match=msg):
df.T.stack("c1")
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]
df = DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]])
expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = DataFrame(
np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx
)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# With mixed dtype and NaN
levels = [["a", 2, "c"], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = MultiIndex(levels, codes)
data = np.arange(8)
df = DataFrame(data.reshape(4, 2), index=idx)
cases = (
(0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]),
)
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = MultiIndex.from_product([[0, 1], col_level])
expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [["A", "C"], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"])
ind = df.set_index(["A", "B", "C"], drop=False)
selection = ind.loc[(slice(None), slice(None), "I"), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product(
[expected.columns, ["I"]], names=[None, "C"]
)
expected.index = expected.index.droplevel("C")
tm.assert_frame_equal(result, expected)
def test_unstack_long_index(self):
# GH 32624: Error when using a lot of indices to unstack.
# The error occurred only if a lot of indices were used.
df = DataFrame(
[[1]],
columns=MultiIndex.from_tuples([[0]], names=["c1"]),
index=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
)
result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"])
expected = DataFrame(
[[1]],
columns=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
index=Index([0], name="i1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multi_level_cols(self):
# GH 24729: Unstack a df with multi level columns
df = DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
columns=MultiIndex.from_tuples(
[["B", "C"], ["B", "D"]], names=["c1", "c2"]
),
index=MultiIndex.from_tuples(
[[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"]
),
)
assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
def test_unstack_multi_level_rows_and_cols(self):
# GH 28306: Unstack df with multi level cols and rows
df = DataFrame(
[[1, 2], [3, 4], [-1, -2], [-3, -4]],
columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]),
index=MultiIndex.from_tuples(
[
["m1", "P3", 222],
["m1", "A5", 111],
["m2", "P3", 222],
["m2", "A5", 111],
],
names=["i1", "i2", "i3"],
),
)
result = df.unstack(["i3", "i2"])
expected = df.unstack(["i3"]).unstack(["i2"])
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index1(self):
# GH7466
def cast(val):
val_str = "" if val != val else val
return f"{val_str:1}"
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split("."))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(map(cast, right))
assert left == right
df = DataFrame(
{
"jim": ["a", "b", np.nan, "d"],
"joe": ["w", "x", "y", "z"],
"jolie": ["a.w", "b.x", " .y", "d.z"],
}
)
left = df.set_index(["jim", "joe"]).unstack()["jolie"]
right = df.set_index(["joe", "jim"]).unstack()["jolie"].T
tm.assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf["jolie"])
df = DataFrame(
{
"1st": ["d"] * 3
+ [np.nan] * 5
+ ["a"] * 2
+ ["c"] * 3
+ ["e"] * 2
+ ["b"] * 5,
"2nd": ["y"] * 2
+ ["w"] * 3
+ [np.nan] * 3
+ ["z"] * 4
+ [np.nan] * 3
+ ["x"] * 3
+ [np.nan] * 2,
"3rd": [
67,
39,
53,
72,
57,
80,
31,
18,
11,
30,
59,
50,
62,
59,
76,
52,
14,
53,
60,
51,
],
}
)
df["4th"], df["5th"] = (
df.apply(lambda r: ".".join(map(cast, r)), axis=1),
df.apply(lambda r: ".".join(map(cast, r.iloc[::-1])), axis=1),
)
for idx in itertools.permutations(["1st", "2nd", "3rd"]):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ["4th", "5th"]:
verify(udf[col])
def test_unstack_nan_index2(self):
# GH7403
df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [
[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7],
]
vals = list(map(list, zip(*vals)))
idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B")
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index3(self, using_array_manager):
# GH7401
df = DataFrame(
{
"A": list("aaaaabbbbb"),
"B": (date_range("2012-01-01", periods=5).tolist() * 2),
"C": np.arange(10),
}
)
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack()
vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]])
idx = Index(["a", "b"], name="A")
cols = MultiIndex(
levels=[["C"], date_range("2012-01-01", periods=5)],
codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, "B"],
)
right = DataFrame(vals, columns=cols, index=idx)
if using_array_manager:
# INFO(ArrayManager) with ArrayManager preserve dtype where possible
cols = right.columns[[1, 2, 3, 5]]
right[cols] = right[cols].astype(df["C"].dtype)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index4(self):
# GH4862
vals = [
["Hg", np.nan, np.nan, 680585148],
["U", 0.0, np.nan, 680585148],
["Pb", 7.07e-06, np.nan, 680585148],
["Sn", 2.3614e-05, 0.0133, 680607017],
["Ag", 0.0, 0.0133, 680607017],
["Hg", -0.00015, 0.0133, 680607017],
]
df = DataFrame(
vals,
columns=["agent", "change", "dosage", "s_id"],
index=[17263, 17264, 17265, 17266, 17267, 17268],
)
left = df.copy().set_index(["s_id", "dosage", "agent"]).unstack()
vals = [
[np.nan, np.nan, 7.07e-06, np.nan, 0.0],
[0.0, -0.00015, np.nan, 2.3614e-05, np.nan],
]
idx = MultiIndex(
levels=[[680585148, 680607017], [0.0133]],
codes=[[0, 1], [-1, 0]],
names=["s_id", "dosage"],
)
cols = MultiIndex(
levels=[["change"], ["Ag", "Hg", "Pb", "Sn", "U"]],
codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, "agent"],
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(["s_id", "dosage", "agent"])
tm.assert_frame_equal(left.unstack(), right)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) MultiIndex bug
def test_unstack_nan_index5(self):
# GH9497 - multiple unstack with nulls
df = DataFrame(
{
"1st": [1, 2, 1, 2, 1, 2],
"2nd": date_range("2014-02-01", periods=6, freq="D"),
"jim": 100 + np.arange(6),
"joe": (np.random.randn(6) * 10).round(2),
}
)
df["3rd"] = df["2nd"] - pd.Timestamp("2014-02-02")
df.loc[1, "2nd"] = df.loc[3, "2nd"] = np.nan
df.loc[1, "3rd"] = df.loc[4, "3rd"] = np.nan
left = df.set_index(["1st", "2nd", "3rd"]).unstack(["2nd", "3rd"])
assert left.notna().values.sum() == 2 * len(df)
for col in ["jim", "joe"]:
for _, r in df.iterrows():
key = r["1st"], (col, r["2nd"], r["3rd"])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, "A", "B")]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ("B",)])
ecols = MultiIndex.from_tuples([(t, "A")])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
tm.assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(
np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),
columns=multiindex,
)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(
df.columns.to_numpy(), names=df.columns.names
)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples(
[("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")],
names=["Upper", "Lower"],
)
for multiindex_columns in (
[0, 1, 2, 3, 4],
[0, 1, 2, 3],
[0, 1, 2, 4],
[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[0, 1],
[0, 2],
[0, 3],
[0],
[2],
[4],
):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame(
[[0, 2], [1, np.nan], [3, 5], [4, np.nan]],
index=MultiIndex(
levels=[[0, 1], ["u", "x", "y", "z"]],
codes=[[0, 0, 1, 1], [1, 3, 1, 3]],
names=[None, "Lower"],
),
columns=Index(["B", "C"], name="Upper"),
dtype=df.dtypes[0],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize("labels", [list("yxz"), list("yxy")])
def test_stack_preserve_categorical_dtype(self, ordered, labels):
# GH13854
cidx = pd.CategoricalIndex(labels, categories=list("xyz"), ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
# `MultiIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
"labels,data",
[
(list("xyz"), [10, 11, 12, 13, 14, 15]),
(list("zyx"), [14, 15, 12, 13, 10, 11]),
],
)
def test_stack_multi_preserve_categorical_dtype(self, ordered, labels, data):
# GH-36991
cidx = pd.CategoricalIndex(labels, categories=sorted(labels), ordered=ordered)
cidx2 = pd.CategoricalIndex(["u", "v"], ordered=ordered)
midx = MultiIndex.from_product([cidx, cidx2])
df = DataFrame([sorted(data)], columns=midx)
result = df.stack([0, 1])
s_cidx = pd.CategoricalIndex(sorted(labels), ordered=ordered)
expected = Series(data, index=MultiIndex.from_product([[0], s_cidx, cidx2]))
tm.assert_series_equal(result, expected)
def test_stack_preserve_categorical_dtype_values(self):
# GH-23077
cat = pd.Categorical(["a", "a", "b", "c"])
df = DataFrame({"A": cat, "B": cat})
result = df.stack()
index = MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]])
expected = Series(
pd.Categorical(["a", "a", "a", "a", "b", "b", "c", "c"]), index=index
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index, columns",
[
([0, 0, 1, 1], MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 0, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 1, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
],
)
def test_stack_multi_columns_non_unique_index(self, index, columns):
# GH-28301
df = DataFrame(index=index, columns=columns).fillna(1)
stacked = df.stack()
new_index = MultiIndex.from_tuples(stacked.index.to_numpy())
expected = DataFrame(
stacked.to_numpy(), index=new_index, columns=stacked.columns
)
tm.assert_frame_equal(stacked, expected)
stacked_codes = np.asarray(stacked.index.codes)
expected_codes = np.asarray(new_index.codes)
tm.assert_numpy_array_equal(stacked_codes, expected_codes)
@pytest.mark.parametrize("level", [0, 1])
def test_unstack_mixed_extension_types(self, level):
index = MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 1)], names=["a", "b"])
df = DataFrame(
{
"A": pd.array([0, 1, None], dtype="Int64"),
"B": pd.Categorical(["a", "a", "b"]),
},
index=index,
)
result = df.unstack(level=level)
expected = df.astype(object).unstack(level=level)
expected_dtypes = Series(
[df.A.dtype] * 2 + [df.B.dtype] * 2, index=result.columns
)
tm.assert_series_equal(result.dtypes, expected_dtypes)
tm.assert_frame_equal(result.astype(object), expected)
@pytest.mark.parametrize("level", [0, "baz"])
def test_unstack_swaplevel_sortlevel(self, level):
# GH 20994
mi = MultiIndex.from_product([[0], ["d", "c"]], names=["bar", "baz"])
df = DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"])
df.columns.name = "foo"
expected = DataFrame(
[[3, 1, 2, 0]],
columns=MultiIndex.from_tuples(
[("c", "A"), ("c", "B"), ("d", "A"), ("d", "B")], names=["baz", "foo"]
),
)
expected.index.name = "bar"
result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_object():
# GH12815 Test unstacking with object.
data = Series(["a", "b", "c", "a"], dtype="object")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{"a": ["a", np.nan, "a"], "b": ["b", "c", np.nan]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
# Fill with any value replaces missing values as expected
result = data.unstack(fill_value="d")
expected = DataFrame(
{"a": ["a", "d", "a"], "b": ["b", "c", "d"]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
def test_unstack_timezone_aware_values():
# GH 18338
df = DataFrame(
{
"timestamp": [pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC")],
"a": ["a"],
"b": ["b"],
"c": ["c"],
},
columns=["timestamp", "a", "b", "c"],
)
result = df.set_index(["a", "b"]).unstack()
expected = DataFrame(
[[pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC"), "c"]],
index=Index(["a"], name="a"),
columns=MultiIndex(
levels=[["timestamp", "c"], ["b"]],
codes=[[0, 1], [0, 0]],
names=[None, "b"],
),
)
tm.assert_frame_equal(result, expected)
def test_stack_timezone_aware_values():
# GH 19420
ts = date_range(freq="D", start="20180101", end="20180103", tz="America/New_York")
df = DataFrame({"A": ts}, index=["a", "b", "c"])
result = df.stack()
expected = Series(
ts,
index=MultiIndex(levels=[["a", "b", "c"], ["A"]], codes=[[0, 1, 2], [0, 0, 0]]),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_stack_empty_frame(dropna):
# GH 36113
expected = Series(index=MultiIndex([[], []], [[], []]), dtype=np.float64)
result = DataFrame(dtype=np.float64).stack(dropna=dropna)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_stack_unstack_empty_frame(dropna, fill_value):
# GH 36113
result = (
DataFrame(dtype=np.int64).stack(dropna=dropna).unstack(fill_value=fill_value)
)
expected = DataFrame(dtype=np.int64)
tm.assert_frame_equal(result, expected)
def test_unstack_single_index_series():
# GH 36113
msg = r"index must be a MultiIndex to unstack.*"
with pytest.raises(ValueError, match=msg):
Series(dtype=np.int64).unstack()
def test_unstacking_multi_index_df():
# see gh-30740
df = DataFrame(
{
"name": ["Alice", "Bob"],
"score": [9.5, 8],
"employed": [False, True],
"kids": [0, 0],
"gender": ["female", "male"],
}
)
df = df.set_index(["name", "employed", "kids", "gender"])
df = df.unstack(["gender"], fill_value=0)
expected = df.unstack("employed", fill_value=0).unstack("kids", fill_value=0)
result = df.unstack(["employed", "kids"], fill_value=0)
expected = DataFrame(
[[9.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 8.0]],
index=Index(["Alice", "Bob"], name="name"),
columns=MultiIndex.from_tuples(
[
("score", "female", False, 0),
("score", "female", True, 0),
("score", "male", False, 0),
("score", "male", True, 0),
],
names=[None, "gender", "employed", "kids"],
),
)
tm.assert_frame_equal(result, expected)
def test_stack_positional_level_duplicate_column_names():
# https://github.com/pandas-dev/pandas/issues/36353
columns = MultiIndex.from_product([("x", "y"), ("y", "z")], names=["a", "a"])
df = DataFrame([[1, 1, 1, 1]], columns=columns)
result = df.stack(0)
new_columns = Index(["y", "z"], name="a")
new_index = MultiIndex.from_tuples([(0, "x"), (0, "y")], names=[None, "a"])
expected = DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns)
tm.assert_frame_equal(result, expected)
class TestStackUnstackMultiLevel:
def test_unstack(self, multiindex_year_month_day_dataframe_random_data):
# just check that it works for now
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack()
unstacked.unstack()
# test that ints work
ymd.astype(int).unstack()
# test that int32 work
ymd.astype(np.int32).unstack()
@pytest.mark.parametrize(
"result_rows,result_columns,index_product,expected_row",
[
(
[[1, 1, None, None, 30.0, None], [2, 2, None, None, 30.0, None]],
["ix1", "ix2", "col1", "col2", "col3", "col4"],
2,
[None, None, 30.0, None],
),
(
[[1, 1, None, None, 30.0], [2, 2, None, None, 30.0]],
["ix1", "ix2", "col1", "col2", "col3"],
2,
[None, None, 30.0],
),
(
[[1, 1, None, None, 30.0], [2, None, None, None, 30.0]],
["ix1", "ix2", "col1", "col2", "col3"],
None,
[None, None, 30.0],
),
],
)
def test_unstack_partial(
self, result_rows, result_columns, index_product, expected_row
):
# check for regressions on this issue:
# https://github.com/pandas-dev/pandas/issues/19351
# make sure DataFrame.unstack() works when its run on a subset of the DataFrame
# and the Index levels contain values that are not present in the subset
result = DataFrame(result_rows, columns=result_columns).set_index(
["ix1", "ix2"]
)
result = result.iloc[1:2].unstack("ix2")
expected = DataFrame(
[expected_row],
columns=MultiIndex.from_product(
[result_columns[2:], [index_product]], names=[None, "ix2"]
),
index=Index([2], name="ix1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples(
[(0, "foo", 0), (0, "bar", 0), (1, "baz", 1), (1, "qux", 1)]
)
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected)
def test_stack(self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
# regular roundtrip
unstacked = ymd.unstack()
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, ymd)
unlexsorted = ymd.sort_index(level=2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
# columns unsorted
unstacked = ymd.unstack()
unstacked = unstacked.sort_index(axis=1, ascending=False)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, ymd)
# more than 2 levels in the columns
unstacked = ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = ymd.unstack()
tm.assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = ymd.unstack(1)
tm.assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = ymd.stack().unstack(1).unstack(1)
tm.assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = ymd.unstack(2).loc[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = ymd.stack()
tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = ymd.unstack(0).stack(-2)
expected = ymd.unstack(0).stack(0)
tm.assert_frame_equal(result, expected)
# GH10417
def check(left, right):
tm.assert_series_equal(left, right)
assert left.index.is_unique is False
li, ri = left.index, right.index
tm.assert_index_equal(li, ri)
df = DataFrame(
np.arange(12).reshape(4, 3),
index=list("abab"),
columns=["1st", "2nd", "3rd"],
)
mi = MultiIndex(
levels=[["a", "b"], ["1st", "2nd", "3rd"]],
codes=[np.tile(np.arange(2).repeat(3), 2), np.tile(np.arange(3), 4)],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
df.columns = ["1st", "2nd", "1st"]
mi = MultiIndex(
levels=[["a", "b"], ["1st", "2nd"]],
codes=[np.tile(np.arange(2).repeat(3), 2), np.tile([0, 1, 0], 4)],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
tpls = ("a", 2), ("b", 1), ("a", 1), ("b", 2)
df.index = MultiIndex.from_tuples(tpls)
mi = MultiIndex(
levels=[["a", "b"], [1, 2], ["1st", "2nd"]],
codes=[
np.tile(np.arange(2).repeat(3), 2),
np.repeat([1, 0, 1], [3, 6, 3]),
np.tile([0, 1, 0], 4),
],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thu,Dinner,No,3.0,1
Thu,Lunch,No,117.32,44
Thu,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(["day", "time", "smoker"])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
tm.assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.T
df["foo", "four"] = "foo"
df = df.sort_index(level=1, axis=1)
stacked = df.stack()
result = df["foo"].stack().sort_index()
tm.assert_series_equal(stacked["foo"], result, check_names=False)
assert result.name is None
assert stacked["bar"].dtype == np.float_
def test_unstack_bug(self):
df = DataFrame(
{
"state": ["naive", "naive", "naive", "active", "active", "active"],
"exp": ["a", "b", "b", "b", "a", "a"],
"barcode": [1, 2, 3, 4, 1, 3],
"v": ["hi", "hi", "bye", "bye", "bye", "peace"],
"extra": np.arange(6.0),
}
)
result = df.groupby(["state", "exp", "barcode", "v"]).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
tm.assert_series_equal(restacked, result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
unstacked = frame.unstack()
assert unstacked.index.name == "first"
assert unstacked.columns.names == ["exp", "second"]
restacked = unstacked.stack()
assert restacked.index.names == frame.index.names
@pytest.mark.parametrize("method", ["stack", "unstack"])
def test_stack_unstack_wrong_level_name(
self, method, multiindex_dataframe_random_data
):
# GH 18303 - wrong level name should raise
frame = multiindex_dataframe_random_data
# A DataFrame with flat axes:
df = frame.loc["foo"]
with pytest.raises(KeyError, match="does not match index name"):
getattr(df, method)("mistake")
if method == "unstack":
# Same on a Series:
s = df.iloc[:, 0]
with pytest.raises(KeyError, match="does not match index name"):
getattr(s, method)("mistake")
def test_unstack_level_name(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.unstack("second")
expected = frame.unstack(level=1)
tm.assert_frame_equal(result, expected)
def test_stack_level_name(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
unstacked = frame.unstack("second")
result = unstacked.stack("exp")
expected = frame.unstack().stack(0)
tm.assert_frame_equal(result, expected)
result = frame.stack("exp")
expected = frame.stack()
tm.assert_series_equal(result, expected)
def test_stack_unstack_multiple(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
expected = ymd.unstack("year").unstack("month")
tm.assert_frame_equal(unstacked, expected)
assert unstacked.columns.names == expected.columns.names
# series
s = ymd["A"]
s_unstacked = s.unstack(["year", "month"])
tm.assert_frame_equal(s_unstacked, expected["A"])
restacked = unstacked.stack(["year", "month"])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sort_index(level=0)
tm.assert_frame_equal(restacked, ymd)
assert restacked.index.names == ymd.index.names
# GH #451
unstacked = ymd.unstack([1, 2])
expected = ymd.unstack(1).unstack(1).dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected)
unstacked = ymd.unstack([2, 1])
expected = ymd.unstack(2).unstack(1).dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns])
def test_stack_names_and_numbers(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
# Can't use mixture of names and numbers to stack
with pytest.raises(ValueError, match="level should contain"):
unstacked.stack([0, "month"])
def test_stack_multiple_out_of_bounds(
self, multiindex_year_month_day_dataframe_random_data
):
# nlevels == 3
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
with pytest.raises(IndexError, match="Too many levels"):
unstacked.stack([2, 3])
with pytest.raises(IndexError, match="not a valid level number"):
unstacked.stack([-4, -3])
def test_unstack_period_series(self):
# GH4342
idx1 = pd.PeriodIndex(
["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"],
freq="M",
name="period",
)
idx2 = Index(["A", "B"] * 3, name="str")
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
["2013-01", "2013-02", "2013-03"], freq="M", name="period"
)
expected = DataFrame(
{"A": [1, 3, 5], "B": [2, 4, 6]}, index=e_idx, columns=["A", "B"]
)
expected.columns.name = "str"
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(
["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"],
freq="M",
name="period1",
)
idx2 = pd.PeriodIndex(
["2013-12", "2013-11", "2013-10", "2013-09", "2013-08", "2013-07"],
freq="M",
name="period2",
)
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
["2013-01", "2013-02", "2013-03"], freq="M", name="period1"
)
e_cols = pd.PeriodIndex(
["2013-07", "2013-08", "2013-09", "2013-10", "2013-11", "2013-12"],
freq="M",
name="period2",
)
expected = DataFrame(
[
[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan],
],
index=e_idx,
columns=e_cols,
)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH4342
idx1 = pd.PeriodIndex(
["2014-01", "2014-02", "2014-02", "2014-02", "2014-01", "2014-01"],
freq="M",
name="period1",
)
idx2 = pd.PeriodIndex(
["2013-12", "2013-12", "2014-02", "2013-10", "2013-10", "2014-02"],
freq="M",
name="period2",
)
value = {"A": [1, 2, 3, 4, 5, 6], "B": [6, 5, 4, 3, 2, 1]}
idx = MultiIndex.from_arrays([idx1, idx2])
df = DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(["2014-01", "2014-02"], freq="M", name="period1")
e_2 = pd.PeriodIndex(
["2013-10", "2013-12", "2014-02", "2013-10", "2013-12", "2014-02"],
freq="M",
name="period2",
)
e_cols = MultiIndex.from_arrays(["A A A B B B".split(), e_2])
expected = DataFrame(
[[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]], index=e_1, columns=e_cols
)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(
["2014-01", "2014-02", "2014-01", "2014-02"], freq="M", name="period1"
)
e_2 = pd.PeriodIndex(
["2013-10", "2013-12", "2014-02"], freq="M", name="period2"
)
e_cols = MultiIndex.from_arrays(["A A B B".split(), e_1])
expected = DataFrame(
[[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]], index=e_2, columns=e_cols
)
tm.assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
# bug when some uniques are not present in the data GH#3170
id_col = ([1] * 3) + ([2] * 3)
name = (["a"] * 3) + (["b"] * 3)
date = pd.to_datetime(["2013-01-03", "2013-01-04", "2013-01-05"] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame({"ID": id_col, "NAME": name, "DATE": date, "VAR1": var1})
multi = df.set_index(["DATE", "ID"])
multi.columns.name = "Params"
unst = multi.unstack("ID")
down = unst.resample("W-THU").mean()
rs = down.stack("ID")
xp = unst.loc[:, ["VAR1"]].resample("W-THU").mean().stack("ID")
xp.columns.name = "Params"
|
tm.assert_frame_equal(rs, xp)
|
pandas._testing.assert_frame_equal
|
import numpy as np
import pandas as pd
import pickle
import mysql.connector
import configparser
config = configparser.ConfigParser()
config.read("configm.ini")
with open('model_match', 'rb') as f:
mp = pickle.load(f)
mydb = mysql.connector.connect(
host=config.get('db-connection','host'),
user=config.get('db-connection','user'),
password=config.get('db-connection','passcode'),
database=config.get('db-connection','name')
)
query = config.get('data-extraction','mainquery')
df = pd.read_sql(query, mydb)
print(df)
df_drop = pd.read_csv('https://app.redash.io/xchange/api/queries/381074/results.csv?api_key=' + config.get('data-extraction','dropoffKey'))
df_drop = df_drop.rename(columns={'Dropoff': 'Dropoff_location'})
df_pick = pd.read_csv('https://app.redash.io/xchange/api/queries/381097/results.csv?api_key=' + config.get('data-extraction','pickupKey'))
df_pick = df_pick.rename(columns={'Pickup': 'Pickup_location'})
df_requester = pd.read_csv('https://app.redash.io/xchange/api/queries/381058/results.csv?api_key=' + config.get('data-extraction','requesterKey'))
df_requester = df_requester.rename(columns={'requester_id': 'Requester'})
df_add = pd.read_csv('https://app.redash.io/xchange/api/queries/381054/results.csv?api_key=' + config.get('data-extraction','addresseeKey'))
df_add = df_add.rename(columns={'Addressee_id': 'Addressee'})
def dataMerge(df1,df2,col):
result = pd.merge(df1, df2, how='left', on=[col])
return result
result1 = dataMerge(df1=df,df2=df_drop,col='Dropoff_location')
result1.drop(["Dropoff_location","Count", "when_accepted"],inplace=True,axis=1) # Removing old pickup location values
result1 = result1.rename(columns={'Shares_accepted': 'Dropoff_location'}) # Renaming columns
result2 = dataMerge(df1=result1,df2=df_pick,col='Pickup_location')
result2.drop(["Pickup_location","Count", "when_accepted"],inplace=True,axis=1) # Removing old pickup location values
result2 = result2.rename(columns={'Shares_accepted': 'Pickup_location'})
result3 = dataMerge(df1=result2,df2=df_requester,col='Requester')
result3.drop(["Requester","Count", "when_accepted"],inplace=True,axis=1)# Removing old pickup location values
result3 = result3.rename(columns={'Frequency': 'Requester'})# Renaming columns
result4 = dataMerge(df1=result3,df2=df_add,col='Addressee')
result4.drop(["Addressee","Count", "when_accepted"],inplace=True,axis=1)# Removing old pickup location values
result4 = result4.rename(columns={'Frequency': 'Addressee'})# Renaming columns
result4 = result4.fillna(0)
result4.drop(['Requirement_id'], axis=1 , inplace=True)
result4 = pd.concat([result4,pd.get_dummies(result4['Direction'])],axis=1)
result4.drop(['Direction'],axis=1, inplace=True)
#result4 = pd.concat([result4,pd.get_dummies(result4['Container_Type'])],axis=1)
#result4.drop(['Container_Type'],axis=1, inplace=True)
#result5 = result4.head()
#result5.to_csv('Match_final.csv', index = False)
print(result4)
pred_try = mp.predict(result4)
pred_try_df=
|
pd.DataFrame(pred_try, columns=['Match_Prediction'])
|
pandas.DataFrame
|
"""
**This package provides tools for editing and analysing CUBE .LIN files**
example:
::
import pycube
links = pandasshp.read_shp('Q:/my_links.shp').set_index('n')
nodes = pandasshp.read_shp('Q:/network/my_nodes.shp').set_index('n')
with open('Q:/my_lines.LIN', 'r') as lin:
text = lin.read()
hubs = pycube.lin.find_hubs(zones, nodes, text)
pruned_text = pycube.lin.prune_text(text, hubs)
with open('Q:/my_lines_pruned.LIN', 'w') as lin:
lin.write(pruned_text)
"""
__author__ = 'qchasserieau'
import itertools
import re
import networkx as nx
import numpy as np
import pandas as pd
import shapely
from IPython.display import display
from IPython.html.widgets import FloatProgress
from syspy.io.pandasshp import pandasshp
from syspy.pycube._line import Line
from syspy.pycube.dijkstra import DijkstraMonkey
from syspy.spatial import spatial
from syspy.syspy_utils import syscolors
class Lin:
"""
Joins a .LIN to a zoning and a network.
example:
::
import pycube
links = pandasshp.read_shp('Q:/my_links.shp').set_index('n')
nodes = pandasshp.read_shp('Q:/network/my_nodes.shp').set_index('n')
lines = pycube.lin.Lin(zones, nodes, file=r'Q:/my_lines.LIN')
"""
def __init__(
self,
zones=None,
nodes=None,
text=None,
file=None,
edges=None,
build_geometries=False,
build_graph=False,
sep='line name',
leg_type='nearest',
prj=None
):
progress = FloatProgress(min=0, max=5, width=975, height=10, color=syscolors.rainbow_shades[1], margin=5)
progress.value = 1
display(progress)
if not text and file:
with open(file, 'r') as lin_file:
text = lin_file.read()
equal = re.compile('[ ]*[=]+[ ]*')
coma = re.compile('[ ]*[,]+[ ]*')
        lower_text = text.lower().replace('n=', 'N=').replace('rt=', 'RT=').replace('<<pt>>', '<<PT>>')
self.text = coma.sub(', ', equal.sub('=', lower_text.replace('"', "'"))) #: raw text of the .LIN (str)
stop_list = _stop_list(self.text)
self.lin_chunks = self.text.split(sep)
self.sep = sep
self._to_dict()
self.line_names = [line_name(c) for c in self.lin_chunks]
self.line_names = [name for name in self.line_names if name != 'not_a_line']
if zones is not None:
zone_stops = _zone_stops(zones, nodes, stop_list, leg_type)
stop_lines = _stop_lines(stop_list, self.lin_chunks)
zone_lines = _zone_lines(zone_stops, stop_list, stop_lines)
hubs = _hubs(zone_stops, stop_lines, zone_lines)
self.zone_stops = zone_stops #: dictionary of the stops of each zone {zone: [stops that are in the zone]}
self.stop_lines = stop_lines #: dictionary of the lines of each stop {stop: [lines that stop]}
self.zone_lines = zone_lines #: dictionary of the lines of each zone {zone: [lines that stop in the zone]}
            self.hubs = hubs  #: minimal set of nodes necessary to keep zone_lines stable while pruning zone_stops
self.hubs_and_terminus = self.hubs.union(self.find_endpoints())
self.transitlegs = _transitlegs(self.stop_lines) #: list of stop<->line links (based on self.stop_lines)
self.nontransitlegs = _nontransitlegs(self.zone_stops) #: list of zone<->stop links (based on self.zone_stops)
self.stop_list = stop_list
            self.zones = zones  #: GeoDataFrame of the zones
self.prj = prj
self.nodes = nodes #: GeoDataFrame of the nodes
self.line_count = _line_count(text) #: line count by node
self.data = self.geo_dataframe(geometry=False) #: data organized as a dataframe
progress.value += 1
if build_graph:
#: nx.Graph built with self.nontransitlegs and self.transitlegs
self.connection_graph = nx.Graph(self.transitlegs + self.nontransitlegs)
            #: OD matrix that contains the path and the skims of each OD pair in the zoning (based on _path_matrix)
self.path_matrix = _path_matrix(self.connection_graph, self.zones)
progress.value += 1
if build_geometries:
geometries = pandasshp.od_matrix(zones)
#: OD matrix that contains the path and the skims of each OD pair in the zoning + the geometry
self.path_matrix_geometries = pd.merge(self.path_matrix, geometries, on=['origin', 'destination'])
progress.value += 1
if edges is not None:
self.dijkstra = DijkstraMonkey(edges.values)
progress.value += 1
def _to_dict(self):
self.line_dict = {line_name(c): Line(c) for c in self.text.split(self.sep)}
def _to_text(self, format_chunks=True):
_lines = [self.line_dict[n] for n in self.line_names]
if format_chunks:
self.text = self.sep.join([';;<<PT>>;;\n\n'] + [l.formated_chunk() for l in _lines])
else:
self.text = self.sep.join([';;<<PT>>;;\n\n'] + [l.chunk for l in _lines])
self.line_count = _line_count(self.text)
def to_text(self):
self._to_text()
return self.text
def change_time(self, to_change, factor=1, offset=0, inplace=True):
"""
Changes the Route Times of a line
:param inplace: edit the Lin object if True, return an edited text chunk if False
        :param to_change: name of the line to change (may be a list of names)
:param factor: multiplicative factor to multiply by the Route Times
:param offset: time to add to the Route Times
"""
if type(to_change) in [set, list]:
for entity in to_change:
self.change_time(entity, factor, offset, inplace)
else:
self.line_dict[to_change].change_time(factor, offset)
self._to_text()
def cut_at_node(self, name, n, keep='left'):
"""
Sets a new terminus to a line
:param name: name of the line to cut
        :param n: id of the new terminus of the line (must be a stop of the line in the first place)
        :param keep: part of the line to keep; if 'left', keep the nodes with lesser Route Times than n in the .LIN file
"""
self.line_dict[name].cut_at_node(n, keep=keep)
self._to_text()
def cut_between(self, to_cut, from_node, to_node, inplace=True):
"""
Shortens a line
:param to_cut: name of the line to cut or list of names
        :param from_node: first terminus of the new line (must be a stop of the line in the first place)
        :param to_node: second terminus of the new line (must be a stop of the line in the first place)
        :param inplace: if True, the shortened line replaces the original; if False, it is returned.
"""
if type(to_cut) in [set, list]:
for entity in to_cut:
self.cut_between(entity, from_node, to_node)
else:
self.line_dict[to_cut].cut_between(from_node, to_node)
self._to_text(format_chunks=False)
def copy_line(self, name, copy, from_node=None, to_node=None):
"""
Copies a line
:param name: name of the line to copy
:param copy: name of the copy
keeping the main corridor of a fork line in Monterrey:
::
lines.copy_line('ligne orange_est', 'ligne orange') # builds a line (ligne orange) from a branch (ligne orange_est)
lines.drop_line('ligne orange_est') # drops the first branch
lines.drop_line('ligne orange_ouest') # drops the other branch
lines.cut_between('ligne orange',6293, 53191) # reduces the copy to the main corridor
"""
self.line_dict[copy] = Line(self.line_dict[name].chunk.replace("'" + name + "'", "'" + copy + "'"))
self.line_names.append(copy)
self._to_text(format_chunks=False)
if from_node and to_node:
self.cut_between(copy, from_node, to_node)
def add_line(self, to_add):
"""
Adds a line to a Lin object
:param to_add: the Line object to add (one of the objects of Lin.line_dict for example)
Adding feeders from a distinct file to a Lin
::
lines = pycube.lin.Lin(nodes=nodes, file = data_path + r'/lineas/bus_2015_metro_2045.lin')
feeders = pycube.lin.Lin(nodes=nodes, file = data_path + r'/lineas/lin_2045_net_2045_dijkstra.lin')
to_add = [feeders.line_dict[name] for name in ['ruta_bis 67', 'ruta 67', 'ruta_bis 1', 'ruta 1']]
lines.add_line(to_add)
"""
if type(to_add) in [set, list]:
for entity in to_add:
self.add_line(entity)
else:
self.line_names.append(to_add.name)
self.line_dict[to_add.name] = to_add
self._to_text(format_chunks=False)
def drop_line(self, to_drop):
"""
Drops a line or a collection of lines
:param to_drop: the name or a collection of names of the lines to drop
"""
if type(to_drop) in [set, list]:
for entity in to_drop:
self.drop_line(entity)
else:
self.line_names = [n for n in self.line_names if n != to_drop]
self._to_text(format_chunks=False)
def new_line(self, name, header='', node_list=None, node_time_dict=None, nodes=None, speed=None):
chunk = header
if node_time_dict is not None:
for node, time in node_time_dict.items():
chunk += 'N=%s, RT=%s, ' % (str(node), str(time))
elif node_list is not None:
chunk_length = 0
for node in node_list:
chunk += 'N=%s, ' % (str(node))
chunk_length += 1
chunk += 'RT=%s, ' % (str(round(line_length(chunk, nodes) / speed, 2)) if chunk_length > 1 else '0')
self.line_names.append(name)
self.line_dict[name] = Line(chunk[:-2])
self._to_text(format_chunks=False)
def drop_mode(self, to_drop=None, all_but=None):
"""
Drops a mode or a collection of modes
:param to_drop: the name or a collection of names of the modes to drop
"""
geo = self.geo_dataframe(geometry=False)
to_drop = to_drop if type(to_drop) in [set, list] else ([to_drop] if to_drop else to_drop)
all_but = all_but if type(all_but) in [set, list] else ([all_but] if all_but else all_but)
modes = list(set(geo['mode'].unique()) - set(all_but) if all_but else to_drop)
line_names = list(geo[geo['mode'].isin(modes)]['name'])
self.drop_line(line_names)
self._to_text()
def merge_lines(self, left_name, right_name, delete_right=True, start='left'):
"""
:param left_name: name of the line to edit
:param right_name: name of the line to merge on the left line
:param delete_right: if True, the right line will be deleted
:param start: 'left' or 'right', 'left' means that the merged line starts with the node of the left line
        Let's merge 'ruta 67' (a bus line) onto 'ligne violette', a metro line that is twice as fast
::
lines.change_time('ruta_bis 67', 0.5, 0) # changes the Route Times of 'ruta_bis 67'
lines.merge_lines('ligne violette', 'ruta_bis 67',) # merge the lines and deletes 'ruta_bis 67'
lines.change_time('ruta 67', 0.5, 0)
lines.merge_lines('ligne violette_bis', 'ruta 67', start='right')
"""
self.line_dict[left_name].add_line(self.line_dict[right_name], start=start)
if delete_right:
self.line_names = [i for i in self.line_names if i != right_name]
self._to_text()
def set_parameter(self, to_set, parameter, value):
"""
Set parameters such as mode or headway for a line or a set of lines
:param to_set: the name or the list of names of the lines to edit
:param parameter: the name of the parameter to edit
:param value: the value to set for the parameter
"""
if type(to_set) in [set, list]:
for entity in to_set:
self.set_parameter(entity, parameter, value)
else:
self.line_dict[to_set].set_parameter(parameter, value)
self._to_text(format_chunks=False)
def set_direct(self, to_set, from_stop, to_stop):
"""
remove the stops between from_stop and to_stop (and reverse)
"""
if type(to_set) in [set, list]:
for entity in to_set:
self.set_direct(entity, from_stop, to_stop)
else:
self.line_dict[to_set].set_direct(from_stop, to_stop)
self._to_text(format_chunks=False)
def drop_checkpoints(self, to_drop):
if type(to_drop) in [set, list]:
for entity in to_drop:
self.drop_checkpoints(entity)
else:
self.line_dict[to_drop].drop_checkpoints()
self._to_text(format_chunks=False)
def change_stop(self, to_change, from_stop, to_stop, to_text=True):
if type(to_change) in [set, list]:
for entity in to_change:
self.change_stop(entity, from_stop, to_stop)
self.to_text()
else:
self.line_dict[to_change].change_stop(from_stop, to_stop)
if to_text:
self.to_text()
def find_endpoints(self, mode=None):
"""
        Returns the set of termini (line endpoints)
        :param mode: restrict the search to lines of this mode
        :return: the set of termini
"""
return _find_endpoints(self.text, mode=mode, sep=self.sep)
def find_mode_stops(self, mode, regex='N=[0-9]{4,6}'):
"""
Returns a set of stops for a given mode
:param mode: the mode
:param regex: the regex that defines the stops
:return: the set of stops for this mode
"""
return _mode_stops(mode, self.text, regex=regex, sep=self.sep)
def prune_text(self, stop_list=None, inplace=False):
stop_list = stop_list if stop_list else self.hubs_and_terminus
if inplace:
self.text = _prune_text(self.text, stop_list)
else:
return _prune_text(self.text, stop_list)
def geo_dataframe(self, mode_split_string='mode=', geometry=True, all_nodes=True):
"""
Returns a pd.DataFrame that contains the name, the mode, the headway and the geometry of the lines. It may be
used to dump the Lin to a .shp
:param mode_split_string: 'mode=' for example
:return: a pd.DataFrame that contains the name, the mode, the headway and the geometry of the lines
Saving a Lin as a .shp file::
geo = lines.geo_dataframe()
geo['color'] = geo['name'].apply(syscolors.in_string)
pandasshp.write_shp(sig_path+'lin_2045', geo, projection_string=epsg_32614)
"""
chunks = self.text.split(self.sep)
if geometry:
df = pd.DataFrame({
'name': pd.Series([line_name(c) for c in chunks]),
'mode': pd.Series([line_mode(c, split_string=mode_split_string) for c in chunks]),
'headway': pd.Series([line_headway(c) for c in chunks]),
'time': pd.Series([line_time(c) for c in chunks]),
'geometry': pd.Series([line_geometry(c, self.nodes, all_nodes) for c in chunks]),
'stops': pd.Series([str(_stop_list(c)) for c in chunks]),
'nodes': pd.Series([str(_node_list(c)) for c in chunks]),
'nstops': pd.Series([len(_stop_list(c)) for c in chunks])
})
df.dropna(inplace=True)
df['length'] = df['geometry'].apply(lambda g: g.length)
df['speed'] = df['length'] / df['time']
else:
df = pd.DataFrame({
'name': pd.Series([line_name(c) for c in chunks]),
'mode': pd.Series([line_mode(c, split_string=mode_split_string) for c in chunks]),
'headway': pd.Series([line_headway(c) for c in chunks]),
'time': pd.Series([line_time(c) for c in chunks]),
'stops': pd.Series([str(_stop_list(c)) for c in chunks]),
'nodes': pd.Series([str(_node_list(c)) for c in chunks]),
'nstops': pd.Series([len(_stop_list(c)) for c in chunks])
})
return df.dropna()
def to_shape(self, stop_file=None, link_file=None, all_nodes=True):
"""
Saves the geometry of the Lin as a .shp that contains the points (stops) and the polylines (links)
        :param stop_file: name of the file that contains the points
        :param link_file: name of the file that contains the polylines
"""
if bool(link_file):
            pandasshp.write_shp(link_file, self.geo_dataframe(all_nodes=all_nodes), projection_string=self.prj)
if bool(stop_file):
stops = pd.merge(self.nodes, self.line_count, left_index=True, right_index=True).reset_index()
pandasshp.write_shp(stop_file, stops, projection_string=self.prj)
def nontransitleg_geometries(self):
"""
        Returns a pd.DataFrame of the connectors that link the zones to the traffic nodes; it includes their
        geometry.
:return: a pd.DataFrame of the connectors that link the zones to the traffic nodes
"""
df_a = pd.DataFrame(self.nontransitlegs, columns=['a', 'b'])
def geometry(row):
return shapely.geometry.LineString(
[self.nodes.loc[row['a'], 'geometry'], self.zones.loc[row['b'], 'geometry'].centroid]
)
df_a['geometry'] = df_a.apply(geometry, axis=1)
df_b = df_a.rename(columns={'a': 'b', 'b': 'a'})
return pd.concat([df_a, df_b])
def line_links(self, to_links=None, mode=None):
if mode:
return self.line_links(list(self.data[self.data['mode'] == mode]['name']))
if type(to_links) in [set, list]:
return pd.concat([self.line_links(entity) for entity in to_links]).drop_duplicates()
else:
line_stops = _stop_list(self.line_dict[to_links].chunk)
_line_links = [[line_stops[i], line_stops[i + 1]] for i in range(len(line_stops) - 1)]
return pd.DataFrame(_line_links, columns=['a', 'b']).drop_duplicates()
regex_time = 'RT=[0-9]{1,6}[.]?[0-9]{0,6}'
time_re = re.compile(regex_time)
def find_hubs(zones, nodes, text):
stop_list = _stop_list(text)
lin_chunks = ['LINE Name' + chunk for chunk in text.split('LINE Name')]
zone_stops = _zone_stops(zones, nodes, stop_list)
stop_lines = _stop_lines(stop_list, lin_chunks)
zone_lines = _zone_lines(zone_stops, stop_list, stop_lines)
hubs = _hubs(zone_stops, stop_lines, zone_lines)
return hubs
def line_length(chunk, nodes):
return line_geometry(chunk, nodes).length
def line_speed(chunk, nodes):
return line_length(chunk, nodes) / line_time(chunk)
def line_geometry(chunk, nodes, all_nodes=True):
point_list = _node_list(chunk) if all_nodes else _stop_list(chunk)
try:
return shapely.geometry.LineString([nodes.loc[node, 'geometry'] for node in point_list])
except Exception:
return np.nan
def line_time(chunk):
try:
return float(time_re.findall(chunk)[-1].split('RT=')[1])
except Exception:
return np.nan
def line_stops(chunk):
return len(_stop_list(chunk))
def line_name(lin_chunk):
try:
return lin_chunk.split("'")[1]
except Exception:
return 'not_a_line'
def line_mode(lin_chunk, split_string='mode='):
try:
return int(lin_chunk.split(split_string)[1].split(',')[0])
except Exception:
return 'not_a_line'
def line_headway(lin_chunk, split_string='headway='):
try:
return float(lin_chunk.lower().replace(' ', '').split(split_string)[1].split(',')[0])
except Exception:
return 'not_a_line'
def _zone_stops(zones, nodes, stop_list, leg_type='contains'):
if leg_type == 'contains':
progress = FloatProgress(
min=0, max=len(list(zones.iterrows())), width=975, height=10, color=syscolors.rainbow_shades[1], margin=5)
progress.value = 0
display(progress)
zone_stops = {}
for zone_id, zone in zones.iterrows():
zone_stops[zone_id] = []
for stop_id, stop in nodes.loc[stop_list].iterrows():
if zone['geometry'].contains(stop['geometry']):
zone_stops[zone_id].append(stop_id)
progress.value += 1
if leg_type == 'nearest':
centroids = zones.copy()
centroids['geometry'] = zones['geometry'].apply(lambda g: g.centroid)
stops = nodes.loc[stop_list]
links_a = spatial.nearest(stops, centroids).rename(columns={'ix_many': 'zone', 'ix_one': 'stop'})
links_b = spatial.nearest(centroids, stops).rename(columns={'ix_one': 'zone', 'ix_many': 'stop'})
links = pd.concat([links_a, links_b]).drop_duplicates()
zone_stops = dict(links.groupby('zone')['stop'].agg(lambda s: list(s)))
return zone_stops
def _stop_lines(stop_list, lin_chunks):
progress = FloatProgress(
min=0, max=len(stop_list), width=975, height=10, color=syscolors.rainbow_shades[1], margin=5
)
progress.value = 0
display(progress)
stop_lines = {}
for stop in stop_list:
stop_lines[stop] = set()
for lin_chunk in lin_chunks[1:]:
if 'N=' + str(stop) in lin_chunk:
stop_lines[stop] = stop_lines[stop].union([line_name(lin_chunk)])
progress.value += 1
return stop_lines
def _zone_lines(zone_stops, stop_list, stop_lines):
zone_lines = {}
for zone, zone_stop_list in zone_stops.items():
zone_lines[zone] = set()
for stop in set(stop_list).intersection(zone_stop_list):
zone_lines[zone] = zone_lines[zone].union(stop_lines[stop])
return zone_lines
def _hubs(zone_stops, stop_lines, zone_lines):
pop_zone_lines = dict(zone_lines)
to_keep = {}
for zone in list(zone_lines.keys()):
to_keep[zone] = []
while len(pop_zone_lines[zone]):
dict_intersection = {len(pop_zone_lines[zone].intersection(stop_lines[stop])): stop for stop in zone_stops[zone]}
max_intersection = sorted(dict_intersection.keys())[-1]
max_stop = dict_intersection[max_intersection]
to_keep[zone].append(max_stop)
pop_zone_lines[zone] = pop_zone_lines[zone] - stop_lines[max_stop]
hubs = set(itertools.chain(*list(to_keep.values())))
return hubs
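# Illustrative sketch (not part of the original module, toy ids): the greedy
# selection above repeatedly picks, for each zone, the stop that covers the most
# still-uncovered lines. Kept as an uncalled helper so importing stays side-effect free.
def _example_hubs_selection():
    zone_stops_toy = {'z1': [10, 11], 'z2': [11, 12]}
    stop_lines_toy = {10: {'A'}, 11: {'A', 'B'}, 12: {'B', 'C'}}
    zone_lines_toy = _zone_lines(zone_stops_toy, [10, 11, 12], stop_lines_toy)
    return _hubs(zone_stops_toy, stop_lines_toy, zone_lines_toy)  # e.g. {11, 12}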
def _stop_list(text, regex='N=[0-9]{4,6}'):
stop_re = re.compile(regex)
return [int(f[2:]) for f in stop_re.findall(text)]
def _node_list(text, regex='N=[-]?[0-9]{4,6}'):
node_re = re.compile(regex)
return [int(f[2:].replace('-', '')) for f in node_re.findall(text)]
def _endpoints(lin_chunk):
return [_node_list(lin_chunk)[0], _node_list(lin_chunk)[-1]]
def _nontransitlegs(zone_stops):
nontransitlegs = []
for zone in zone_stops.keys():
for stop in zone_stops[zone]:
nontransitlegs.append((stop, zone))
return list(set(nontransitlegs))
def _transitlegs(stop_lines):
transitlegs = []
for stop in stop_lines.keys():
for line in stop_lines[stop]:
transitlegs.append((stop, line))
return list(set(transitlegs))
def _line_count(text):
return pd.DataFrame(pd.Series(_stop_list(text)).value_counts(), columns=['lines'])
def _mode_stops(mode, text, regex='N=[0-9]{4,6}', sep='LINE NAME'):
stop_re = re.compile(regex)
lin_chunks = [sep + chunk for chunk in text.split(sep)]
mode_chunks = [chunk for chunk in lin_chunks if 'mode=' + str(mode) in chunk]
mode_find = stop_re.findall(''.join(mode_chunks))
return [int(f[2:]) for f in mode_find]
def _find_endpoints(text, mode=None, sep='LINE NAME'):
lin_chunks = [sep + chunk for chunk in text.split(sep)[1:]]
lin_chunks = [chunk for chunk in lin_chunks if 'mode=' + str(mode) in chunk] if mode else lin_chunks
return list(itertools.chain(*[_endpoints(chunk) for chunk in lin_chunks]))
def _prune_text(text, stops):
pruned_text = text.replace('N=', 'N=-')
pruned_text = re.sub('[-]+', '-', pruned_text)
for stop in stops:
before = 'N=-' + str(stop)
after = 'N=' + str(stop)
pruned_text = pruned_text.replace(before, after)
return pruned_text
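# Illustrative sketch (not part of the original module): _prune_text first turns
# every 'N=' into 'N=-' (conventionally a non-stop node in CUBE .LIN files), then
# restores a plain 'N=' for the ids listed in `stops`.
def _example_prune_text():
    text = "LINE NAME='l1', N=1001, RT=0, N=1002, RT=3"
    return _prune_text(text, [1002])
    # -> "LINE NAME='l1', N=-1001, RT=0, N=1002, RT=3"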
# path matrix
def connection_graph(zones, nodes, text):
lin_chunks = ['LINE Name' + chunk for chunk in text.split('LINE Name')]
stop_list = _stop_list(text)
zone_stops = _zone_stops(zones, nodes, stop_list)
stop_lines = _stop_lines(stop_list, lin_chunks)
nontransitlegs = []
for zone in zone_stops.keys():
for stop in zone_stops[zone]:
nontransitlegs.append((stop, zone))
transitlegs = []
for stop in stop_lines.keys():
for line in stop_lines[stop]:
transitlegs.append((stop, line))
g = nx.Graph(transitlegs + nontransitlegs)
return g
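# Illustrative sketch (toy ids, not part of the original module): in the graph
# built above, zones connect to stops and stops connect to lines, so a shortest
# path between two zones alternates zone / stop / line / stop / zone and its
# length is a proxy for the number of transfers.
def _example_connection_path():
    g = nx.Graph([(101, 'z1'), (101, 'line A'), (102, 'line A'), (102, 'z2')])
    return nx.shortest_path(g, 'z1', 'z2')  # -> ['z1', 101, 'line A', 102, 'z2']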
def path_matrix(zones, nodes, text):
"""
    :param zones: GeoDataFrame of the zones
    :param nodes: GeoDataFrame of the nodes
    :param text: raw text of the .LIN file
    :return: a pd.DataFrame with one row per origin/destination pair, including its path and path length
display ODs that require more than 2 transfers:
::
        # using direct access to the lin attribute
        skims = lines.path_matrix_geometries
        to_shape = skims[skims['connections'] > 3]
        pandasshp.write_shp(sig_path + 'Q:/paths.shp', to_shape)
        # using path_matrix on its own
        skims = path_matrix(zones, nodes, text)
        to_shape = skims[skims['connections'] > 3]
        pandasshp.write_shp(sig_path + 'Q:/paths.shp', to_shape)
.. figure:: ./pictures/path_lin.png
:width: 25cm
:align: center
:alt: path lin
:figclass: align-center
        ODs that require 2 transfers in Monterrey (Mexico)
"""
g = connection_graph(zones, nodes, text)
paths = nx.shortest_path(g)
od_list = []
for o in list(zones.index):
for d in list(zones.index):
try:
od_list.append({'origin': o, 'destination': d, 'path_len': len(paths[o][d]), 'path': paths[o][d]})
except Exception:
pass
od =
|
pd.DataFrame(od_list)
|
pandas.DataFrame
|
import pandas as pd
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
# from IPython.display import display
currentDirectory = os.getcwd()
final_train =
|
pd.read_csv(currentDirectory+"/final_train.csv")
|
pandas.read_csv
|
#---------------------------
# organize imports
#---------------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import random
import os
import warnings
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
#---------------------------
# library specific options
#---------------------------
pd.options.display.float_format = '{:,.2f}'.format
sns.set(color_codes=True)
def warn(*args, **kwargs):
pass
warnings.warn = warn
warnings.filterwarnings("ignore", category=FutureWarning)
#---------------------------
# analyze the dataset
#---------------------------
def analyze_dataset(dataset):
print("[INFO] keys : {}".format(dataset.keys()))
print("[INFO] features shape : {}".format(dataset.data.shape))
print("[INFO] target shape : {}".format(dataset.target.shape))
print("[INFO] feature names")
print(dataset.feature_names)
print("[INFO] dataset summary")
print(dataset.DESCR)
df = pd.DataFrame(dataset.data)
print("[INFO] df type : {}".format(type(df)))
print("[INFO] df shape: {}".format(df.shape))
print(df.head())
df.columns = dataset.feature_names
print(df.head())
df["PRICE"] = dataset.target
print(df.head())
print("[INFO] dataset datatypes")
print(df.dtypes)
print("[INFO] dataset statistical summary")
print(df.describe())
# correlation between attributes
print("PEARSON CORRELATION")
print(df.corr(method="pearson"))
sns.heatmap(df.corr(method="pearson"))
plt.savefig("heatmap_pearson.png")
plt.clf()
plt.close()
print("SPEARMAN CORRELATION")
print(df.corr(method="spearman"))
sns.heatmap(df.corr(method="spearman"))
plt.savefig("heatmap_spearman.png")
plt.clf()
plt.close()
print("KENDALL CORRELATION")
print(df.corr(method="kendall"))
sns.heatmap(df.corr(method="kendall"))
plt.savefig("heatmap_kendall.png")
plt.clf()
plt.close()
# show missing values
print(pd.isnull(df).any())
file_report = "boston_housing.txt"
with open(file_report, "w") as f:
f.write("Features shape : {}".format(df.drop("PRICE", axis=1).shape))
f.write("\n")
f.write("Target shape : {}".format(df["PRICE"].shape))
f.write("\n")
f.write("\nColumn names")
f.write("\n")
f.write(str(df.columns))
f.write("\n")
f.write("\nStatistical summary")
f.write("\n")
f.write(str(df.describe()))
f.write("\n")
f.write("\nDatatypes")
f.write("\n")
f.write(str(df.dtypes))
f.write("\n")
f.write("\nPEARSON correlation")
f.write("\n")
f.write(str(df.corr(method="pearson")))
f.write("\n")
f.write("\nSPEARMAN correlation")
f.write("\n")
f.write(str(df.corr(method="spearman")))
f.write("\n")
f.write("\nKENDALL correlation")
f.write("\n")
f.write(str(df.corr(method="kendall")))
f.write("\nMissing Values")
f.write("\n")
f.write(str(
|
pd.isnull(df)
|
pandas.isnull
|
import os
import numpy as np
import pandas as pd
import math
import json
from pathlib import Path
# NOTE
# It is important that everything is the same datatype in pandas, because otherwise getting values as an np array is expensive
# Use float32 everywhere
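# Illustrative sketch of the dtype note above (hypothetical column names, not
# part of the pipeline): with a single float32 dtype, `.values` hands back one
# contiguous ndarray instead of converting column by column.
def _example_uniform_float32():
    df = pd.DataFrame({"time": [0.0, 0.033], "player_0_m_iHealth": [600.0, 590.0]})
    df = df.astype(np.float32)  # cast everything to one dtype up front
    return df.values  # a single float32 array, cheap to slice by column index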
def labels_to_indicies(labels):
return [ i for i,label in labels]
def select_features_of_hero(hero_id,labels):
hero_id_string = "player_" + str(hero_id) + "_"
return [ (i,label) for i,label in labels if hero_id_string in label]
def select_features_by_name(name,labels):
return [ (i,label) for i,label in labels if name in label]
def remove_features_by_name(name,labels):
    return [ (i,label) for i,label in labels if name not in label]
def remove_paused_datapoints(data):
time = np.diff(data["time"].values)
is_paused = time < 0.0001 # time does not change, game is paused
data = data.drop(data.index[np.where(is_paused)])
return data
# was this hero visible x second ago?
def add_historical_visibility_features(data):
times = data.values[:,0]
num_datapoints = data.shape[0]
# estimate timestep
delta_times = []
for rand_i in np.random.randint(1,len(times),300):
delta_times.append(times[rand_i] - times[rand_i-1])
timestep = np.array(delta_times).mean()
ticks_per_sec = int(math.ceil(1 / timestep))
for hero_i in range(10):
feature_name = "player_" + str(hero_i) + "_m_iTaggedAsVisibleByTeam"
visibilities = data[feature_name].values
for history_i in range(10):
            # visibility history_i+1 sec ago is a shifted version of visibility, with padded zeros at the beginning
new_feature = np.zeros(num_datapoints,dtype=np.float32)
tick_diff = (history_i+1) * ticks_per_sec
new_feature[tick_diff:] = visibilities[:-tick_diff]
data["player_" + str(hero_i) + "_visibility_history_" + str(history_i)] = new_feature
return data
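# Usage sketch for add_historical_visibility_features (synthetic data, hypothetical
# values): a constant ~1/30 s timestep and one visibility column per player, with
# time in the first column as the function expects.
def _example_historical_visibility():
    ticks = np.arange(300, dtype=np.float32)
    toy = pd.DataFrame({"time": ticks / 30.0})
    for hero_i in range(10):
        toy["player_" + str(hero_i) + "_m_iTaggedAsVisibleByTeam"] = np.float32(1.0)
    return add_historical_visibility_features(toy)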
# can be 0: alive, 1: dying, 2: dead
def life_state_to_times_of_death(life_data,is_one_dead):
times = life_data.values[:,0]
times_of_death_list = []
for i in range(10):
current_player_lifestate = life_data.values[:,i+1]
if is_one_dead:
current_player_lifestate[current_player_lifestate == 1] = 2
else:
current_player_lifestate[current_player_lifestate == 1] = 0
diff = np.diff(current_player_lifestate)
diff = np.insert(diff,0,0) # make diff the same length by inserting a 0 in front
times_of_death = times[diff>0]
times_of_death_list.append(times_of_death)
return times_of_death_list
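# Illustrative sketch (synthetic values): the diff trick above marks a death at
# every tick where the life state jumps from 0 (alive) to 2 (dead).
def _example_times_of_death():
    times = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=np.float32)
    life_state = np.array([0, 0, 2, 2, 0], dtype=np.float32)  # dies at t=2, respawns at t=4
    diff = np.insert(np.diff(life_state), 0, 0)  # positive exactly at the death tick
    return times[diff > 0]  # -> array([2.], dtype=float32)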
def create_time_until_next_death_stats(data,times_of_death_list):
time_points = data.values[:,0]
for i in range(10):
death_times = times_of_death_list[i]
time_to_next_death = np.full(len(time_points),-1,dtype=np.float32) # init to invalid
next_death_time_label = "stat_" + str(i) + "_time_until_next_death"
for time_i,time in enumerate(time_points):
for death_time in death_times:
if death_time - time > 0:
if time_to_next_death[time_i] < 0: # still invalid
time_to_next_death[time_i] = death_time - time
else:
time_to_next_death[time_i] = min(time_to_next_death[time_i], death_time - time)
data[next_death_time_label] = time_to_next_death
return data
def create_who_dies_next_labels(data):
labels = [(i,label) for i,label in enumerate(list(data))]
next_death_indicies = labels_to_indicies(select_features_by_name("_time_until_next_death",labels))
next_death_times = data.values[:,next_death_indicies]
    invalid_mask = next_death_times < 0 # invalid means the player will never die in the remaining time
    next_death_times += invalid_mask.astype(np.float32) * 1000000 # add a large number to invalid values
    next_death_times = np.concatenate((next_death_times,np.full((data.shape[0],1),5)),axis=1) # add an extra column; if it is the min, no one dies next
die_next_index = np.argmin(next_death_times,axis=1)
for i in range(11):
label_name = "label_who_dies_next_" + str(i)
current_is_winner = die_next_index == i
data[label_name] = current_is_winner.astype(np.float32)
return data
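# Illustrative sketch (synthetic values) of the argmin-plus-sentinel trick above:
# a sentinel column of 5 seconds is appended, so when every real entry carries the
# large "never dies" value the argmin lands on the sentinel index ("no one dies next").
def _example_who_dies_next():
    next_death_times = np.array([[3.0, 1000000.0], [1000000.0, 1000000.0]], dtype=np.float32)
    with_sentinel = np.concatenate((next_death_times, np.full((2, 1), 5.0)), axis=1)
    return np.argmin(with_sentinel, axis=1)  # -> array([0, 2]): player 0 first, then nobody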
def create_die_within_x_sec_feature(data,times_of_death_list,x):
time_points = data.values[:,0]
# new_features = np.zeros((len(time_points),10))
for i in range(10):
death_times = times_of_death_list[i]
new_features = np.zeros(len(time_points),dtype=np.float32)
label_string = "label_" + str(i) + "_die_in_" + str(x)
for time_i,time in enumerate(time_points):
for death_time in death_times:
if death_time > time and death_time - time < x:
new_features[time_i] = 1
data[label_string] = new_features.astype(np.float32)
return data
def addPositionFeaturesTower(data):
labels = [(i,label) for i,label in enumerate(list(data))]
tower_labels = select_features_by_name("Tower_",labels)
unique_tower_labels = select_features_by_name("m_lifeState",tower_labels) # this will return one label per tower
unique_tower_labels = [label.replace("m_lifeState","") for i,label in unique_tower_labels]
modified_data = data
for tower_name in unique_tower_labels:
cell_x = modified_data[tower_name + "CBodyComponent.m_cellX"].values
cell_y = modified_data[tower_name + "CBodyComponent.m_cellY"].values
vec_x = modified_data[tower_name + "CBodyComponent.m_vecX"].values
vec_y = modified_data[tower_name + "CBodyComponent.m_vecY"].values
pos_x = cell_x * 256 + vec_x
pos_y = cell_y * 256 + vec_y
modified_data[tower_name + "pos_x"] = pos_x.astype(np.float32)
modified_data[tower_name + "pos_y"] = pos_y.astype(np.float32)
modified_data = modified_data.drop(tower_name + "CBodyComponent.m_cellX",axis=1)
modified_data = modified_data.drop(tower_name + "CBodyComponent.m_cellY",axis=1)
modified_data = modified_data.drop(tower_name + "CBodyComponent.m_vecX",axis=1)
modified_data = modified_data.drop(tower_name + "CBodyComponent.m_vecY",axis=1)
return modified_data
def addPositionFeatures(data):
modified_data = data
for hero_i in range(10):
player_prefix = "player_" + str(hero_i) + "_"
cell_x = modified_data[player_prefix + "CBodyComponent.m_cellX"].values
cell_y = modified_data[player_prefix + "CBodyComponent.m_cellY"].values
vec_x = modified_data[player_prefix + "CBodyComponent.m_vecX"].values
vec_y = modified_data[player_prefix + "CBodyComponent.m_vecY"].values
pos_x = cell_x * 256 + vec_x # vec_x overflows at 256
pos_y = cell_y * 256 + vec_y
modified_data[player_prefix + "pos_x"] = pos_x.astype(np.float32)
modified_data[player_prefix + "pos_y"] = pos_y.astype(np.float32)
modified_data = modified_data.drop(player_prefix + "CBodyComponent.m_cellX",axis=1)
modified_data = modified_data.drop(player_prefix + "CBodyComponent.m_cellY",axis=1)
modified_data = modified_data.drop(player_prefix + "CBodyComponent.m_vecX",axis=1)
modified_data = modified_data.drop(player_prefix + "CBodyComponent.m_vecY",axis=1)
return modified_data
def addHeroProximities(data):
labels = [(i,label) for i,label in enumerate(list(data))]
labels = select_features_by_name("pos",labels)
labels = select_features_by_name("player_",labels)
pos_x_indicies = labels_to_indicies(select_features_by_name("pos_x",labels))
pos_y_indicies = labels_to_indicies(select_features_by_name("pos_y",labels))
pos_x_vals = data.values[:,pos_x_indicies]
pos_y_vals = data.values[:,pos_y_indicies]
for hero_i in range(10):
hero_team = int(hero_i / 5)
current_ally_i = 0
current_enemy_i = 0
player_prefix = "player_" + str(hero_i) + "_"
for other_hero_i in range(10):
if other_hero_i == hero_i:
continue
feature_name = None
other_hero_team = int(other_hero_i / 5)
if hero_team == other_hero_team:
feature_name = player_prefix + "ally_proximity_" + str(current_ally_i)
current_ally_i += 1
else:
feature_name = player_prefix + "enemy_proximity_" + str(current_enemy_i)
current_enemy_i += 1
distances = np.sqrt((pos_x_vals[:,hero_i] - pos_x_vals[:,other_hero_i]) * (pos_x_vals[:,hero_i] - pos_x_vals[:,other_hero_i]) +
(pos_y_vals[:,hero_i] - pos_y_vals[:,other_hero_i]) * (pos_y_vals[:,hero_i] - pos_y_vals[:,other_hero_i]))
            distances = np.minimum(distances,10000) # clamp the distances, it does not really matter once it is that far away
data[feature_name] = distances
return data
def addClosestAliveTowers(data):
labels = [(i,label) for i,label in enumerate(list(data))]
team_2_tower_lables = select_features_by_name("Tower_2",labels)
team_3_tower_lables = select_features_by_name("Tower_3",labels)
team_2_tower_pos_x_labels = select_features_by_name("pos_x",team_2_tower_lables)
team_2_tower_pos_x_indicies = labels_to_indicies(team_2_tower_pos_x_labels)
team_2_tower_pos_y_labels = select_features_by_name("pos_y",team_2_tower_lables)
team_2_tower_pos_y_indicies = labels_to_indicies(team_2_tower_pos_y_labels)
team_2_tower_life_state_labels = select_features_by_name("m_lifeState",team_2_tower_lables)
team_2_tower_life_state_indicies = labels_to_indicies(team_2_tower_life_state_labels)
team_3_tower_pos_x_labels = select_features_by_name("pos_x",team_3_tower_lables)
team_3_tower_pos_x_indicies = labels_to_indicies(team_3_tower_pos_x_labels)
team_3_tower_pos_y_labels = select_features_by_name("pos_y",team_3_tower_lables)
team_3_tower_pos_y_indicies = labels_to_indicies(team_3_tower_pos_y_labels)
team_3_tower_life_state_labels = select_features_by_name("m_lifeState",team_3_tower_lables)
team_3_tower_life_state_indicies = labels_to_indicies(team_3_tower_life_state_labels)
# NOTE
    # don't modify the data here, because it would invalidate the indices
    # modify it once everything is calculated
closest_ally_tower = np.zeros((data.shape[0], 10),dtype=np.float32)
closest_enemy_tower = np.zeros((data.shape[0], 10),dtype=np.float32)
for team_iterator in range(2):
team_index = team_iterator + 2 # first team is team 2, second team is team 3
ally_tower_pos_x_indicies = team_2_tower_pos_x_indicies if team_index == 2 else team_3_tower_pos_x_indicies
ally_tower_pos_y_indicies = team_2_tower_pos_y_indicies if team_index == 2 else team_3_tower_pos_y_indicies
enemy_tower_pos_x_indicies = team_3_tower_pos_x_indicies if team_index == 2 else team_2_tower_pos_x_indicies
enemy_tower_pos_y_indicies = team_3_tower_pos_y_indicies if team_index == 2 else team_2_tower_pos_y_indicies
ally_tower_life_state_indicies = team_2_tower_life_state_indicies if team_index == 2 else team_3_tower_life_state_indicies
enemy_tower_life_state_indicies = team_3_tower_life_state_indicies if team_index == 2 else team_2_tower_life_state_indicies
ally_tower_pos_x = data.values[:,ally_tower_pos_x_indicies]
ally_tower_pos_y = data.values[:,ally_tower_pos_y_indicies]
enemy_tower_pos_x = data.values[:,enemy_tower_pos_x_indicies]
enemy_tower_pos_y = data.values[:,enemy_tower_pos_y_indicies]
ally_dead_mask = np.zeros((data.shape[0], 11),dtype=np.uint32)
ally_dead_mask[:] = data.values[:,ally_tower_life_state_indicies] > 0.5
enemy_dead_mask = np.zeros((data.shape[0], 11),dtype=np.uint32)
        enemy_dead_mask[:] = data.values[:,enemy_tower_life_state_indicies] > 0.5
for hero_iterator in range(5):
hero_index = hero_iterator + 5 * team_iterator
player_prefix = "player_" + str(hero_index) + "_"
hero_pos_x = data[player_prefix + "pos_x"].values
hero_pos_y = data[player_prefix + "pos_y"].values
ally_tower_distances = np.sqrt((ally_tower_pos_x-hero_pos_x[:,np.newaxis]) * (ally_tower_pos_x-hero_pos_x[:,np.newaxis]) +
(ally_tower_pos_y-hero_pos_y[:,np.newaxis]) * (ally_tower_pos_y-hero_pos_y[:,np.newaxis]))
enemy_tower_distances = np.sqrt((enemy_tower_pos_x-hero_pos_x[:,np.newaxis]) * (enemy_tower_pos_x-hero_pos_x[:,np.newaxis]) +
(enemy_tower_pos_y-hero_pos_y[:,np.newaxis]) * (enemy_tower_pos_y-hero_pos_y[:,np.newaxis]))
            # give a large value to dead towers, so they don't influence the minimum
ally_tower_distances = ally_tower_distances + ally_dead_mask * 10000000
enemy_tower_distances = enemy_tower_distances + enemy_dead_mask * 10000000
            # 6000 is around a quarter of the map length
closest_ally_tower[:,hero_index] = np.minimum(ally_tower_distances.min(axis=1), 6000)
closest_enemy_tower[:,hero_index] = np.minimum(enemy_tower_distances.min(axis=1), 6000)
modified_data = data
for hero_i in range(10):
feature_name_prefix = "player_" + str(hero_i) + "_closest_tower_"
modified_data[feature_name_prefix + "distance_ally"] = closest_ally_tower[:,hero_i]
modified_data[feature_name_prefix + "distance_enemy"] = closest_enemy_tower[:,hero_i]
# Delete all tower data
all_tower_lables = select_features_by_name("Tower_",labels)
for i,label in all_tower_lables:
modified_data = modified_data.drop(label,axis=1)
return modified_data
def addHeroOneHotEncoding(data):
modified_data = data
NUM_HEROS = 130 # Note this should be the max number of heroes
for hero_i in range(10):
hero_id_feature_name = "player_" + str(hero_i) + "_m_vecPlayerTeamData.000" + str(hero_i) + ".m_nSelectedHeroID"
hero_id = data[hero_id_feature_name].values[0]
hero_id_int = int(np.rint(hero_id))
hero_one_hot = np.zeros(NUM_HEROS)
hero_one_hot[hero_id_int] = 1
for i in range(NUM_HEROS):
feature_name = "player_" + str(hero_i) + "_hero_one_hot_" + str(i)
modified_data[feature_name] = np.repeat(hero_one_hot[i], data.shape[0])
return modified_data
def add_rate_of_change_features(data):
    # NOTE: rate of change features are dependent on the timestep; for now I don't care, because I will use the same timestep consistently
    # could divide by timestep in the future...
labels = [(i,label) for i,label in enumerate(list(data))]
labels_to_make_diff = []
diff_feature_name = []
filtered_labels = select_features_by_name("pos_",labels)
labels_to_make_diff.extend([label for i,label in filtered_labels])
diff_feature_name.extend([label.replace("pos_","speed_") for i,label in filtered_labels])
filtered_labels = select_features_by_name("_proximity_",labels)
labels_to_make_diff.extend([label for i,label in filtered_labels])
diff_feature_name.extend([label.replace("proximity","delta_proximity") for i,label in filtered_labels])
filtered_labels = select_features_by_name("closest_tower_distance",labels)
labels_to_make_diff.extend([label for i,label in filtered_labels])
diff_feature_name.extend([label.replace("closest_tower_distance","delta_closest_tower_distance") for i,label in filtered_labels])
filtered_labels = select_features_by_name("m_iHealth",labels)
labels_to_make_diff.extend([label for i,label in filtered_labels])
diff_feature_name.extend([label.replace("m_iHealth","delta_health") for i,label in filtered_labels])
for label,new_label in zip(labels_to_make_diff,diff_feature_name):
# take the diff and insert a zero in front
data[new_label] = np.insert(np.diff(data[label].values),0,0)
return data
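# Sketch of the "divide by timestep" variant mentioned in the NOTE above
# (hypothetical helper, not used by the pipeline): normalising by the time delta
# turns a per-tick change into a per-second rate, comparable across tick rates.
def _example_rate_per_second(values, times):
    delta = np.insert(np.diff(values), 0, 0)
    dt = np.insert(np.diff(times), 0, 1.0)  # pad the first element with a safe divisor
    return delta / np.maximum(dt, 1e-6)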
from zlib import crc32
def bytes_to_float(b):
return float(crc32(b) & 0xffffffff) / 2**32
def str_to_float(s, encoding="utf-8"):
return bytes_to_float(s.encode(encoding))
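# Usage sketch: crc32 maps a string to an unsigned 32-bit integer, so dividing by
# 2**32 yields a deterministic float in [0, 1), which serves as a stable per-game
# identifier feature (the game name below is illustrative).
def _example_game_name_hash():
    return str_to_float("match_1234")  # same input always yields the same float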
def add_game_name_hash(data,game_name):
hash_val = str_to_float(game_name)
data["stat_game_name_hash"] = np.repeat(hash_val, data.shape[0]).astype(np.float32)
return data
def hero_id_to_roles(hero_id,hero_list,hero_roles_table):
table_i = 0
if hero_id < len(hero_list):
hero_name = hero_list[hero_id][1]
table_i = np.where(hero_roles_table["Hero"].values == hero_name)
else:
print("hero_role_failed",hero_id)
return hero_roles_table.values[table_i,1:].astype(np.float32).flatten()
# Not used, as clarified by the author
def add_hero_role_features(data):
#JSON_PATH = '/users/ak1774/scratch/esport/death_prediction/heros.json'
JSON_PATH = Path.cwd().parent / 'heros.json'
#HERO_ROLE_CSV_PATH = "/users/ak1774/scratch/esport/death_prediction/Hero_Role_Data_Uptodate.csv"
HERO_ROLE_CSV_PATH = Path.cwd().parent / 'Hero_Role_Data_Uptodate.csv'
role_strings = ["Offlane","Mid","Support","Mage","RoamingSupport","SafelaneCarry"]
with open(JSON_PATH) as f:
heros_json = json.load(f)
hero_list = [(item["id"],item["localized_name"]) for item in heros_json["heroes"]]
hero_roles_table = pd.read_csv(HERO_ROLE_CSV_PATH)
for hero_i in range(10):
feature_name = "player_" + str(hero_i) + "_m_vecPlayerTeamData.000" + str(hero_i) + ".m_nSelectedHeroID"
hero_id = data[feature_name].values[0]
roles = hero_id_to_roles(int(hero_id),hero_list,hero_roles_table).flatten()
if roles.size != len(role_strings):
print("hero_role_failed 2: ",int(hero_id))
roles = np.zeros(len(role_strings))
roles = np.repeat(roles.reshape((1,-1)),data.shape[0],axis=0)
for role_i,role_name in enumerate(role_strings):
new_feature_name = "player_" + str(hero_i) + "_role_" + role_name
data[new_feature_name] = roles[:,role_i]
return data
def read_and_preprocess_data(game_name,sample=True):
data_file_name = game_name + ".csv"
life_stat_file_name = game_name + "_life.csv"
data =
|
pd.read_csv(data_file_name,dtype=np.float32)
|
pandas.read_csv
|
""" pandaspyomo: read data from coopr.pyomo models to pandas DataFrames
Pyomo is a GAMS-like model description language for mathematical
optimization problems. This module provides functions to read data from
Pyomo model instances and result objects. Use list_entities to get a list
of all entities (sets, params, variables, objectives or constraints) inside a
pyomo instance, before get its contents by get_entity (or get_entities).
Usage:
import pandaspyomo as pdpo
pdpo.list_entities(instance, 'var')
[('EprOut', ['time', 'process', 'commodity', 'commodity']), ...
('EprIn', ['time', 'process', 'commodity', 'commodity'])]
epr = pdpo.get_entities(instance, ['EprOut', 'EprInt'])
...
"""
import coopr.pyomo as pyomo
import pandas as pd
def get_entity(instance, name):
""" Return a DataFrame for an entity in model instance.
Args:
instance: a Pyomo ConcreteModel instance
name: name of a Set, Param, Var, Constraint or Objective
Returns:
a single-columned Pandas DataFrame with domain as index
"""
# retrieve entity, its type and its onset names
entity = instance.__getattribute__(name)
labels = _get_onset_names(entity)
# extract values
if isinstance(entity, pyomo.Set):
# Pyomo sets don't have values, only elements
results = pd.DataFrame([(v, 1) for v in entity.value])
# for unconstrained sets, the column label is identical to their index
# hence, make index equal to entity name and append underscore to name
# (=the later column title) to preserve identical index names for both
# unconstrained supersets
if not labels:
labels = [name]
name = name+'_'
elif isinstance(entity, pyomo.Param):
if entity.dim() > 1:
results = pd.DataFrame([v[0]+(v[1],) for v in entity.iteritems()])
else:
results = pd.DataFrame(entity.iteritems())
else:
# create DataFrame
if entity.dim() > 1:
# concatenate index tuples with value if entity has
# multidimensional indices v[0]
results = pd.DataFrame(
[v[0]+(v[1].value,) for v in entity.iteritems()])
else:
# otherwise, create tuple from scalar index v[0]
results = pd.DataFrame(
[(v[0], v[1].value) for v in entity.iteritems()])
# check for duplicate onset names and append one to several "_" to make
# them unique, e.g. ['sit', 'sit', 'com'] becomes ['sit', 'sit_', 'com']
for k, label in enumerate(labels):
if label in labels[:k]:
labels[k] = labels[k] + "_"
if not results.empty:
# name columns according to labels + entity name
results.columns = labels + [name]
results.set_index(labels, inplace=True)
return results
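# Usage sketch (assuming `instance` is an already-built Pyomo ConcreteModel and
# 'EprOut' the variable named in the module docstring): get_entity returns a
# single-column DataFrame indexed by the entity's domain, ready for pandas ops.
def _example_get_entity(instance):
    epr_out = get_entity(instance, 'EprOut')
    return epr_out.groupby(level=0).sum()  # e.g. aggregate over the first index level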
def get_entities(instance, names):
""" Return one DataFrame with entities in columns and a common index.
Works only on entities that share a common domain (set or set_tuple), which
is used as index of the returned DataFrame.
Args:
instance: a Pyomo ConcreteModel instance
names: list of entity names (as returned by list_entities)
Returns:
a Pandas DataFrame with entities as columns and domains as index
"""
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import math
from copy import deepcopy
from itertools import product
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import tensorflow as tf
from statslib.utils.common import to_namedtuple
class DesignMatrix:
def __init__(self, y=None, X=None, f=None, gs=None, add_const=True):
from statslib._lib.transforms import identical
y = deepcopy(y)
X = deepcopy(X)
gs = deepcopy(gs)
self.f = None
self.names = dict()
self.endog_name = None
self.exog_names = None
self._n_y = None
self._n_x = None
self.n = None
if y is None and X is None:
raise ValueError("Please provide at least one of: y, X!")
if y is not None:
if f is None:
self.f = identical()
else:
self.f = deepcopy(f)
if isinstance(y, pd.DataFrame):
y = y.squeeze()
self.endog_name = y.name
self.y = y
self.v = self.f(y).rename('v')
self.names.update({'v': y.name})
self._n_y = len(self.y)
if X is not None:
if gs is None:
self.gs = [identical()] * len(X.columns)
else:
self.gs = gs
self.exog_names = X.columns.tolist()
self.X = X
self._n_x = len(self.X)
self.names.update(dict(zip([f'g{i}' for i in range(1, len(X.columns) + 1)], X.columns.tolist())))
if add_const:
self.names.update({'const': 'const'})
self._inv_names = {v: k for k, v in self.names.items()}
if isinstance(gs, dict):
self.gX = X.agg(gs)
else:
self.gX = X.agg(dict(zip(X.columns.tolist(), self.gs)))
if add_const:
self.gX['const'] = 1.0
self.gX.rename(columns=self._inv_names, inplace=True)
if y is not None:
self.dm_ext = pd.concat([self.y.rename(self.endog_name), self.v, self.X, self.gX], axis=1)
self.dm = pd.concat([self.y.rename('y'), self.v, self.gX], axis=1).dropna(axis=0)
self.gX = self.dm[[name for name in self.names.keys() if name != 'v']]
self.gX = self.gX[sorted(self.gX.columns)]
else:
self.dm_ext =
|
pd.concat([self.X, self.gX], axis=1)
|
pandas.concat
|
from collections import Counter
import os
import sys; sys.path.append('./../../')
import pickle
import numpy as np
import pandas as pd
import networkx as nx
import scipy.stats as st
import multiprocessing as mp
from src.Tree import TreeNode
from src.utils import load_pickle
from src.graph_stats import GraphStats
from src.graph_comparison import GraphPairCompare
def load_data(base_path, dataset, models, seq_flag, rob_flag):
for model in models:
path = os.path.join(base_path, dataset, model)
for subdir, dirs, files in os.walk(path):
for filename in files:
if 'csv' not in filename:
if 'seq' not in filename and 'rob' not in filename:
print(f'loading {subdir} {filename} ... ', end='', flush=True)
pkl = load_pickle(os.path.join(subdir, filename))#, subdir.split('/')[-1]
print('done')
yield pkl, filename
def mkdir_output(path):
if not os.path.isdir(path):
try:
os.mkdir(path)
except OSError:
            print(f'ERROR: could not make directory {path} for some reason')
return
def compute_graph_stats(root):
print('computing GraphStats... ', end='', flush=True)
if type(root) is list:
graph_stats = [GraphStats(graph=g, run_id = 1) for g in root]
else:
graph_stats = [GraphStats(graph=node.graph, run_id=1) for node in [root] + list(root.descendants)]
print('done')
return graph_stats
def compute_pagerank(graph_stats):
print('computing pagerank... ', end='', flush=True)
pgds = [gs.pagerank() for gs in graph_stats]
print('done')
return pgds
def length_chain(root):
return len(root.descendants)
def flatten(L):
return [item for sublist in L for item in sublist]
def compute_stats(ld):
padding = max(len(l) for l in ld)
for idx, l in enumerate(ld):
while len(ld[idx]) < padding:
ld[idx] += [np.NaN]
mean = np.nanmean(ld, axis=0)
ci = []
for row in np.asarray(ld).T:
ci.append(st.t.interval(0.95, len(row)-1, loc=np.mean(row), scale=st.sem(row)))
return np.asarray(mean), np.asarray(ci)
def construct_full_table(pageranks, trials, gens, model):
cols = []
for pagerank in pageranks:
cols.append(pagerank)
rows = {'model': model, 'gen': gens, 'trial': trials, \
'total_2_1edge': total_2_1edge, \
'total_2_indep': total_2_indep, \
'total_3_tris': total_3_tris, \
'total_2_star': total_2_star, \
'total_3_1edge': total_3_1edge, \
'total_4_clique': total_4_clique, \
'total_4_chordcycle': total_4_chordcycle, \
'total_4_tailed_tris': total_4_tailed_tris, \
'total_3_star': total_3_star, \
'total_4_path': total_4_path, \
'total_4_1edge': total_4_1edge, \
'total_4_2edge': total_4_2edge, \
'total_4_2star': total_4_2star, \
'total_4_tri': total_4_tri, \
'total_4_indep': total_4_indep
}
df =
|
pd.DataFrame(rows)
|
pandas.DataFrame
|
#SPDX-License-Identifier: MIT
import datetime
import json
import logging
import os
import sys
import warnings
from multiprocessing import Process, Queue
from workers.worker_git_integration import WorkerGitInterfaceable
import numpy as np
import pandas as pd
import requests
import sqlalchemy as s
from skimage.filters import threshold_otsu
from sklearn.ensemble import IsolationForest
from augur import ROOT_AUGUR_DIRECTORY
from workers.message_insights_worker.message_novelty import novelty_analysis
from workers.message_insights_worker.message_sentiment import get_senti_score
from workers.worker_base import Worker
warnings.filterwarnings('ignore')
class MessageInsightsWorker(WorkerGitInterfaceable):
def __init__(self, config={}):
# Define the worker's type, which will be used for self identification.
worker_type = "message_insights_worker"
# Define what this worker can be given and know how to interpret
given = [['github_url']]
# The name the housekeeper/broker use to distinguish the data model this worker can fill
models = ['message_analysis']
# Define the tables needed to insert, update, or delete on
data_tables = ['message', 'repo', 'message_analysis', 'message_analysis_summary']
# For most workers you will only need the worker_history and worker_job tables
# from the operations schema, these tables are to log worker task histories
operations_tables = ['worker_history', 'worker_job']
# Run the general worker initialization
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
# Do any additional configuration after the general initialization has been run
self.config.update(config)
# Define data collection info
self.tool_source = 'Message Insights Worker'
self.tool_version = '0.2.0'
self.data_source = 'Non-existent API'
self.insight_days = self.config['insight_days']
# Abs paths
self.models_dir = os.path.join(ROOT_AUGUR_DIRECTORY, "workers", "message_insights_worker", self.config['models_dir'])
self.full_train = False
# To identify which run of worker inserted the data
self.run_id = 100
def message_analysis_model(self, task, repo_id):
"""
:param task: the task generated by the housekeeper and sent to the broker which
was then sent to this worker. Takes the example dict format of:
{
'job_type': 'MAINTAIN',
'models': ['fake_data'],
'display_name': 'fake_data model for url: https://github.com/vmware/vivace',
'given': {
'git_url': 'https://github.com/vmware/vivace'
}
}
:param repo_id: the collect() method queries the repo_id given the git/github url
and passes it along to make things easier. An int such as: 27869
"""
# Any initial database instructions, like finding the last tuple inserted or generate the next ID value
self.begin_date = ''
# Check to see if repo has been analyzed previously
repo_exists_SQL = s.sql.text("""
SELECT exists (SELECT 1 FROM augur_data.message_analysis_summary WHERE repo_id = :repo_id LIMIT 1)""")
df_rep = pd.read_sql_query(repo_exists_SQL, self.db, params={'repo_id': repo_id})
self.full_train = not(df_rep['exists'].iloc[0])
self.logger.info(f'Full Train: {self.full_train}')
# Collection and insertion of data happens here
if not self.full_train:
# Fetch the timestamp of last analyzed message for the repo
past_SQL = s.sql.text("""
select message_analysis.msg_id, message.msg_timestamp
from augur_data.message_analysis
inner join augur_data.message on message.msg_id = message_analysis.msg_id
inner join augur_data.pull_request_message_ref on message.msg_id = pull_request_message_ref.msg_id
inner join augur_data.pull_requests on pull_request_message_ref.pull_request_id = pull_requests.pull_request_id
where message.repo_id = :repo_id
UNION
select message_analysis.msg_id, message.msg_timestamp
from augur_data.message_analysis
inner join augur_data.message on message.msg_id = message_analysis.msg_id
inner join augur_data.issue_message_ref on message.msg_id = issue_message_ref.msg_id
inner join augur_data.issues on issue_message_ref.issue_id = issues.issue_id
where message.repo_id = :repo_id
""")
df_past = pd.read_sql_query(past_SQL, self.db, params={'repo_id': repo_id})
df_past['msg_timestamp'] = pd.to_datetime(df_past['msg_timestamp'])
df_past = df_past.sort_values(by='msg_timestamp')
self.begin_date = df_past['msg_timestamp'].iloc[-1]
# Assign new run_id for every run
self.run_id = self.get_max_id('message_analysis', 'worker_run_id')
self.logger.info(f'Last analyzed msg_id of repo {repo_id} is {df_past["msg_id"].iloc[-1]}')
self.logger.info(f'Fetching recent messages from {self.begin_date} of repo {repo_id}...\n')
# Fetch only recent messages
join_SQL = s.sql.text("""
select message.msg_id, msg_timestamp, msg_text from augur_data.message
left outer join augur_data.pull_request_message_ref on message.msg_id = pull_request_message_ref.msg_id
left outer join augur_data.pull_requests on pull_request_message_ref.pull_request_id = pull_requests.pull_request_id
where message.repo_id = :repo_id and msg_timestamp > :begin_date
UNION
select message.msg_id, msg_timestamp, msg_text from augur_data.message
left outer join augur_data.issue_message_ref on message.msg_id = issue_message_ref.msg_id
left outer join augur_data.issues on issue_message_ref.issue_id = issues.issue_id
where message.repo_id = :repo_id and msg_timestamp > :begin_date""")
else:
self.logger.info(f'Fetching all past messages of repo {repo_id}...')
# Fetch all messages
join_SQL = s.sql.text("""
select message.msg_id, msg_timestamp, msg_text from augur_data.message
left outer join augur_data.pull_request_message_ref on message.msg_id = pull_request_message_ref.msg_id
left outer join augur_data.pull_requests on pull_request_message_ref.pull_request_id = pull_requests.pull_request_id
where message.repo_id = :repo_id
UNION
select message.msg_id, msg_timestamp, msg_text from augur_data.message
left outer join augur_data.issue_message_ref on message.msg_id = issue_message_ref.msg_id
left outer join augur_data.issues on issue_message_ref.issue_id = issues.issue_id
where message.repo_id = :repo_id""")
df_message = pd.read_sql_query(join_SQL, self.db, params={'repo_id': repo_id, 'begin_date': self.begin_date})
self.logger.info(f'Messages dataframe dim: {df_message.shape}')
        self.logger.info(f'Number of messages fetched: {df_message.shape[0]}')
if df_message.shape[0] > 10:
# Sort the dataframe
df_message['msg_timestamp'] = pd.to_datetime(df_message['msg_timestamp'])
df_message = df_message.sort_values(by='msg_timestamp')
# DEBUG:
# df_message.to_csv(f'full_{repo_id}.csv',index=False)
# Create the storage directory for trained models
try:
os.makedirs(self.models_dir, exist_ok=True)
self.logger.info(f"Models storage directory is '{self.models_dir}'\n")
except OSError:
self.logger.error('Models storage directory could not be created \n')
self.logger.info('Starting novelty detection...')
threshold, df_message['rec_err'] = novelty_analysis(df_message,repo_id, self.models_dir, self.full_train, logger=self.logger)
if not self.full_train:
merge_SQL = s.sql.text("""
select novelty_flag, reconstruction_error from augur_data.message_analysis
left outer join augur_data.pull_request_message_ref on message_analysis.msg_id = pull_request_message_ref.msg_id
left outer join augur_data.pull_requests on pull_request_message_ref.pull_request_id = pull_requests.pull_request_id
where pull_request_message_ref.repo_id = :repo_id
UNION
select novelty_flag, reconstruction_error from augur_data.message_analysis
left outer join augur_data.issue_message_ref on message_analysis.msg_id = issue_message_ref.msg_id
left outer join augur_data.issues on issue_message_ref.issue_id = issues.issue_id
where issue_message_ref.repo_id = :repo_id""")
df_past =
|
pd.read_sql_query(merge_SQL, self.db, params={'repo_id': repo_id})
|
pandas.read_sql_query
|
from unittest import TestCase
import pandas as pd
import numpy as np
from datamatch.similarities import AbsoluteNumericalSimilarity, JaroWinklerSimilarity, RelativeNumericalSimilarity
from datamatch.scorers import AlterScorer, FuncScorer, RefuseToScoreException, SimSumScorer, AbsoluteScorer, MinScorer, MaxScorer
class SimSumScorerTestCase(TestCase):
def test_score(self):
scorer = SimSumScorer({
'first_name': JaroWinklerSimilarity(),
'age': AbsoluteNumericalSimilarity(10)
})
columns = ['first_name', 'age']
self.assertEqual(
scorer.score(
pd.Series(["john", 41], index=columns),
pd.Series(["john", 41], index=columns),
),
1,
)
self.assertEqual(
scorer.score(
pd.Series(["jim", 41], index=columns),
pd.Series(["jimm", 43], index=columns),
),
0.8737093656105305,
)
class AbsoluteScorerTestCase(TestCase):
def test_score(self):
scorer = AbsoluteScorer('attract_id', 1)
self.assertEqual(
scorer.score(
pd.Series([1234], index=['attract_id']),
pd.Series([1234], index=['attract_id']),
),
1
)
self.assertRaises(RefuseToScoreException, lambda: scorer.score(
pd.Series([1234], index=['attract_id']),
pd.Series([2345], index=['attract_id']),
))
self.assertRaises(RefuseToScoreException, lambda: scorer.score(
pd.Series([1234], index=['attract_id']),
pd.Series([np.NaN], index=['attract_id']),
))
self.assertRaises(RefuseToScoreException, lambda: scorer.score(
pd.Series([None], index=['attract_id']),
pd.Series([1234], index=['attract_id']),
))
def test_ignore_key_error(self):
series_a = pd.Series([1], index=['a'])
series_b = pd.Series([2], index=['a'])
self.assertRaises(
KeyError,
lambda: AbsoluteScorer('b', 1).score(series_a, series_b)
)
self.assertRaises(
RefuseToScoreException,
lambda: AbsoluteScorer(
'b', 1, ignore_key_error=True
).score(series_a, series_b)
)
class MaxScorerTestCase(TestCase):
def test_score(self):
scorer = MaxScorer([
AbsoluteScorer('attract_id', 1),
SimSumScorer({
'first_name': JaroWinklerSimilarity()
})
])
columns = ['first_name', 'attract_id']
self.assertEqual(
scorer.score(
pd.Series(['john', 5], index=columns),
pd.Series(['jim', 5], index=columns),
),
1
)
self.assertEqual(
scorer.score(
pd.Series(['john', 5], index=columns),
pd.Series(['jim', 4], index=columns),
),
0.575
)
class MinScorerTestCase(TestCase):
def test_score(self):
scorer = MinScorer([
AbsoluteScorer('repell_id', 0),
SimSumScorer({
'first_name': JaroWinklerSimilarity()
})
])
columns = ['first_name', 'repell_id']
self.assertEqual(
scorer.score(
pd.Series(['john', 5], index=columns),
pd.Series(['jim', 5], index=columns),
),
0
)
self.assertEqual(
scorer.score(
pd.Series(['john', 5], index=columns),
pd.Series(['jim', 4], index=columns),
),
0.575
)
class AlterScorerTestCase(TestCase):
def test_score(self):
scorer = AlterScorer(
scorer=SimSumScorer({
'name': JaroWinklerSimilarity(),
'age': RelativeNumericalSimilarity(100)
}),
values=pd.Series(
[1, 2, 1, 3],
index=[1, 2, 3, 4]
),
alter=lambda x: x/2
)
columns = ['name', 'age']
self.assertEqual(
scorer.score(
|
pd.Series(['john', 20], index=columns, name=2)
|
pandas.Series
|
import os
import click
from sklearn.metrics import confusion_matrix, f1_score
import seaborn as sns
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, WeightedRandomSampler
import pandas as pd
import numpy as np
from tqdm import trange
from src.models.fasttext_research import get_train_val_test_dataframes
from sklearn.preprocessing import LabelBinarizer
class Model1(nn.Module):
def __init__(self):
super(Model1, self).__init__()
self.fc1 = nn.Linear(768, 4)
def forward(self, x):
x = self.fc1(x)
output = F.log_softmax(x, dim=1)
return output
class Model2(nn.Module):
def __init__(self):
super(Model2, self).__init__()
self.fc1 = nn.Linear(768, 256)
self.fc2 = nn.Linear(256, 4)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
class Model3(nn.Module):
def __init__(self):
super(Model3, self).__init__()
self.fc1 = nn.Linear(768, 256)
self.dropout = nn.Dropout(p=0.3)
self.fc2 = nn.Linear(256, 4)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.dropout(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def predict(torch_model, data, encoder=None):
out = torch_model(data)
y_pred = np.argmax(out.detach().numpy(), axis=1)
if encoder is not None:
        b = np.zeros((y_pred.shape[0], 4))
        b[np.arange(y_pred.shape[0]), y_pred] = 1
y_pred = encoder.inverse_transform(b)
return y_pred
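# Illustrative usage sketch (assumption, not part of the original code): `predict` turns the
# log-softmax outputs of one of the models above into class indices and, when an encoder such
# as a fitted sklearn LabelBinarizer is passed, decodes those indices back to label strings.
# example_batch = torch.zeros((2, 768))              # hypothetical batch of two embeddings
# class_indices = predict(Model1(), example_batch)   # -> np.ndarray of ints in [0, 4)
# label_strings = predict(Model1(), example_batch, encoder=fitted_label_binarizer)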
def calc_f1_score(data_arr, model) -> float:
all_data = torch.Tensor(np.vstack(data_arr[:, 0]))
all_labels = np.vstack(data_arr[:, 1])
y_pred = predict(model, all_data)
y_true = np.argmax(all_labels, axis=1)
return f1_score(y_true, y_pred, average='macro')
def run_multilayer_perceptron_experiments(train_tweets, val_tweets, train_polemo, train_wordnet, path_to_results: str):
for m in [Model1, Model2, Model3]:
for lr in [0.00005, 0.0001, 0.0005, 0.001, 0.005]:
for name, train_dataset in {"polemo_tweets": (train_polemo, train_tweets),
"wordnet_tweets": (train_wordnet, train_tweets),
"polemo_wordnet_tweets": (train_polemo, train_wordnet, train_tweets)}.items():
for importance_sampling_weight in [5, 10, 15]:
train_polemo['weight'] = 1
train_wordnet['weight'] = 1
train_tweets['weight'] = importance_sampling_weight
train_df = pd.concat(objs=train_dataset, axis=0)
model = m()
optimizer = optim.Adam(model.parameters(), lr=lr)
loss = nn.CrossEntropyLoss()
train_data = []
samples_weights = []
for index, row in train_df.iterrows():
train_data.append([row['embeddings'], row['label_enc']])
samples_weights.append(row['weight'])
val_data = []
for index, row in val_tweets.iterrows():
val_data.append([row['embeddings'], row['label_enc']])
full_train_data_arr = np.array(train_data)
full_val_data_arr = np.array(val_data)
sampler = WeightedRandomSampler(samples_weights, len(samples_weights))
train_loader = DataLoader(
train_data, batch_size=500, num_workers=1, sampler=sampler)
history = dict()
history['f1_score'] = []
history['val_f1_score'] = []
t = trange(100,
desc=f'Training model={m.__name__}, lr={lr}, dataset={name}, importance sampl={importance_sampling_weight}',
leave=True)
for epoch in t:
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
out = model(data)
                            out_loss = loss(out, torch.argmax(target, dim=1))
out_loss.backward()
optimizer.step()
with torch.no_grad():
history['f1_score'].append(calc_f1_score(full_train_data_arr, model))
history['val_f1_score'].append(calc_f1_score(full_val_data_arr, model))
t.set_description(
f'Training model={m.__name__}, lr={lr}, dataset={name}, importance sampl={importance_sampling_weight}, train F1 score={history["f1_score"][-1]}, val F1 score={history["val_f1_score"][-1]}',
refresh=True)
results = pd.DataFrame(data={
"epoch": range(1, 101),
"train_tweets_f_score": history['f1_score'],
"val_tweets_f_score": history['val_f1_score'],
})
results.to_csv(
os.path.join(path_to_results, f"{m.__name__}_{lr}_{name}_{importance_sampling_weight}.csv"),
index=False)
class EarlyStopping(object):
def __init__(self, mode='min', min_delta=0, patience=10, percentage=False):
self.mode = mode
self.min_delta = min_delta
self.patience = patience
self.best = None
self.num_bad_epochs = 0
self.is_better = None
self._init_is_better(mode, min_delta, percentage)
self.best_model = None
if patience == 0:
self.is_better = lambda a, b: True
            self.step = lambda metrics, model: False
def step(self, metrics, model):
if self.best is None:
self.best = metrics
self.best_model = model
return False
if np.isnan(metrics):
return True
if self.is_better(metrics, self.best):
self.num_bad_epochs = 0
self.best = metrics
self.best_model = model
else:
self.num_bad_epochs += 1
if self.num_bad_epochs >= self.patience:
return True
return False
def _init_is_better(self, mode, min_delta, percentage):
if mode not in {'min', 'max'}:
raise ValueError('mode ' + mode + ' is unknown!')
if not percentage:
if mode == 'min':
self.is_better = lambda a, best: a < best - min_delta
if mode == 'max':
self.is_better = lambda a, best: a > best + min_delta
else:
if mode == 'min':
self.is_better = lambda a, best: a < best - (
best * min_delta / 100)
if mode == 'max':
self.is_better = lambda a, best: a > best + (
best * min_delta / 100)
def get_best_model(self):
return self.best_model
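# A minimal, self-contained sketch (assumption, not from the original code) of the
# EarlyStopping contract: step() returns True once the monitored metric has failed to
# improve for `patience` consecutive checks, and get_best_model() returns whatever
# object was passed in at the best score.
def _early_stopping_sketch():
    es = EarlyStopping(mode='max', patience=2)
    fake_scores = [0.50, 0.60, 0.59, 0.58, 0.57]  # improvement stalls after the second score
    for epoch, score in enumerate(fake_scores):
        if es.step(score, model=f'weights@epoch{epoch}'):
            break
    return es.get_best_model()  # -> 'weights@epoch1'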
def run_best_models(train_tweets, val_tweets, train_polemo, val_polemo, train_wordnet, val_wordnet, test_tweets,
path_to_results: str, encoder, path_to_plots: str):
best_parameters = {
"Model_2_lr_0.0005_weight_10_polemo_tweets": (Model2, 0.0005, 10, (train_polemo, train_tweets, val_polemo, val_tweets)),
"Model_2_lr_0.0005_weight_15_polemo_tweets": (Model2, 0.0005, 15, (train_polemo, train_tweets, val_polemo, val_tweets)),
"Model_2_lr_0.0005_weight_5_polemo_tweets": (Model2, 0.0005, 5, (train_polemo, train_tweets, val_polemo, val_tweets)),
"Model_2_lr_0.0005_weight_10_polemo_tweets_wordnet": (
Model2, 0.0005, 10, (train_polemo, train_tweets, train_wordnet, val_polemo, val_tweets, val_wordnet)),
"Model_2_lr_0.001_weight_15_polemo_tweets": (Model2, 0.001, 15, (train_polemo, train_tweets, val_polemo, val_tweets)),
"Model_3_lr_0.001_weight_15_polemo_tweets": (Model3, 0.001, 15, (train_polemo, train_tweets, val_polemo, val_tweets))}
for name, parameters in best_parameters.items():
train_polemo['weight'] = 1
train_wordnet['weight'] = 1
train_tweets['weight'] = parameters[2]
val_polemo['weight'] = 1
val_wordnet['weight'] = 1
val_tweets['weight'] = parameters[2]
train_df = pd.concat(objs=parameters[3], axis=0)
model = parameters[0]()
optimizer = optim.Adam(model.parameters(), lr=parameters[1])
loss = nn.CrossEntropyLoss()
train_data = []
samples_weights = []
for index, row in train_df.iterrows():
train_data.append([row['embeddings'], row['label_enc']])
samples_weights.append(row['weight'])
val_data = []
for index, row in test_tweets.iterrows():
val_data.append([row['embeddings'], row['label_enc']])
full_train_data_arr = np.array(train_data)
full_val_data_arr = np.array(val_data)
sampler = WeightedRandomSampler(samples_weights, len(samples_weights))
train_loader = DataLoader(
train_data, batch_size=500, num_workers=1, sampler=sampler)
history = dict()
history['f1_score'] = []
history['val_f1_score'] = []
es = EarlyStopping(patience=50, mode='max')
t = trange(500,
desc=f'Training model={name}',
leave=True)
for epoch in t:
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
out = model(data)
                out_loss = loss(out, torch.argmax(target, dim=1))
out_loss.backward()
optimizer.step()
with torch.no_grad():
history['f1_score'].append(calc_f1_score(full_train_data_arr, model))
history['val_f1_score'].append(calc_f1_score(full_val_data_arr, model))
if es.step(history['val_f1_score'][-1], model):
model = es.get_best_model()
break # early stop criterion is met, we can stop now
t.set_description(
f'Training model={name} train F1 score={history["f1_score"][-1]}, val F1 score={history["val_f1_score"][-1]}',
refresh=True)
results = pd.DataFrame(data={
"epoch": range(1, len(history['f1_score']) + 1),
"train_tweets_f_score": history['f1_score'],
"test_tweets_f_scores": history['val_f1_score']
})
results.to_csv(
os.path.join(path_to_results, f"best_{name}.csv"),
index=False)
labels = ['ambiguous', 'negative', 'neutral', 'positive']
with torch.no_grad():
cm = confusion_matrix(encoder.inverse_transform(np.array(list(test_tweets['label_enc']))),
predict(model, torch.Tensor(np.vstack(list(test_tweets['embeddings']))), encoder),
                                  labels=labels)
df_cm = pd.DataFrame(cm, index=labels,
columns=labels)
sns.heatmap(df_cm, annot=True)
plt.savefig(os.path.join(path_to_plots, f"heatmap_{name}.png"))
plt.close()
@click.command()
@click.option(
"-trp",
"--train-polemo-embedded",
"train_polemo_embedded",
type=click.Path(file_okay=True, exists=True),
required=True
)
@click.option(
"-vp",
"--val-polemo-embedded",
"val_polemo_embedded",
type=click.Path(file_okay=True, exists=True),
required=True
)
@click.option(
"-tp",
"--test-polemo-embedded",
"test_polemo_embedded",
type=click.Path(file_okay=True, exists=True),
required=True
)
@click.option(
"-pt",
"--political-tweets-embedded",
"political_tweets_embedded",
type=click.Path(file_okay=True, exists=True),
required=True
)
@click.option(
"-w",
"--wordnet-embedded",
"wordnet_embedded",
type=click.Path(file_okay=True, exists=True),
required=True,
)
@click.option(
"-r",
"--path-to-results",
"path_to_results",
type=click.Path(dir_okay=True, exists=True),
required=True
)
@click.option(
"-p",
"--path-to-plots",
"path_to_plots",
type=click.Path(dir_okay=True, exists=True),
required=True
)
def train_models(train_polemo_embedded: str, val_polemo_embedded: str, test_polemo_embedded: str,
political_tweets_embedded: str, wordnet_embedded: str, path_to_results: str,
path_to_plots: str):
train_polemo = pd.read_pickle(train_polemo_embedded)
val_polemo = pd.read_pickle(val_polemo_embedded)
test_polemo =
|
pd.read_pickle(test_polemo_embedded)
|
pandas.read_pickle
|
# Torch imports
import torch as torch
from torch.utils import data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# HexagDLy import
import hexagdly as hg
import copy
import numpy as np
import pandas as pd
import math
import pickle
from matplotlib import pyplot as plt
import matplotlib as mpl
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def get_log_bins(l1, nbins):
bins = np.geomspace(min(l1), max(l1), nbins)
return bins
def get_discrete_colormap(num, colormapstr = 'viridis'):
cmap = plt.get_cmap(colormapstr)
colco = [cmap(i) for i in range(0, cmap.N, int(cmap.N / (num - 1)))]
colco.append(cmap(cmap.N))
cmap2 = mpl.colors.LinearSegmentedColormap.from_list(
'Custom cmap', colco, num)
return cmap2
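# Illustrative only (assumed usage): a 5-level discrete colormap for plotting integer-valued
# class maps; the random image below is placeholder data.
# cmap5 = get_discrete_colormap(5, 'viridis')
# plt.imshow(np.random.randint(0, 5, size=(10, 10)), cmap=cmap5)
# plt.colorbar(ticks=range(5))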
def save_model(path, net, optimizer, epoch, trainloss=0.0, testloss = 0.0, additional_info=""):
d1 = {'epoch': epoch, 'model_state_dict':net.state_dict(), 'optimizer_state_dict':optimizer.state_dict(),
'trainloss': trainloss, 'testloss': testloss, "additional_info":additional_info}
#print("QQ1: ",d1)
torch.save(d1, path)
def load_model(path, base_net, base_optimizer):
checkpoint = torch.load(path)
base_net.load_state_dict(checkpoint['model_state_dict'])
base_optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
additional_info = checkpoint['additional_info']
testloss = checkpoint['testloss']
trainloss = checkpoint['trainloss']
# base_net.eval()
# - or -
base_net.train()
return {"net":base_net, 'optimizer':base_optimizer , 'epoch': epoch, 'trainloss': trainloss,
'testloss': testloss, "additional_info": additional_info }
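# Illustrative checkpoint round trip using the two helpers above (assumption: `net` and
# `optimizer` are an already-constructed nn.Module / optimizer pair; the path is a placeholder).
# save_model('checkpoint.pth', net, optimizer, epoch=10, trainloss=0.12, testloss=0.15)
# state = load_model('checkpoint.pth', net, optimizer)
# net, optimizer, start_epoch = state['net'], state['optimizer'], state['epoch'] + 1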
def get_image_from_datahandler(h, imgid):
return h.get_whole_dataset()[imgid]
def process_data(x):
n2 = max(x.iloc[8:2086])
x.iloc[8:2086] = x.iloc[8:2086].div(n2)
return x
def process_data2(x):
x_a = x[13:2091]
x_array2 = x_a / max(x_a)
x_array2 = x_array2 - 0.5 - min(x_array2) / 2
x_array2 = x_array2 * 1 / max(x_array2)
x_array2 = pd.concat([x[0:9], x_array2])
return x_array2
def process_data_gauss(x, colnum, pixnum):
x_a = x[colnum: colnum+2*pixnum]
x_t = x[colnum + 2 * pixnum: colnum + 4 * pixnum]
t_data = pd.Series([min(x_a),max(x_a)])
#x_a = np.log10(x_a+100)
mean = np.mean(x_a, None)
std = np.std(x_a, None)
x_a -= mean
x_a /= std
x_tarray2 = (((x_t - 10) * 1) / (60 - 0)) #(((value - old_min) * new_range) / (old_max - old_min) - new_min)
x_array2 = pd.concat([x[:colnum], x_a, x_tarray2, t_data])
return x_array2
def process_data3_tc(x):
x_a = x[9:2087]
x_t = x[2087:4165]
t_data = pd.Series([min(x_a),max(x_a)])
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
x_array2 = (((x_a - min(x_a)) * 2) / (max(x_a) - min(x_a))) - 1
x_t2 = (((x_t - 10) * 1) / (60 - 10)) - 0
x_array2 = pd.concat([x[0:9], x_array2])
x_array2 = pd.concat([x_array2, x_t2])
x_array2 = pd.concat([x_array2,t_data])
return x_array2
def process_data3(x, info_col_num, pixnum):
x_a = x[info_col_num:info_col_num+2*pixnum]
x_t = x[info_col_num + 2 * pixnum:info_col_num + 4 * pixnum]
t_data = pd.Series([min(x_a),max(x_a)])
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
x_array2 = (((x_a - min(x_a)) * 2) / (max(x_a) - min(x_a))) - 1
x_array2 = pd.concat([x[0:info_col_num], x_array2])
x_array2 = pd.concat([x_array2, x_t, t_data])
return x_array2
def process_data4(x):
    # Scale both the pixel columns and the timing columns to [-1, 1] using a fixed
    # [-1, 2500] range; keep the leading info columns and append the pixel min/max.
    x_a = x[9:2087]
    x_t = x[2087:4165]
    t_data = pd.Series([min(x_a), max(x_a)])
    x_array2 = (((x_a - -1) * 2) / (2500 - -1)) - 1
    x_tarray2 = (((x_t - -1) * 2) / (2500 - -1)) - 1
    x_array2 = pd.concat([x[0:9], x_array2])
    x_array2 = pd.concat([x_array2, x_tarray2, t_data])
    return x_array2
def process_data3_i_image(x_a, colnum, pixnum):
#x_a = x[9:2087]
#xs = x_a.shape
#print(x_a.shape)
#x_a=x_a.view(-1,40*40)
#t_data = [min(x_a),max(x_a)]
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
for i, x_loop in enumerate(x_a):
x_a[i] = (((x_loop - min(x_loop)) * 1) / (max(x_loop) - min(x_loop)))
#x_a=x_a.view(xs)
return x_a
'''
def process_data3_i(x, info_col_num, pixnum):
x_a = x[info_col_num:info_col_num + 2 * pixnum]
x_t = x[info_col_num + 2 * pixnum:info_col_num + 4 * pixnum]
t_data = pd.Series([min(x_a),max(x_a)])
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
x_array2 = (((x_a - min(x_a)) * 1) / (max(x_a) - min(x_a)))
x_array2 = pd.concat([x[0:info_col_num], x_array2])
x_array2 = pd.concat([x_array2, x_t, t_data])
return x_array2
'''
def process_data3_i(x, colnum, pixnum):
x_a = x[colnum: colnum+2*pixnum]
x_t = x[colnum + 2 * pixnum: colnum + 4 * pixnum]
t_data = pd.Series([min(x_a),max(x_a)])
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
x_array2 = (((x_a - min(x_a)) * 1) / (max(x_a) - min(x_a)))
x_tarray2 = (((x_t - 10) * 1) / (60 - 0)) #(((value - old_min) * new_range) / (old_max - old_min) - new_min)
x_array2 = pd.concat([x[:colnum], x_array2, x_tarray2, t_data])
return x_array2
def process_data4_i(x, colnum, pixnum):
x_a = x[colnum: colnum+2*pixnum]
x_t = x[colnum + 2 * pixnum: colnum + 4 * pixnum]
t_data = pd.Series([min(x_a),max(x_a)])
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
x_array2 = (((x_a - -1) * 1) / (2500))
x_tarray2 = (((x_t - 10) * 1) / (60 - 0)) #(((value - old_min) * new_range) / (old_max - old_min) - new_min)
x_array2 = pd.concat([x[:colnum], x_array2, x_tarray2, t_data])
return x_array2
def process_data4_nt(x):
    # "No timing" variant: scale only the pixel columns to [-1, 1] using a fixed
    # [-1, 2500] range; keep the leading info columns and append the pixel min/max.
    x_a = x[9:2087]
    t_data = pd.Series([min(x_a), max(x_a)])
    x_array2 = (((x_a - -1) * 2) / (2500 - -1)) - 1
    x_array2 = pd.concat([x[0:9], x_array2])
    x_array2 = pd.concat([x_array2, t_data])
    return x_array2
def process_data4_i_nt(x, colnum, pixnum):
x_a = x[colnum:colnum+2*pixnum]
t_data = pd.Series([min(x_a),max(x_a)])
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
x_array2 = (((x_a - -1) * 1) / (2500))
x_array2 = pd.concat([x[0:colnum], x_array2,t_data])
return x_array2
def process_data4_timing(x, colnum, pixnum):
x_a = x[colnum+2*pixnum : colnum+4*pixnum]
t_data = pd.Series([min(x_a),max(x_a)])
#OldRange = (max(x_a) - min(x_a))
#NewRange = 2#(1 - -1)
x_array2 = (((x_a - -1) * 1) / (2500 - -1))
x_array2 =
|
pd.concat([x[0:colnum], x_array2])
|
pandas.concat
|
#! /usr/bin/env python
# coding=utf-8
# Copyright © 2016 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
import json
import pandas as pd
import numpy as np
import scipy.sparse as sp
import os.path
import sys
import itertools
from collections import defaultdict
def symmetrize(a):
return a + a.T - np.diag(a.diagonal())
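# Worked example (illustrative): symmetrize mirrors an upper-triangular matrix across the
# diagonal without double-counting the diagonal itself.
# >>> symmetrize(np.array([[1, 2], [0, 3]]))
# array([[1, 2],
#        [2, 3]])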
class Tables:
def check_data_exists(self):
for table_name in self.needed_tables:
if not os.path.isfile('data/' + table_name + '.csv'):
sys.exit('[ERROR] Needed table ' + table_name)
def __init__(self):
self.needed_tables = ['Business',
'MemberCouncil',
'Tags',
'BusinessRole',
'Active_People',
'Transcript',
'Session',
'Person',
'adj']
self.check_data_exists()
self.df = {}
for table in self.needed_tables:
self.df[table] = pd.read_csv('data/' + table + '.csv')
def get_friends(self, adj):
# sorts a person's friends in decreasing order of collaboration
dico = {}
for i in adj.index:
row = adj.loc[i].sort_values(ascending=False)
friends = []
for j, k in row.iteritems():
if k.item() > 0:
sub_dico = {'friend': j, 'number': k.item()}
friends.append(sub_dico)
dico[i.item()] = friends
return dico
def cosigner(self):
friends = self.relation_between_person('Auteur', 'Cosignataire')
adj_name = 'adj_cosign'
friends_name = 'friends_cosign'
def get_cosign_friends(matrix, person_number):
x = matrix[person_number,:].nonzero()[1]
y = matrix[person_number,:].data[0]
df =
|
pd.DataFrame({'Person_num':x, 'times_cosigner':y })
|
pandas.DataFrame
|
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import re
from math import ceil
import pandas as pd
from sklearn.metrics import classification_report
from scipy.stats import shapiro, boxcox, yeojohnson
from scipy.stats import probplot
from sklearn.preprocessing import LabelEncoder, PowerTransformer
from category_encoders.target_encoder import TargetEncoder
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.linear_model import LinearRegression, LogisticRegression
# from .charts.classification_visualization import classification_visualization
# from .charts.charts import Plot, ScatterChart
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.utils.multiclass import unique_labels
from sklearn.manifold import TSNE
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
import json
from pyod.models.hbos import HBOS
from statsmodels.api import ProbPlot
# from .charts.charts_extras import (
# feature_importances_plot,
# regression_viz,
# classification_viz,
# )
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
RandomForestRegressor,
GradientBoostingRegressor,
)
from sklearn.svm import LinearSVC
import warnings
warnings.filterwarnings("ignore")
sns.set_palette("colorblind")
class CrawtoDS:
def __init__(
self,
data,
target,
test_data=None,
time_dependent=False,
features="infer",
problem="infer",
):
self.input_data = data
self.target = target
self.features = features
self.problem = problem
self.test_data = test_data
self.timedependent = time_dependent
if self.problem == "binary classification":
self.train_data, self.valid_data = train_test_split(
self.input_data, shuffle=True, stratify=self.input_data[self.target],
)
elif self.problem == "regression":
self.train_data, self.valid_data = train_test_split(
self.input_data, shuffle=True,
)
    def nan_features(self):
        """Return the features whose share of NaN values exceeds 25%.

        map computes each feature's NaN ratio and yields the feature name when that ratio
        is above the threshold; filter then drops the False placeholders."""
        f = self.input_data.columns.values
        len_df = len(self.input_data)
        nan_features = list(
            filter(
                lambda x: x is not False,
                map(
                    lambda x: x
                    if self.input_data[x].isna().sum() / len_df > 0.25
                    else False,
                    f,
                ),
            )
        )
        return nan_features
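    # Illustrative only: for a hypothetical 4-row frame where column 'b' is 50% NaN and
    # column 'a' has none, the 0.25 threshold above flags only 'b'.
    # toy = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [np.nan, 2, np.nan, 4]})
    # (toy.isna().sum() / len(toy) > 0.25)  # -> a: False, b: True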
def problematic_features(self):
f = self.input_data.columns.values
problematic_features = []
for i in f:
if "Id" in i:
problematic_features.append(i)
elif "ID" in i:
problematic_features.append(i)
return problematic_features
def undefined_features(self):
if self.features == "infer":
undefined_features = list(self.input_data.columns)
undefined_features.remove(self.target)
for i in self.nan_features:
undefined_features.remove(i)
for i in self.problematic_features:
undefined_features.remove(i)
return undefined_features
def numeric_features(self):
numeric_features = []
l = self.undefined_features
for i in l:
if self.input_data[i].dtype in ["float64", "float", "int", "int64"]:
if len(self.input_data[i].value_counts()) / len(self.input_data) < 0.1:
pass
else:
numeric_features.append(i)
return numeric_features
def categorical_features(self, threshold=10):
self.undefined_features
categorical_features = []
to_remove = []
l = self.undefined_features
for i in l:
if len(self.input_data[i].value_counts()) / len(self.input_data[i]) < 0.10:
categorical_features.append(i)
return categorical_features
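    # Illustrative only: the ratio heuristic above calls a column categorical when its number
    # of distinct values is small relative to the row count, e.g. 3 colours over 1000 rows.
    # toy = pd.Series(np.random.choice(['red', 'green', 'blue'], size=1000))
    # toy.nunique() / len(toy)  # -> 0.003, well under the 0.10 cutoff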
def indicator(self):
indicator = MissingIndicator(features="all")
indicator.fit(self.train_data[self.undefined_features])
return indicator
def train_missing_indicator_df(self):
x = self.indicator.transform(self.train_data[self.undefined_features])
x_labels = ["missing_" + i for i in self.undefined_features]
missing_indicator_df = pd.DataFrame(x, columns=x_labels)
columns = [
i
for i in list(missing_indicator_df.columns.values)
if missing_indicator_df[i].max() == True
]
return missing_indicator_df[columns].replace({True: 1, False: 0})
def valid_missing_indicator_df(self):
x = self.indicator.transform(self.valid_data[self.undefined_features])
x_labels = ["missing_" + i for i in self.undefined_features]
missing_indicator_df = pd.DataFrame(x, columns=x_labels)
columns = list(self.train_missing_indicator_df)
return missing_indicator_df[columns].replace({True: 1, False: 0})
def numeric_imputer(self):
numeric_imputer = SimpleImputer(strategy="median", copy=True)
numeric_imputer.fit(self.train_data[self.numeric_features])
return numeric_imputer
def categorical_imputer(self):
categorical_imputer = SimpleImputer(strategy="most_frequent", copy=True)
categorical_imputer.fit(self.train_data[self.categorical_features])
return categorical_imputer
def train_imputed_numeric_df(self):
x = self.numeric_imputer.transform(self.train_data[self.numeric_features])
x_labels = [i + "_imputed" for i in self.numeric_features]
imputed_numeric_df = pd.DataFrame(x, columns=x_labels)
return imputed_numeric_df
def valid_imputed_numeric_df(self):
x = self.numeric_imputer.transform(self.valid_data[self.numeric_features])
x_labels = [i + "_imputed" for i in self.numeric_features]
imputed_numeric_df = pd.DataFrame(x, columns=x_labels)
return imputed_numeric_df
def yeo_johnson_transformer(self):
yeo_johnson_transformer = PowerTransformer(method="yeo-johnson", copy=True)
yeo_johnson_transformer.fit(self.train_imputed_numeric_df)
return yeo_johnson_transformer
def yeo_johnson_target_transformer(self):
yeo_johnson_target_transformer = PowerTransformer(method="yeo-johnson", copy=True)
yeo_johnson_target_transformer.fit(
np.array(self.train_data[self.target]).reshape(-1, 1)
)
return yeo_johnson_target_transformer
def train_yeojohnson_df(self):
yj = self.yeo_johnson_transformer.transform(self.train_imputed_numeric_df)
columns = self.train_imputed_numeric_df.columns.values
columns = [i + "_yj" for i in columns]
yj = pd.DataFrame(yj, columns=columns)
return yj
def valid_yeojohnson_df(self):
yj = self.yeo_johnson_transformer.transform(self.valid_imputed_numeric_df)
columns = self.valid_imputed_numeric_df.columns.values
columns = [i + "_yj" for i in columns]
yj = pd.DataFrame(yj, columns=columns)
return yj
def train_transformed_target(self):
if self.problem == "binary classification":
return self.train_data[self.target]
elif self.problem == "regression":
s = self.yeo_johnson_target_transformer.transform(
np.array(self.train_data[self.target]).reshape(-1, 1)
)
s = pd.DataFrame(s, columns=[self.target])
return s
def valid_transformed_target(self):
if self.problem == "binary classification":
return self.valid_data[self.target]
elif self.problem == "regression":
s = self.yeo_johnson_target_transformer.transform(
np.array(self.valid_data[self.target]).reshape(-1, 1)
)
s = pd.DataFrame(s, columns=[self.target])
return s
def train_imputed_categorical_df(self):
x = self.categorical_imputer.transform(self.train_data[self.categorical_features])
x_labels = [i + "_imputed" for i in self.categorical_features]
imputed_categorical_df = pd.DataFrame(x, columns=x_labels)
return imputed_categorical_df
def valid_imputed_categorical_df(self):
x = self.categorical_imputer.transform(self.valid_data[self.categorical_features])
x_labels = [i + "_imputed" for i in self.categorical_features]
imputed_categorical_df = pd.DataFrame(x, columns=x_labels)
return imputed_categorical_df
def hbos_transformer(self):
hbos = HBOS()
hbos.fit(self.train_transformed_data)
return hbos
def train_hbos_column(self):
hbos_t = self.hbos_transformer.predict(self.train_transformed_data)
return hbos_t
def valid_hbos_column(self):
hbos_v = self.hbos_transformer.predict(self.valid_transformed_data)
return hbos_v
def test_hbos_column(self):
hbos_test = self.hbos_transformer.predict(self.test_transformed_data)
return hbos_test
def target_encoder(self):
te = TargetEncoder(cols=self.train_imputed_categorical_df.columns.values)
te.fit(X=self.train_imputed_categorical_df, y=self.train_transformed_target)
return te
def train_target_encoded_df(self):
te = self.target_encoder.transform(self.train_imputed_categorical_df)
columns = list(
map(
lambda x: re.sub(r"_imputed", "_target_encoded", x),
list(self.train_imputed_categorical_df.columns.values),
)
)
te = pd.DataFrame(data=te)
te.columns = columns
return te
def valid_target_encoded_df(self):
te = self.target_encoder.transform(self.valid_imputed_categorical_df)
columns = list(
map(
lambda x: re.sub(r"_imputed", "_target_encoded", x),
list(self.valid_imputed_categorical_df.columns.values),
)
)
te = pd.DataFrame(data=te)
te.columns = columns
return te
def train_transformed_data(self):
train_transformed_data = (
self.train_target_encoded_df.merge(
self.train_yeojohnson_df, left_index=True, right_index=True
)
.merge(self.train_missing_indicator_df, left_index=True, right_index=True)
.replace(np.nan, 0)
)
return train_transformed_data
def valid_transformed_data(self):
valid_transformed_data = (
self.valid_target_encoded_df.merge(
self.valid_yeojohnson_df, left_index=True, right_index=True
)
.merge(self.valid_missing_indicator_df, left_index=True, right_index=True)
.replace(np.nan, 0)
)
return valid_transformed_data
def test_missing_indicator_df(self):
if self.test_data is not None:
x = self.indicator.transform(self.test_data[self.undefined_features])
x_labels = ["missing_" + i for i in self.undefined_features]
missing_indicator_df = pd.DataFrame(x, columns=x_labels)
columns = list(self.train_missing_indicator_df)
return missing_indicator_df[columns].replace({True: 1, False: 0})
def test_imputed_numeric_df(self):
if self.test_data is not None:
x = self.numeric_imputer.transform(self.test_data[self.numeric_features])
x_labels = [i + "_imputed" for i in self.numeric_features]
imputed_numeric_df = pd.DataFrame(x, columns=x_labels)
return imputed_numeric_df
def test_yeojohnson_df(self):
if self.test_data is not None:
yj = self.yeo_johnson_transformer.transform(self.test_imputed_numeric_df)
columns = self.test_imputed_numeric_df.columns.values
columns = [i + "_yj" for i in columns]
yj = pd.DataFrame(yj, columns=columns)
return yj
def test_imputed_categorical_df(self):
if self.test_data is not None:
x = self.categorical_imputer.transform(
self.test_data[self.categorical_features]
)
x_labels = [i + "_imputed" for i in self.categorical_features]
imputed_categorical_df = pd.DataFrame(x, columns=x_labels)
return imputed_categorical_df
def test_target_encoded_df(self):
if self.test_data is not None:
te = self.target_encoder.transform(self.test_imputed_categorical_df)
columns = list(
map(
lambda x: re.sub(r"_imputed", "_target_encoded", x),
list(self.test_imputed_categorical_df.columns.values),
)
)
te =
|
pd.DataFrame(data=te)
|
pandas.DataFrame
|
# -*-coding:utf-8-*-
import os
import pandas as pd
import torch
if __name__ == '__main__':
os.makedirs(os.path.join('..', 'data'), exist_ok=True)
data_file = os.path.join('..', 'data', 'house_tiny.csv')
with open(data_file, 'w') as f:
        f.write('NumRooms,Alley,Price\n')  # column names
        f.write('NA,Pave,127500\n')  # each row is one data sample
f.write('2,NA,106000\n')
f.write('4,NA,178100\n')
f.write('NA,NA,140000\n')
data =
|
pd.read_csv(data_file)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 20 21:05:05 2021
@author: dariu
"""
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import pacmap
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import umap
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
#import sklearn.cluster
from sklearn.decomposition import PCA
from sklearn import metrics
path = "C:\\Users\\dariu\\Documents\\Master Wirtschaftsinformatik\\Data Challenges\\Data\\"
directorys = [
['training_setA/training/', 'p0'],
['training_setB/training_setB/', 'p1']
]
dfs = []
for z, (directory, file_head) in enumerate(directorys):
for i, filename in enumerate(tqdm(os.listdir(path + directory))):
df_temp =
|
pd.read_csv(path + directory + filename, skiprows=0, sep='|')
|
pandas.read_csv
|
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
def load_data(filename: str) -> pd.DataFrame:
"""
Load city daily temperature dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (Temp)
"""
df = pd.read_csv(filename, parse_dates=['Date'])
df['DayOfYear'] = df['Date'].apply(lambda date: date.timetuple().tm_yday)
df = df.drop(df[df['Temp'] < -50].index) # Drop temperature outliers
return df
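# Quick sanity check of the DayOfYear feature computed above (illustrative):
# pd.Timestamp('1995-02-01').timetuple().tm_yday  # -> 32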
if __name__ == '__main__':
np.random.seed(0)
# Question 1 - Load and preprocessing of city temperature dataset
data = load_data("../datasets/City_Temperature.csv")
data["YearStr"] = data["Year"].astype(str)
# Question 2 - Exploring data for specific country
data_israel = data[data['Country'] == 'Israel']
years_order = list(sorted(data_israel["YearStr"].unique()))
fig = px.scatter(data_israel, x="DayOfYear", y="Temp", color="YearStr", category_orders={"YearStr": years_order},
color_discrete_sequence=px.colors.sequential.Turbo)
fig.update_layout(title="Israel: Temperatures throughout the year<br><sup> "
"Measured Temp. on each day of year</sup>",
xaxis={'title': 'Day Of Year'},
yaxis={'title': 'Temperature'}, title_x=0.5, title_font_size=25,
legend_title="Year", width=800, height=500)
fig.show()
df_stddev_months = data_israel.groupby("Month")["Temp"].agg(np.std)
fig = px.bar(df_stddev_months, x=df_stddev_months.index, y="Temp")
fig.update_layout(title="Israel: Standard Deviation of Temp. Per Month<br><sup>"
"Standard deviation of temperature per month, based on measurments 1995-2007</sup>",
xaxis={'title': 'Month', 'dtick': 1},
yaxis={'title': 'Standard Deviation'}, title_x=0.5, title_font_size=25,
legend_title="Year", width=800, height=500)
fig.show()
# Question 3 - Exploring differences between countries
df_country_month = data.groupby(["Country", "Month"]).agg({"Temp": [np.mean, np.std]}).reset_index()
df_country_month.columns = ["Country", "Month", "Mean", "Std"]
fig = px.line(df_country_month, x="Month", y="Mean", color="Country", error_y="Std", line_shape='spline')
fig.update_layout(title="Average Monthly Temperature",
xaxis={'title': 'Month', 'dtick': 1},
yaxis={'title': 'Temperature'}, title_x=0.5, title_font_size=25,
legend_title="Country", width=800, height=500)
fig.show()
# Question 4 - Fitting model for different values of `k`
X_train, y_train, X_test, y_test = split_train_test(data_israel[["DayOfYear"]], data_israel["Temp"])
df_polynom =
|
pd.DataFrame(columns=['Degree', 'Loss'])
|
pandas.DataFrame
|
from google.cloud import bigquery
from pandas import DataFrame as df
import pandas as pd
from lib.util.parse_url import extract_org, extract_repo
from datetime import datetime
TABLE_SCHEMA = [
bigquery.SchemaField(name='org', field_type=bigquery.enums.SqlTypeNames.STRING),
bigquery.SchemaField(name='repo', field_type=bigquery.enums.SqlTypeNames.STRING),
bigquery.SchemaField(name='number', field_type=bigquery.enums.SqlTypeNames.INT64),
bigquery.SchemaField(name='user', field_type=bigquery.enums.SqlTypeNames.STRING),
bigquery.SchemaField(name='comment', field_type=bigquery.enums.SqlTypeNames.STRING),
bigquery.SchemaField(name='updated_at', field_type=bigquery.enums.SqlTypeNames.TIMESTAMP),
bigquery.SchemaField(name='commit_id', field_type=bigquery.enums.SqlTypeNames.STRING),
bigquery.SchemaField(name='path', field_type=bigquery.enums.SqlTypeNames.STRING),
]
class PullComments:
def __init__(self, df_issue_comments, df_review_comments):
self.pull_comments = pd.concat([df_issue_comments, df_review_comments], sort=False)
def append(self, another):
self.pull_comments = self.pull_comments.append(another.pull_comments, ignore_index=True)
def to_gbq(self, project, dataset, org):
if self.pull_comments.empty:
return
self.pull_comments['updated_at'] =
|
pd.to_datetime(self.pull_comments['updated_at'],unit='s')
|
pandas.to_datetime
|
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
def test_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_dup_no_colindex)
def test_no_extra_vars(self):
for df in self.df_list:
assert ('__idx__','__key__') not in df.columns
class TestPortfolioAverages:
input_data = DataFrameTest()
expect_avgs_no_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001),
(1, 'b', 1.0550000000000002),
(2, 'a', 1.1050000000000002),
(2, 'b', 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET'])
expect_avgs_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001, 1.025),
(1, 'b', 1.0550000000000002, 1.0550000000000002),
(2, 'a', 1.1050000000000002, 1.12),
(2, 'b', 1.0750000000000002, 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET', 'RET_wavg'])
expect_ports = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, 1),
(10516, 'a', '1/2/2000', 1.02, 1, 1),
(10516, 'a', '1/3/2000', 1.03, 1, 1),
(10516, 'a', '1/4/2000', 1.04, 0, 1),
(10516, 'b', '1/1/2000', 1.05, 1, 1),
(10516, 'b', '1/2/2000', 1.06, 1, 1),
(10516, 'b', '1/3/2000', 1.07, 1, 2),
(10516, 'b', '1/4/2000', 1.08, 1, 2),
(10517, 'a', '1/1/2000', 1.09, 0, 2),
(10517, 'a', '1/2/2000', 1.1, 0, 2),
(10517, 'a', '1/3/2000', 1.11, 0, 2),
(10517, 'a', '1/4/2000', 1.12, 1, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight', 'portfolio'])
avgs, ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar')
w_avgs, w_ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar', wtvar='weight')
def test_simple_averages(self):
assert_frame_equal(self.expect_avgs_no_wt, self.avgs, check_dtype=False)
def test_weighted_averages(self):
assert_frame_equal(self.expect_avgs_wt, self.w_avgs, check_dtype=False)
def test_portfolio_construction(self):
print(self.ports)
assert_frame_equal(self.expect_ports, self.ports, check_dtype=False)
assert_frame_equal(self.expect_ports, self.w_ports, check_dtype=False)
class TestWinsorize(DataFrameTest):
def test_winsor_40_subset_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.022624),
(10516, 'a', '1/2/2000', 1.022624),
(10516, 'a', '1/3/2000', 1.02672),
(10516, 'a', '1/4/2000', 1.02672),
(10516, 'b', '1/1/2000', 1.062624),
(10516, 'b', '1/2/2000', 1.062624),
(10516, 'b', '1/3/2000', 1.06672),
(10516, 'b', '1/4/2000', 1.06672),
(10517, 'a', '1/1/2000', 1.102624),
(10517, 'a', '1/2/2000', 1.102624),
(10517, 'a', '1/3/2000', 1.10672),
(10517, 'a', '1/4/2000', 1.10672),
], columns = ['PERMNO', 'byvar', 'Date', 'RET'])
wins = dero.pandas.winsorize(self.df, .4, subset='RET', byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, wins, check_less_precise=True)
class TestRegBy(DataFrameTest):
def create_indf(self):
indf = self.df_weight.copy()
indf['key'] = indf['PERMNO'].astype(str) + '_' + indf['byvar']
return indf
def test_regby_nocons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.48774684748988806, '10516_a'),
(0.9388636664168903, '10516_b'),
(0.22929206076239614, '10517_a'),
], columns = ['coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key', cons=False)
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(-32.89999999999997, 29.999999999999982, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons_low_obs(self):
indf = self.create_indf().loc[:8,:] #makes it so that one byvar only has one obs
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(nan, nan, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
class TestExpandMonths(DataFrameTest):
def test_expand_months_tradedays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
def test_expand_months_calendardays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-01 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-02 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-08 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-09 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-15 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-16 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-17 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-22 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-23 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-29 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-30 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df, trade_days=False)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
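# A minimal sketch of expanding one month-dated row into daily rows, as the tests above
# exercise. This is an assumption about the approach: plain business days ('B') do not
# exclude exchange holidays such as 2000-01-17, which the expected frames above evidently
# do, so dero presumably uses a trading calendar.
def _example_expand_months(df, datevar='Date', trade_days=True):
    freq = 'B' if trade_days else 'D'
    def days_in_month(d):
        return list(pd.date_range(d.replace(day=1), d + pd.offsets.MonthEnd(0), freq=freq))
    out = df.copy()
    out['Daily Date'] = out[datevar].apply(days_in_month)
    return out.explode('Daily Date').reset_index(drop=True)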
class TestPortfolio(DataFrameTest):
def test_portfolio_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 2),
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
p = dero.pandas.portfolio(self.df, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
def test_portfolio_with_nan_and_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', nan, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1), #changed from 2 to 1 when updated nan handling
(10516, 'a', '1/4/2000', 1.04, 2),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 2),
(10516, 'b', '1/4/2000', 1.08, 2),
(10517, 'a', '1/1/2000', 1.09, 1),
(10517, 'a', '1/2/2000', 1.1, 1),
(10517, 'a', '1/3/2000', 1.11, 2),
(10517, 'a', '1/4/2000', 1.12, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'portfolio'])
indf = self.df.copy()
indf.loc[0, 'RET'] = nan
p = dero.pandas.portfolio(indf, 'RET', ngroups=2, byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, p, check_dtype=False)
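# A minimal sketch of within-group quantile portfolio assignment, similar to what the
# tests above exercise. This is an assumption about the approach; in particular dero's
# NaN handling (portfolio 0 for missing values) is not reproduced here.
def _example_portfolio(df, var, ngroups, byvars):
    out = df.copy()
    out['portfolio'] = (
        out.groupby(byvars)[var]
           .transform(lambda s: pd.qcut(s, ngroups, labels=False) + 1)
    )
    return out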
class TestConvertSASDateToPandasDate:
df_sasdate = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
df_sasdate_nan = pd.DataFrame(data = [
('011508', 16114.0),
('011508', 16482.0),
('011508', 17178.0),
('011508', 17197.0),
('011508', nan),
('011508', 17212.0),
], columns = ['gvkey', 'datadate'])
def test_convert(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate['datadate']))
assert_frame_equal(expect_df, converted)
def test_convert_nan(self):
expect_df = pd.DataFrame(data = [
(numpy.datetime64('2004-02-13T00:00:00.000000000'),),
(numpy.datetime64('2005-02-15T00:00:00.000000000'),),
(numpy.datetime64('2007-01-12T00:00:00.000000000'),),
(numpy.datetime64('2007-01-31T00:00:00.000000000'),),
(numpy.datetime64('NaT'),),
(numpy.datetime64('2007-02-15T00:00:00.000000000'),),
], columns = [0])
converted = pd.DataFrame(dero.pandas.convert_sas_date_to_pandas_date(self.df_sasdate_nan['datadate']))
assert_frame_equal(expect_df, converted)
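# The tests above are consistent with SAS dates being day counts from the SAS epoch,
# 1960-01-01 (e.g. 16114 days -> 2004-02-13). A minimal sketch of such a conversion
# (an assumption; dero's implementation may differ):
def _example_sas_date_to_timestamp(sas_days):
    """Convert a Series of SAS day counts to pandas Timestamps (NaN becomes NaT)."""
    return pd.to_datetime(sas_days, unit='D', origin='1960-01-01')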
class TestMapWindows(DataFrameTest):
times = [
[-4, -2, 0],
[-3, 1, 2],
[4, 5, 6],
[0, 1, 2],
[-1, 0, 1]
]
df_period_str = pd.DataFrame([
(10516, '1/1/2000', 1.01),
(10516, '1/2/2000', 1.02),
(10516, '1/3/2000', 1.03),
(10516, '1/4/2000', 1.04),
(10516, '1/5/2000', 1.05),
(10516, '1/6/2000', 1.06),
(10516, '1/7/2000', 1.07),
(10516, '1/8/2000', 1.08),
(10517, '1/1/2000', 1.09),
(10517, '1/2/2000', 1.10),
(10517, '1/3/2000', 1.11),
(10517, '1/4/2000', 1.12),
(10517, '1/5/2000', 1.05),
(10517, '1/6/2000', 1.06),
(10517, '1/7/2000', 1.07),
(10517, '1/8/2000', 1.08),
], columns = ['PERMNO','Date', 'RET'])
df_period = df_period_str.copy()
df_period['Date'] = pd.to_datetime(df_period['Date'])
expect_dfs = [
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 2),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 2),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 2),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 1),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 2),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 1),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 2),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__']),
pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 2),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 3),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 2),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 3),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 3),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 3),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 3),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 3),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__'])
]
expect_df_first = pd.DataFrame(data = [
(10516, Timestamp('2000-01-01 00:00:00'), 1.01, 0),
(10516, Timestamp('2000-01-02 00:00:00'), 1.02, 1),
(10516, Timestamp('2000-01-03 00:00:00'), 1.03, 1),
(10516, Timestamp('2000-01-04 00:00:00'), 1.04, 1),
(10516, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10516, Timestamp('2000-01-06 00:00:00'), 1.06, 1),
(10516, Timestamp('2000-01-07 00:00:00'), 1.07, 1),
(10516, Timestamp('2000-01-08 00:00:00'), 1.08, 1),
(10517, Timestamp('2000-01-01 00:00:00'), 1.09, 0),
(10517, Timestamp('2000-01-02 00:00:00'), 1.1, 1),
(10517, Timestamp('2000-01-03 00:00:00'), 1.11, 1),
(10517, Timestamp('2000-01-04 00:00:00'), 1.12, 1),
(10517, Timestamp('2000-01-05 00:00:00'), 1.05, 1),
(10517, Timestamp('2000-01-06 00:00:00'), 1.06, 1),
(10517, Timestamp('2000-01-07 00:00:00'), 1.07, 1),
(10517, Timestamp('2000-01-08 00:00:00'), 1.08, 1),
], columns = ['PERMNO', 'Date', 'RET', '__map_window__'])
def run_for_each_time(func):
"""
Decorator that can be applied to any function whose args are (self, time, expect_df) which runs the function
for each time in self.times and picks the appropriate matching expect_df
"""
def run(self):
for t, time in enumerate(self.times):
func(self, time, self.expect_dfs[t])
return run
def test_method_first(self):
result = dero.pandas._map_windows(self.df_period, self.times[0], method='first',
periodvar='Date', byvars=['PERMNO'])
assert_frame_equal(result, self.expect_df_first)
@run_for_each_time
def test_method_between(self, time, expect_df):
result = dero.pandas._map_windows(self.df_period, time, method='between',
periodvar='Date', byvars=['PERMNO'])
assert_frame_equal(result, expect_df)
class TestLeftMergeLatest(DataFrameTest):
def test_left_merge_latest(self):
expect_df = pd.DataFrame(data = [
('001076', Timestamp('1995-03-01 00:00:00'), Timestamp('1995-02-01 00:00:00')),
('001076', Timestamp('1995-04-01 00:00:00'), Timestamp('1995-03-02 00:00:00')),
('001722', Timestamp('2012-01-01 00:00:00'), Timestamp('2011-11-01 00:00:00')),
('001722', Timestamp('2012-07-01 00:00:00'), Timestamp('2011-11-01 00:00:00')),
('001722', numpy.timedelta64('NaT','ns'), numpy.timedelta64('NaT','ns')),
(numpy.datetime64('NaT'), numpy.datetime64('2012-01-01T00:00:00.000000000'), numpy.datetime64('NaT')),
], columns = ['GVKEY', 'Date', 'Date_y'])
lm = dero.pandas.left_merge_latest(self.df_gvkey_str, self.df_gvkey_str2, on='GVKEY')
lm_low_mem = dero.pandas.left_merge_latest(self.df_gvkey_str, self.df_gvkey_str2, on='GVKEY', low_memory=True)
lm_sql = dero.pandas.left_merge_latest(self.df_gvkey_str, self.df_gvkey_str2,
on='GVKEY', backend='sql')
assert_frame_equal(expect_df, lm, check_dtype=False)
assert_frame_equal(expect_df.iloc[:-1], lm_low_mem, check_dtype=False)
assert_frame_equal(expect_df, lm_sql, check_dtype=False)
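# A minimal sketch of the "merge the most recent prior observation" behaviour these tests
# exercise, using pandas.merge_asof. This is an assumption about the approach (dero's
# implementation and its NaT/unmatched-key handling may differ); merge_asof requires both
# frames to be sorted on the date key.
def _example_left_merge_latest(left, right, on='GVKEY', date='Date'):
    right_renamed = right.rename(columns={date: date + '_y'})
    return pd.merge_asof(
        left.sort_values(date),
        right_renamed.sort_values(date + '_y'),
        left_on=date, right_on=date + '_y',
        by=on, direction='backward',
    )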
class TestVarChangeByGroups(DataFrameTest):
def test_multi_byvar_single_var(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, nan),
(10516, 'a', '1/2/2000', 1.02, 0.010000000000000009),
(10516, 'a', '1/3/2000', 1.03, 0.010000000000000009),
(10516, 'a', '1/4/2000', 1.04, 0.010000000000000009),
(10516, 'b', '1/1/2000', 1.05, nan),
(10516, 'b', '1/2/2000', 1.06, 0.010000000000000009),
(10516, 'b', '1/3/2000', 1.07, 0.010000000000000009),
(10516, 'b', '1/4/2000', 1.08, 0.010000000000000009),
(10517, 'a', '1/1/2000', 1.09, nan),
(10517, 'a', '1/2/2000', 1.1, 0.010000000000000009),
(10517, 'a', '1/3/2000', 1.11, 0.010000000000000009),
(10517, 'a', '1/4/2000', 1.12, 0.010000000000000009),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'RET_change'])
vc = dero.pandas.var_change_by_groups(self.df, 'RET', ['PERMNO','byvar'])
assert_frame_equal(expect_df, vc)
def test_multi_byvar_multi_var(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, nan, nan),
(10516, 'a', '1/2/2000', 1.02, 1, 0.010000000000000009, 1.0),
(10516, 'a', '1/3/2000', 1.03, 1, 0.010000000000000009, 0.0),
(10516, 'a', '1/4/2000', 1.04, 0, 0.010000000000000009, -1.0),
(10516, 'b', '1/1/2000', 1.05, 1, nan, nan),
(10516, 'b', '1/2/2000', 1.06, 1, 0.010000000000000009, 0.0),
(10516, 'b', '1/3/2000', 1.07, 1, 0.010000000000000009, 0.0),
(10516, 'b', '1/4/2000', 1.08, 1, 0.010000000000000009, 0.0),
(10517, 'a', '1/1/2000', 1.09, 0, nan, nan),
(10517, 'a', '1/2/2000', 1.1, 0, 0.010000000000000009, 0.0),
(10517, 'a', '1/3/2000', 1.11, 0, 0.010000000000000009, 0.0),
(10517, 'a', '1/4/2000', 1.12, 1, 0.010000000000000009, 1.0),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight',
'RET_change', 'weight_change'])
vc = dero.pandas.var_change_by_groups(self.df_weight, ['RET','weight'], ['PERMNO','byvar'])
assert_frame_equal(expect_df, vc)
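# A minimal sketch of per-group first differences matching the *_change columns in the
# expected frames above (an assumption about the approach, not dero's code).
def _example_var_change_by_groups(df, var, byvars):
    out = df.copy()
    for col in ([var] if isinstance(var, str) else list(var)):
        out[col + '_change'] = out.groupby(byvars)[col].diff()
    return out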
class TestFillExcludedRows(DataFrameTest):
expect_df_nofill = pd.DataFrame(data = [
('001076', Timestamp('1995-03-01 00:00:00')),
        ('001076', Timestamp('1995-04-01 00:00:00')),
        # (snippet truncated here; target API: pandas.Timestamp)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
            186: pd.Timestamp("2012-11-04 00:00:00"),
            # (snippet truncated here; target API: pandas.Timestamp)
import pandas as pd
import numpy as np
import os
import re
import config
DATA_PATH=config.DATA_PATH
def remove_linebreak(string):
return string.replace('\r',' ').replace('\n',' ')
def split_table_string(string):
trimmedTableString=string[string.rfind("PNR :"):]
string=string[:string.rfind("PNR :")]
return (string, trimmedTableString)
def remove_multispace(x):
x = str(x).strip()
x = re.sub(' +', ' ',x)
return x
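# Illustrative use of the helpers above on a hypothetical raw cell value:
# remove_multispace(remove_linebreak('PNR :\r\n 12345   ABC')) -> 'PNR : 12345 ABC'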
class preprocess:
def __init__(self):
        self.voc_total = pd.read_excel(DATA_PATH, sheet_name=None, engine='openpyxl')
        # (snippet truncated here; target API: pandas.read_excel)
import pandas as pd
from datetime import datetime
from pybliometrics.scopus import AuthorRetrieval
from pybliometrics.scopus.utils import config
# print("My API-Key: "+config['Authentication']['APIKey']+"\nMy Token: "+config['Authentication']['InstToken'])
### AuthorRetrieval(author_id, refresh=False, view='ENHANCED')
ar=AuthorRetrieval(author_id="6603694127", refresh=False, view="ENHANCED");
for numero in range(5):
print("******************************************************************************************************************************************************");
print("*****************************************************************INIZIO DEL PROGRAMMA*****************************************************************");
for numero in range(5):
print("******************************************************************************************************************************************************");
# GENERAL INFORMATION
#print(ar);
# CURRENT AND PAST AFFILIATIONS
print("AFFILIATION(S):\n- CURRENT:");
print(pd.DataFrame(ar.affiliation_current));
print("- HISTORY:");
print(pd.DataFrame(ar.affiliation_history));
# ALIAS (MERGED PROFILE)
print("\nALIAS:");
print(pd.DataFrame(ar.alias));
# CITATIONS
print("\nCITATION COUNT: "+str(ar.citation_count)+"\nCITED BY COUNT: "+str(ar.cited_by_count));
# CO-AUTHORS
print("\nCOAUTHOR(S):\n- COUNT: "+str(ar.coauthor_count)+"\n- COAUTHOR PAGE LINK: "+ar.coauthor_link);
# RECORD CREATION DATE
date=datetime(ar.date_created[0], ar.date_created[1], ar.date_created[2]);
print("\nDATE CREATED: "+str(date));
# DATA
print("\nDATA:\n- EID: "+ar.eid+"\n- ID: "+str(ar.identifier)+"\n- ORCID: "+ar.orcid+"\n- HISTORICAL IDENTIFIER");
print(pd.DataFrame(ar.historical_identifier));
print("- GIVEN FULL NAME: "+ar.given_name+" "+ar.surname+"\n- INDEXED NAME: "+ar.indexed_name+"\n- INITIALS: "+ar.initials+" "+ar.surname+"\n- NAME VARIANTS:");
print(pd.DataFrame(ar.name_variants));
# (snippet truncated here; target API: pandas.DataFrame)
# -*- coding: utf-8 -*-
"""
Created on : March 17th 2020
Goal : Make the script work with thousands of samples (excluding 1 bp STRs, since they are not of interest at this moment and they contribute ~40+ % of total STRs)
Assumption : assuming we don't want to create a controls file for future use or use an already existing controls file
@author : <NAME> (github: bharatij)
Updated on : Jan 19th 2021
Addition : option to run the script per chromosome; by default it runs genome-wide
Updated on : Jan 28th, 2021 to fix the sample number issue
"""
#!/usr/bin/env python
"""Estimate allele lengths and find outliers at STR loci : accomodate thausands of samples by processing the data in chunks, save in separate compressed file
"""
import warnings
import datetime
from datetime import datetime
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
import argparse
import sys
import glob
import os
import re
import numpy as np
import statsmodels.api as sm
from scipy.stats import norm
from statsmodels.sandbox.stats.multicomp import multipletests
from sklearn import linear_model
import pandas as pd
import gc
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1.0"
__email__ = "<EMAIL>"
def parse_args():
"""Parse the input arguments, use '-h' for help"""
parser = argparse.ArgumentParser(description='Estimate allele lengths and find outliers at STR loci.')
parser.add_argument(
'--locus_counts', type=str, nargs='+', required = True,
help='.locus_counts files for all samples. Contains the number of reads assigned to each STR locus.')
parser.add_argument(
'--STR_counts', type=str, nargs='+', required = True,
help='.STR_counts files for all samples. Contains the number of reads mapped to each STR decoy chromosome.')
parser.add_argument(
'--median_cov', type=str, nargs='+', required = True,
help='.median_cov files for all samples. Text files containing median coverage.')
parser.add_argument(
'--out', type=str, default = '',
help='Prefix for all output files (suffix will be STRs.tsv) (default: %(default)s)')
parser.add_argument(
'--model', type=str, default='STRcov.model.csv',
help='Data to produce linear regression model (provided with STRetch) (default: %(default)s)')
parser.add_argument(
'--chrom', type=str, default='',
help='Process regions on this chromosome only, if not specified run it genome-wide')
####Bharati: remove control and emit option
#parser.add_argument(
# '--control', type=str, default='',
# help='Input file for median and standard deviation estimates at each locus from a set of control samples. This file can be produced by this script using the emit option. If this option is not set, all samples in the current batch will be used as controls by default.')
#parser.add_argument(
# '--emit', type=str, default='',
# help='Output file for median and standard deviation estimates at each locus (tsv).')
return parser.parse_args()
def get_sample(fullpath):
"""Get the sample ID from the filename"""
basename = os.path.basename(fullpath)
return(basename.split('.')[0])
def parse_STRcov(filename,chrom):
"""Parse all STR coverage"""
sample_id = get_sample(filename)
try:
cov_data = pd.read_table(filename, delim_whitespace = True,
names = ['chrom', 'start', 'end', 'decoycov'])
except pd.io.common.EmptyDataError:
sys.exit('ERROR: file {0} was empty.\n'.format(filename))
cov_data['sample'] = sample_id
cov_data['repeatunit'] = [x.split('-')[1] for x in cov_data['chrom']]
cov_data = cov_data[['sample', 'repeatunit', 'decoycov']]
    # remove mono-nucleotide repeat units
cov_data = cov_data[cov_data['repeatunit'].str.len() > 1]
cov_data = cov_data.reset_index(drop=True)
if chrom != '' :
dummy = {'sample': sample_id, 'repeatunit': 'XXX','decoycov' : 0}
cov_data = cov_data.append(dummy, ignore_index = True)
return(cov_data)
def parse_locuscov(filename,chrom):
"""Parse locuscoverage data produced by identify_locus.py"""
sample_id = get_sample(filename)
try:
locuscov_data = pd.read_table(filename, delim_whitespace = True)
except pd.io.common.EmptyDataError:
sys.exit('ERROR: file {0} was empty.\n'.format(filename))
if locuscov_data.shape[0] == 0: # Check for file with only header
sys.exit('ERROR: file {0} contained 0 loci.\n'.format(filename))
dummyChr=locuscov_data['STR_chr'][0]
locuscov_data['sample'] = sample_id
    locuscov_data['locus'] = ['{0}-{1}-{2}'.format(locuscov_data['STR_chr'][i],
        locuscov_data['STR_start'][i], locuscov_data['STR_stop'][i]) for i in range(len(locuscov_data.index))]
locuscov_data['repeatunit'] = locuscov_data['motif']
locuscov_data['locuscoverage'] = locuscov_data['count']
locuscov_data = locuscov_data[['sample', 'locus', 'repeatunit', 'reflen', 'locuscoverage']]
#if chrom specified filter data for chrom
if chrom != '' :
dummyLocus= '{0}-{1}-{2}'.format(chrom, 0, 0)
dummy = {'sample': sample_id, 'locus': dummyLocus, 'repeatunit': 'XXX','reflen': 0, 'locuscoverage' : 0}
locuscov_data = locuscov_data.append(dummy, ignore_index = True)
chrom = chrom + '-'
locuscov_data = locuscov_data[locuscov_data['locus'].str.startswith(chrom)]
locuscov_data = locuscov_data.reset_index(drop=True)
#remove mono-nucleotide
locuscov_data=locuscov_data[locuscov_data['repeatunit'].str.len() > 1]
locuscov_data = locuscov_data.reset_index(drop=True)
if locuscov_data.shape[0] == 0: # if no data left
dummyLocus= '{0}-{1}-{2}'.format(dummyChr, 0, 0)
dummy = {'sample': sample_id, 'locus': dummyLocus, 'repeatunit': 'XXX','reflen': 0, 'locuscoverage' : 0}
locuscov_data = locuscov_data.append(dummy, ignore_index = True)
return(locuscov_data)
def parse_genomecov(filename):
"""Parse median genome coverage from covmed output.
Assumes median coverage is the top left value in the text file."""
sample_id = get_sample(filename)
try:
mediancov = pd.read_table(filename, delim_whitespace = True, header = None).iloc[0,0]
except pd.io.common.EmptyDataError:
sys.exit('ERROR: file {0} was empty.\n'.format(filename))
if mediancov < 1:
sys.exit('ERROR: Median coverage in file {0} was {1}.\nSuch a low value could indicate median coverage was not correctly calculated,\nfor example an incorrect target region was specified or the WGS pipeline was used for exome data.'.format(filename, mediancov))
genomecov_data = pd.DataFrame({'sample': [sample_id], 'genomecov': [mediancov]})
return(genomecov_data)
####Bharati: disable this function
#def parse_controls(control_file):
# """Parse control file with columns locus, median and standard deviation"""
#
# control_estimates = pd.read_table(control_file, index_col=0)
#
# # Allow for old style column headings, but change to mu and sd.
# if control_estimates.columns[0] in ['mu', 'median'] and control_estimates.columns[1] in ['sd', 'SD']:
# colnames = list(control_estimates.columns)
# colnames[0:2] = ['mu', 'sd']
# control_estimates.columns = colnames
# else:
# raise ValueError(''.join(["The column names in the control file ",
# "don't look right, expecting columns named median, SD ",
# "or mu, sd. Column names are ", str(list(control_estimates.columns)),
# ". Check the file: ", control_file]))
# return(control_estimates)
#from statsmodels import robust
# If using mad below
def hubers_est(x):
"""Emit Huber's M-estimator median and SD estimates.
If Huber's fails, emit standard median and NA for sd"""
huber50 = sm.robust.scale.Huber(maxiter=50)
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
try:
mu, s = huber50(np.array(x))
except (ValueError, RuntimeWarning):
mu = np.median(x)
s = np.nan
#s = robust.mad(x)
#XXX working on this - replace s with mad when hubers est fails?
return pd.Series({'mu': mu, 'sd': np.sqrt(s)})
def z_score(x, df):
"""Calculate a z score for each x value, using estimates from a pandas data
frame with the columns 'mu' and 'sd' and index coressponding to the x values"""
z = (x.transpose() - df['mu'])/df['sd']
return z.transpose()
def p_adj_bh(x):
'''Adjust p values using Benjamini/Hochberg method'''
return multipletests(x, method='fdr_bh', returnsorted = False)[1]
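# A small, self-contained sketch (never called by the pipeline) of how z_score() and
# p_adj_bh() above combine, using hand-written 'mu'/'sd' estimates for two hypothetical
# loci; the values are purely illustrative.
def _example_z_and_padj():
    counts = pd.DataFrame({'s1': [7.1, 6.9], 's2': [7.2, 7.0], 's3': [9.5, 7.1]},
                          index=['locusA', 'locusB'])
    est = pd.DataFrame({'mu': [7.2, 7.0], 'sd': [0.1, 0.1]}, index=counts.index)
    z = z_score(counts, est)                               # per-sample z scores
    pvals = z.apply(lambda row: [norm.sf(v) for v in row],
                    axis=1, result_type='broadcast')       # one-sided p values
    return pvals.apply(p_adj_bh, axis=0)                   # BH-adjust per sample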
def main():
# Parse command line arguments
args = parse_args()
base_filename = args.out
STRcov_model_csv = args.model
####Bharati: remove control and emit option
#emit_file = args.emit
#control_file = args.control
chromosome = args.chrom
locuscov_files = args.locus_counts
STRcov_files = args.STR_counts
genomecov_files = args.median_cov
results_suffix = '.STRs.tsv'
    # add filtering by chromosome
chroms = ['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22','chrX','chrY',
'1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','X','Y']
#check if entered valid chr
    if chromosome != '' and chromosome not in chroms:
        sys.exit('ERROR: entered chromosome {0} is not a valid chromosome.\n'.format(chromosome))
if chromosome != '' :
base_filename = base_filename + '.' + chromosome
sys.stderr.write('Processing chromosome {0} \n'.format(chromosome))
else:
sys.stderr.write('Processing genome-wide data\n')
#Check files exist for all samples
locuscov_ids = set([get_sample(f) for f in locuscov_files])
STRcov_ids = set([get_sample(f) for f in STRcov_files])
genomecov_ids = set([get_sample(f) for f in genomecov_files])
if not (locuscov_ids == STRcov_ids == genomecov_ids):
all_samples = locuscov_ids | STRcov_ids | genomecov_ids
missing_samples = (all_samples - locuscov_ids) | (all_samples - STRcov_ids) | (all_samples - genomecov_ids)
sys.exit("ERROR: One or more files are missing for sample(s): " + ' '.join(missing_samples))
sys.stderr.write('Processing {0} samples\n'.format(len(locuscov_files)))
    if len(locuscov_files) < 2:
        sys.stderr.write('WARNING: Only 1 sample provided, so outlier scores and p-values will not be generated.')
# Parse input data
print("Parsing locus coverage....." + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
locuscov_data = pd.concat( (parse_locuscov(f,chromosome) for f in locuscov_files), ignore_index = True)
###Bharati: remove STRs 1bp
# sys.stderr.write('Locus: Before removing 1bp STRs {0} \n'.format(len(locuscov_data)))
# locuscov_data=locuscov_data[locuscov_data['repeatunit'].str.len() > 1]
# locuscov_data = locuscov_data.reset_index(drop=True)
# sys.stderr.write('Locus: After removing 1bp STRs {0} \n'.format(len(locuscov_data)))
# if chr != '' :
# chr = chr + '-'
# locuscov_data = locuscov_data[locuscov_data['locus'].str.startswith(chr)]
# if locuscov_data.shape[0] == 0: # Check for file with only header
# sys.exit('ERROR: locus count contained 0 loci.\n')
# locuscov_data = locuscov_data.reset_index(drop=True)
# sys.stderr.write('Locus: Aftering keeping this chr STRs only {0} \n'.format(len(locuscov_data)))
####
print("Parsing STR coverage....." + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
STRcov_data = pd.concat( (parse_STRcov(f,chromosome) for f in STRcov_files), ignore_index = True)
###Bharati: remove 1bp STR cov
# STRcov_data = STRcov_data[STRcov_data['repeatunit'].str.len() > 1]
# sys.stderr.write('Locus: After removing 1bp STRs {0} \n'.format(len(STRcov_data)))
# STRcov_data = STRcov_data.reset_index(drop=True)
###
print("Parsing genome coverage....." + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
genomecov_data = pd.concat( (parse_genomecov(f) for f in genomecov_files), ignore_index = True)
# Check for multiple rows with the same sample/locus combination
crosstable = pd.crosstab(locuscov_data['locus'], locuscov_data['sample'])
ismultiloci = crosstable.apply(lambda row: any(row > 1), axis=1)
multiloci = ismultiloci[ismultiloci == True].index.values
if len(multiloci) > 0:
sys.exit('''
The locus count input data contains multiple rows with the same sample/locus combination.
This is usually caused by two loci at the same position in the STR annotation bed file.
Check these loci:
''' + ' '.join(multiloci))
del crosstable
del ismultiloci
del multiloci
gc.collect()
# # Check for different reflen for the same locus
# grouped = locuscov_data.groupby('locus')
# reflenloci = []
# for locus, group in grouped:
# if len(set(group['reflen'])) > 1:
# #reflenloci.append(name)
# # If different, replace with the smallest
# locuscov_data.loc[locuscov_data['locus'] == locus,'reflen'] = np.repeat(min(group['reflen']), len(group['reflen']))
# if len(reflenloci) > 0:
# sys.exit('''
# The locus count input data contains the same locus with different reflens.
# This may be caused by an error in the STR annotation bed file.
# Check these loci:
# ''' + ' '.join(reflenloci)) + '''
# The locus count input data contains the same locus with different reflens.
# This may be caused by an error in the STR annotation bed file.
# Check the above loci'''
#locuscov_data['reflen'] = np.repeat(1, len(locuscov_data['reflen']))
# Fill zeros in locuscov
    ### Changed by Bharati (2 lines): adding repeat unit and reflen to the pivot table can eliminate the need for one merge operation after converting back to long format
print("Converting locus data into locuscov wide format....." + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
locuscov_wide = locuscov_data.pivot(index='locus', columns='sample', values='locuscoverage').fillna(0)
locuscov_wide=locuscov_wide.reset_index()
#adding STR details to data
locuscov_wide = pd.merge(locuscov_wide, locuscov_data[['locus', 'repeatunit', 'reflen']].drop_duplicates(), how='left').copy()
print("Merging STR and genome coverage ....." + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
# Normalise STR coverage by median coverage
factor = 100
STRcov_data = pd.merge(STRcov_data, genomecov_data)
print("Normaizing locus counts on log2 scale ....." + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
STRcov_data['decoycov_log'] = np.log2(factor * (STRcov_data['decoycov'] + 1) / STRcov_data['genomecov'])
# Calculate values for if there were zero reads at a locus in all samples
null_locus_counts = np.log2(factor * (0 + 1) / genomecov_data['genomecov'])
sample_names = genomecov_data['sample']
null_locus_counts.index = sample_names
# Add a null locus that has 0 reads for all individuals
# (so just uses coverage)
null_locus_counts_est = hubers_est(null_locus_counts)
# Predict size (in bp) using the ATXN8 linear model (produced from data in decoySTR_cov_sim_ATXN8_AGC.R)
# Read in the raw data for this model from a file
# Note: coverage_norm = (STR coverage/median coverage) * 100
# allele2 is the length of the longer allele in bp inserted relative to ref
STRcov_model = pd.read_csv(STRcov_model_csv)
# Model is built from log2 data then converted back (to reduce heteroscedasticity)
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model
# Reshape using X.reshape(-1, 1) if data has a single feature
# or X.reshape(1, -1) if it contains a single sample.
X_train = np.log2(STRcov_model['coverage_norm']).values.reshape(-1, 1)
Y_train = np.log2(STRcov_model['allele2'])
regr.fit(X_train, Y_train)
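    # Hedged note (an assumption about downstream use, which is outside this excerpt):
    # since the model is fit on log2-transformed data, a predicted allele length would be
    # converted back to base pairs with something like
    #   2 ** regr.predict(np.log2(coverage_norm).reshape(-1, 1))
    # where coverage_norm = (locus coverage / median coverage) * 100, as noted above.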
sample_cols = list(set(locuscov_data['sample']))
### Bharati: Divide data into chunk for processing
print("Started Processing data in chucks (1000K STRs) ....." + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
print("Size of locuscov wide format is " + str(len(locuscov_wide)))
chunk_size = 1000
print("No. of chunks to process data: " + str(len(locuscov_wide)/chunk_size))
ct=0
for i in range(0,len(locuscov_wide),chunk_size):
ct = ct + 1
part='part' + str(ct)
sys.stderr.write('Processing chunk {0} \n'.format(ct))
chunk_filename = base_filename + '.' + part + results_suffix + ".gz"
chunk_locuscov_long =locuscov_wide.iloc[i:i+chunk_size].melt(id_vars = ['locus', 'repeatunit', 'reflen'], value_vars = sample_cols, value_name = 'locuscoverage',var_name = 'sample')
chunk_locus_totals = pd.merge(chunk_locuscov_long, STRcov_data, how = 'left')
chunk_locus_totals['total_assigned_log'] = np.log2(factor * (chunk_locus_totals['locuscoverage'] + 1) / chunk_locus_totals['genomecov'])
chunk_locus_totals_wide = chunk_locus_totals.pivot(index='locus', columns='sample', values='total_assigned_log')
print("Doing Huber estimate .....at " + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
# Use Huber's M-estimator to calculate median and SD across all samples for each locus
sample_estimates = chunk_locus_totals_wide.apply(hubers_est, axis=1)
# Where sd is NA, replace with the minimum non-zero sd from all loci
min_sd = np.min(sample_estimates['sd'][sample_estimates['sd'] > 0])
sample_estimates['sd'].fillna(min_sd, inplace=True)
#if null_locus_counts_est['sd'] == 0 or np.isnan(null_locus_counts_est['sd']):
if null_locus_counts_est['sd'] == 0:
null_locus_counts_est['sd'] = min_sd
# Calculate z-scores using median and SD estimates from the current set of samples
### Bharati: assuming we don't want to create a controls file for future use, or use an existing controls file
# if sd is 0, replace with min_sd #XXX is this sensible?
# Save median and SD of all loci to file if requested (for use as a
# control set for future data sets)
#if emit_file != '':
# sample_estimates.loc['null_locus_counts'] = null_locus_counts_est
# n = len(chr_total_assigned_wide.columns)
# sample_estimates['n'] = n
# sample_estimates.to_csv(emit_file, sep= '\t')
# # Calculate z scores using median and SD estimates per locus from a
# # provided control set
# if control_file != '':
# # Parse control file
# control_estimates = parse_controls(control_file)
# # Get a list of all loci in the control file but not the sample data
# control_loci_df = control_estimates.iloc[control_estimates.index != 'null_locus_counts']
# control_loci = [x for x in control_loci_df.index if x not in total_assigned_wide.index]
# # Extract and order just those control estimates appearing in the current data
# mu_sd_estimates = control_estimates.reindex(total_assigned_wide.index)
# # Fill NaNs with null_locus_counts values
# mu_sd_estimates.fillna(control_estimates.loc['null_locus_counts'],
# inplace=True)
# else:
### Bharati: end of commented block; assuming we want to calculate the mu and sd from all samples
# Extract and order estimates to match the current data
mu_sd_estimates = sample_estimates.reindex(chunk_locus_totals_wide.index)
print("Calculate a z scores using median and SD estimates from the current set .....at " + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
# calculate z scores
z = z_score(chunk_locus_totals_wide, mu_sd_estimates)
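# For reference: z_score is defined elsewhere in this script; given the
# comments above it presumably computes, per locus and sample,
# (total_assigned_log - mu) / sd using the Huber location/scale estimates,
# i.e. a standard z-score against a robust baseline.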
###Bharati : commented following code for emit and control file
# # If a control file is given, effectively add zeros counts at all loci in
# # controls but not in the samples.
# # These extra rows will disappear due to a later merge
#if control_file != '':
# # Create a total_assigned_wide as if all loci have zero counts
# null_total_assigned_wide = pd.DataFrame(columns = sample_names, index = control_loci)
# null_total_assigned_wide.fillna(null_locus_counts, inplace = True)
# # Calculate z-scores
# null_z = z_score(null_total_assigned_wide,
# control_estimates.reindex(null_total_assigned_wide.index))
# loci_with_counts = z.index
# z = z.append(null_z)
###check if only one STR, no need to adjust p-value
if z.shape[0] == 1:
ids = z.columns # save index order as data gets sorted
# Calculate p values based on z scores (one sided)
z_list = list(z.iloc[0])
pvals = norm.sf(z_list) # no need to adjust p values if one locus
# Merge pvals and z scores back into locus_totals
### Bharati: no multiple-testing correction here; keep raw p-values only
p_z_df = pd.DataFrame({'sample': ids, 'pvals': pvals, 'outlier': z_list})
chunk_locus_totals = pd.merge(chunk_locus_totals, p_z_df)
elif z.shape[0] > 1:
# Calculate p values based on z scores (one sided)
pvals = z.apply(lambda z_row: [norm.sf(x) for x in z_row], axis=1, result_type='broadcast') # apply to each row
### Bharati: multiple-testing correction will be applied across all STRs (not per chunk), so keep raw p-values here and skip per-chunk correction
#if pvals.isnull().values.all(): # Don't bother adjusting p values if all are null
# adj_pvals = pvals
#else:
# # Adjust p values using Benjamini/Hochberg method
# adj_pvals = pvals.apply(p_adj_bh, axis=0) # apply to each column
# Merge pvals and z scores back into locus_totals
pvals['locus'] = pvals.index
pvals_long = pd.melt(pvals, id_vars = 'locus',
value_vars = sample_cols, value_name = 'pvals', var_name = 'sample')
chunk_locus_totals = pd.merge(chunk_locus_totals, pvals_long)
from datetime import datetime, timedelta
import re
import pandas as pd
from yahooquery.base import _YahooFinance
from yahooquery.utils import (_convert_to_timestamp, _flatten_list,
_history_dataframe)
class Ticker(_YahooFinance):
"""
Base class for interacting with Yahoo Finance API
Arguments
----------
symbols: str or list
Symbol or list collection of symbols
Keyword Arguments
-----------------
asynchronous: bool, default False, optional
Defines whether the requests are made synchronously or asynchronously.
backoff_factor: float, default 0.3, optional
A factor, in seconds, to apply between attempts after a second try.
Done only when there is a failed request and error code is in the
status_forcelist
country: str, default 'united states', optional
This allows you to alter the following query parameters that are
sent with each request: lang, region, and corsDomain.
formatted: bool, default False, optional
Quantitative values are given as dictionaries with at least two
keys: 'raw' and 'fmt'. The 'raw' key expresses the value numerically
and the 'fmt' key expresses the value as a string. See Notes for more
detail
max_workers: int, default 8, optional
Defines the number of workers used to make asynchronous requests.
This only matters when asynchronous=True
proxies: dict, default None, optional
Allows for the session to use a proxy when making requests
retry: int, default 5, optional
Number of times to retry on a failed request
status_forcelist: list, default [404, 429, 500, 502, 503, 504], optional
A set of integer HTTP status codes that we should force a retry on
timeout: int, default 5, optional
Stop waiting for a response after a given number of seconds
user_agent: str, default random.choice, optional
A browser's user-agent string that is sent with the headers on each
request
validate: bool, default False, optional
Validate existence of symbols during instantiation
verify: bool or str, default True, optional
Used to verify SSL certificates for HTTPS requests. Can either be
a boolean, in which case it controls whether we verify the server's
TLS certificate, or a string in which case it must be a path to a CA
bundle to use.
Notes
-----
When formatted is set to True, all quote_summary modules will return as
dictionaries. There are two reasons for this:
1. Quantitative values are expressed as dictionaries. For example:
"totalPay": {
"raw": 115554666,
"fmt": "11.56M",
"longFmt": "11,555,466"
}
When formatted is set to False, the _format_data method will return
the value in the "raw" key.
2. Dates are either expressed as timestamps:
"governanceEpochDate": 1570147200
Or as dictionaries:
"exDividendDate": {
"raw": 1573084800,
"fmt": "2019-11-07"
}
When formatted is set to False, the _format_data method will return the
date expressed in the format YYYY-MM-DD by either converting from the
timestamp or retrieving the "fmt" key.
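Examples
--------
A minimal, illustrative usage sketch (requires network access; returned
values depend on the symbol and are not reproduced here)::
    from yahooquery import Ticker
    tickers = Ticker('aapl')
    tickers.summary_detail  # dict of summaryDetail data keyed by symbol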
"""
def __init__(self, symbols, **kwargs):
super(Ticker, self).__init__(**kwargs)
self.symbols = symbols
self.invalid_symbols = None
if kwargs.get('validate'):
self.validation
def _quote_summary(self, modules):
kwargs = {}
params = {'modules': ','.join(modules)}
if len(modules) == 1:
kwargs.update({'addl_key': modules[0]})
data = self._get_data(key='quoteSummary', params=params, **kwargs)
dates = _flatten_list(
[self._MODULES_DICT[module]['convert_dates']
for module in modules])
return data if self.formatted else self._format_data(data, dates)
def _quote_summary_dataframe(self, module, **kwargs):
data = self._quote_summary([module])
if not kwargs.get('data_filter'):
data_filter = self._MODULES_DICT[module]['filter']
kwargs.update({'data_filter': data_filter})
return self._to_dataframe(data, **kwargs)
def _to_dataframe(self, data, **kwargs):
if not self.formatted:
dataframes = []
try:
for symbol in self.symbols:
final_data = data[symbol][kwargs.get('data_filter')] if \
kwargs.get('data_filter') else data[symbol]
if kwargs.get('from_dict'):
df = pd.DataFrame(
[(k, v) for d in final_data for k, v in d.items()])
df.set_index(0, inplace=True)
df.columns = [symbol]
else:
df = pd.DataFrame(final_data)
dataframes.append(df)
if kwargs.get('from_dict', False):
df = pd.concat(dataframes, axis=1)
else:
df = pd.concat(
dataframes, keys=self.symbols, names=['symbol', 'row'],
sort=False)
return df
except TypeError:
return data
else:
return data
@property
def all_modules(self):
"""
Returns all quoteSummary modules, indexed by module title
for each symbol
Notes
-----
Only returns JSON
"""
return self._quote_summary(
self._CONFIG['quoteSummary']['query']['modules']['options'])
def get_modules(self, modules):
"""
Obtain specific quoteSummary modules for given symbol(s)
Parameters
----------
modules: list or str
Desired modules for retrieval
Notes
-----
Only returns JSON
Raises
------
ValueError
If invalid module is specified
"""
all_modules = \
self._CONFIG['quoteSummary']['query']['modules']['options']
if not isinstance(modules, list):
modules = re.findall(r"[a-zA-Z]+", modules)
if any(elem not in all_modules for elem in modules):
raise ValueError("""
One of {} is not a valid value. Valid values are {}.
""".format(
', '.join(modules),
', '.join(all_modules)
))
return self._quote_summary(modules)
@property
def asset_profile(self):
"""Asset Profile
Geographical and business summary data for given symbol(s).
Returns
-------
dict
assetProfile module data
"""
return self._quote_summary(['assetProfile'])
@property
def calendar_events(self):
"""Calendar Events
Earnings and Revenue expectations for upcoming earnings date for given
symbol(s)
Returns
-------
dict
calendarEvents module data
"""
return self._quote_summary(['calendarEvents'])
# @property
# def earnings_calendar(self):
# """Earnings
# Historical earnings data for given symbol(s)
# Returns
# -------
# pandas.DataFrame
# """
# if isinstance(self.session, FuturesSession):
# return "Asynchronous requests not implemented for this property."
# dataframes = []
# classes = "Va(m) H(20px) Bd(0) M(0) P(0) Fz(s) Pstart(10px) O(n):f Fw(500) C($gray)"
# url = "https://finance.yahoo.com/calendar/earnings"
# params = {'size': 100, 'symbol': ','.join(self.symbols)}
# for i in range(0, len(self.symbols) * 500, 100):
# params['offset'] = i
# r = self.session.get(url, params=params)
# text = r.text
# try:
# dataframes.append(pd.read_html(text, flavor='lxml')[0])
# except IndexError:
# pass
# if classes in text:
# break
# return pd.concat(dataframes, ignore_index=True)
@property
def earnings(self):
"""Earnings
Historical earnings data for given symbol(s)
Returns
-------
dict
earnings module data
"""
return self._quote_summary(['earnings'])
@property
def earnings_trend(self):
"""Earnings Trend
Historical trend data for earnings and revenue estimations for given
symbol(s)
Returns
-------
dict
earningsTrend module data
"""
return self._quote_summary(['earningsTrend'])
@property
def esg_scores(self):
"""ESG Scores
Data related to a given symbol(s) environmental, social, and
governance metrics
Returns
-------
dict
esgScores module data
"""
return self._quote_summary(['esgScores'])
@property
def financial_data(self):
"""Financial Data
Financial KPIs for given symbol(s)
Returns
-------
dict
financialData module data
"""
return self._quote_summary(['financialData'])
def news(self, count=25, start=None):
"""News articles related to given symbol(s)
Obtain news articles related to a given symbol(s). Data includes
the title of the article, summary, url, author_name, publisher
Parameters
----------
count: int
Desired number of news items to return
start: str or datetime
Date to begin retrieving news items. If date is a str, utilize
the following format: YYYY-MM-DD.
Notes
-----
It's recommended to use only one symbol with this method as the data
returned does not distinguish between what symbol the news stories
belong to
Returns
-------
dict
"""
if start:
start = _convert_to_timestamp(start)
return self._get_data(
'news', {'count': count, 'start': start}, **{'list_result': True})
@property
def index_trend(self):
"""Index Trend
Trend data related to given symbol(s) index, specifically PE and PEG
ratios
Returns
-------
dict
indexTrend module data
"""
return self._quote_summary(['indexTrend'])
@property
def industry_trend(self):
"""Industry Trend
Seems to be deprecated
Returns
-------
dict
industryTrend module data
"""
return self._quote_summary(['industryTrend'])
@property
def key_stats(self):
"""Key Statistics
KPIs for given symbol(s) (PE, enterprise value, EPS, EBITA, and more)
Returns
-------
dict
defaultKeyStatistics module data
"""
return self._quote_summary(['defaultKeyStatistics'])
@property
def major_holders(self):
"""Major Holders
Data showing breakdown of owners of given symbol(s), insiders,
institutions, etc.
Returns
-------
dict
majorHoldersBreakdown module data
"""
return self._quote_summary(['majorHoldersBreakdown'])
@property
def page_views(self):
"""Page Views
Short, Mid, and Long-term trend data regarding a symbol(s) page views
Returns
-------
dict
pageViews module data
"""
return self._quote_summary(['pageViews'])
@property
def price(self):
"""Price
Detailed pricing data for given symbol(s), exchange, quote type,
currency, market cap, pre / post market data, etc.
Returns
-------
dict
price module data
"""
return self._quote_summary(['price'])
@property
def quote_type(self):
"""Quote Type
Stock exchange specific data for given symbol(s)
Returns
-------
dict
quoteType module data
"""
return self._quote_summary(['quoteType'])
@property
def quotes(self):
"""Quotes
Retrieve quotes for multiple symbols with one call
Returns
-------
dict
"""
return self._get_data("quotes", **{'list_result': True})
@property
def recommendations(self):
"""Recommendations
Retrieve the top 5 symbols that are similar to a given symbol
Returns
-------
dict
"""
return self._get_data('recommendations')
@property
def share_purchase_activity(self):
"""Share Purchase Activity
High-level buy / sell data for given symbol(s) insiders
Returns
-------
dict
netSharePurchaseActivity module data
"""
return self._quote_summary(['netSharePurchaseActivity'])
@property
def summary_detail(self):
"""Summary Detail
Contains similar data to price endpoint
Returns
-------
dict
summaryDetail module data
"""
return self._quote_summary(['summaryDetail'])
@property
def summary_profile(self):
"""Summary Profile
Data related to given symbol(s) location and business summary
Returns
-------
dict
summaryProfile module data
"""
return self._quote_summary(['summaryProfile'])
@property
def technical_insights(self):
"""Technical Insights
Technical trading information as well as company metrics related
to innovativeness, sustainability, and hiring. Metrics can also
be compared against the company's sector
Returns
-------
dict
"""
return self._get_data('insights')
def _financials(
self,
financials_type,
frequency=None,
premium=False,
types=None,
trailing=True):
try:
time_dict = self.FUNDAMENTALS_TIME_ARGS[frequency[:1].lower()]
prefix = time_dict['prefix']
period_type = time_dict['period_type']
except KeyError as e:
raise(e)
except TypeError:
prefix = ''
period_type = ''
key = 'fundamentals_premium' if premium else 'fundamentals'
types = types or \
self._CONFIG[key]['query']['type']['options'][financials_type]
if trailing:
prefixed_types = ['{}{}'.format(prefix, t) for t in types] + \
['trailing{}'.format(t) for t in types]
else:
prefixed_types = ['{}{}'.format(prefix, t) for t in types]
data = self._get_data(key, {'type': ','.join(prefixed_types)}, **{
'list_result': True})
dataframes = []
try:
for k in data.keys():
if isinstance(data[k], str) or data[k][0].get('description'):
return data
dataframes.extend([
self._financials_dataframes(data[k][i], period_type)
for i in range(len(data[k]))])
except AttributeError:
return data
try:
df = pd.concat(dataframes, sort=False)
if prefix:
ls = [prefix, 'trailing'] if trailing else [prefix]
for p in ls:
df['dataType'] = df['dataType'].apply(
lambda x: str(x).lstrip(p))
df['asOfDate'] = pd.to_datetime(df['asOfDate'], format='%Y-%m-%d')
df = df.pivot_table(
index=['symbol', 'asOfDate', 'periodType'], columns='dataType',
values='reportedValue')
return pd.DataFrame(df.to_records()).set_index('symbol')
else:
df['sourceDate'] = pd.to_datetime(
df['sourceDate'],
format='%Y-%m-%d')
df.rename(columns={'sourceDate': 'date'}, inplace=True)
df.set_index(['symbol', 'date'], inplace=True)
return df
except ValueError:
return '{} data unavailable for {}'.format(
financials_type.replace('_', ' ').title(),
', '.join(self._symbols))
def _financials_dataframes(self, data, period_type):
data_type = data['meta']['type'][0]
symbol = data['meta']['symbol'][0]
try:
df = pd.DataFrame.from_records(data[data_type])
if period_type:
df['reportedValue'] = \
df['reportedValue'].apply(lambda x: x.get('raw'))
df['dataType'] = data_type
df['symbol'] = symbol
else:
df['symbol'] = symbol
df['parentTopics'] = df['parentTopics'].apply(
lambda x: x[0].get('topicLabel'))
return df
except KeyError:
# No data is available for that type
pass
def all_financial_data(self, frequency='a'):
"""
Retrieve all financial data, including income statement,
balance sheet, cash flow, and valuation measures.
Notes
-----
The trailing twelve month (TTM) data is not available through this
method
Parameters
----------
frequency: str, default 'a', optional
Specify either annual or quarterly. Value should be 'a' or 'q'.
"""
types = _flatten_list([
self.FUNDAMENTALS_OPTIONS[option]
for option in self.FUNDAMENTALS_OPTIONS
])
return self._financials(
"cash_flow", frequency, types=types, trailing=False)
def get_financial_data(self, types, frequency='a', trailing=True):
"""
Obtain specific data from either cash flow, income statement,
balance sheet, or valuation measures.
Notes
-----
See available options to pass to method through FUNDAMENTALS_OPTIONS
Parameters
----------
types: list or str
Desired types of data for retrieval
frequency: str, default 'a', optional
Specify either annual or quarterly. Value should be 'a' or 'q'.
trailing: bool, default True, optional
Specify whether or not you'd like trailing twelve month (TTM)
data returned
Raises
------
ValueError
If invalid type is specified
"""
if not isinstance(types, list):
types = re.findall(r"[a-zA-Z]+", types)
return self._financials(
"cash_flow", frequency, types=types, trailing=trailing)
@property
def corporate_events(self):
return self._financials(
'cash_flow',
frequency=None,
types=self.CORPORATE_EVENTS,
trailing=False
)
@property
def corporate_guidance(self):
"""
"""
return self._financials(
'cash_flow',
frequency=None,
types=['sigdev_corporate_guidance'],
trailing=False)
@property
def valuation_measures(self):
"""Valuation Measures
Retrieves valuation measures for most recent four quarters as well
as the most recent date
Notes
-----
Only quarterly data is available for non-premium subscribers
"""
return self._financials('valuation', 'q')
def balance_sheet(self, frequency='a', trailing=True):
"""Balance Sheet
Retrieves balance sheet data for most recent four quarters or most
recent four years as well as trailing 12 months.
Parameters
----------
frequency: str, default 'a', optional
Specify either annual or quarterly balance sheet. Value should
be 'a' or 'q'.
trailing: bool, default True, optional
Specify whether or not you'd like trailing twelve month (TTM)
data returned
Returns
-------
pandas.DataFrame
"""
return self._financials(
'balance_sheet', frequency, trailing=trailing)
def cash_flow(self, frequency='a', trailing=True):
"""Cash Flow
Retrieves cash flow data for most recent four quarters or most
recent four years as well as the trailing 12 months
Parameters
----------
frequency: str, default 'a', optional
Specify either annual or quarterly cash flow statement. Value
should be 'a' or 'q'.
trailing: bool, default True, optional
Specify whether or not you'd like trailing twelve month (TTM)
data returned
Returns
-------
pandas.DataFrame
"""
return self._financials('cash_flow', frequency, trailing=trailing)
@property
def company_officers(self):
"""Company Officers
Retrieves top executives for given symbol(s) and their total pay
package. Uses the assetProfile module to retrieve data
Returns
-------
pandas.DataFrame
assetProfile module data
"""
data = self._quote_summary(["assetProfile"])
return self._to_dataframe(data, data_filter="companyOfficers")
@property
def earning_history(self):
"""Earning History
Data related to historical earnings (actual vs. estimate) for given
symbol(s)
Returns
-------
pandas.DataFrame
earningsHistory module data
"""
return self._quote_summary_dataframe('earningsHistory')
@property
def fund_ownership(self):
"""Fund Ownership
Data related to top 10 owners of a given symbol(s)
Returns
-------
pandas.DataFrame
fundOwnership module data
"""
return self._quote_summary_dataframe('fundOwnership')
@property
def grading_history(self):
"""Grading History
Data related to upgrades / downgrades by companies for a given
symbol(s)
Returns
-------
pandas.DataFrame
upgradeDowngradeHistory module data
"""
return self._quote_summary_dataframe('upgradeDowngradeHistory')
def income_statement(self, frequency='a', trailing=True):
"""Income Statement
Retrieves income statement data for most recent four quarters or most
recent four years as well as trailing 12 months.
Parameters
----------
frequency: str, default 'a', optional
Specify either annual or quarterly income statement. Value should
be 'a' or 'q'.
trailing: bool, default True, optional
Specify whether or not you'd like trailing twelve month (TTM)
data returned
Returns
-------
pandas.DataFrame
"""
return self._financials(
'income_statement', frequency, trailing=trailing)
@property
def insider_holders(self):
"""Insider Holders
Data related to stock holdings of a given symbol(s) insiders
Returns
-------
pandas.DataFrame
insiderHolders module data
"""
return self._quote_summary_dataframe('insiderHolders')
@property
def insider_transactions(self):
"""Insider Transactions
Data related to transactions by insiders for a given symbol(s)
Returns
-------
pandas.DataFrame
insiderTransactions module data
"""
return self._quote_summary_dataframe('insiderTransactions')
@property
def institution_ownership(self):
"""Institution Ownership
Top 10 owners of a given symbol(s)
Returns
-------
pandas.DataFrame
institutionOwnership module data
"""
return self._quote_summary_dataframe('institutionOwnership')
@property
def recommendation_trend(self):
"""Recommendation Trend
Data related to historical recommendations (buy, hold, sell) for a
given symbol(s)
Returns
-------
pandas.DataFrame
recommendationTrend module data
"""
return self._quote_summary_dataframe('recommendationTrend')
@property
def sec_filings(self):
"""SEC Filings
Historical SEC filings for a given symbol(s)
Returns
-------
pandas.DataFrame
secFilings endpoint data
"""
return self._quote_summary_dataframe('secFilings')
# FUND SPECIFIC
def _fund_holdings(self, holding_type):
data = self.fund_holding_info
for symbol in self.symbols:
try:
data[symbol] = data[symbol][holding_type]
except TypeError:
pass
return data
@property
def fund_bond_holdings(self):
"""Fund Bond Holdings
Retrieves aggregated maturity and duration information for a given
symbol(s)
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
dict
topHoldings module data subset
"""
return self._fund_holdings("bondHoldings")
@property
def fund_category_holdings(self):
"""Fund Category Holdings
High-level holding breakdown (cash, bonds, equity, etc.) for a given
symbol(s)
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
pandas.DataFrame
topHoldings module data subset
"""
data_dict = self._quote_summary(["topHoldings"])
for symbol in self.symbols:
for key in self._FUND_DETAILS:
try:
del data_dict[symbol][key]
except TypeError:
return data_dict
return pd.DataFrame(
[pd.Series(data_dict[symbol]) for symbol in self.symbols],
index=self.symbols)
@property
def fund_equity_holdings(self):
"""Fund Equity Holdings
Retrieves aggregated priceTo____ data for a given symbol(s)
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
dict
topHoldings module data subset
"""
return self._fund_holdings("equityHoldings")
@property
def fund_performance(self):
"""Fund Performance
Historical return data for a given symbol(s) and symbol(s) specific
category
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
pandas.DataFrame
fundPerformance module data
"""
return self._quote_summary(["fundPerformance"])
@property
def fund_profile(self):
"""Fund Profile
Summary level information for a given symbol(s)
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
pandas.DataFrame
fundProfile endpoint data
"""
return self._quote_summary(["fundProfile"])
@property
def fund_holding_info(self):
"""Fund Holding Information
Contains information for a funds top holdings, bond ratings, bond
holdings, equity holdings, sector weightings, and category breakdown
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
dict
topHoldings module data
"""
return self._quote_summary(["topHoldings"])
@property
def fund_top_holdings(self):
"""Fund Top Holdings
Retrieves Top 10 holdings for a given symbol(s)
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
pandas.DataFrame
topHoldings module data subset
"""
return self._quote_summary_dataframe(
'topHoldings', data_filter='holdings')
@property
def fund_bond_ratings(self):
"""Fund Bond Ratings
Retrieves aggregated bond rating data for a given symbol(s)
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
pandas.DataFrame
topHoldings module data subset
"""
return self._quote_summary_dataframe(
'topHoldings', data_filter='bondRatings', from_dict=True)
@property
def fund_sector_weightings(self):
"""Fund Sector Weightings
Retrieves aggregated sector weightings for a given symbol(s)
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
pandas.DataFrame
topHoldings module data subset
"""
return self._quote_summary_dataframe(
'topHoldings', data_filter='sectorWeightings', from_dict=True)
# PREMIUM
def p_all_financial_data(self, frequency='a'):
"""
Retrieve all financial data, including income statement,
balance sheet, cash flow, and valuation measures.
Notes
-----
The trailing twelve month (TTM) data is not available through this
method
You must be subscribed to Yahoo Finance Premium and be logged in
for this method to return any data
Parameters
----------
frequency: str, default 'a', optional
Specify either annual or quarterly. Value should be 'a' or 'q'.
"""
types = _flatten_list([
self.FUNDAMENTALS_OPTIONS[option]
for option in self.FUNDAMENTALS_OPTIONS
])
return self._financials(
"cash_flow", frequency, premium=True, types=types, trailing=False)
def p_get_financial_data(self, types, frequency='a', trailing=True):
"""
Obtain specific data from either cash flow, income statement,
balance sheet, or valuation measures.
Notes
-----
See available options to pass to method through FUNDAMENTALS_OPTIONS
You must be subscribed to Yahoo Finance Premium and be logged in
for this method to return any data
Parameters
----------
types: list or str
Desired types of data for retrieval
frequency: str, default 'a', optional
Specify either annual or quarterly balance sheet. Value should
be 'a' or 'q'.
trailing: bool, default True, optional
Specify whether or not you'd like trailing twelve month (TTM)
data returned
"""
if not isinstance(types, list):
types = re.findall(r"[a-zA-Z]+", types)
return self._financials(
"cash_flow", frequency, True, types=types, trailing=trailing)
def p_balance_sheet(self, frequency='a', trailing=True):
"""Balance Sheet
Retrieves balance sheet data for most recent four quarters or most
recent four years as well as trailing 12 months.
Parameters
----------
frequency: str, default 'a', optional
Specify either annual or quarterly balance sheet. Value should
be 'a' or 'q'.
trailing: bool, default True, optional
Specify whether or not you'd like trailing twelve month (TTM)
data returned
Notes
-----
You must be subscribed to Yahoo Finance Premium and be logged in
for this method to return any data
Returns
-------
pandas.DataFrame
"""
return self._financials(
'balance_sheet', frequency, premium=True, trailing=trailing)
def p_cash_flow(self, frequency='a', trailing=True):
"""Cash Flow
Retrieves cash flow data for most recent four quarters or most
recent four years as well as the trailing 12 months
Parameters
----------
frequency: str, default 'a', optional
Specify either annual or quarterly cash flow statement. Value
should be 'a' or 'q'.
trailing: bool, default True, optional
Specify whether or not you'd like trailing twelve month (TTM)
data returned
Notes
-----
You must be subscribed to Yahoo Finance Premium and be logged in
for this method to return any data
Returns
-------
pandas.DataFrame
"""
return self._financials(
'cash_flow', frequency, premium=True, trailing=trailing)
@property
def p_corporate_events(self):
return self._financials(
'cash_flow',
frequency=None,
premium=True,
types=self.CORPORATE_EVENTS,
trailing=False
)
def p_income_statement(self, frequency='a', trailing=True):
"""Income Statement
Retrieves income statement data for most recent four quarters or most
recent four years as well as trailing 12 months.
Parameters
----------
frequency: str, default 'a', optional
Specify either annual or quarterly income statement. Value should
be 'a' or 'q'.
trailing: bool, default True, optional
Specify whether or not you'd like trailing twelve month (TTM)
data returned
Notes
-----
You must be subscribed to Yahoo Finance Premium and be logged in
for this method to return any data
Returns
-------
pandas.DataFrame
"""
return self._financials(
'income_statement', frequency, premium=True, trailing=trailing)
@property
def p_company_360(self):
return self._get_data('company360')
@property
def p_technical_insights(self):
return self._get_data('premium_insights')
@property
def p_portal(self):
return self._get_data('premium_portal')
def p_reports(self, report_id):
return self._get_data('reports', {'reportId': report_id})
def p_ideas(self, idea_id):
return self._get_data('trade_ideas', {'ideaId': idea_id})
@property
def p_technical_events(self):
return self._get_data('technical_events')
def p_valuation_measures(self, frequency='q'):
"""Valuation Measures
Retrieves valuation measures for all available dates for given
symbol(s)
"""
return self._financials('valuation', frequency, premium=True)
@property
def p_value_analyzer(self):
return self._get_data('value_analyzer')
@property
def p_value_analyzer_drilldown(self):
return self._get_data('value_analyzer_drilldown')
# HISTORICAL PRICE DATA
def history(
self,
period='ytd',
interval='1d',
start=None,
end=None,
adj_timezone=True,
adj_ohlc=False):
"""
Historical pricing data
Pulls historical pricing data for a given symbol(s)
Parameters
----------
period: str, default ytd, optional
Length of time
interval: str, default 1d, optional
Time between data points
start: str or datetime.datetime, default None, optional
Specify a starting point to pull data from. Can be expressed as a
string with the format YYYY-MM-DD or as a datetime object
end: str or datetime.datetime, default None, optional
Specify an ending point to pull data to. Can be expressed as a
string with the format YYYY-MM-DD or as a datetime object.
adj_timezone: bool, default True, optional
Specify whether or not to apply the GMT offset to the timestamp
received from the API. If True, the datetimeindex will be adjusted
to the specified ticker's timezone.
adj_ohlc: bool, default False, optional
Calculates an adjusted open, high, low and close prices according
to split and dividend information
Returns
-------
pandas.DataFrame
historical pricing data
"""
config = self._CONFIG['chart']
periods = config['query']['range']['options']
intervals = config['query']['interval']['options']
if start or period is None or period.lower() == 'max':
start = _convert_to_timestamp(start)
end = _convert_to_timestamp(end, start=False)
params = {'period1': start, 'period2': end}
else:
period = period.lower()
if period not in periods:
raise ValueError("Period values must be one of {}".format(
', '.join(periods)))
params = {'range': period}
if interval not in intervals:
raise ValueError("Interval values must be one of {}".format(
', '.join(intervals)))
params['interval'] = interval.lower()
if params['interval'] == '1m' and period == '1mo':
df = self._history_1m(adj_timezone, adj_ohlc)
else:
data = self._get_data('chart', params)
df = self._historical_data_to_dataframe(data, params, adj_timezone)
if adj_ohlc and 'adjclose' in df:
df = self._adjust_ohlc(df)
return df
def _history_1m(self, adj_timezone=True, adj_ohlc=False):
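# Yahoo only serves 1-minute bars in windows of roughly seven days (and only
# for the most recent month), so a month of 1m data is assembled from
# consecutive week-long requests and concatenated below. (These limits are
# as observed for the Yahoo endpoint and may change.)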
params = {'interval': '1m'}
today = datetime.today()
dates = [_convert_to_timestamp(today - timedelta(7*x)) for x in range(5)]
dataframes = []
for i in range(len(dates) - 1):
params['period1'] = dates[i + 1]
params['period2'] = dates[i]
data = self._get_data('chart', params)
dataframes.append(
self._historical_data_to_dataframe(data, params, adj_timezone))
df = pd.concat(dataframes, sort=True)
df.sort_values(by=['symbol', 'date'], inplace=True)
df.fillna(value=0, inplace=True)
return df
def _historical_data_to_dataframe(self, data, params, adj_timezone):
d = {}
for symbol in self._symbols:
if 'timestamp' in data[symbol]:
d[symbol] = _history_dataframe(data, symbol, params, adj_timezone)
else:
d[symbol] = data[symbol]
if all(isinstance(d[key], pd.DataFrame) for key in d):
df = pd.concat(d, names=['symbol', 'date'], sort=False)
if 'dividends' in df.columns:
df['dividends'].fillna(0, inplace=True)
if 'splits' in df.columns:
df['splits'].fillna(0, inplace=True)
return df
return d
def _adjust_ohlc(self, df):
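# The ratio close / adjclose captures the cumulative split and dividend
# adjustment for each bar; dividing open, high and low by it puts them on
# the same adjusted scale before adjclose is renamed to close.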
adjust = df['close'] / df['adjclose']
for col in ['open', 'high', 'low']:
df[col] = df[col] / adjust
del df['close']
df.rename(columns={'adjclose': 'close'}, inplace=True)
return df
@property
def option_chain(self):
data = self._get_data('options', {'getAllData': True})
dataframes = []
for symbol in self._symbols:
try:
if data[symbol]['options']:
dataframes.append(
self._option_dataframe(data[symbol]['options'], symbol)
)
except TypeError:
pass
if dataframes:
df = pd.concat(dataframes, sort=False)
df.set_index(
['symbol', 'expiration', 'optionType'], inplace=True)
df.rename_axis(
['symbol', 'expiration', 'optionType'], inplace=True)
df.fillna(0, inplace=True)
df.sort_index(
level=['symbol', 'expiration', 'optionType'], inplace=True)
return df
return 'No option chain data found'
def _option_dataframe(self, data, symbol):
dataframes = []
for optionType in ['calls', 'puts']:
df = pd.concat(
[
pd.DataFrame(data[i][optionType])
import numpy as np
import pandas as pd
import anndata as ad
import squidpy as sq
import eggplant as eg
from scipy.spatial.distance import cdist
import torch as t
import unittest
import gpytorch as gp
from . import utils as ut
class GetLandmarkDistance(unittest.TestCase):
def test_default_wo_ref(
self,
):
adata = ut.create_adata()
eg.pp.get_landmark_distance(adata)
def test_standard_ref(
self,
):
adata = ut.create_adata()
reference_input = ut.create_model_input()
ref = eg.m.Reference(
domain=reference_input["domain"],
landmarks=pd.DataFrame(reference_input["landmarks"]),
meta=reference_input["meta"],
)
eg.pp.get_landmark_distance(
adata,
reference=ref,
)
def test_np_ref(
self,
):
adata = ut.create_adata()
reference_input = ut.create_model_input()
eg.pp.get_landmark_distance(
adata,
reference=reference_input["landmarks"].numpy(),
)
class ReferenceToGrid(unittest.TestCase):
def test_default_bw_image(
self,
):
side_size = 500
ref_img, counts = ut.create_image(
color=False, side_size=side_size, return_counts=True
)
ref_crd, mta = eg.pp.reference_to_grid(
ref_img,
n_approx_points=int(side_size**2),
n_regions=1,
background_color="black",
)
def test_default_color_image(
self,
):
side_size = 32
ref_img, counts = ut.create_image(
color=True,
side_size=side_size,
return_counts=True,
)
ref_crd, mta = eg.pp.reference_to_grid(
ref_img,
n_approx_points=int(side_size**2),
n_regions=3,
background_color="black",
)
_, mta_counts = np.unique(mta, return_counts=True)
obs_prop = np.sort(mta_counts / sum(mta_counts))
true_prop = np.sort(counts / sum(counts))
for ii in range(3):
self.assertAlmostEqual(
obs_prop[ii],
true_prop[ii],
places=0,
)
class MatchScales(unittest.TestCase):
def test_default(
self,
):
adata = ut.create_adata()
reference_input = ut.create_model_input()
ref = eg.m.Reference(
domain=reference_input["domain"],
landmarks=reference_input["landmarks"],
meta=reference_input["meta"],
)
eg.pp.match_scales(adata, ref)
del adata.uns["spatial"]
eg.pp.match_scales(adata, ref)
def test_pd_lmk_obs(
self,
):
adata = ut.create_adata()
adata.uns["curated_landmarks"] = pd.DataFrame(adata.uns["curated_landmarks"])
reference_input = ut.create_model_input()
ref = eg.m.Reference(
domain=reference_input["domain"],
landmarks=pd.DataFrame(reference_input["landmarks"]),
meta=reference_input["meta"],
)
eg.pp.match_scales(adata, ref)
def test_not_implemented_lmk_obs(
self,
):
adata = ut.create_adata()
adata.uns["curated_landmarks"] = 0
reference_input = ut.create_model_input()
ref = eg.m.Reference(
domain=reference_input["domain"],
landmarks=pd.DataFrame(reference_input["landmarks"]),
meta=reference_input["meta"],
)
self.assertRaises(
NotImplementedError,
eg.pp.match_scales,
adata,
ref,
)
def test_no_landmarks(
self,
):
adata = ut.create_adata()
del adata.uns["curated_landmarks"]
reference_input = ut.create_model_input()
ref = eg.m.Reference(
domain=reference_input["domain"],
landmarks=pd.DataFrame(reference_input["landmarks"])
from datetime import (
datetime,
timedelta,
)
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
import pandas.util._test_decorators as td
from pandas import (
NA,
Categorical,
CategoricalDtype,
Index,
Interval,
NaT,
Series,
Timedelta,
Timestamp,
cut,
date_range,
)
import pandas._testing as tm
class TestAstypeAPI:
def test_arg_for_errors_in_astype(self):
# see GH#14878
ser = Series([1, 2, 3])
msg = (
r"Expected value of kwarg 'errors' to be one of \['raise', "
r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
ser.astype(np.float64, errors=False)
ser.astype(np.int8, errors="raise")
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# see GH#7271
ser = Series(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = ser.astype(dt1)
expected = Series(["0", "2", "4", "6", "8"], name="abc")
tm.assert_series_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = ser.astype(dt2)
expected = Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_series_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Series name can be used for the key in Series dtype "
r"mappings\."
)
with pytest.raises(KeyError, match=msg):
ser.astype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
ser.astype(dt4)
# GH#16717
# if dtypes provided is empty, it should error
if dtype_class is Series:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
ser.astype(dt5)
class TestAstype:
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_astype_empty_constructor_equality(self, dtype):
# see GH#15524
if dtype not in (
"S",
"V", # poor support (if any) currently
"M",
"m", # Generic timestamps raise a ValueError. Already tested.
):
init_empty = Series([], dtype=dtype)
with tm.assert_produces_warning(DeprecationWarning):
as_type_empty = Series([]).astype(dtype)
tm.assert_series_equal(init_empty, as_type_empty)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"series",
[
Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)
import sys
import requests
from bs4 import BeautifulSoup
import multiprocessing
from multiprocessing.dummy import Pool as ThreadPool
import pathlib
from pathlib import Path
import csv
import collections
import re
import pandas as pd
from tqdm import tqdm
from datetime import datetime
import nltk
import numpy as np
import hashlib
import pickle
import heapq
#style of the dataframe output
pd.set_option('display.max_colwidth', None)
"""
============================================================================================================
html pages download functions
============================================================================================================
"""
def save_html_animePage(url, directoryNum, index):
# For each page this function takes as input it saves the html of the anime in a folder.
# Get current page
req = requests.get(url)
# MyAnimeList might stop the connection due to the numbers of request
if(req.status_code != 200) :
raise Exception(f"Web site have closed the connection.\nRestart the process from page: {(index//50)}")
# get the path where to place the file
save_path = f"{pathlib.Path().resolve()}/animeList_pages/{directoryNum}th_page"
Path(save_path).mkdir(parents=True, exist_ok=True)
# Write the file in the directory.
if(sys.platform != "win32"):
with open(f"{save_path}/article_{index}.html", 'w') as file:
file.write(req.text)
else:
with open(f"{save_path}\article_{index}.html", 'w') as file:
file.write(req.text)
def save_html_AnimePage_In_ListAnimePage(urls, folderNumber, CPUs = multiprocessing.cpu_count()):
# This function saves the html of the anime of all the pages in folders, the anime present in a page are saved in a folder.
# So I will have a folder for each page.
# I divide the process into several sub-processes to not overload the computer.
pool = ThreadPool(CPUs)
# For each page I read I call the "save_html_animePage" function (previously defined) which saves the html of the anime in a folder.
pool.map(lambda url : save_html_animePage(url, folderNumber, (50*(folderNumber-1)) + urls.index(url) +1), urls)
def get_listAnimePage(index, listPages):
# With this function I download all the pages from "MyAnimeList".
# Each page will contain 50 anime, that is 50 html which I will then have to save.
listPages[index] = requests.get(f"https://myanimelist.net/topanime.php{'?limit={}'.format(50*index)}")
if(listPages[index].status_code != 200) :
raise Exception(f"Web site have closed the connection at page: {index}")
def get_urls_In_ListAnimePage(page, pages):
# In every page that I have downloaded I look for all the links of the anime present in that page.
# I take only those with the tag "href" which are exactly the links of the anime.
soup = BeautifulSoup(page.content, "html.parser")
# Find all links of the animes
Urls = soup.find_all("a", class_="hoverinfo_trigger fl-l ml12 mr8", id=lambda string: string and string.startswith('#area'), href=True)
#get just the href
animeLinks = []
for link in Urls:
link_anime = str(link.get("href"))
animeLinks.append(link_anime)
pages[pages.index(page)] = animeLinks
def initGet(pageToGet = 383 ,CPUs = multiprocessing.cpu_count()):
# This is the main function that starts the whole process. It takes as input the number of pages to download.
pages = [None] * pageToGet
numberOfPage = range(0, pageToGet)
# I divide the process into several sub-processes to not overload the computer.
pool = ThreadPool(CPUs)
# For each page it reads, it creates the list of the 50 anime present in the page and saves their urls.
# I do this using the two functions I defined previously.
pool.map(lambda num : get_listAnimePage(num, pages), numberOfPage)
pool.map(lambda page : get_urls_In_ListAnimePage(page, pages), pages)
with open("./generic_datafile/urls_anime.txt", "w") as file:
for page in tqdm(pages):
for url in page:
file.write(str(url))
file.write("\n")
return pages
def getAnime(pages, start=0):
# For each page I downloaded, I save the html of the 50 anime.
# I do this using the previously defined "save_html_AnimePage_In_ListAnimePage" function.
pages_from_start_to_end = pages[start:]
for i in tqdm(range(0, len(pages_from_start_to_end))) :
save_html_AnimePage_In_ListAnimePage(pages_from_start_to_end[i], start+i+1)
"""
============================================================================================================
parsing functions
============================================================================================================
"""
def findUsers(string):
# With this function I find the "users".
# The only place I can find "users" is in the "Score" block, where "users" is the third part of this block. So I just consider that part.
#I notice that the number is written twice, once with commas and once without commas, so I have to fix it to get the correct data.
string = string.split()[3].split(",")
if(len(string) < 2):
string[0] = string[0][:len(string[0])//2]
if(len(string) == 2):
temp = string[0].replace(string[1], "")
temp = temp[:len(temp)//2]
string.insert(0,temp)
string.pop(1)
if(len(string) == 3):
temp = string[0].replace(string[1]+string[2], "")
temp = temp[:len(temp)//2]
string.insert(0,temp)
string.pop(1)
users_string = "".join(string)
users_integer = int(users_string)
return users_integer
def str_to_datetime(d): #Convert a string into a datetime type
"""Input: string,
Output: list"""
# With this function we can convert a string into a datetime type, but we need to check all possible combinations.
if d=="Not available":
return None
else:
d = re.sub(r',', '', d) #first of all, remove the comma from the string
d = re.sub(r' ','',d) #remove also the space
if "to" in d:
date_time_list = d.split("to") #split the date of start and the date of end
[start,end] = date_time_list[:]
if len(start)==4: #if is only year
start_datetime = datetime.strptime(start, "%Y").date()
elif len(start)==7: #if is year and month
start_datetime = datetime.strptime(start, "%b%Y").date()
else:
start_datetime = datetime.strptime(start, "%b%d%Y").date()
if "?" in end:
end_datetime = None
return [start_datetime,end_datetime]
else:
if len(end)==4: #if is only year
end_datetime = datetime.strptime(end, "%Y").date()
elif len(end)==7: #if is year and month
end_datetime = datetime.strptime(end, "%b%Y").date()
else:
end_datetime = datetime.strptime(end, "%b%d%Y").date()
return [start_datetime,end_datetime]
else: #there is only the date of starting
if len(d)==4: #if is only year
start_datetime = datetime.strptime(d, "%Y").date()
elif len(d)==7: #if is year and month
start_datetime = datetime.strptime(d, "%b%Y").date()
else:
start_datetime = datetime.strptime(d, "%b%d%Y").date()
return [start_datetime, start_datetime]
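# Illustrative examples of the parsing above (hypothetical inputs):
#   str_to_datetime("Apr 3, 1998 to Apr 24, 1999") -> [date(1998, 4, 3), date(1999, 4, 24)]
#   str_to_datetime("2004") -> [date(2004, 1, 1), date(2004, 1, 1)]
#   str_to_datetime("Not available") -> None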
def tdForCharacters_Voices(tag):
return tag.name == "td" and not tag.has_attr("width") and tag.has_attr("valign")
def getDataFromPage(pagePath):
# This function takes an html as input and saves all the useful information present in the page.
# I open the file that I pass as input for reading
with open(pagePath, "r") as file:
soup = BeautifulSoup(file, "html.parser")
# I take all the "div" tag with class "spaceit_add" because this class contains all the information that interests me.
temp = soup.find_all("div", {"class": "spaceit_pad"})
out=[] # I create a temporary list
tempDict = collections.OrderedDict() # I create a temporary dictionary
finalDict = collections.OrderedDict() # I create the final dictionary in which I will save all the information
# With this for loop I scroll through all the "div" tags that I have taken
# and I save in the "out" list all the texts present in these tags
for i in temp:
out.append(i.text)
# In this "for" loop I go through all the elements in the out list and clean up those elements a bit.
for i in range(0, len(out)):
out[i] = out[i].strip() # I remove the whitespace at both ends
out[i] = out[i].strip("\n") # I remove the "\n" at both ends
out[i] = out[i].replace("\n", " ") # I replace each "\n" within the text with a space
# Now I create the temporary dictionary with the information I have collected.
# Then I start a "for" loop on all the elements of the "out" list.
# For each element of the list I look for the index of ":".
# In this way I can create the key of my dictionary and its value.
for i in out:
index = i.find(":")
tempDict[i[:index]] = i[index+1:].strip()
# Now we have to clean up the data we collected and then create the final dictionary.
# For each element of the temporary dictionary I have to check if it exists or not,
# or if it has particular values, for example "N/A", "Not available", ...
# If it exists I add it to the final dictionary,
# if it does not exist I add "None" to indicate that the value is undefined.
if(tempDict["Type"] != "N/A"):
finalDict["Type"] = tempDict["Type"]#Anime Type
else:
finalDict["Type"] = ""
if(tempDict["Episodes"] != "N/A" and tempDict['Episodes'] != 'Unknown'):
finalDict["Episodes"] = int(tempDict["Episodes"]) #number of episodes
else:
finalDict["Episodes"] = None
# In this case we have to use the "str_to_datetime" function we defined previously
# to transform all the strings indicating dates into a datetime.
# We separate the starting date from the ending date.
if(tempDict["Aired"] != "N/A" and tempDict["Aired"] != "Not available"):
aired = str_to_datetime(tempDict["Aired"])
finalDict["releasedDate"] = aired[0]
finalDict["endDate"] = aired[1]
else:
finalDict["Aired"] = None
if(tempDict["Members"].replace(",", "") != "N/A"):
finalDict["Members"] = int(tempDict["Members"].replace(",", "")) #members
else:
finalDict["Members"] = None
# I find the users in the "Score" block.
# I do this using the "findUsers" function that I defined earlier.
if(tempDict["Score"].split()[3] != "-"):
finalDict["Users"] = findUsers(tempDict["Score"])# Users
else:
finalDict["Users"] = None
if(tempDict["Score"].split()[0][:-1] != "N/A"):
finalDict["Score"] = float(tempDict["Score"].split()[0])#Score
else:
finalDict["Score"] = None
if(tempDict["Ranked"].split()[0].strip("#")[:-1] != "N/A"):
finalDict["Ranked"] = int(tempDict["Ranked"].split()[0].strip("#")[:-1]) #Rank
else:
finalDict["Ranked"] = None
if(tempDict["Popularity"] != "N/A"):
finalDict["Popularity"] = int(tempDict["Popularity"].strip("#")) #Popularity
else:
finalDict["Popularity"] = None
#Characters
#Voices
#Staff
#ADDING THE NAME
temp = soup.find("strong")
finalDict["Name"] = temp.text
#ADDING THE SYNOPSIS
temp = soup.find("p", itemprop="description")
if(temp.text != "No synopsis information has been added to this title. Help improve our database by adding a synopsis here."):
finalDict["Synopsis"] = temp.text.replace("\n", " ")
else:
finalDict["Synopsis"] = None
#ADDING THE RELATED ANIME
# This variable is not always defined so I have to do a test.
# If it exists, I take the link, I use "set" because I don't want repetitions and then add it to the dictionary.
# If it doesn't exist I add "None" to the dictionary.
try:
temp = soup.find("table", class_="anime_detail_related_anime", style="border-spacing:0px;")
temp = temp.find_all("a")
yt = set()
for t in temp:
yt.add(t.text)
finalDict["Related_Anime"] = list(yt)
except:
finalDict["Related_Anime"] = None
#FIND CHARACTERS, VOICES, STAFF AND ROLE
characters = []
voices = []
staff = []
role = []
# I can save all these elements together because they are all in the same table.
# I consider first the left column, then the right column.
# I check if they exist or not and then add them to the dictionary.
try:
temp = soup.find_all("div", class_="left-column fl-l divider")
temp0 = temp[0].find_all("table", width="100%")
for t in temp0:
t = t.find_all(tdForCharacters_Voices)
try:
characters.append(t[0].find("a").string)
except:
characters.append("")
try:
voices.append(t[1].find("a").string)
except:
voices.append("")
temp1 = temp[1].find_all("table", width="100%")
for t in temp1:
t = t.find(tdForCharacters_Voices)
try:
staff.append(t.find("a").string)
except:
staff.append("")
try:
role.append(t.find("small").string)
except:
role.append("")
except:
pass
try:
temp = soup.find_all("div", class_="left-right fl-r")
temp0 = temp[0].find_all("table", width="100%")
for t in temp0:
t = t.find_all(tdForCharacters_Voices)
try:
characters.append(t[0].find("a").string)
except:
characters.append("")
try:
voices.append(t[1].find("a").string)
except:
voices.append("")
temp1 = temp[1].find_all("table", width="100%")
for t in temp1:
t = t.find(tdForCharacters_Voices)
try:
staff.append(t.find("a").string)
except:
staff.append("")
try:
role.append(t.find("small").string)
except:
role.append("")
except:
characters = None
voices = None
staff = None
role = None
finalDict["Characters"] = characters
finalDict["Voices"] = voices
finalDict["Staffs"] = staff
finalDict["Roles"] = role
# In the end I get the final dictionary for a single anime.
return finalDict
"""
============================================================================================================
Path functions
============================================================================================================
"""
# These are two functions that I need to create a list of strings indicating the path of each file.
def animeFile_path():
# This function saves the path where each anime is saved.
animePath = []
for animeDir in range(1,384):
for animePage in range(1,51):
try:
with open(f'./animeList_pages/{animeDir}th_page/article_{animePage + ((animeDir-1)*50)}.html', 'r') as file:
pass
animePath.append(f'./animeList_pages/{animeDir}th_page/article_{animePage + ((animeDir-1)*50)}.html')
except:
pass
return animePath
def anime_tsv_path(control_parameter = 10):
# This function saves the path where each tsv file related to anime is saved.
anime_tsv_path = []
check = 0
index = 1
while(check < control_parameter):
try:
with open(f'./anime_tsv/anime_{index}.tsv', 'r') as file:
pass
anime_tsv_path.append(f'./anime_tsv/anime_{index}.tsv')
except:
check += 1
index += 1
return anime_tsv_path
"""
============================================================================================================
TSV functions
============================================================================================================
"""
def write_anime_tsv(pagePath):
# I call the "getDataFromPage" function created previously which collects all the data of an anime in a single dictionary.
data = getDataFromPage(pagePath)
#Use regex to find the anime number
pattern = re.compile("_[0-9].*")
index = pattern.findall(pagePath)[0].strip('_html.')
# I create the path in whiche I want to save these tsv files and I save them.
save_path = f"{pathlib.Path().resolve()}/anime_tsv"
Path(save_path).mkdir(parents=True, exist_ok=True)
if(sys.platform != "win32"):
with open(f"{save_path}/anime_{index}.tsv", "w") as file:
tsv_writer = csv.writer(file, delimiter='\t')
tsv_writer.writerow(data.keys())
tsv_writer.writerow(data.values())
else:
with open(f"{save_path}\anime_{index}.tsv", "w") as file:
tsv_writer = csv.writer(file, delimiter='\t')
tsv_writer.writerow(data.keys())
tsv_writer.writerow(data.values())
def write_all_anime_tsv(CPUs = multiprocessing.cpu_count()):
# Now I write all the tsv files concerning the anime.
# I divide the process into several sub-processes to not overload the computer.
pool = ThreadPool(CPUs)
# I get all the folders where the anime are saved using the "animeFile_path" function that I created earlier.
anime = animeFile_path()
# For each anime in the anime list I save the tsv file.
pool.map(lambda anime: write_anime_tsv(anime), anime)
"""
============================================================================================================
preprocessing functions
============================================================================================================
"""
def preprocessing(string):
processed_string = []
# I initialize the stemmer.
stemmer = nltk.SnowballStemmer("english", ignore_stopwords=False)
# First I tokenize the string I take as input, that is, I create a list of words and symbols.
words = nltk.word_tokenize(string)
words = list(map(lambda word: word.replace("°", ""), words))
# Then I use a "for" loop to eliminate all symbols and punctuation to get a list of only words and numbers.
new_words= [word for word in words if word.isalnum()]
# Now I stemming on the new list of words I created.
for word in new_words:
processed_string.append(stemmer.stem(word))
# I don't want repetitions, so I use "set"
processed_string = set(processed_string)
# At the end I get all the words that will go into the dictionary.
return processed_string
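# Illustrative example (hypothetical input; set order is arbitrary):
#   preprocessing("Two cats are running!") -> {"two", "cat", "are", "run"}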
def preprocessing_with_occurences(string):
# This function does the same thing as the previous function,
# but in this case I don't use "set" because I want to consider repetitions.
processed_string = []
stemmer = nltk.SnowballStemmer("english", ignore_stopwords=False)
words = nltk.word_tokenize(string)
new_words= [word for word in words if word.isalnum()]
for word in new_words:
processed_string.append(stemmer.stem(word))
return processed_string
"""
============================================================================================================
indexing functions
============================================================================================================
"""
def make_inverted_index():
# I access my dataset in the "Synopsis" column
data = pd.read_csv("./generic_datafile/dataset.csv", usecols=["Synopsis"])
# I create a dictionary in which I will save my "inverted index".
dic = collections.defaultdict(list)
# index = document number ; synops = corpus of the document
for index, synops in tqdm(enumerate(data["Synopsis"].array)):
# First of all I check that the synopsis is a string (because not all anime have the synopsis)
if(isinstance(synops, str)):
# For each word present in the corpus of my document I do the "preprocessing" (function created previously), that is, I create a list of words.
# Then I transform each word in the list into a hash, add it to my dictionary and append the number of the document in which it appears.
# This way, if a word is already present in the dictionary I simply append the document number; otherwise a new entry is created.
for word in preprocessing(synops):
dic[hashlib.sha256(word.encode()).hexdigest()].append(str(index+1))
# I save my dictionary in a file and this will be my "inverted index".
with open("./generic_datafile/inverted_index.txt", "w") as file:
for key in tqdm(dic):
file.write(str(key) + ":" + ",".join(dic[key]))
file.write("\n")
def make_inverted_index_tfidf():
# This function is similar to the previous function "make_inverted_index",
# but in this case I use the function "tfidf" to calculate the inverted index.
vocabulary = read_vocabulary_per_doc()
synopsis = read_synopsis()
inverted_index_new = collections.defaultdict(list)
inverted_index_old = read_inverted_index()
for word in tqdm(vocabulary):
inverted_index_new[word[0]].append(str(tfidf(word[0], int(word[1])-1, synopsis, inverted_index_old)))
with open("./generic_datafile/inverted_index_tfidf.txt", "w") as file:
for key in tqdm(inverted_index_new):
file.write(key + ":" + ";".join(inverted_index_new[key]))
file.write("\n")
def make_inverted_index_tfidf_with_names():
# This function is equal to the previous function "make_inverted_index_tfidf",
# the only difference is that in this case I consider also the name of the anime.
vocabulary = read_vocabulary_per_doc_with_names()
synopsis = read_synopsis_and_names()
inverted_index_new = collections.defaultdict(list)
inverted_index_old = read_inverted_index_with_names()
for word in tqdm(vocabulary):
inverted_index_new[word[0]].append(str(tfidf(word[0], int(word[1])-1, synopsis, inverted_index_old)))
with open("./generic_datafile/inverted_index_tfidf_with_name.txt", "w") as file:
for key in tqdm(inverted_index_new):
file.write(key + ":" + ";".join(inverted_index_new[key]))
file.write("\n")
def make_inverted_index_with_names():
# This function is equal to the previous function "make_inverted_index", but in this case I consider also the name of the anime.
data =
|
pd.read_csv("./generic_datafile/dataset.csv", usecols=["Synopsis", "Name"])
|
pandas.read_csv
|
"""
Code to study groups of cells that activate together
"""
from tqdm.auto import tqdm as pbar
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tctx.analysis import simbatch as sb
from tctx.util import spike_trains as spt, plot
from kmodes.kmodes import KModes
class Amat:
"""
An activation matrix is a matrix indicating who (rows) gets active when (columns).
It may be:
bool representing active/non-active
int representing number of times active
float [0, 1] representing a percentage of presence (for clusters of cells)
"""
def __init__(self, table):
self.table = table
def _is_float(self):
return np.all([np.issubdtype(t, np.floating) for t in self.table.dtypes])
# noinspection PyPep8Naming
@property
def T(self):
"""return the transpose of this table"""
return self.__class__(self.table.T)
def get_contrast_trials(self, good_labels=None, bad_labels=None, req_quantile=.9) -> pd.Index:
"""
Get the trials (columns) sorted so that we maximize the activation level of
good_labels and minimize the one from bad_labels.
:param good_labels: elements (row) to maximize
:param bad_labels: elements (row) to minimize
:param req_quantile: minimum quantile of presence to consider
:return: a subset of all trials, sorted from worst to best
"""
if good_labels is not None:
good_score = self.table.loc[good_labels].astype(float).mean(axis=0)
else:
good_score = pd.Series(0., index=self.table.columns)
if bad_labels is not None:
bad_score = self.table.loc[bad_labels].astype(float).mean(axis=0)
else:
bad_score = pd.Series(0., index=self.table.columns)
mask = (
(bad_score <= np.quantile(bad_score, 1 - req_quantile)) &
(np.quantile(good_score, req_quantile) <= good_score)
)
return (good_score - bad_score)[mask].sort_values().index
def get_clusters_presence(self, cell_clusters: pd.Series):
"""
Extract the presence matrix for every cluster in every trial.
The presence matrix indicates, for every cluster and trial, the ratio of the cells
that belong to that cluster that spiked at least once in that trial.
:returns:
a new float Amat that has shape <clusters, trials> and contains entries within [0, 1].
"""
assert not self._is_float()
cluster_sizes = cell_clusters.groupby(cell_clusters).size()
table = (self.table.astype(int).groupby(cell_clusters).sum().T / cluster_sizes).T
return self.__class__(table)
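    # A tiny worked example of the presence computation above (a sketch with made-up data):
    #   active = pd.DataFrame([[True, False, True],
    #                          [True, True, False],
    #                          [False, True, True]],
    #                         index=["cell_a", "cell_b", "cell_c"], columns=["t0", "t1", "t2"])
    #   clusters = pd.Series([0, 0, 1], index=["cell_a", "cell_b", "cell_c"])
    #   Amat(active).get_clusters_presence(clusters).table
    #   # cluster 0 -> t0: 1.0, t1: 0.5, t2: 0.5 ; cluster 1 -> t0: 0.0, t1: 1.0, t2: 1.0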
def get_presence(self) -> pd.Series:
"""
:return: the percentage of elements present at each event
"""
assert not self._is_float()
return self.table.astype(int).sum() / self.table.shape[0]
def get_clusters_active(self, cell_clusters: pd.Series, thresh=.4):
"""
Extract the trials in which a cluster is considered to be active.
:returns: a new binary Amat with clusters as rows.
"""
presence = self.get_clusters_presence(cell_clusters)
return self.__class__(presence.table >= thresh)
def get_counts(self) -> pd.Series:
"""Count in how many trials each item (neuron or cluster) is active"""
assert not self._is_float()
return self.table.astype(int).sum(axis=1)
def sel_trials_min_participation(self, thresh) -> np.ndarray:
"""select trial indices with a minimum participation of followers"""
assert 0 <= thresh <= 1
participation = self.get_presence()
return self.table.columns[participation >= thresh]
def sort_by_ward_distance(self, axis='both'):
"""
Sort trying to minimize ward distance between pairs
see scipy.cluster.hierarchy.optimal_leaf_ordering
"""
if axis in ('cols', 'col', 'y'):
return self._sort_cols_by_ward_distance()
elif axis in ('rows', 'row', 'x'):
return self.T._sort_cols_by_ward_distance().T
else:
assert axis == 'both'
return self.sort_by_ward_distance('cols').sort_by_ward_distance('rows')
# noinspection PyUnresolvedReferences
def _sort_cols_by_ward_distance(self):
"""see sort_by_ward_distance"""
import scipy.cluster.hierarchy
linkage = scipy.cluster.hierarchy.ward(self.table.values.T)
sorted_linkage = scipy.cluster.hierarchy.optimal_leaf_ordering(linkage, self.table.T.values)
sorted_idcs = scipy.cluster.hierarchy.leaves_list(sorted_linkage)
sorted_idcs = self.table.columns[sorted_idcs]
sorted_table = self.table.loc[:, sorted_idcs]
assert sorted_table.shape == self.table.shape
assert (sorted_table.sort_index().index == self.table.sort_index().index).all()
assert (sorted_table.sort_index(axis=1).columns == self.table.sort_index(axis=1).columns).all()
return self.__class__(sorted_table)
def sort_by_sum(self, ascending=False):
"""sort the matrix by number of activations in both trials and cells"""
table = self.table
table = table.loc[table.sum(axis=1).sort_values(kind='stable', ascending=ascending).index, :]
table = table.loc[:, table.sum(axis=0).sort_values(kind='stable', ascending=ascending).index]
return self.__class__(table)
def generate_kmodes_labels(self, target_cluster_size=5, init='Huang', n_init=200) -> pd.Series:
"""
cluster rows (cells) by activations
:param target_cluster_size:
Approx. how many cells per cluster.
We'll divide the number of cells by this number and use that as K in k-modes clustering.
"""
cell_count = len(self.table.index)
if cell_count <= 1:
return pd.Series(np.zeros(len(self.table.index), dtype=int), index=self.table.index)
n_clusters = np.clip(cell_count // target_cluster_size, 2, cell_count)
# print(f'clustering k={n_clusters}, init={n_init}', flush=True)
# print(self.table, flush=True)
# print(self.table.drop_duplicates(), flush=True)
km = KModes(n_clusters=n_clusters, init=init, n_init=n_init, verbose=False)
labels = km.fit_predict(self.table.values)
labels =
|
pd.Series(labels, index=self.table.index)
|
pandas.Series
|
import numpy as np
import pandas as pd
from pymbar import BAR as BAR_
from pymbar import MBAR as MBAR_
from alchemlyb.estimators import MBAR
from sklearn.base import BaseEstimator
import copy
import re
import itertools
import logging
logger = logging.getLogger(__name__)
class Estimators():
"""
Return Estimated binding free energy (dG).
Returns the dG between state A and state B using 3 different energy estimators:
Zwanzig, Thermodynamic Integration (TI), or Bennett Acceptance Ratio (BAR).
"""
def Zwanzig(dEs,steps):
"""
Return the estimated binding free energy using Zwanzig estimator.
Computes the binding free energy (dG) from a molecular dynamics simulation
between state A and state B using Zwanzig estimator.
Parameters
----------
dEs : Pandas Dataframe
contains the reduced potential (dE) between the states.
steps : integer
the number of the steps to be included in the calculation, set to "None" if all steps are needed.
Returns
---------
Zwanzig_df : Pandas Dataframe
contains the binding free energy (dG) between the states.
Examples
--------
>>> Zwanzig(dEs,None)
>>> Zwanzig(dEs,1000)
"""
dEs_df=pd.DataFrame(-0.592*np.log(np.mean(np.exp(-dEs.iloc[:steps]/0.592))))
Lambdas=[]
dGF=[]
dGF_sum=[]
dGR=[]
dGR_sum=[]
dG_Average=[]
dGR.append(0.0)
dG_Average.append(0.0)
for i in range(1,len(dEs_df.index),2):
Lambdas.append(re.split('_|-',dEs_df.index[i-1])[1])
dGF.append(dEs_df.iloc[i,0])
dGR.append(dEs_df.iloc[i-1,0])
Lambdas.append(re.split('_|-',dEs_df.index[-1])[1])
dGF.append(0.0)
dGF=dGF[::-1]
for i in range(len(dGF)):
dGF_sum.append(sum(dGF[:i+1]))
dGR_sum.append(sum(dGR[:i+1]))
dG_average_raw=((pd.DataFrame(dGF[1:]))-pd.DataFrame(dGR[1:][::-1]))/2
for i in range(len(list(dG_average_raw.values))):
dG_Average.append(np.sum(dG_average_raw.values[:i+1]))
Zwanzig_df=pd.DataFrame.from_dict({"Lambda":Lambdas,"dG_Forward":dGF,"SUM_dG_Forward":dGF_sum,"dG_Reverse":dGR[::-1],"SUM_dG_Reverse":dGR_sum[::-1],"dG_Average":dG_Average})
Zwanzig_Final_dG = Zwanzig_df['dG_Average'].iloc[-1]
logger.info('Final DG computed from Zwanzig estimator: ' +str(Zwanzig_Final_dG))
return Zwanzig_df, Zwanzig_Final_dG
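    # Note on the constant used above: -0.592 is kT in kcal/mol at roughly 298 K, so the dEs_df
    # line in Zwanzig() implements the exponential-averaging relation dG = -kT * ln(<exp(-dE/kT)>).
    # A toy numeric sketch with made-up dE values (kcal/mol):
    #   dE = np.array([0.8, 1.1, 0.9, 1.0])
    #   dG = -0.592 * np.log(np.mean(np.exp(-dE / 0.592)))   # ~0.94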
def Create_df_TI(State_A_df, State_B_df):
"""
create the input dataframe needed for the Thermodynamic Integration (TI) function.
Parameters
----------
State_A_df : Pandas DataFrame for state A energies
State_B_df : Pandas DataFrame for state B energies
----------
Returns
----------
dU_dH_df : Pandas DataFrame
"""
dU_dH_df=(pd.DataFrame({"lambda":State_A_df["Lambda"],"fep":State_B_df["Q_sum"] - State_A_df["Q_sum"] })).sort_values('lambda')
dU_dH_df.reset_index(drop=True,inplace=True)
dU_dH_df.index.names = ['time']
dU_dH_df.set_index(['lambda'], append=True,inplace=True)
return dU_dH_df
def TI(State_A_df,State_B_df,steps):
"""
Return the estimated binding free energy using Thermodynamic integration (TI) estimator.
Compute free energy differences between each state by integrating
dHdl across lambda values.
Parameters
----------
dHdl : Pandas DataFrame
----------
Returns
----------
delta_f_ : DataFrame
The estimated dimensionless free energy difference between each state.
d_delta_f_ : DataFrame
The estimated statistical uncertainty (one standard deviation) in
dimensionless free energy differences.
states_ : list
Lambda states for which free energy differences were obtained.
TI : float
The free energy difference between state 0 and state 1.
"""
if steps is not None:
Energies_df=(pd.DataFrame({"lambda":State_A_df["Lambda"],"fep":State_B_df["Q_sum"] - State_A_df["Q_sum"] })).sort_values('lambda')
Energies_df=pd.DataFrame.from_dict(dict(Energies_df.groupby('lambda',sort=False)['fep'].apply(list)),orient='index')
Energies_df=Energies_df.transpose()
Energies_df=Energies_df.iloc[:steps]
dfl=pd.DataFrame(columns=['lambda','fep'])
dU_dH_df=pd.DataFrame(columns=['lambda','fep'])
for state in range (len(Energies_df.columns)):
dfl=
|
pd.DataFrame(columns=['lambda','fep'])
|
pandas.DataFrame
|
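# Follow-up to the TI estimator above: a minimal standalone sketch of thermodynamic integration,
# numerically integrating the mean dH/dlambda over lambda with the trapezoidal rule. The values
# and variable names below are illustrative only, not the class's actual inputs.
import numpy as np

lambdas = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
mean_dhdl = np.array([2.1, 1.4, 0.9, 0.5, 0.2])  # <dH/dlambda> averaged at each lambda window
dG_TI = np.trapz(mean_dhdl, lambdas)             # ~0.99 for these toy numbers: dG between lambda=0 and 1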
import os
import sys
import re
import glob
import numpy as np
import pandas as pd
import matplotlib
font = {'weight' : 'normal',
'size' : 14}
matplotlib.rc('font', **font)
import matplotlib.pyplot as plt
scriptdir = os.path.abspath(__file__).split('scripts')[0] + 'scripts/'
sys.path.append(scriptdir)
from evaluation.closed_loop_metrics import ScenarioResult, ClosedLoopTrajectory, load_scenario_result
def get_metric_dataframe(results_dir):
scenario_dirs = sorted(glob.glob(results_dir + "*scenario*"))
if len(scenario_dirs) == 0:
raise ValueError(f"Could not detect scenario results in directory: {results_dir}")
# Assumption: format is *scenario_<scene_num>_ego_init_<init_num>_policy
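    # e.g. a directory ".../scenario_3_ego_init_1_mpc" would parse to scene_num=3, init_num=1,
    # policy="mpc" (illustrative directory name)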
dataframe = []
for scenario_dir in scenario_dirs:
scene_num = int( scenario_dir.split("scenario_")[-1].split("_")[0] )
init_num = int( scenario_dir.split("ego_init_")[-1].split("_")[0])
policy = re.split("ego_init_[0-9]*_", scenario_dir)[-1]
pkl_path = os.path.join(scenario_dir, "scenario_result.pkl")
if not os.path.exists(pkl_path):
raise RuntimeError(f"Unable to find a scenario_result.pkl in directory: {scenario_dir}")
notv_pkl_path = os.path.join(re.split(f"{policy}", scenario_dir)[0] + "notv", "scenario_result.pkl")
if not os.path.exists(notv_pkl_path):
raise RuntimeError(f"Unable to find a notv scenario_result.pkl in location: {notv_pkl_path}")
# Load scenario dict for this policy and the notv case (for Hausdorff distance).
sr = load_scenario_result(pkl_path)
notv_sr = load_scenario_result(notv_pkl_path)
metrics_dict = sr.compute_metrics()
metrics_dict["hausdorff_dist_notv"] = sr.compute_ego_hausdorff_dist(notv_sr)
dmins = metrics_dict.pop("dmins_per_TV")
if dmins:
metrics_dict["dmin_TV"] = np.amin(dmins) # take the closest distance to any TV in the scene
else:
metrics_dict["dmin_TV"] = np.nan # no moving TVs in the scene
metrics_dict["scenario"] = scene_num
metrics_dict["initial"] = init_num
metrics_dict["policy"] = policy
dataframe.append(metrics_dict)
return pd.DataFrame(dataframe)
def make_trajectory_viz_plot(results_dir, color1="r", color2="b"):
scenario_dirs = sorted(glob.glob(results_dir + "*scenario*"))
if len(scenario_dirs) == 0:
raise ValueError(f"Could not detect scenario results in directory: {results_dir}")
# Assumption: format is *scenario_<scene_num>_ego_init_<init_num>_policy
dataframe = []
for scenario_dir in scenario_dirs:
scene_num = int( scenario_dir.split("scenario_")[-1].split("_")[0] )
init_num = int( scenario_dir.split("ego_init_")[-1].split("_")[0])
policy = re.split("ego_init_[0-9]*_", scenario_dir)[-1]
pkl_path = os.path.join(scenario_dir, "scenario_result.pkl")
if not os.path.exists(pkl_path):
raise RuntimeError(f"Unable to find a scenario_result.pkl in directory: {scenario_dir}")
notv_pkl_path = os.path.join(re.split(f"{policy}", scenario_dir)[0] + "notv", "scenario_result.pkl")
if not os.path.exists(notv_pkl_path):
raise RuntimeError(f"Unable to find a notv scenario_result.pkl in location: {notv_pkl_path}")
# Load scenario dict for this policy and the notv case (for Hausdorff distance).
sr = load_scenario_result(pkl_path)
notv_sr = load_scenario_result(notv_pkl_path)
# Get time vs. frenet projection for this policy's ego trajectory vs the notv case.
ts, s_wrt_notv, ey_wrt_notv, epsi_wrt_notv = sr.compute_ego_frenet_projection(notv_sr)
# Get the closest distance to a TV across all timesteps identified above.
d_closest = np.ones(ts.shape) * np.inf
d_trajs_TV = sr.get_distances_to_TV()
for tv_ind in range(len(d_trajs_TV)):
t_traj = d_trajs_TV[tv_ind][:,0]
d_traj = d_trajs_TV[tv_ind][:,1]
d_interp = np.interp(ts, t_traj, d_traj, left=np.inf, right=np.inf)
d_closest = np.minimum(d_interp, d_closest)
# Make the plots.
t0 = sr.ego_closed_loop_trajectory.state_trajectory[0, 0]
trel = ts - t0
plt.figure()
ax1 = plt.gca()
ax1.set_xlabel("Time (s)")
ax1.set_ylabel("Route Progress (m)", color=color1)
ax1.plot(trel[::2], s_wrt_notv[::2], color=color1)
ax1.tick_params(axis="y", labelcolor=color1)
ax1.set_yticks(np.arange(0., 101., 10.))
ax2 = ax1.twinx()
ax2.set_ylabel("Closest TV distance (m)", color=color2)
ax2.plot(trel[::2], d_closest[::2], color=color2)
ax2.tick_params(axis="y", labelcolor=color2)
ax2.set_yticks(np.arange(0., 51., 5.))
plt.tight_layout()
plt.savefig(f'{scenario_dir}/traj_viz.svg', bbox_inches='tight')
def normalize_by_notv(df):
# Compute metrics that involve normalizing by the notv scenario execution.
# Right now, these metrics are completion_time and max_lateral_acceleration.
# Add the new columns with normalized values.
df = df.assign( max_lateral_acceleration_norm = df.max_lateral_acceleration,
completion_time_norm = df.completion_time)
# Do the normalization per scenario / ego initial condition.
scene_inits = set( [f"{s}_{i}" for (s,i) in zip(df.scenario, df.initial)])
for scene_init in scene_inits:
s, i = [int(x) for x in scene_init.split("_")]
s_i_inds = np.logical_and(df.scenario == s, df.initial == i)
notv_inds = np.logical_and(s_i_inds, df.policy=="notv")
if np.sum(notv_inds) != 1:
raise RuntimeError(f"Unable to find a unique notv execution for scenario {s}, initialization {i}.")
notv_ind = np.where(notv_inds)[0].item()
notv_lat_accel = df.max_lateral_acceleration[notv_ind]
notv_time = df.completion_time[notv_ind]
lat_accel_normalized = df[s_i_inds].max_lateral_acceleration / notv_lat_accel
df.loc[s_i_inds, "max_lateral_acceleration_norm"] = lat_accel_normalized
time_normalized = df[s_i_inds].completion_time / notv_time
df.loc[s_i_inds, "completion_time_norm"] = time_normalized
return df
def aggregate(df):
df_aggregate = []
for scenario in set(df.scenario):
for policy in set(df.policy):
subset_inds = np.logical_and( df.scenario == scenario, df.policy == policy )
res = df[subset_inds].mean(numeric_only=True)
res.drop(["initial", "scenario"], inplace=True)
res_dict = {"scenario": int(scenario), "policy": policy}
res_dict.update(res.to_dict())
df_aggregate.append(res_dict)
return
|
pd.DataFrame(df_aggregate)
|
pandas.DataFrame
|
import pandas as pd
import json
import zmq
class Publisher:
def __init__(self, host, port, output_topic):
self.host = host
self.port = port
self.output_topic = output_topic
self.context = zmq.Context()
self.client = self.context.socket(zmq.PUB)
self.client.bind(f"tcp://127.0.0.1:5557")
self.counter = 0
def publish(self, payload: dict):
multipart_msg = [str(self.output_topic).encode(), str(payload).encode()]
self.client.send_multipart(multipart_msg)
self.counter += 1
class Subscriber:
def __init__(self, params, castles):
self.host = params.host
self.port = params.port
self.params = params
self.castles = castles
self.context = zmq.Context()
self.client = self.context.socket(zmq.SUB)
self.client.connect(f"tcp://localhost:{self.port}")
for topic in self.params.mqtt_topics:
self.client.subscribe(topic)
self.categories = {}
self.counter = 0
while True:
topic, msg = self.client.recv_multipart()
topic = topic.decode()
# Insert to CASTLE
series = self.parse_response(msg)
self.update_mapping()
if topic in self.castles:
self.castles[topic].insert(series)
self.counter += 1
else:
print(f"ERROR: {topic}")
def on_connect(self, client, userdata, flags, rc):
for topic in self.params.mqtt_topics:
client.subscribe(topic)
def start(self):
self.client.connect(self.host, self.port, 60)
self.client.loop_forever()
def disconnect(self):
self.client.disconnect()
def parse_response(self, payload) -> pd.Series:
json_dict = json.loads(payload)
series =
|
pd.Series(json_dict)
|
pandas.Series
|
from functools import reduce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from IPython.display import display
from scipy.stats import linregress
class CovidDataViz(object):
"""
A class to make plots from processed COVID-19 and World Bank data.
"""
def __init__(self, path='../data/processed'):
self.path = path
self.data = dict()
self.data['Confirmed'] = pd.read_csv(f'{path}/confirmed_cases.csv')
self.data['Confirmed chg'] = pd.read_csv(f'{path}/confirmed_cases_daily_change.csv')
self.data['Confirmed t0'] = pd.read_csv(f'{path}/confirmed_cases_since_t0.csv')
self.data['Recovered'] = pd.read_csv(f'{path}/recovered_cases.csv')
self.data['Dead'] = pd.read_csv(f'{path}/dead_cases.csv')
self.data['Active'] = pd.read_csv(f'{path}/active_cases.csv')
self.data['Mortality'] = pd.read_csv(f'{path}/mortality_rate.csv')
self.data['Coordinates'] = pd.read_csv(f'{path}/coordinates.csv')
self.data['Continents'] = pd.read_csv(f'{path}/continents.csv')
self.data['Ctry to cont'] = pd.read_csv(f'{path}/country_to_continent.csv')
self.data['Country stats'] = pd.read_csv(f'{path}/country_stats.csv')
self.data['World bank'] = pd.read_csv(f'{path}/world_bank.csv')
for _, df in self.data.items():
if 'Date' in df.columns:
df['Date'] = pd.to_datetime(df['Date'])
self.all_countries = sorted(set(self.data['Coordinates']['Country']))
self.all_continents = sorted(set(self.data['Continents']['Continent']))
def list_highest_mortality(self, n=10):
"""
Generate a list of countries with the highest mortality rate.
Notes
-----
mortality = dead / confirmed.
"""
df = self._sort_ctry_stats(stat_name='Mortality', n=n)
return df
def get_country_ts(self, country):
"""
Extract country level cases time series.
"""
dfs = [self.data['Confirmed'][['Date', country]],
self.data['Recovered'][['Date', country]],
self.data['Dead'][['Date', country]],
self.data['Active'][['Date', country]]]
df = reduce(lambda x, y:
|
pd.merge(x, y, on='Date', how='outer')
|
pandas.merge
|
import geopandas as gpd
import pandas as pd
import plotly.express as px
import json
import numpy as np
import plotly.io as pio
import os
from zipfile import ZipFile
from urllib import request
pd.options.display.float_format = "{:,.2f}".format
def donwload_sectors_shp_2010(ufs):
try:
for uf in ufs:
if uf == 'GO':
remote_url = f'https://geoftp.ibge.gov.br/organizacao_do_territorio/malhas_territoriais/malhas_de_setores_censitarios__divisoes_intramunicipais/censo_2010/setores_censitarios_shp/go/go_setores%20_censitarios.zip'
dirs = f'data/territorio/setores2010/{uf}/'
os.makedirs(dirs, exist_ok=True)
file = dirs+ f'{uf.lower()}_setores_censitarios.zip'
request.urlretrieve(remote_url, file)
with ZipFile(file, "r") as z:
z.extractall(dirs)
print(uf + ' done!')
else:
remote_url = f'https://geoftp.ibge.gov.br/organizacao_do_territorio/malhas_territoriais/malhas_de_setores_censitarios__divisoes_intramunicipais/censo_2010/setores_censitarios_shp/{uf.lower()}/{uf.lower()}_setores_censitarios.zip'
dirs = f'data/territorio/setores2010/{uf}/'
os.makedirs(dirs, exist_ok=True)
file = dirs+ f'{uf.lower()}_setores_censitarios.zip'
request.urlretrieve(remote_url, dirs+ f'{uf.lower()}_setores_censitarios.zip')
with ZipFile(file, "r") as z:
z.extractall(dirs)
print(uf + ' done!')
except:
print('An exception occurred. It is probably related to the internet connection, but it may also be that IBGE changed the data URLs.')
def download_sector_statistics_2010(ufs):
try:
for uf in ufs:
if uf == 'SP':
for territory in ['SP_Exceto_a_Capital_20190207.zip', 'SP_Capital_20190823.zip']:
remote_url = f'https://ftp.ibge.gov.br/Censos/Censo_Demografico_2010/Resultados_do_Universo/Agregados_por_Setores_Censitarios/{territory}'
dirs = f'data/pop/setores/{uf.upper()}/'
os.makedirs(dirs, exist_ok=True)
file = dirs+ territory
request.urlretrieve(remote_url, file)
with ZipFile(file, "r") as z:
z.extractall(dirs)
print(uf + ' done!')
elif uf == 'PE':
remote_url = f'https://ftp.ibge.gov.br/Censos/Censo_Demografico_2010/Resultados_do_Universo/Agregados_por_Setores_Censitarios/PE_20200219.zip'
dirs = f'data/pop/setores/{uf.upper()}/'
os.makedirs(dirs, exist_ok=True)
file = dirs+ f'PE_20200219.zip'
request.urlretrieve(remote_url, file)
with ZipFile(file, "r") as z:
z.extractall(dirs)
print(uf + ' done!')
else:
remote_url = f'https://ftp.ibge.gov.br/Censos/Censo_Demografico_2010/Resultados_do_Universo/Agregados_por_Setores_Censitarios/{uf.upper()}_20171016.zip'
dirs = f'data/pop/setores/{uf.upper()}/'
os.makedirs(dirs, exist_ok=True)
file = dirs+ f'{uf.upper()}_20171016.zip'
request.urlretrieve(remote_url, file)
with ZipFile(file, "r") as z:
z.extractall(dirs)
print(uf + ' done!')
except:
print('An exception occurred. It is probably related to the internet connection, but it may also be that IBGE changed the data URLs.')
def treat_sectors_shp(uf):
dict_ufs = {
'RO':'11',
'AC':'12',
'AM':'13',
'RR':'14',
'PA':'15',
'AP':'16',
'TO':'17',
'MA':'21',
'PI':'22',
'CE':'23',
'RN':'24',
'PB':'25',
'PE':'26',
'AL':'27',
'SE':'28',
'BA':'29',
'MG':'31',
'ES':'32',
'RJ':'33',
'SP':'35',
'PR':'41',
'SC':'42',
'RS':'43',
'MS':'50',
'MT':'51',
'GO':'52',
'DF':'53'
}
gdf = gpd.read_file(f'data/territorio/setores2010/{uf}/{dict_ufs[uf]}SEE250GC_SIR.shp')
gdf = gdf.to_crs("EPSG:5880")
gdf.rename(columns={'CD_GEOCODI':'Cod'}, inplace=True)
gdf.drop(labels=['ID', 'CD_GEOCODS', 'NM_SUBDIST', 'CD_GEOCODD', 'NM_DISTRIT', 'NM_MICRO', 'NM_MESO'], axis=1, inplace=True)
if uf.upper() == 'PE':
df_pb_pop = pd.read_csv(f'data/pop/setores/PE/PE_20171016/PE/Base informaçoes setores2010 universo {uf.upper()}/CSV/Basico_{uf.upper()}.csv', sep=';', usecols=['Cod_setor', 'V002'],encoding='ANSI')
elif uf.upper() == 'ES':
df_pb_pop = pd.read_csv(f'data/pop/setores/ES/Base informaçoes setores2010 universo {uf.upper()}/CSV/Basico_{uf.upper()}.csv', sep=';', usecols=['Cod_setor', 'V002'],encoding='ANSI')
elif uf.upper() == 'TO':
df_pb_pop = pd.read_csv(f'data/pop/setores/TO/Base informacoes setores2010 universo TO/CSV/Basico_{uf.upper()}.csv', sep=';', usecols=['Cod_setor', 'V002'],encoding='ANSI')
elif uf.upper() == 'RS':
df_pb_pop = pd.read_csv(f'data/pop/setores/RS/RS_20150527/RS/Base informaçoes setores2010 universo {uf.upper()}/CSV/Basico_{uf.upper()}.csv', sep=';', usecols=['Cod_setor', 'V002'],encoding='ANSI')
elif uf.upper() == 'SP':
df_pb_pop = pd.read_csv(f'data/pop/setores/SP/Base informaçoes setores2010 universo SP_Capital/CSV/Basico_SP1.csv', sep=';', usecols=['Cod_setor', 'V002'],encoding='ANSI')
df_pb_pop2 = pd.read_csv(f'data/pop/setores/SP/SP Exceto a Capital/Base informaçoes setores2010 universo SP_Exceto_Capital/CSV/Basico_SP2.csv', sep=';', usecols=['Cod_setor', 'V002'],encoding='utf-8')
df_pb_pop =
|
pd.concat(objs=[df_pb_pop, df_pb_pop2])
|
pandas.concat
|
#importing libraries
from bokeh.plotting import figure
from bokeh.io import curdoc
from bokeh.models.annotations import LabelSet
from bokeh.models import ColumnDataSource, Range1d
from bokeh.models import PanTool, WheelZoomTool, BoxZoomTool, ResetTool, HoverTool, SaveTool
from bokeh.models.widgets import Select, Slider
from bokeh.layouts import layout
import pandas as pd
import numpy as np
df =
|
pd.read_csv("country_profile_variables.csv")
|
pandas.read_csv
|
import pathlib
import pandas as pd
from metrics import auto_metrics
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 100)
pd.set_option('display.width', 1000)
def print_ongoing_info(logs_dir: pathlib.PosixPath):
for p in logs_dir.glob('./*seed*'):
with open(p / 'log.txt') as f:
lines = f.readlines()
if lines[-1].startswith('{'):
continue
lines.reverse()
for i, line in enumerate(lines):
if 'Model saved at epoch' in line:
print(p, '----------->', lines[i + 1], end='')
break
def auto_summarize_logs(dataset: str, ongoing=False):
logs_dir = pathlib.Path('./log_{}/'.format(dataset))
if not logs_dir.exists():
return None
print('\n\n', '#' * 30, dataset, '#' * 30)
results = summarize_logs(logs_dir=logs_dir, metrics=auto_metrics(dataset))
print_red('Ongoing task details')
if ongoing:
print_ongoing_info(logs_dir=logs_dir)
return results
def config2cmd(config: dict):
_ = ' '.join(['--' + k + ' ' + str(v) for k, v in config.items()])
cmd = 'python3 run.py {}'.format(_)
cmd = cmd.replace('(', '\\(').replace(')', '\\)')  # escape parentheses for shell run
print(cmd)
return cmd
def print_red(s: str):
print("\033[1;31m{}\033[0m".format(s))
def read_logs(logs_dir: pathlib.PosixPath):
logs = []
for p in logs_dir.glob('./*seed*'):
log_path = p / 'log.txt'
with open(log_path) as f:
# read a single logfile to lines, and skip the log files without test
lines = f.readlines()
if not lines[-1].startswith('{') :
continue
# read a single logfile's config from the second-to-last line, and shorten the words for a better visual experience
str_config_dict = lines[-2].replace('\n', '').strip().replace('mol_', 'm').replace('pro_', 'p') \
.replace('depth', 'd').replace('graph_res', 'res').replace('batch_size', 'bs') \
.replace('_TripletMessage', 'Trim').replace('_NNConv', 'NN').replace('_GCNConv', 'GCN') \
.replace('_GATConv', 'GAT').replace('hid_dim_alpha', 'a').replace('message_steps', 'step') \
.replace('Dropout(', '(').replace('Global', '').replace('_norm', 'n') \
.replace('_LayerNorm', 'LN').replace('_BatchNorm', 'BN').replace('_PairNorm', 'PN') \
.replace('more_epochs_run', 'mer').replace('_None', '0') \
.replace('LeakyReLU', 'LReLU')
config_for_print = eval(str_config_dict)
for item in ['dataset_root', 'seed', 'gpu', 'verbose_patience', 'out_dim',
'early_stop_patience', 'lr_reduce_rate', 'lr_reduce_patience']:
del config_for_print[item]
# read a single logfile to loss, test information, valid information.
loss_info, test_info, valid_info = lines[-1].replace(
'\n', '').strip().split('|')
# print(p, loss_info, test_info) # for some inf set
log = {'id': p.name}
if 'inf' in loss_info or 'inf' in test_info or 'inf' in valid_info: continue
log.update(eval(loss_info))
log.update(eval(test_info))
log.update(eval(valid_info))
log.update(config_for_print)
log.update({'config': lines[-2]})
logs.append(log)
return logs
def summarize_logs(logs_dir: pathlib.PosixPath, metrics: list):
logs = read_logs(logs_dir)
if len(logs) >= 1:
# group, sort, and print the logs
logs_pd = pd.DataFrame(logs).sort_values(metrics[0], ascending=False)
logs_summary = []
for note, df in logs_pd.groupby('note'):
d = {'id(note)': note, 'n_run': len(df), 'dataset': df['dataset'].iloc[0],
'config': df['config'].iloc[0]}
for m in metrics:
array = df[m].astype(float)
for opt in ['mean', 'min', 'max', 'std']:
d[opt + m] = getattr(array, opt)()
d.update({})
logs_summary.append(d)
logs_summary =
|
pd.DataFrame(logs_summary)
|
pandas.DataFrame
|
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy
from . import LRT
from . import figfuns
def estimate(par,data,num_bins):
T,_N = data.logY.shape
# a. setup
model = LRT.modelStruct()
model.name = f'PCTBIN'
model.name_short = f'PCTBIN_{num_bins}'
model.par = copy.deepcopy(par)
model.num_bins = num_bins
model.bins = []
# b. construct age-by-age
for t in range(T):
# i. group
G_t, bins_t = pd.qcut(data.logY[t,:],q=num_bins,labels=False,retbins=True)
bins_t[0] = -np.inf
bins_t[-1] = np.inf
model.G.append(G_t)
model.bins.append(bins_t)
# ii. avg. income within bins
ypred_G =
|
pd.DataFrame({'y':data.logY[t,:], 'bin':G_t})
|
pandas.DataFrame
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_navigator.ipynb (unless otherwise specified).
__all__ = ['convnav_supported_models', 'cndf_view', 'add_container_row_info', 'cndf_search', 'ConvNav', 'cndf_save',
'cndf_load']
# Cell
import pickle
from .models import models
from .core import *
from pandas import DataFrame, option_context, concat
from math import ceil
# Cell
def convnav_supported_models():
"Prints list of transfer learning models supported by fa_convnav"
supported_models()
# Cell
def cndf_view(df, verbose=3, truncate=0, tight=True, align_cols='left', top=False):
"Display a valid CNDF dataframe `df` with optional arguments and styling"
def check_view_args(df, verbose, truncate):
assert type(df) == DataFrame and 'Module_name' in df.columns, "Not a valid convnav dataframe"
assert isinstance(truncate, int) and -10 <= truncate <= 10, f"Argument 'truncate' must be an integer between -10 (show more cols) and +10 (show fewer cols)"
assert isinstance(verbose, int) and 1 <= verbose <= 5, "Argument 'verbose' must be an integer between 1 and 5"
def display_df(df, verbose, truncate, tight, align_cols):
with option_context("display.max_rows", 1000):
df.index.name = 'Index'
df_styled = df.iloc[:,:-(11+truncate)].style.set_properties(**{'text-align': align_cols})
if tight:
display(df_styled)
else:
display(df.iloc[:,:-(11+truncate)])
#handle arguments
check_view_args(df, verbose, truncate)
if not isinstance(tight, bool): tight = True
if len(df) < 10: tight = False
if verbose != 3: truncate = (10, 4, 0, 0, -10)[verbose-1]
if verbose == 4: df = add_container_row_info(df)
#display df
if top and len(df) > 10:
display_df(df.iloc[:10], verbose, truncate, False, align_cols)
print(f'...{len(df)-10} more layers')
elif len(df) > 0:
display_df(df, verbose, truncate, tight, align_cols)
else:
print('No data to display')
return None
# Cell
def add_container_row_info(df):
"Add output dimensions and block/layer counts to container rows of `df`. These are not added when a CNDF dataframe is first built to avoid cluttering the display of larger dataframes."
df.loc[df['Division'] == '', 'Division'] = df['div_id']
df.loc[df['Container_child'] == '', 'Container_child'] = df['chd_id']
df.loc[df['Container_block'] == '', 'Container_block'] = df['blk_id']
df.loc[df['Output_dimensions'] == '', 'Output_dimensions'] = df['out_dim']
df.loc[df['Currently'] == '', 'Currently'] = df['current']
return df
# Cell
def cndf_search(df, searchterm, exact=True, show=True):
"Search a CNDF dataframe, display the results in a dataframe and return matching module object(s)"
def match(df, searchterm, exact):
"Searches `df` for `searchterm`, returning exact matches only if `exact=True` otherwise any match"
#select 'df' row using index from 'searchterm'
if isinstance(searchterm, int):
assert searchterm >= 0 and searchterm <= len(df), f'Layer ID out of range: min 0, max {len(df)}'
x = df.iloc[searchterm].copy()
x =
|
DataFrame(x)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import json
import requests
import os.path
import itertools
from time import sleep
base_url = 'https://comtrade.un.org/api/get?'
def download_trade_data(filename, human_readable=False, verbose=True,
period='recent', frequency='A', reporter='USA', partner='all', product='total', tradeflow='exports'):
"""
Downloads records from the UN Comtrade database and saves them in a csv-file with the name "filename".
If necessary, it calls the API several times.
There are two modes:
- human_readable = False (default): headings in output are not human-readable but error messages from the API are received and displayed
- human_readable = True: headings in output are human-readable but we do not get messages from the API about potential problems (not recommended if several API calls are necessary)
Additional option: verbose = False in order to suppress both messages from the API and messages like '100 records downloaded and saved in filename.csv' (True is default)
Parameters:
Using parameter values suggested in the API documentation should always work.
For the parameters period, reporter, partner and tradeflow more intuitive options have been added.
- period [ps] : depending on freq, either YYYY or YYYYMM (or 'YYYY-YYYY'/ 'YYYYMM-YYYYMM' or a list of those) or 'now' or 'recent' (= 5 most recent years/ months) or 'all'
- frequency [freq] : 'A' (= annual) or 'M' (= monthly)
- reporter [r] : reporter code/ name (case-sensitive!) or list of reporter codes/ names or 'all' (see https://comtrade.un.org/data/cache/reporterAreas.json)
- partner [p] : partner code/ name (case-sensitive!) or list of partner codes/ names or 'all' (see https://comtrade.un.org/data/cache/partnerAreas.json)
- product [cc] : commodity code valid in the selected classification (here: Harmonized System HS) or 'total' (= aggregated) or 'all' or 'HG2', 'HG4' or 'HG6' (= all 2-, 4- and 6-digit HS commodities)
- tradeflow [rg] : 'import[s]' or 'export[s]'; see https://comtrade.un.org/data/cache/tradeRegimes.json for further, lower-level options
Information copied from the API Documentation (https://comtrade.un.org/data/doc/api/):
Usage limits
Rate limit (guest): 1 request every second (per IP address or authenticated user).
Usage limit (guest): 100 requests per hour (per IP address or authenticated user).
Parameter combination limit: ps, r and p are limited to 5 codes each. Only one of the above codes may use the special ALL value in a given API call.
Classification codes (cc) are limited to 20 items. ALL is always a valid classification code.
If you hit a usage limit a 409 (conflict) error is returned along with a message specifying why the request was blocked and when requests may resume.
Stability
Notice: this API may be considered stable. However, new fields may be added in the future.
While this API is still subject to change, changes that remove fields will be announced and a method of accessing legacy field formats will be made available during a transition period.
New fields may be added to the CSV or JSON output formats without warning. Please write your code that accesses the API accordingly.
"""
# (1) replace more convenient input options by ones that can by understood by API
# e.g. replace country names by country codes or 'YYYYMM-YYYYMM' by a list of months
reporter = transform_reporter(reporter)
partner = transform_partner(partner)
tradeflow = transform_tradeflow(tradeflow)
period = transform_period(period, frequency)
# (2) warn/ raise an error if appropriate
if sum('all' in inpt for inpt in [reporter, partner, period]) > 1:
raise ValueError("Only one of the parameters 'reporter', 'partner' and 'period' may use the special ALL value in a given API call.")
if any(len(inpt) > 5 for inpt in [reporter, partner, period]) and human_readable:
print("Using the option human_readable=True is not recommended in this case because several API calls are necessary.")
print("When using the human_readable=True option, messages from the API cannot be received!")
response = input("Press y if you want to continue anyways. ")
if response != 'y':
return None # exit function
# (3) download data by doing one or several API calls
dfs = []
slice_points = [range(0, len(inpt), 5) for inpt in [reporter, partner, period]] + \
[range(0, len(product), 20)]
# since the parameters reporter, partner and period are limited to 5 inputs each and
# product is limited to 20 inputs
for i, j, k, m in itertools.product(*slice_points):
df = download_trade_data_base(human_readable=human_readable, verbose=verbose,
period=period[k:k+5], reporter=reporter[i:i+5],
partner=partner[j:j+5], product=product[m:m+20],
tradeflow=tradeflow, frequency=frequency, )
if df is not None:
dfs.append(df)
sleep(1) # wait 1 second because of API rate limit
# (4) save dataframe as csv file
if len(dfs) > 0:
df_all = pd.concat(dfs)
filename = filename if len(filename.split('.')) == 2 else filename + '.csv' # add '.csv' if necessary
df_all.to_csv(filename)
if verbose: print('{} records downloaded and saved as {}.'.format(len(df_all), filename))
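# Illustrative usage (a sketch; these parameter values follow the options documented above and
# would trigger real Comtrade API calls if executed):
#   download_trade_data('usa_exports.csv', period='2015-2017', frequency='A',
#                       reporter='USA', partner=['China', 'Germany'],
#                       product='total', tradeflow='exports')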
def download_trade_data_base(human_readable=False, verbose=True,
period='recent', frequency='A', reporter=842, partner='all', product='total', tradeflow=2):
"""
Downloads records from the UN Comtrade database and returns pandas dataframe using one API call.
There are two modes:
- human_readable = False (default): headings in output are not human-readable but error messages from the API are received and displayed
- human_readable = True: headings in output are human-readable but we do not get messages from the API about potential problems
Additional option: verbose = False in order to suppress messages from the API (True is default)
Parameters of the API call:
As documented in the API documentation.
More intuitive options for the parameters period, reporter, partner and tradeflow are only available in the function 'download_trade_data'!
- period [ps] : depending on freq, either YYYY or YYYYMM (or a list of those) or 'now' or 'recent' (= 5 most recent years/ months) or 'all'
- frequency [freq] : 'A' (= annual) or 'M' (= monthly)
- reporter [r] : reporter code or list of reporter codes or 'all' (see https://comtrade.un.org/data/cache/reporterAreas.json)
- partner [p] : partner code or list of partner codes or 'all' (see https://comtrade.un.org/data/cache/partnerAreas.json)
- product [cc] : commodity code valid in the selected classification (here: Harmonized System HS) or 'total' (= aggregated) or 'all' or 'HG2', 'HG4' or 'HG6' (= all 2-, 4- and 6-digit HS commodities)
- tradeflow [rg] : 1 (for imports) or 2 (for exports); see https://comtrade.un.org/data/cache/tradeRegimes.json for further options
"""
fmt = 'csv' if human_readable else 'json'
head = 'H' if human_readable else 'M'
parameters = {
'ps': period,
'freq': frequency,
'r': reporter,
'p': partner,
'cc': product,
'rg': tradeflow,
'px': 'HS', # Harmonized System (as reported) as classification scheme
'type': 'C', # Commodities ('S' for Services)
'fmt': fmt, # format of the output
'max': 50000, # maximum number of rows -> what happens if number of rows is bigger?
# https://comtrade.un.org/data/dev/portal#subscription says it is 100 000
'head': head # human readable headings ('H') or machine readable headings ('M')
}
url = base_url + dict_to_string(parameters)
if verbose: print(url)
if human_readable:
dataframe = pd.read_csv(url)
else:
json_dict = requests.get(url).json()
n_records = json_dict['validation']['count']['value']
message = json_dict['validation']['message']
if not json_dict['dataset']:
if verbose: print('Error: empty dataset \n Message: {}'.format(message))
dataframe = None
else:
if verbose and message: print('Message: {}'.format(message))
dataframe =
|
pd.DataFrame.from_dict(json_dict['dataset'])
|
pandas.DataFrame.from_dict
|
import torch
import datasets
import transformers
import os
import argparse
from transformers import Trainer, TrainerCallback
from nn_pruning.sparse_trainer import SparseTrainer
from nn_pruning.patch_coordinator import SparseTrainingArguments
from datasets import load_dataset
from data import get_dataset
from transformers import TrainingArguments
# from transformers import AutoModelForCausalLM, AutoConfig
# from transformers import AutoConfig
from nn_pruning.patch_coordinator import ModelPatchingCoordinator
from nn_pruning.inference_model_patcher import optimize_model
from model import GPTNeoForCausalLM
import numpy as np
import copy
from torch import nn
import pandas as pd
torch.manual_seed(0)
np.random.seed(0)
parser = argparse.ArgumentParser(description='PyTorch GPT-Neo ft script')
parser.add_argument('--dataset_path', default=None, help='location of data corpus')
parser.add_argument('--tokenizer_path', required=True, help='location of tokenizer')
parser.add_argument('--model_path', required=True, help='location of model')
parser.add_argument('--output_dir', default=None, help='location of output dir')
parser.add_argument('--save_model', action='store_true', help='save the net')
parser.add_argument('--batch_size', default=16, type=int, help='batch size')
parser.add_argument('--epochs', default=100, type=int, help='epochs')
# parser.add_argument('--prune', action='store_true', help='simple prune test')
parser.add_argument('--dense_pruning_method', default="disabled", help='dense pruning method', choices=('disabled', 'topK', 'topK:1d_alt', 'magnitude', 'threshold', 'sigmoied_threshold:1d_alt'))
parser.add_argument('--attention_pruning_method', default="disabled", help='attention pruning method', choices=('disabled', 'topK', 'magnitude', 'threshold', 'sigmoied_threshold'))
parser.add_argument('--regularization', default="disabled", help='regularization method', choices=('disabled', 'l0', 'l1', "uniqueness"))
parser.add_argument('--train', action='store_true', help='train the net')
parser.add_argument('--evaluate', action='store_true', help='evaluate the net')
parser.add_argument('--train_samples', default=None, type=int, help='number of training samples to use')
parser.add_argument('--valid_samples', default=None, type=int, help='number of validation samples to use')
if __name__ == "__main__":
args = parser.parse_args()
do_prune = args.dense_pruning_method != "disabled" or args.attention_pruning_method != "disabled"
datasets.logging.set_verbosity_error()
transformers.logging.set_verbosity_error()
print(f"Using transformers v{transformers.__version__} and datasets v{datasets.__version__} and torch v{torch.__version__}")
gptneo_name = args.model_path
# gptneo_name = "EleutherAI/gpt-neo-125M"
# gptneo_name = "EleutherAI/gpt-neo-2.7B"
wikisql_train = get_dataset(args.tokenizer_path, "", "train", args.train_samples, 512, 512, False)
wikisql_validation = get_dataset(args.tokenizer_path, "", "validation", args.valid_samples, 512, 512, False)
wikisql_test = get_dataset(args.tokenizer_path, "", "test", args.valid_samples, 512, 512, False)
log_df = []
class LogDfCallback(TrainerCallback):
"""
A bare :class:`~transformers.TrainerCallback` that just prints the logs.
"""
def on_evaluate(self, args, state, control, metrics=None, **kwargs):
# _ = logs.pop("total_flos", None)
if state.is_local_process_zero:
# print(logs)
log_df.append(metrics)
class PruningTrainer(SparseTrainer, Trainer):
def __init__(self, sparse_args, *args, **kwargs):
Trainer.__init__(self, *args, **kwargs)
SparseTrainer.__init__(self, sparse_args)
def compute_loss(self, model, inputs, return_outputs=False):
"""
We override the default loss in SparseTrainer because it throws an
error when run without distillation
"""
outputs = model(**inputs)
labels = inputs["labels"]
logits = outputs["logits"]
logits = torch.argmax(logits, axis=-1)
acc = (logits[:] == labels[:]).sum(axis=1, keepdims=True)
correct_labels = acc.sum() / (labels.shape[0] * labels.shape[1])
acc = (acc == labels.shape[1]).sum() / labels.shape[0]
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
uniqueness = outputs["uniqueness"].mean()
regu_loss, lamb, info = self.patch_coordinator.regularization_loss(model)
for kind, values in info.items():
if kind == "total":
suffix = ""
else:
suffix = "_" + kind
for k, v in values.items():
self.metrics[k + suffix] += float(v)
# self.metrics["ce_loss"] += float(loss.mean())
self.metrics["accuracy"] += acc
self.metrics["correct_labels"] += correct_labels
self.metrics["uniqueness"] += uniqueness
self.loss_counter += 1
# loss = loss + regu_loss * lamb
loss = loss + regu_loss * lamb + uniqueness * lamb
# print(loss)
return (loss, outputs) if return_outputs else loss
def _save(self, output_dir = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
print(f"Saving model checkpoint to {output_dir}")
self.model.save_pretrained(output_dir, state_dict=state_dict)
if do_prune:
print("Compiling model")
model_copy = copy.deepcopy(self.model)
self.patch_coordinator.compile_model(model_copy)
compiled_dir = os.path.join(output_dir, "compiled")
print(f"Saving compiled model checkpoint to {compiled_dir}")
model_copy.save_pretrained(compiled_dir, state_dict=state_dict)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
# torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
sparse_args = SparseTrainingArguments()
initial_threshold = 1.0
# final_threshold = 0.5 # top 50% of topk
final_threshold = 0.1 # top 10% of topk
if args.dense_pruning_method == "threshold":
initial_threshold = 0.001
final_threshold = .001
elif args.dense_pruning_method == "magnitude":
initial_threshold = .1
final_threshold = .1 # this is for uniqueness, keep top 10% of weights
elif "sigmoied_threshold" in args.dense_pruning_method:
initial_threshold = 0
final_threshold = 0.1 # over .1 for sigmoid
regularization_final_lambda = 0
if args.regularization != "disabled":
# regularization_final_lambda = 10
# regularization_final_lambda = 2
regularization_final_lambda = .05
hyperparams = {
"dense_pruning_method": args.dense_pruning_method,
"attention_pruning_method": args.attention_pruning_method,
"regularization": args.regularization,
"regularization_final_lambda": regularization_final_lambda,
"ampere_pruning_method": "disabled",
"initial_threshold": initial_threshold,
"final_threshold": final_threshold,
"initial_warmup": 1,
"final_warmup": 3,
"attention_block_rows":32,
"attention_block_cols":32,
"attention_output_with_dense": 0,
"save_uniqueness": args.regularization == "uniqueness",
}
for k,v in hyperparams.items():
if hasattr(sparse_args, k):
setattr(sparse_args, k, v)
else:
print(f"sparse_args does not have argument {k}")
learning_rate = 2e-4
# learning_rate = 2e-6
n_gpu = torch.cuda.device_count()
batch_size = args.batch_size
epoch_steps = len(wikisql_train) // (batch_size*n_gpu)
num_train_epochs = args.epochs
logging_steps = epoch_steps
# warmup for 10% of training steps
warmup_steps = logging_steps * num_train_epochs * 0.1 # 10 %
# eval_steps = int(epoch_steps * num_train_epochs / 12) # eval 12 times
eval_steps = int(epoch_steps*5) # eval every 5 epochs
print("eval steps", eval_steps)
print("batch_size", batch_size)
print("epoch_steps", epoch_steps)
print("n_gpu", n_gpu)
save_strategy = "no"
if args.save_model:
save_strategy = "steps"
if args.output_dir is None:
output_dir = "checkpoints"
else:
os.makedirs(args.output_dir, exist_ok=True)
output_dir = os.path.join(args.output_dir, "checkpoints")
training_args = TrainingArguments(
output_dir=output_dir,
# output_dir=None,
# evaluation_strategy="epoch",
evaluation_strategy="steps",
eval_steps= eval_steps,
save_strategy=save_strategy,
save_steps = eval_steps,
# gradient_accumulation_steps=1,
# eval_accumulation_steps=10,
eval_accumulation_steps=2,
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
learning_rate=learning_rate,
weight_decay=0.1,
warmup_steps=warmup_steps,
# weight_decay=1e-4,
logging_steps=logging_steps,
# disable_tqdm=True,
disable_tqdm=False,
report_to=None,
# adam_beta1=.9,
# adam_beta2=.999,
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
mpc = ModelPatchingCoordinator(
sparse_args=sparse_args,
device=device,
cache_dir="checkpoints",
model_name_or_path=gptneo_name,
logit_names="logits",
teacher_constructor=None)
gptneo_model = GPTNeoForCausalLM.from_pretrained(gptneo_name).to(device)
if args.train:
with torch.no_grad():
# gptneo_model.transformer.wte.weight.data.normal_(mean=0.0, std=0.02)
embed_shape = gptneo_model.transformer.wte.weight.shape
decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
decoder.weight = gptneo_model.transformer.wte.weight # Tied weights with input
gptneo_model.set_output_embeddings(decoder)
mpc.patch_model(gptneo_model)
def compute_metrics(pred):
predictions, labels = pred
predictions = np.argmax(predictions, axis=-1)
# predictions, labels = predictions[..., :, :-1], labels[..., :, 1:]
# acc = (predictions == labels).sum(axis=1, keepdims=True) == labels.shape[1]
# acc = (predictions == labels).sum(axis=1, keepdims=True)
# print(acc)
real = wikisql_validation.tokenizer.decode(labels[0])
pred = wikisql_validation.tokenizer.decode(predictions[0])
ridx = real.find("<|endoftext|>")
pidx = pred.find("<|endoftext|>")
print()
print("SAMPLE", real[:ridx])
print("PREDICTION", pred[:pidx])
print()
# print("sample", real, "pred", pred)
acc = (predictions[:] == labels[:]).sum(axis=1, keepdims=True) == labels.shape[1]
return {"accuracy": acc.sum() / labels.shape[0]}
trainer = PruningTrainer(
sparse_args=sparse_args,
args=training_args,
model=gptneo_model,
train_dataset=wikisql_train,
eval_dataset=wikisql_validation,
callbacks=[LogDfCallback]
)
trainer.set_patch_coordinator(mpc)
if args.train:
print("training")
trainer.train()
print("evaluating")
results = trainer.evaluate()
print("results")
print(results)
if args.output_dir:
print("saving results")
log_file = os.path.join(args.output_dir, 'log.df')
|
pd.DataFrame(log_df)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import datetime
class Durations(object):
@classmethod
def set(cls, X, extract_cols, dataset):
print("... ... Durations")
all_df = dataset["all_df"]
# duration from first action to clickout
dffac_df = all_df[["session_id", "timestamp", "timestamp_dt"]].groupby(
"session_id").first().reset_index()
dffac_df = dffac_df[["session_id", "timestamp_dt"]]
dffac_df.columns = ["session_id", "first_timestamp_dt"]
X = pd.merge(X, dffac_df, on="session_id", how="left")
X["session_duration"] = X.apply(lambda x: (x.timestamp_dt - x.first_timestamp_dt).seconds, axis=1)
extract_cols = extract_cols + ["session_duration"]
del dffac_df
# duration from last destination to clickout
dflsc_df = all_df[["session_id", "_session_id", "timestamp", "timestamp_dt"]].groupby(
"_session_id").first().reset_index()
dflsc_df = dflsc_df[dflsc_df._session_id.isin(X._session_id)]
dflsc_df = dflsc_df[["session_id", "timestamp_dt"]]
dflsc_df.columns = ["session_id", "step_first_timestamp_dt"]
X = pd.merge(X, dflsc_df, on="session_id", how="left")
X["step_duration"] = X.apply(lambda x: (x.timestamp_dt - x.step_first_timestamp_dt).seconds, axis=1)
extract_cols = extract_cols + ["step_duration"]
del dflsc_df
return (X, extract_cols)
class JustClickout(object):
@classmethod
def set(cls, X, extract_cols):
print("... ... JustClickout")
# append current filters
def get_cf_features(x):
sbp = 1 if "Sort by Price" in x.current_filters else 0
sbd = 1 if "Sort By Distance" in x.current_filters else 0
sbr = 1 if "Sort By Rating" in x.current_filters else 0
fod = 1 if "Focus on Distance" in x.current_filters else 0
fsr = 1 if "Focus on Rating" in x.current_filters else 0
bev = 1 if "Best Value" in x.current_filters else 0
return pd.Series({'cf_sbp': sbp
, 'cf_sbd': sbd
, 'cf_sbr': sbr
, 'cf_fod': fod
, 'cf_fsr': fsr
, 'cf_bev': bev})
X["current_filters"] = X["current_filters"].fillna("")
curf_df = X[["current_filters"]].apply(lambda x: get_cf_features(x), axis=1)
X = pd.concat([X, curf_df], axis=1)
extract_cols = extract_cols + list(curf_df.columns)
del curf_df
return (X, extract_cols)
class JustBeforeClickout(object):
@classmethod
def set(cls, X, dataset):
print("... ... JustBeforeClickout")
all_df = dataset["all_df"]
# last action_type
lasttype_df = all_df[["session_id", "action_type", "is_y"]].copy()
lasttype_df["lat"] = lasttype_df["action_type"].shift(1)
lasttype_df["last_session_id"] = lasttype_df["session_id"].shift(1)
lasttype_df = lasttype_df[lasttype_df.is_y == 1]
lasttype_df = lasttype_df[lasttype_df.session_id == lasttype_df.last_session_id]
lasttype_df = lasttype_df[["session_id", "lat"]]
onehot_lat = pd.get_dummies(lasttype_df, columns=['lat'])
X = pd.merge(X, onehot_lat, on="session_id", how="left")
lat_cols = list(onehot_lat.columns)
lat_cols.remove("session_id")
for lat_col in lat_cols:
X[lat_col] = X[lat_col].fillna(0)
del lasttype_df
del onehot_lat
return X
class Record2Impression(object):
@classmethod
def expand(cls, X, extract_cols, dataset):
print("... ... Record2Impression")
# create expanded
X = X.reset_index()
X["gid"] = X.index
X["n_imps"] = X[["impressions"]].apply(lambda x: len(str(x.impressions).split("|")), axis=1)
X["price_mean"] = X[["prices"]].apply(lambda x: np.mean(np.array(str(x.prices).split("|")).astype(int)), axis=1)
X["price_std"] = X[["prices"]].apply(lambda x: np.std(np.array(str(x.prices).split("|")).astype(int)), axis=1)
X["impression"] = X[["impressions"]].apply(lambda x: str(x.impressions).split("|"), axis=1)
X["price"] = X[["prices"]].apply(lambda x: str(x.prices).split("|"), axis=1)
X_impression = X[["gid", "impression"]].set_index('gid').impression.apply(pd.Series).stack().reset_index(
level=0).rename(columns={0: 'impression'})
X_price = X[["gid", "price"]].set_index('gid').price.apply(pd.Series).stack().reset_index(level=0).rename(
columns={0: 'price'})
X_position = X[["gid", "impression"]].set_index('gid').impression.apply(
lambda x: pd.Series(range(len(x)))).stack().reset_index(level=0).rename(columns={0: 'position'})
X_expanded = pd.concat([X_impression, X_price], axis=1)
X_expanded = pd.concat([X_expanded, X_position], axis=1)
X_expanded.columns = ["gid", "impression", "gid2", "price", "gid3", "position"]
X_expanded = X_expanded[["gid", "impression", "price", "position"]]
        # join expanded rows back onto the per-clickout features
X = pd.merge(X_expanded, X[["gid", "n_imps", "price_mean", "price_std"] + extract_cols], on="gid", how="left")
# to normalize position and price
X["pos_rate"] = X["position"] / X["n_imps"]
X["pos"] = X["position"] + 1
X["price_norm"] = (X["price"].astype(float) - X["price_mean"].astype(float)) / X["price_std"].astype(float)
# join price_norm rank
pnorm_rank_df = X[["session_id", "price_norm"]].copy()
pnorm_rank_df = pnorm_rank_df[["session_id", "price_norm"]].groupby("session_id").rank(ascending=False)
pnorm_rank_df.columns = ["price_norm_rank"]
X =
|
pd.concat([X, pnorm_rank_df], axis=1)
|
pandas.concat
|
#!/usr/bin/env python
# coding: utf-8
##################################
# Author: <NAME>
# Copyright © 2020 The Board of Trustees of the Royal Botanic Gardens, Kew
##################################
#
# # wcvp_taxo
# wcvp_taxo is a python3 script for matching and resolving scientific names against the WCVP database (https://wcvp.science.kew.org/)
#
# ## Input
# ### A. Input files
# The script requires two input tables: The WCVP database and a file with species names to match on WCVP
# 1. **WCVP database**: must be downloaded from http://sftp.kew.org/pub/data-repositories/WCVP/. It will be filtered and saved by the script in pickle format. If you wish to update the WCVP database, delete the .pkl file.
# 2. **Sample file**: This spreadsheet must be in **.csv** format and contain at least one column with the scientific names you wish to match in WCVP. By default the script will look for a column named **scientific_name**. Otherwise it will look for a column called **Species**. If the species name is split across two columns **(Genus, Species)**, the script will recognize it automatically.
#
# ### B. Parameters
# These parameters are optional and can be accessed with python wcvp_taxo.py -h
# - **-g, --resolve_genus**: Find taxa for scientific names written in genus sp. format
# - **-s, --similar_tax_method**: Find most similar taxa for misspelled taxa (see the minimal sketch after this parameter list). <br>
# Possible values are:
# - **similarity_genus**: Search for similar scientific name in WCVP assuming genus is correct (fast)
# - **similarity**: Search for similar scientific name in WCVP (slow)
# - **request_kewmatch**: Search for similar scientific name using kewmatch (online) (ok if fewer than 200 queries)
# - **-d, --duplicate_action**: Action to take when multiple WCVP entries match the provided scientific_name. <br>
# Possible values are:
# - **rank**: reduce duplicates by prioritizing accepted > unplaced > synonym > homotypic_synonym taxonomic status (keep first entry).
# - **divert**: divert duplicates to _duplicates.csv
# - **divert_taxonOK**: divert duplicates to _duplicates.csv, unless all matching entries have the same taxon name in WCVP (keep first entry)
# - **divert_speciesOK**: divert duplicates to _duplicates.csv, unless all matching entries have the same species name in WCVP (keep first entry)
# - **divert_genusOK**: divert duplicates to _duplicates.csv, unless all matching entries have the same genus name in WCVP (keep first entry and rename as genus sp.)
# - **-oc, --only_changes**: Output file only contains IDs that have a different taxonomy than provided (species, genus or family if provided)
# - **-os, --simple_output**: Output file is simplified to 4 columns: ID, kew-id, Ini_sci_name, sci_name
# - **-v, --verbose**: verbose output in console
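#
# A minimal sketch of the idea behind the two similarity options above, assuming a WCVP
# table with a `taxon_name` column (illustration only, not necessarily the exact code used
# further down; difflib is the dependency listed for these methods):
import difflib
import pandas as pd
def find_similar_name(sci_name, wcvp, method="similarity_genus", cutoff=0.8):
    """Return the closest WCVP taxon_name to sci_name, or None if nothing passes the cutoff."""
    if method == "similarity_genus":
        # restrict candidates to the same genus (first word of the name) to keep the search fast
        genus = sci_name.split(" ")[0]
        candidates = wcvp.loc[wcvp["taxon_name"].str.startswith(genus + " ", na=False), "taxon_name"]
    else:
        # full (slow) search over every WCVP name
        candidates = wcvp["taxon_name"].dropna()
    matches = difflib.get_close_matches(sci_name, candidates.tolist(), n=1, cutoff=cutoff)
    return matches[0] if matches else None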
#
#
# ## Example
# ```console
# python wcvp_taxo.py wcvp_export.txt sample_file.csv -g -s similarity_genus -d divert_taxonOK
# python wcvp_taxo.py wcvp_export.txt sample_file.csv
# python wcvp_taxo.py wcvp_export.txt sample_file.csv -oc -os -s similarity --verbose -d divert
# python wcvp_taxo.py wcvp_export.txt sample_file.csv -g -s similarity -d rank --verbose
# ```
#
# ## Output
# For the example above, the script will output the following tables:
# * **sample_file_wcvp.csv**: Samples for which the scientific name is resolved.
# * **sample_file_duplicates.csv**: Samples for which the scientific name matched multiple WCVP entries.
# * **sample_file_unresolved.csv**: Samples for which the scientific name did not match any WCVP entries.
#
#
# ## Pipeline
# ### Pre-processing
# * Load the WCVP database. If only the text file exists, save it as .pkl.
# * Find the column containing scientific names: scientific_name or sci_name (default), otherwise Species or Genus + Species.
# * Search for a column with unique IDs. The first such column in the table is selected. A column with unique IDs is created if none exists. sci_name and Species are never picked as the ID.
#
# ### Initial checks
# * Check if Ini_scinames are written as Genus sp. (see the sketch after this list)
# * Check if Ini_scinames exist in WCVP
# * Optional. Find similar names if not in WCVP
# * Check if Ini_scinames have duplicate entries
# * Proceed to matching for valid scientific names
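#
# A minimal illustration of the "Genus sp." check above (hypothetical helper, not
# necessarily the code used further down in this script):
def is_genus_sp(name):
    """True when a scientific name is written as 'Genus sp.' rather than as a binomial."""
    parts = str(name).strip().split()
    return len(parts) == 2 and parts[1].lower().rstrip('.') == 'sp'
# e.g. is_genus_sp("Carex sp.") -> True, is_genus_sp("Carex acuta") -> False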
#
# ### Matching & Resolving
# 1. Find accepted and unplaced matches.
# 2. Resolve synonyms and homotypic synonyms.
# 3. Resolve duplicates (a minimal sketch of the rank-based option follows this list).
# 4. Output tables
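#
# A minimal sketch of duplicate resolution with `--duplicate_action rank` (illustration only;
# the column names and the exact status spellings below are assumptions, not guaranteed to
# match the implementation further down):
import pandas as pd
def resolve_duplicates_by_rank(matches):
    """Keep, for each Ini_sci_name, the match with the highest-priority taxonomic status."""
    priority = ["Accepted", "Unplaced", "Synonym", "Homotypic_Synonym"]
    ranked = matches.copy()
    # statuses outside the priority list become NaN and therefore sort last
    ranked["status_rank"] = pd.Categorical(
        ranked["taxonomic_status"], categories=priority, ordered=True
    )
    ranked = ranked.sort_values(["Ini_sci_name", "status_rank"])
    return ranked.drop_duplicates(subset="Ini_sci_name", keep="first").drop(columns="status_rank")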
#
# ## Dependencies
# pandas, tqdm<br>
# for similarity: difflib, requests, ast<br>
# numpy, os, argparse, sys
# In[1]:
import pandas as pd
from tqdm import tqdm
import numpy as np
pd.options.mode.chained_assignment = None # default='warn'
import os
import argparse
import sys
# ## Parameters
# In[2]:
parser = argparse.ArgumentParser(
description='Script used to match species names with wcvp. Requires at least the paths to the wcvp\
file and to a .csv file containing scientific names (species or genus + species)')
parser.add_argument("wcvp_path", type=str,
help="path to wcvp_export.txt, \
download from http://sftp.kew.org/pub/data-repositories/WCVP/")
parser.add_argument("df_path", type=str,
help="path to spreadsheet in .csv format. Note output will be in the same folder")
parser.add_argument("-g", "--resolve_genus",
help="Optional. find taxa for scientific names written in genus sp. format",
action="store_true", default=False)
parser.add_argument("-s",'--similar_tax_method',
help="Optional. Find most similar taxa for misspelled taxa. possibles values are: \
similarity_genus, similarity, request_kew", action="store", default=None)
parser.add_argument("-d",'--duplicate_action',
help="Optional. Action to take when multiple wcvp taxon match to a sci_name. possibles values are: \
rank, divert, divert_taxonOK, divert_speciesOK, divert_genusOK.\
\n\n rank: reduce duplicates by prioritizing accepted > unplaced > synonym > homotypic synonym \
taxonomic status. \n\n divert: flag duplicates, remove them from _wcvp.csv output and write them to _duplicates.csv",
action="store", default='rank')
parser.add_argument("-oc", "--only_changes",
help="Optional. Output file only contains IDs that have a different taxonomy than provided",
action="store_true", default=False)
parser.add_argument("-od", "--output_duplicates",
help="Optional. Output a separate file for duplicates as _duplicates.csv",
action="store_true", default=False)
parser.add_argument("-os", "--simple_output",
help="Optional. Specify which columns of the input file should be kept, in addition to ID, species name \
and WCVP columns kew-id and species name. \
e.g. --simple_output ['idSequencing','NumReads'] will produce an output with \
idSequencing,NumReads, kew-id, Ini_sci_name, sci_name",
action="store_true", default=False)
parser.add_argument("-v", "--verbose",
help="Optional. verbose output in console",
action="store_true", default=False)
args = parser.parse_args()
wcvp_path = args.wcvp_path
df_path = args.df_path
resolve_genus=args.resolve_genus
find_most_similar=args.similar_tax_method
dupl_action=args.duplicate_action
only_changes=args.only_changes
simple_output=args.simple_output
verbose=args.verbose
status_keep=['Accepted','Unplaced']
# In[3]:
# ## Jupyter Notebook
# wcvp_path='wcvp_v4_mar_2021.txt'
# df_path='../PAFTOL_DB/2021-03-19_paftol_export.csv'
# resolve_genus=True
# find_most_similar='similarity'
# dupl_action='rank'
# verbose=False
# only_changes=True
# # simple_output=False
# simple_output=['idPaftol','idSequencing','ExternalSequenceID','DataSource','Project','Taxonomical_Notes']
# status_keep=['Accepted','Unplaced']
# ## Functions
# ### Data processing functions
# In[4]:
# Load wcvp file and save as pickle for faster loading
def load_wcvp(wcvp_path):
print('Loading WCVP...',end='')
    # Load pickle
if os.path.exists(wcvp_path.replace('.txt','.pkl')):
print('found .pkl...',end='')
wcvp = pd.read_pickle(wcvp_path.replace('.txt','.pkl'))
elif os.path.exists(wcvp_path):
wcvp = pd.read_table(wcvp_path,sep='|',encoding='utf-8')
print('found .txt, ',end='')
# Remove extra columns
wcvp = wcvp.drop(columns=['parent_kew_id','parent_name','parent_authors'])
print('saving to .pkl...',end='')
wcvp.to_pickle(wcvp_path.replace('.txt','.pkl'))
else:
print('could not find',wcvp_path)
sys.exit()
print(wcvp.shape[0],'entries')
return wcvp
def load_df(df_path):
print('Loading dataset...',end='')
try:
smpl_df = pd.read_csv(df_path,encoding='utf-8')
print(smpl_df.shape[0],'entries')
return smpl_df
except:
print('could not find',df_path)
sys.exit()
# In[5]:
#Define ID column
def GetIDcol(df):
#Check for columns with all unique values
col_unique=(df.nunique()==df.shape[0]).to_frame().reset_index().rename(columns={0:'unique','index':'column'})
col_unique = col_unique[col_unique.unique==True]
col_unique = col_unique[~col_unique['column'].isin(['Ini_sci_name','Ini_Genus','Ini_Species'])]
if col_unique.shape[0]>0:
print('found',col_unique.shape[0],'ID column:',end='')
colsID=list(col_unique['column'])
colID=colsID[0]
print(colID)
else:
print('No ID column, create ID from index')
colID='ID'
    # Return the name of the ID column
return colID
# In[6]:
#Find which column contains the scientific name to match
def define_sci_name(smpl_df, verbose=False):
col_taxo=list(smpl_df.columns[smpl_df.columns.str.contains(
'family|genus|species|infraspecies|sci_name|scientific_name',case=False)])
for itaxo in col_taxo:
smpl_df = smpl_df.rename(columns = {itaxo:'Ini_' + itaxo.capitalize()})
if verbose:
print('renaming ' + itaxo + ' to Ini_' + itaxo, end=', ')
# Use sci_name if provided
if 'Ini_Sci_name' in smpl_df.columns:
print('\nScientific Name is sci_name')
smpl_df = smpl_df.rename(columns = {'Ini_Sci_name':'Ini_sci_name'})
elif 'Ini_Scientific_name' in smpl_df.columns:
print('\nScientific Name is scientific_name')
smpl_df = smpl_df.rename(columns = {'Ini_Scientific_name':'Ini_sci_name'})
else:
        # Identify if the scientific name is in one or two columns
try:
avg_word_sp=smpl_df['Ini_Species'].str.split().str.len().mean()
print('avg words in Ini_Species:',round(avg_word_sp,1))
if round(avg_word_sp)==1:
print('Scientific Name (Ini_sci_name) is Ini_Genus + Ini_Species')
smpl_df['Ini_sci_name'] = smpl_df['Ini_Genus'] + ' ' + smpl_df['Ini_Species']
elif round(avg_word_sp)>=2:
print('Scientific Name (Ini_sci_name) is Ini_Species')
smpl_df['Ini_sci_name'] = smpl_df['Ini_Species']
except:
print('ERROR: Could not identify species column')
sys.exit()
return smpl_df
# ### WCVP related functions
# In[7]:
def get_by_taxon_name(df, wcvp):
tmp_wcvp=wcvp[wcvp.taxon_name.isin(df.sci_name)]
match =
|
pd.merge(df, tmp_wcvp, how='inner', left_on='sci_name', right_on='taxon_name')
|
pandas.merge
|
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with
|
ensure_clean_store(setup_path)
|
pandas.tests.io.pytables.common.ensure_clean_store
|
# -*- coding: utf-8 -*-
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator
# from model.ESPNet_v2.SegmentationModel import EESPNet_Seg
# from model.CGNet import CGNet
# from model.ContextNet import ContextNet
# from model.DABNet import DABNet
# from model.EDANet import EDANet
# from model.ENet import ENet
# from model.ERFNet import ERFNet
# from model.ESNet import ESNet
# from model.ESPNet import ESPNet
# from model.FastSCNN import FastSCNN
# from model.FPENet import FPENet
# from model.FSSNet import FSSNet
# from model.LEDNet import LEDNet
# from model.LinkNet import LinkNet
# from model.SegNet import SegNet
# from model.SQNet import SQNet
# from model.UNet import UNet
|
pd.set_option('display.width', 1000)
|
pandas.set_option
|
#!/usr/bin/env python
# coding: utf-8
# -------------------------------------------------------------------
# **TD DSA 2021 by <NAME> - report by <NAME>**
# -------------------------------------------------------------------
# # Descriptive analysis
# ## Setup
# In[5]:
get_ipython().system('pip install textblob')
# In[6]:
get_ipython().system('pip install emot')
# In[7]:
get_ipython().system('pip install wordcloud')
# In[8]:
#Time and files
import os
import warnings
import time
from datetime import timedelta
#Data manipulation
import pandas as pd
import numpy as np
# Text
from collections import Counter
import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize
nltk.download('stopwords')
from nltk.corpus import stopwords
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
from nltk.util import ngrams
from textblob import TextBlob
import string
import re
import spacy
from emot.emo_unicode import UNICODE_EMO, EMOTICONS
#Visualization
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from wordcloud import WordCloud
#Experiment tracking
import mlflow
import mlflow.sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
# ### Using the package
# In[9]:
#This cell loads the packaged version of the project and reloads it automatically before its functions are called
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# In[10]:
from dsa_sentiment.scripts.make_dataset import load_data
from dsa_sentiment.scripts.evaluate import eval_metrics
from dsa_sentiment.scripts.make_dataset import Preprocess_StrLower, Preprocess_transform_target
# ### Configuring the MLflow experiment
# In[11]:
mlflow.tracking.get_tracking_uri()
# ### Loading the data
# In[12]:
# Import the data
#df
df_train=pd.read_parquet('/mnt/data/interim/df_train.gzip')
df_val=pd.read_parquet('/mnt/data/interim/df_val.gzip')
df_test=pd.read_parquet('/mnt/data/interim/df_test.gzip')
#X
X_train=pd.read_parquet('/mnt/data/interim/X_train.gzip')
X_val=pd.read_parquet('/mnt/data/interim/X_val.gzip')
X_test=pd.read_parquet('/mnt/data/interim/X_test.gzip')
#y
y_train=pd.read_parquet('/mnt/data/interim/y_train.gzip')
y_val=
|
pd.read_parquet('/mnt/data/interim/y_val.gzip')
|
pandas.read_parquet
|
import os
from datetime import datetime
# Imports the Google Cloud client library
from google.cloud import datastore
from google.cloud import storage
from gcp_interactions import gcp_clients
# This defaults to "prod". Override with "dev" for local runs
gcp_clients.ENVIRONMENT = "prod"
# This is only used locally:
# from google.oauth2 import service_account
""" CONSTANTS """
# STORAGE BUCKET NAMES
UNPROCESSED_BUCKET_NAME = "unprocessed_expense_bucket_1"
PROCESSED_BUCKET_NAME = "processed_expense_bucket_1"
# this controls the threshold for the degree of overlap between a skewed text
# box and the rest of the line for it to be allocated to that line (see the sketch below)
OVERLAPPING_ALLOCATION_THRESHOLD = 0.3
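# The sketch below shows one way such an overlap ratio could be computed. It is a
# hypothetical helper for illustration only (not used elsewhere in this module) and
# assumes axis-aligned vertical spans given as (y_min, y_max) tuples.
def vertical_overlap_ratio(box, line):
    """Fraction of the box's height that falls inside the line's vertical span."""
    overlap = min(box[1], line[1]) - max(box[0], line[0])
    height = box[1] - box[0]
    if height <= 0:
        return 0.0
    return max(0.0, overlap) / height
# A skewed box would then be allocated to a line when, for example,
# vertical_overlap_ratio(box, line) >= OVERLAPPING_ALLOCATION_THRESHOLD.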
# The entity kind in datastore to query to find previous assignments
DATASTORE_KIND_CATEGORY_ASSIGNMENT = "category_item_mapping"
DATASTORE_KIND_CATEGORIES = "category"
DATASTORE_KIND_TRANSACTIONS = "transaction"
""" SESSION VARIABLES """
debug = False
# Initiate gcp clients
datastore_client = gcp_clients.init_service_client(service="datastore")
storage_client = gcp_clients.init_service_client(service="storage")
""" STORAGE BUCKET """
def upload_blob(file, user, store_name):
""" Uploads a file to the bucket.
params:
file (blob): file to upload
user (str): name of user that uploaded file
store_name (str): name of the store of the receipt
"""
# storage_client = storage.Client()
bucket = storage_client.bucket(UNPROCESSED_BUCKET_NAME)
now = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')
destination_blob_name = "image" + now
blob = bucket.blob(destination_blob_name)
# Add the uploading user to the metadata
metadata = {"uploaded_by": user, "store_name": store_name}
blob.metadata = metadata
# Upload file
blob.upload_from_file(file)
print('Blob {} uploaded to bucket {}.'.format(
file.filename,
bucket.name
))
""" DATASTORE """
def get_missing_category(max_records):
"""
:param max_records (int): number of items to return
:return: a number of unclassified items from the category assignment
registry
"""
query = datastore_client.query(kind=DATASTORE_KIND_CATEGORY_ASSIGNMENT)
query.add_filter("cat_id", "=", -1)
# Fetch first X results
q_result = query.fetch(limit=max_records)
return list(q_result)
def get_all_categories():
""" Get all the categories in the db
:return: key:value pairs from the DB containing the categories
"""
query = datastore_client.query(kind=DATASTORE_KIND_CATEGORIES)
q_result = query.fetch()
categories = {}
# Add the dummy value for unassigned
categories[-1] = "Ikke tildelt"
categories[0] = "Ikke tildelt"
for entity in list(q_result):
key = entity.key.id_or_name
category = entity["cat_name"]
categories[key] = category
return categories
def get_main_categories(categories):
""" Given a list of categories, returns the top level categories with name"""
main_categories = {}
for key in categories.keys():
if len(str(key)) <= 2:
main_categories[key] = categories[key]
return main_categories
def update_item_category(updates):
""" Does a bulk update of item-category mappings
:param updates (dict): key: partial key to the item name, value: new category (int)
"""
# {'avokado modnet 2pk': 804}
for item in updates:
key = datastore_client.key(DATASTORE_KIND_CATEGORY_ASSIGNMENT, item)
task = datastore_client.get(key)
new_id = updates[item]
if new_id.isnumeric():
new_id = int(new_id)
task["cat_id"] = new_id
datastore_client.put(task)
else:
print("Found an error in datastore.py > update_item_category. Non-numeric category id")
def get_all_entities_from_kind_as_df(entity_kind):
"""
Gets all transactions for a given kind
Returns: pandas.DataFrame
"""
import pandas as pd
query = datastore_client.query(kind=entity_kind)
q_result = query.fetch()
df = pd.DataFrame(q_result)
# TODO, make sure keys are also included
return df
def upload_categories():
"""
    Uploads categories based on an Excel file
TODO: TEST A BIT BEFORE RERUNNING!
"""
import pandas as pd
filepath = "../data/varekategorier.xlsx"
cats =
|
pd.read_excel(filepath)
|
pandas.read_excel
|
import pandas as pd
def combine_reciprocal_hits(keep_df, other_df):
"""
"""
missed_samples = set(other_df.index.values).difference(
set(keep_df.index.values))
for each in missed_samples:
hit = other_df.loc[each, 'B_id']
if hit not in keep_df['B_id'].values:
new_row = [hit] + [None for i in range(keep_df.shape[1] - 1)]
keep_df.loc[each] = new_row
return keep_df
def combine_single_hits(keep_df, other_df):
"""
"""
new_spus = set(other_df['subject'].unique()).difference(
keep_df['B_id'].values)
for spu in new_spus:
scores = other_df['bitscore'][other_df['subject'] == spu]
row = [scores.idxmax()] + [None for i in range(keep_df.shape[1] - 1)]
keep_df.loc[spu] = row
return keep_df
def add_uniprot_annotations(sample_df, uniprot):
"""
"""
gene_df = pd.DataFrame(index=uniprot.index.values,
columns=["UniProt.ID", "UniProt.Name"],
dtype=str)
for idx in uniprot.index.values:
prot_id, prot_name = uniprot.loc[idx, 'subject'].split('|')[1:]
if isinstance(prot_id, str) and isinstance(prot_name, str):
gene_df.loc[idx, 'UniProt.ID'] = prot_id
gene_df.loc[idx, 'UniProt.Name'] = prot_name
return pd.concat([sample_df, gene_df], axis=1, join='outer', sort=False)
def add_interpro_annotations(sample_df, interpro_file):
"""
"""
data = {'evm': [], 'IPR.IDs': [], 'IPR.Desc': []}
with open(interpro_file, 'r') as f:
for line in f:
line = line.strip().split('\t')
evm = line[0]
ipr_ids = []
desc_ids = []
for each in line[2:]:
ipr, desc = each.split(';')
ipr_ids.append(ipr.strip())
desc_ids.append(desc.strip())
data['evm'].append(evm)
data['IPR.IDs'].append(';'.join(ipr_ids))
data['IPR.Desc'].append(';'.join(desc_ids))
ipr = pd.DataFrame(data)
ipr.set_index('evm', inplace=True)
return pd.concat([sample_df, ipr], axis=1, join='outer', sort=False)
def add_kegg_annotations(sample_df, kegg_file):
"""
"""
data = {'evm': [], 'KEGG.IDs': []}
with open(kegg_file, 'r') as f:
for line in f:
line = line.strip().split('\t')
data['evm'].append(line[0])
data['KEGG.IDs'].append(line[4])
kegg = pd.DataFrame(data)
kegg.set_index('evm', inplace=True)
return pd.concat([sample_df, kegg], axis=1, join='outer', sort=False)
def add_ncbi_annotations(sample_df, ncbi):
    """Append an NCBI.ID column parsed from the BLAST subject field."""
    gene_df = pd.DataFrame(index=ncbi.index.values,
                           columns=["NCBI.ID"], dtype=str)
    for idx in ncbi.index.values:
        gene_df.loc[idx, 'NCBI.ID'] = ncbi.loc[idx, 'subject'].split('|')[-2]
    return pd.concat([sample_df, gene_df], axis=1, join='outer', sort=False)
def add_trembl_annotations(sample_df, trembl):
    """Append a TrEMBL.ID column parsed from the BLAST subject field."""
    gene_df = pd.DataFrame(index=trembl.index.values,
                           columns=["TrEMBL.ID"], dtype=str)
    for idx in trembl.index.values:
        gene_df.loc[idx, 'TrEMBL.ID'] = trembl.loc[idx, 'subject'].split('|')[1]
    return pd.concat([sample_df, gene_df], axis=1, join='outer', sort=False)
if __name__ == "__main__":
blast_columns = ['subject', 'perc.id', 'length', 'mismatch', 'gapopen',
'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
protein_models = pd.read_csv("/home/dakota/SequenceData/GenomeAnnotations/ProteinModels_SPU_BestHits_peptide.txt",
sep='\t', index_col=0)
transcripts_pep = pd.read_csv("/home/dakota/SequenceData/GenomeAnnotations/SPU_BestHits_peptide.txt",
sep='\t', index_col=0)
transcripts_nuc = pd.read_csv("/home/dakota/SequenceData/GenomeAnnotations/SPU_BestHits.txt",
sep='\t', index_col=0)
homologues = pd.read_csv("/home/dakota/SequenceData/GenomeAnnotations/best_spu_aligns.blastn",
sep='\t', header=None, index_col=0,
names=blast_columns)
uniprot = pd.read_csv("/home/dakota/SequenceData/GenomeAnnotations/Echinoderm_project/sea_urchin/5. gene_function_annotation/Lytechinus_variegatus_EVM_out_pep.SwissProt.blast",
sep='\t', header=None, index_col=0,
names=blast_columns)
interpro_file = "/home/dakota/SequenceData/GenomeAnnotations/Echinoderm_project/sea_urchin/5. gene_function_annotation/Lytechinus_variegatus_EVM_out_pep.ipr"
kegg_file = "/home/dakota/SequenceData/GenomeAnnotations/Echinoderm_project/sea_urchin/5. gene_function_annotation/Lytechinus_variegatus_EVM_out_pep.KEGG.blast"
ncbi = pd.read_csv("/home/dakota/SequenceData/GenomeAnnotations/Echinoderm_project/sea_urchin/5. gene_function_annotation/Lytechinus_variegatus_EVM_out_pep.nr.blast",
sep='\t', header=None, index_col=0, names=blast_columns)
trembl = pd.read_csv("/home/dakota/SequenceData/GenomeAnnotations/Echinoderm_project/sea_urchin/5. gene_function_annotation/Lytechinus_variegatus_EVM_out_pep.TrEMBL.blast",
sep='\t', header=None, index_col=0,
names=blast_columns)
annotations = combine_reciprocal_hits(pd.DataFrame(protein_models['B_id']),
pd.DataFrame(transcripts_pep['B_id']))
annotations = combine_reciprocal_hits(annotations,
|
pd.DataFrame(transcripts_nuc)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
        # .loc follows a different rule; temporarily disabled
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
        self._assert_setitem_series_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
            raise NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, True, exp, np.complex128)
values = pd.Series([True, False, True, True])
exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
def test_where_index_complex128(self):
pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = pd.Series([1, 1, 1, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1, 6, 1, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# bool + float -> float
exp = pd.Series([1.0, 1.1, 1.0, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1.0, 6.6, 1.0, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# bool + complex -> complex
exp = pd.Series([1, 1 + 1j, 1, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1, 6 + 6j, 1, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# bool + bool -> bool
exp = pd.Series([True, True, True, True])
self._assert_where_conversion(obj, cond, True, exp, np.bool)
values = pd.Series([True, False, True, True])
exp = pd.Series([True, False, True, True])
self._assert_where_conversion(obj, cond, values, exp, np.bool)
def test_where_index_bool(self):
pass
def test_where_series_datetime64(self):
obj = pd.Series([
|
pd.Timestamp('2011-01-01')
|
pandas.Timestamp
|
"""
Retrieves howfairis variables for an input file of retrieved Github repositories.
"""
import os
import time
from datetime import datetime
import argparse
from howfairis import Repo, Checker
import pandas as pd
from dotenv import load_dotenv
def get_howfairis_compliance(url_repo):
"""Retrieve howfairis compliance - see https://github.com/fair-software/howfairis
Args:
url_repo (string): repository URL
Returns:
repository (bool): Whether repository is publicly accessible with version control
license (bool): Whether repository has a license
registry (bool): Whether code is in a registry
citation (bool): Whether software is citable
checklist (bool): Whether a software quality checklist is used
"""
repo = Repo(url_repo)
checker = Checker(repo, is_quiet=True)
compliance = checker.check_five_recommendations()
return (compliance.repository, compliance.license, compliance.registry,
compliance.citation, compliance.checklist)
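# Example of how the helper above might be called (the URL is illustrative only):
#   repository, license_, registry, citation, checklist = \
#       get_howfairis_compliance("https://github.com/fair-software/howfairis")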
def read_input_file(file_path):
"""reads in the input file through Pandas
Args:
file_path (string): path to the file
Returns:
DataFrame
"""
if "xlsx" in file_path:
file = pd.read_excel(file_path, engine='openpyxl')
else:
file = pd.read_csv(file_path)
return file
def parse_repo(repo_url):
"""Parses a repository for howfairis variables
Args:
repo_url (string): repository that should be parsed
Returns:
list: a list with the repository url and the variables
"""
request_successful = False
while not request_successful:
try:
entry = [repo_url]
result = get_howfairis_compliance(repo_url)
entry.extend(result)
print(entry)
time.sleep(2)
request_successful = True
return entry
except Exception as e: # pylint: disable=broad-except
print(f"Error occured for {repo_url} (most likely timeout issue due"
f" to API limitation. Sleep for a while. Error message: {e}")
if "Something went wrong asking the repo for its default branch" in str(
e):
print("Skipping repository...")
request_successful = True # skip this repo
elif "TimeoutError" in str(e):
time.sleep(5)
else:
time.sleep(1500)
# if unauthorized API is used, rate limit is lower,
# leading to a ban and waiting time needs to be increased
# see: https://github.com/fair-software/howfairis/#rate-limit
load_dotenv()
token = os.getenv('GITHUB_TOKEN')
user = os.getenv('GITHUB_USER')
if token is not None and user is not None:
os.environ['APIKEY_GITHUB'] = user + ":" + token
if __name__ == '__main__':
# Initiate the parser
parser = argparse.ArgumentParser()
# Add arguments to be parsed
parser.add_argument(
"--input",
"-i",
help="The file name of the retrieved repositories.",
default="../collect_repositories/results/repositories_filtered.csv")
parser.add_argument("--output",
"-o",
help="The file name of the filtered repositories.",
default="results/repositories_howfairis.csv")
# Read arguments from the command line
args = parser.parse_args()
print(f"Retrieving howfairis variables for the following file: {args.input}")
df_repos = read_input_file(args.input)
howfairis_variables = []
for counter, url in enumerate(df_repos["html_url"]):
howfairis_variables.append(parse_repo(url))
if counter % 10 == 0:
print(f"Parsed {counter} out of {len(df_repos.index)} repos.")
df_howfairis = pd.DataFrame(howfairis_variables,
columns=[
"html_url", "howfairis_repository",
"howfairis_license", "howfairis_registry",
"howfairis_citation", "howfairis_checklist"
])
df_repo_merged =
|
pd.merge(df_repos, df_howfairis, how="left", on='html_url')
|
pandas.merge
|
import copy
from functools import partial
from unittest import TestCase
import numpy as np
import pandas as pd
import pandas.testing
from pypika import Table
from pypika.analytics import Sum
from fireant import DataSet, DataType, Field, Rollup
from fireant.tests.dataset.mocks import (
CumSum,
ElectionOverElection,
dimx0_metricx1_df,
dimx0_metricx2_df,
dimx1_date_df,
dimx1_date_operation_df,
dimx1_num_df,
dimx1_str_df,
dimx2_date_str_df,
dimx2_date_str_ref_df,
mock_dataset,
no_index_df,
test_database,
)
from fireant.utils import alias_selector as f
from fireant.widgets.pandas import Pandas
def format_float(x, is_raw=False):
if pd.isnull(x):
return ''
if x in [np.inf, -np.inf]:
return 'Inf'
return f'{x:.0f}' if is_raw else f'{x:,.0f}'
format_float_raw = partial(format_float, is_raw=True)
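# Illustrative behaviour (assuming standard Python float formatting):
#   format_float(1234.56) -> '1,235', format_float_raw(1234.56) -> '1235',
#   format_float(np.nan) -> '', format_float(np.inf) -> 'Inf'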
pd.set_option('display.max_columns', 500)
|
pd.set_option('display.width', 1000)
|
pandas.set_option
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# <NAME>
#
import os
import pandas as pd
import json
def main():
#
# Args --------------------------------------------------------------------
#
study_prefix = 'FINNGEN_R6_'
# Manifest files from Finngen release
in_finngen = 'configs/inputs/r6_finngen.json'
in_gs_path_list = 'configs/inputs/gcs_input_paths_finngen.txt'
# List of completed datasets on GCS
# gsutil ls "gs://genetics-portal-sumstats-b38/unfiltered/gwas/*/_SUCCESS" > configs/inputs/gcs_completed_paths.txt
# Use `in_completed_path_list = None` if first run
in_completed_path_list = 'configs/inputs/gcs_completed_paths.txt'
# Path to write main manifest file
out_manifest = 'configs/finngen.manifest.json'
# Output directory for sumstats on GCS
out_gs_path = 'gs://genetics-portal-dev-sumstats/unfiltered/gwas_220212/'
keep_columns = [
'code',
'trait',
'trait_category',
'n_cases',
'n_controls'
]
finngen = (
pd.read_json(path_or_buf=in_finngen, lines=True)
.rename(
columns={
'phenocode': 'code',
'phenostring': 'trait',
'category': 'trait_category',
'num_cases': 'n_cases',
'num_controls': 'n_controls'
}
)
)
finngen = finngen[keep_columns]
finngen['code'] = study_prefix + finngen['code']
finngen['n_total'] = finngen['n_cases'] + finngen['n_controls']
gcs = pd.read_csv(in_gs_path_list, sep='\t', header=None, names=['in_path'])
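    # parse_code is defined elsewhere in the original script; it presumably derives the
    # Finngen phenotype code from each GCS path using the given prefix and split string.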
gcs['code'] = gcs['in_path'].apply(parse_code, prefix=study_prefix, splitBy='finngen_R6_')
merged =
|
pd.merge(gcs, finngen, on='code')
|
pandas.merge
|
import os
import pandas as pd
norm = {'Altura': [1.65, 1.85,1.88],
'Peso': [75, 90,80]}
df =
|
pd.DataFrame(norm)
|
pandas.DataFrame
|
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
def xml_to_csv(path):
xml_list = []
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
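        # Assumes the Pascal VOC annotation layout: each <object> has the class name as
        # its first child (member[0]) and a <bndbox> with xmin/ymin/xmax/ymax as member[4].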
for member in root.findall('object'):
value = (os.path.join(os.getcwd(), "images", root.find('filename').text),
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text),
member[0].text
)
xml_list.append(value)
xml_df =
|
pd.DataFrame(xml_list, columns=None)
|
pandas.DataFrame
|
import os
import h5py
import pandas as pd
import numpy as np
def convert_nodes(nodes_file, node_types_file, **params):
is_h5 = False
try:
h5file = h5py.File(nodes_file, 'r')
is_h5 = True
except Exception as e:
pass
if is_h5:
update_h5_nodes(nodes_file, node_types_file, **params)
return
update_csv_nodes(nodes_file, node_types_file, **params)
# columns which need to be renamed, key is original name and value is the updated name
column_renames = {
'id': 'node_id',
'model_id': 'node_type_id',
'electrophysiology': 'dynamics_params',
'level_of_detail': 'model_type',
'morphology': 'morphology',
'params_file': 'dynamics_params',
'x_soma': 'x',
'y_soma': 'y',
'z_soma': 'z'
}
def update_h5_nodes(nodes_file, node_types_file, network_name, output_dir='output',
column_order=('node_type_id', 'model_type', 'model_template', 'model_processing', 'dynamics_params',
'morphology')):
# open nodes and node-types into a single table
input_h5 = h5py.File(nodes_file, 'r')
output_name = '{}_nodes.h5'.format(network_name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
nodes_output_fn = os.path.join(output_dir, output_name)
# save nodes hdf5
with h5py.File(nodes_output_fn, 'w') as h5:
#h5.copy()
#grp = h5.create_group('/nodes/{}'.format(network_name))
#input_grp = input_h5['/nodes/']
nodes_path = '/nodes/{}'.format(network_name)
h5.copy(input_h5['/nodes/'], nodes_path)
grp = h5[nodes_path]
grp.move('node_gid', 'node_id')
grp.move('node_group', 'node_group_id')
node_types_csv = pd.read_csv(node_types_file, sep=' ')
node_types_csv = node_types_csv.rename(index=str, columns=column_renames)
# Change values for model type
model_type_map = {
'biophysical': 'biophysical',
'point_IntFire1': 'point_process',
'intfire': 'point_process',
'virtual': 'virtual',
'iaf_psc_alpha': 'nest:iaf_psc_alpha',
'filter': 'virtual'
}
node_types_csv['model_type'] = node_types_csv.apply(lambda row: model_type_map[row['model_type']], axis=1)
# Add model_template column
def model_template(row):
model_type = row['model_type']
if model_type == 'biophysical':
return 'ctdb:Biophys1.hoc'
elif model_type == 'point_process':
return 'nrn:IntFire1'
else:
return 'NONE'
node_types_csv['model_template'] = node_types_csv.apply(model_template, axis=1)
# Add model_processing column
def model_processing(row):
model_type = row['model_type']
if model_type == 'biophysical':
return 'aibs_perisomatic'
else:
return 'NONE'
node_types_csv['model_processing'] = node_types_csv.apply(model_processing, axis=1)
# Reorder columns
orig_columns = node_types_csv.columns
col_order = [cn for cn in column_order if cn in orig_columns]
col_order += [cn for cn in node_types_csv.columns if cn not in column_order]
node_types_csv = node_types_csv[col_order]
# Save node-types csv
node_types_output_fn = os.path.join(output_dir, '{}_node_types.csv'.format(network_name))
node_types_csv.to_csv(node_types_output_fn, sep=' ', index=False, na_rep='NONE')
# open nodes and node-types into a single table
'''
print('loading csv files')
nodes_tmp = pd.read_csv(nodes_file, sep=' ')
node_types_tmp = pd.read_csv(node_types_file, sep=' ')
nodes_df = pd.merge(nodes_tmp, node_types_tmp, on='node_type_id')
n_nodes = len(nodes_df.index)
# rename required columns
nodes_df = nodes_df.rename(index=str, columns=column_renames)
# Old versions of node_type_id may be set to strings/floats, convert to integers
dtype_ntid = nodes_df['node_type_id'].dtype
if dtype_ntid == 'object':
# if string, move model_id to pop_name and create an integer node_type_id column
if 'pop_name' in nodes_df.columns:
nodes_df = nodes_df.drop('pop_name', axis=1)
nodes_df = nodes_df.rename(index=str, columns={'node_type_id': 'pop_name'})
ntid_map = {pop_name: indx for indx, pop_name in enumerate(nodes_df['pop_name'].unique())}
nodes_df['node_type_id'] = nodes_df.apply(lambda row: ntid_map[row['pop_name']], axis=1)
elif dtype_ntid == 'float64':
nodes_df['node_type_id'] = nodes_df['node_type_id'].astype('uint64')
# divide columns up into nodes and node-types columns, and for nodes determine which columns are valid for every
# node-type. The rules are
# 1. If all values are the same for a node-type-id, column belongs in node_types csv. If there's any intra
# node-type heterogenity then the column belongs in the nodes h5.
# 2. For nodes h5 columns, a column belongs to a node-type-id if it contains at least one non-null value
print('parsing input')
opt_columns = [n for n in nodes_df.columns if n not in ['node_id', 'node_type_id']]
heterogeneous_cols = {cn: False for cn in opt_columns}
nonnull_cols = {} # for each node-type, a list of columns that contains at least one non-null value
for node_type_id, nt_group in nodes_df.groupby(['node_type_id']):
nonnull_cols[node_type_id] = set(nt_group.columns[nt_group.isnull().any() == False].tolist())
for col_name in opt_columns:
heterogeneous_cols[col_name] |= len(nt_group[col_name].unique()) > 1
nodes_columns = set(cn for cn, val in heterogeneous_cols.items() if val)
nodes_types_columns = [cn for cn, val in heterogeneous_cols.items() if not val]
# Check for nodes columns that has non-numeric values, these will require some special processing to save to hdf5
string_nodes_columns = set()
for col_name in nodes_columns:
if nodes_df[col_name].dtype == 'object':
string_nodes_columns.add(col_name)
if len(string_nodes_columns) > 0:
print('Warning: column(s) {} have non-numeric values that vary within a node-type and will be stored in h5 format'.format(list(string_nodes_columns)))
# Divide the nodes columns into groups and create neccessary lookup tables. If two node-types share the same
# non-null columns then they belong to the same group
grp_idx2cols = {} # group-id --> group-columns
grp_cols2idx = {} # group-columns --> group-id
grp_id2idx = {} # node-type-id --> group-id
group_index = -1
for nt_id, cols in nonnull_cols.items():
group_columns = sorted(list(nodes_columns & cols))
col_key = tuple(group_columns)
if col_key in grp_cols2idx:
grp_id2idx[nt_id] = grp_cols2idx[col_key]
else:
group_index += 1
grp_cols2idx[col_key] = group_index
grp_idx2cols[group_index] = group_columns
grp_id2idx[nt_id] = group_index
# merge x,y,z columns, if they exists, into 'positions' dataset
grp_pos_cols = {}
for grp_idx, cols in grp_idx2cols.items():
pos_list = []
for coord in ['x', 'y', 'z']:
if coord in cols:
pos_list += coord
grp_idx2cols[grp_idx].remove(coord)
if len(pos_list) > 0:
grp_pos_cols[grp_idx] = pos_list
# Create the node_group and node_group_index columns
nodes_df['__bmtk_node_group'] = nodes_df.apply(lambda row: grp_id2idx[row['node_type_id']], axis=1)
nodes_df['__bmtk_node_group_index'] = [0]*n_nodes
for grpid in grp_idx2cols.keys():
group_size = len(nodes_df[nodes_df['__bmtk_node_group'] == grpid])
nodes_df.loc[nodes_df['__bmtk_node_group'] == grpid, '__bmtk_node_group_index'] = range(group_size)
# Save nodes.h5 file
nodes_output_fn = os.path.join(output_dir, '{}_nodes.h5'.format(network_name))
node_types_output_fn = os.path.join(output_dir, '{}_node_types.csv'.format(network_name))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
print('Creating {}'.format(nodes_output_fn))
with h5py.File(nodes_output_fn, 'w') as hf:
hf.create_dataset('nodes/node_gid', data=nodes_df['node_id'], dtype='uint64')
hf['nodes/node_gid'].attrs['network'] = network_name
hf.create_dataset('nodes/node_type_id', data=nodes_df['node_type_id'], dtype='uint64')
hf.create_dataset('nodes/node_group', data=nodes_df['__bmtk_node_group'], dtype='uint32')
hf.create_dataset('nodes/node_group_index', data=nodes_df['__bmtk_node_group_index'], dtype='uint64')
for grpid, cols in grp_idx2cols.items():
group_slice = nodes_df[nodes_df['__bmtk_node_group'] == grpid]
for col_name in cols:
dataset_name = 'nodes/{}/{}'.format(grpid, col_name)
if col_name in string_nodes_columns:
# for columns with non-numeric values
dt = h5py.special_dtype(vlen=bytes)
hf.create_dataset(dataset_name, data=group_slice[col_name], dtype=dt)
else:
hf.create_dataset(dataset_name, data=group_slice[col_name])
# special case for positions
if grpid in grp_pos_cols:
hf.create_dataset('nodes/{}/positions'.format(grpid),
data=group_slice.as_matrix(columns=grp_pos_cols[grpid]))
# Save the node_types.csv file
print('Creating {}'.format(node_types_output_fn))
node_types_table = nodes_df[['node_type_id'] + nodes_types_columns]
node_types_table = node_types_table.drop_duplicates()
if len(sort_order) > 0:
node_types_table = node_types_table.sort_values(by=sort_order)
node_types_table.to_csv(node_types_output_fn, sep=' ', index=False) # , na_rep='NONE')
'''
def update_csv_nodes(nodes_file, node_types_file, network_name, output_dir='network',
column_order=('node_type_id', 'model_type', 'model_template', 'model_processing',
'dynamics_params', 'morphology')):
# open nodes and node-types into a single table
print('loading csv files')
nodes_tmp = pd.read_csv(nodes_file, sep=' ')
node_types_tmp = pd.read_csv(node_types_file, sep=' ')
if 'model_id' in nodes_tmp:
nodes_df = pd.merge(nodes_tmp, node_types_tmp, on='model_id')
elif 'node_type_id' in nodes_tmp:
nodes_df =
|
pd.merge(nodes_tmp, node_types_tmp, on='node_type_id')
|
pandas.merge
|
import pandas as pd
from pandas.plotting import radviz
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
#import the model we are using
from sklearn.ensemble import RandomForestRegressor
from sklearn import svm, preprocessing
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn import linear_model
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import Perceptron
def read(file):
return pd.read_csv(file)
driver_id = read('driver_ids.csv')
ride_id = read('ride_ids.csv')
ride_time = read('ride_timestamps.csv')
#Get rid of these rows in the dataset
ride_time = ride_time[ride_time.event != 'requested_at']
ride_time = ride_time[ride_time.event != 'accepted_at']
ride_time = ride_time[ride_time.event != 'picked_up_at']
ride_time = ride_time[ride_time.event != 'arrived_at']
#To numpy form
driver_id_np = (driver_id).to_numpy()
ride_id_np = (ride_id).to_numpy()
ride_time_np = (ride_time).to_numpy()
# To list
driver_id_li = (driver_id_np).tolist()
ride_id_li = (ride_id_np).tolist()
ride_time_li = (ride_time_np).tolist()
# Calculate Fare
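# Assumed fare model (inferred from the constants below): distance charge
# (metres -> miles via 0.000621371, at 1.15 per mile) + time charge (seconds -> minutes,
# at 0.22 per minute) + a 2.00 base fare, all scaled by the prime-time percentage,
# plus a 1.75 service fee, with the total clamped to the [5, 400] range.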
for i in range(len(ride_id_li)):
fare = (ride_id_li[i][2]*1.15*0.000621371 + ride_id_li[i][3]*0.22/60 + 2) * ((100+ride_id_li[i][4])/100) + 1.75
if fare < 5:
fare = 5
if fare >400:
fare = 400
ride_id_li[i].append(fare)
# Print out one of the fare
print(ride_id_li[0][5])
# Collecting fares for each driver using hashmap
driver_faredict = {}
for i in ride_id_li:
if i[0] not in driver_faredict:
driver_faredict[i[0]] = [i[5]]
else:
driver_faredict[i[0]].append(i[5])
# Print out all fares given one driver identity
print(driver_faredict['052bba06c5fc0bdea4bc2f9cb92b37c7'])
# map driver id to the ride id
driver_rideiddict = {}
for i in ride_id_li:
if i[0] not in driver_rideiddict:
driver_rideiddict[i[0]] = [i[1]]
else:
driver_rideiddict[i[0]].append(i[1])
# print ride id given a driver id
print(driver_rideiddict['007f0389f9c7b03ef97098422f902e62'])
# Reverse the dictionary
rideid_driverdict = {}
for i in driver_rideiddict.keys():
for j in driver_rideiddict[i]:
rideid_driverdict[j] = [i]
# print
print(rideid_driverdict['01f133164433ea7682545a41643e6949'])
# Map ride id to driver id and ride's specific time
rideid_driver_timedict = {}
for i in ride_time_li:
if i[0] in rideid_driverdict:
rideid_driverdict[i[0]].append(i[2])
for i in rideid_driverdict.keys():
if len(rideid_driverdict[i]) == 2:
rideid_driver_timedict[i] = rideid_driverdict[i]
print(rideid_driver_timedict['07f9b5246c8431e3e5bac56d9f48b4f9'])
signal = 0
for i in rideid_driver_timedict.values():
if len(i) != 2:
        signal += 1
# Sanity check: number of entries without exactly [driver id, drop-off time] (should be 0)
print(signal)
# Map driver id to all rides' time
driver_alltripsdict = {}
for i in rideid_driver_timedict.keys():
if rideid_driver_timedict[i][0] not in driver_alltripsdict:
driver_alltripsdict[rideid_driver_timedict[i][0]] = [rideid_driver_timedict[i][1]]
elif rideid_driver_timedict[i][0] in driver_alltripsdict:
driver_alltripsdict[rideid_driver_timedict[i][0]].append(rideid_driver_timedict[i][1])
# Print out all rides' times given a driver id
print(driver_alltripsdict['007f0389f9c7b03ef97098422f902e62'])
# Split the date to only fetch month and day
for i in driver_alltripsdict.keys():
for j in range(len(driver_alltripsdict[i])):
driver_alltripsdict[i][j] = driver_alltripsdict[i][j].split()[0].split("-")[1:]
# Print out all months and days of all rides given a driver id
print(driver_alltripsdict['007f0389f9c7b03ef97098422f902e62'])
# Change dates from string to integer
for i in driver_alltripsdict.keys():
for j in range(len(driver_alltripsdict[i])):
driver_alltripsdict[i][j] = [int(driver_alltripsdict[i][j][0]),int(driver_alltripsdict[i][j][1])]
# Print
print(driver_alltripsdict['052bba06c5fc0bdea4bc2f9cb92b37c7'])
'''last boarding date'''
for i in driver_alltripsdict.keys():
month = 1
day = 1
for j in driver_alltripsdict[i]:
if j[0] > month:
month = j[0]
day = j[1]
elif j[0] == month:
if j[1] > day:
day = j[1]
driver_alltripsdict[i] = [[month, day]]
# print
print(driver_alltripsdict['052bba06c5fc0bdea4bc2f9cb92b37c7'])
# Add last boarding date
for i in driver_alltripsdict.keys():
for k in driver_id_li:
if i == k[0]:
driver_alltripsdict[i].append(k[1].split()[0].split("-")[1:])
# print onboarding date and last boarding date
print(driver_alltripsdict['052bba06c5fc0bdea4bc2f9cb92b37c7'])
'''convert to int'''
for i in driver_alltripsdict.keys():
for j in range(len(driver_alltripsdict[i])):
driver_alltripsdict[i][j] = [int(driver_alltripsdict[i][j][0]),int(driver_alltripsdict[i][j][1])]
print(driver_alltripsdict['052bba06c5fc0bdea4bc2f9cb92b37c7'])
driver_on_offdict = {}
for i in driver_alltripsdict.keys():
if len(driver_alltripsdict[i]) == 2:
driver_on_offdict[i] = driver_alltripsdict[i]
signal = 0
for i in driver_on_offdict.keys():
if len(driver_alltripsdict[i]) != 2:
signal += 1
# print
print(signal)
'''calculate date difference'''
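# Note: each month is approximated as 30 days below, so the resulting tenure in days
# is only an estimate.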
for i in driver_on_offdict.keys():
lastMonth = driver_on_offdict[i][0][0]
lastDay = driver_on_offdict[i][0][1]
startMonth = driver_on_offdict[i][1][0]
startDay = driver_on_offdict[i][1][1]
startDays = (startMonth - 1) * 30 + startDay
lastDays = (lastMonth - 1) * 30 + lastDay
difference = lastDays - startDays
driver_on_offdict[i] = difference
# print the date difference of a driver
print(driver_on_offdict['fff482c704d36a1afe8b8978d5486283'])
'''Average Lifetime Value'''
farelist = []
farelistnew = []
for i in driver_on_offdict.keys():
for k in driver_faredict.keys():
if i == k:
farelist.append(driver_faredict[k])
for i in farelist:
sum3 = 0
for k in i:
sum3 += k
farelistnew.append(sum3)
# Print the total fare of a driver
print(farelistnew[0])
sum4 = 0
for i in farelistnew:
sum4 += i
newaverage = sum4/len(farelistnew)
# Print average total fare
print(round(newaverage, 3))
'''Average days spent in Lyft'''
averageDay = 0
for i in driver_on_offdict.keys():
averageDay += driver_on_offdict[i]
averageDay /= len(driver_on_offdict)
# Print out average days
print(round(averageDay, 3))
'''feature1: Driver ID'''
Driver_ID_LIST = []
for i in driver_on_offdict.keys():
Driver_ID_LIST.append(i)
# Convert to numpy array
driver_id_nparray = np.asarray(Driver_ID_LIST).reshape(-1,1)
# Check Shape
print(driver_id_nparray.shape)
'''feature2: Days in Lyft'''
Driver_day_list = []
for i in Driver_ID_LIST:
if i in driver_on_offdict:
Driver_day_list.append(driver_on_offdict[i])
# Convert to numpy array
driver_days_nparray = np.asarray(Driver_day_list).reshape(-1,1)
# Check Shape
print(driver_days_nparray.shape)
"""Mix 2 features together """
a = np.append(driver_id_nparray,driver_days_nparray,axis = 1)
# Print out the numpy array
print(a)
'''Target: Life Time Value'''
LTV_dict = {}
Driver_LTV_list = []
for i in driver_on_offdict.keys():
for j in driver_faredict.keys():
if (i == j):
LTV_dict[j] = driver_faredict[j]
for i in LTV_dict.keys():
total = 0
for j in LTV_dict[i]:
total += j
LTV_dict[i] = total
for i in Driver_ID_LIST:
if i in LTV_dict:
Driver_LTV_list.append(LTV_dict[i])
# Convert to numpy array
driver_LTV_nparrays = np.asarray(Driver_LTV_list).reshape(-1,1)
# Check Shape
print(driver_LTV_nparrays.shape)
'''feature3: Primetime Percentage for each driver'''
Driver_total_primetime = {}
for i in range(len(Driver_ID_LIST)):
for k in range(len(ride_id_li)):
if Driver_ID_LIST[i] == ride_id_li[k][0]:
Driver_total_primetime[Driver_ID_LIST[i]] = []
for i in range(len(Driver_ID_LIST)):
for k in range(len(ride_id_li)):
if Driver_ID_LIST[i] == ride_id_li[k][0]:
print("yes")
Driver_total_primetime[Driver_ID_LIST[i]].append(ride_id_li[k][4])
# Print out relative level of prime time
print(Driver_total_primetime['007f0389f9c7b03ef97098422f902e62'])
for i in Driver_total_primetime.keys():
sum1 = 0
for k in Driver_total_primetime[i]:
sum1 += k
num = len(Driver_total_primetime[i])
Driver_total_primetime[i] = sum1/num
# calculate prime time value for each driver as a unique feature
print(Driver_total_primetime['007f0389f9c7b03ef97098422f902e62'])
Driver_primetime_list = []
for i in Driver_total_primetime:
Driver_primetime_list.append(Driver_total_primetime[i])
# Convert to numpy array
Driver_primetime_nparrays = np.asarray(Driver_primetime_list).reshape(-1,1)
# Check shape if it is consistent with others
print(Driver_primetime_nparrays.shape)
'''feature4: total ride distance'''
driver_dist_dict = {}
for i in ride_id_li:
if i[0] not in driver_dist_dict:
driver_dist_dict[i[0]] = [i[2]]
else:
driver_dist_dict[i[0]].append(i[2])
for i in driver_dist_dict.keys():
total_dist = 0
num_rides = 0
for j in driver_dist_dict[i]:
total_dist += j
num_rides += 1
driver_dist_dict[i] = [total_dist, num_rides]
print(driver_dist_dict["002be0ffdc997bd5c50703158b7c2491"])
total_dist_list = []
for i in Driver_ID_LIST:
if i in driver_dist_dict:
total_dist_list.append(driver_dist_dict[i][0])
# Convert to numpy array
total_dist_nparray = np.asarray(total_dist_list).reshape(-1,1)
# Check Shape
print(total_dist_nparray.shape)
'''feature5: total number of rides'''
total_numrides_list = []
for i in Driver_ID_LIST:
if i in driver_dist_dict:
total_numrides_list.append(driver_dist_dict[i][1])
# Convert to numpy array
total_numrides_nparray = np.asarray(total_numrides_list).reshape(-1,1)
# Check shape
print(total_numrides_nparray.shape)
'''feature6: total duration'''
driver_time_dict = {}
for i in ride_id_li:
if i[0] not in driver_time_dict:
driver_time_dict[i[0]] = [i[3]]
else:
driver_time_dict[i[0]].append(i[3])
for i in driver_time_dict.keys():
total_time = 0
for j in driver_time_dict[i]:
total_time += j
driver_time_dict[i] = total_time / 60
print(driver_time_dict["002be0ffdc997bd5c50703158b7c2491"])
total_time_list = []
for i in Driver_ID_LIST:
if i in driver_time_dict:
total_time_list.append(driver_time_dict[i])
# Convert to numpy array
total_time_nparray = np.asarray(total_time_list).reshape(-1,1)
# Check shape
print(total_time_nparray.shape)
"""feature7: month (categorical) ---> march"""
driver_onboard_dict = {}
for i in driver_id_li:
driver_onboard_dict[i[0]] = int(i[1].split()[0].split("-")[1])
print(driver_onboard_dict["11506b81721ca68ef019764de3d8edbd"])
march_list = []
for i in Driver_ID_LIST:
if i in driver_onboard_dict:
if driver_onboard_dict[i] == 3:
march_list.append(1)
else:
march_list.append(0)
march_nparray = np.asarray(march_list).reshape(-1,1)
print(march_nparray.shape)
"""feature7: month (categorical) ---> april"""
april_list = []
for i in Driver_ID_LIST:
if i in driver_onboard_dict:
if driver_onboard_dict[i] == 4:
april_list.append(1)
else:
april_list.append(0)
april_nparray = np.asarray(april_list).reshape(-1,1)
print(april_nparray.shape)
"""feature7: month (categorical) ---> may"""
may_list = []
for i in Driver_ID_LIST:
if i in driver_onboard_dict:
if driver_onboard_dict[i] == 5:
may_list.append(1)
else:
may_list.append(0)
may_nparray = np.asarray(may_list).reshape(-1,1)
print(may_nparray.shape)
'''feature8: accepted to pickup interval time'''
ride_timenew = read('ride_timestamps.csv')
ride_timenew = ride_timenew[ride_timenew.event != 'requested_at']
ride_timenew = ride_timenew[ride_timenew.event != 'dropped_off_at']
ride_timenew = ride_timenew[ride_timenew.event != 'arrived_at']
ride_time_npnew = (ride_timenew).to_numpy()
ride_time_linew = (ride_time_npnew).tolist()
accept_pick_intervaldict = {}
for k in ride_time_linew:
if k[0] in rideid_driverdict:
if k[0] not in accept_pick_intervaldict:
accept_pick_intervaldict[k[0]] = [k[2].split()[1]]
else:
accept_pick_intervaldict[k[0]].append(k[2].split()[1])
print(accept_pick_intervaldict["00006efeb0d5e3ccad7d921ddeee9900"])
for i in accept_pick_intervaldict.keys():
accept_time = accept_pick_intervaldict[i][0].split(":")
pick_time = accept_pick_intervaldict[i][1].split(":")
accept_hour = int(accept_time[0])
accept_minute = int(accept_time[1])
accept_second = int(accept_time[2])
pick_hour = int(pick_time[0])
pick_minute = int(pick_time[1])
pick_second = int(pick_time[2])
if pick_hour == 0 and accept_hour == 23:
pick_hour = 24
time_difference = (pick_hour - accept_hour) * 60 + (pick_minute - accept_minute) + (pick_second - accept_second)/60
time_difference = round(time_difference, 2)
accept_pick_intervaldict[i] = [time_difference]
for i in rideid_driverdict.keys():
if i in accept_pick_intervaldict:
accept_pick_intervaldict[i].append(rideid_driverdict[i][0])
print(accept_pick_intervaldict["00006efeb0d5e3ccad7d921ddeee9900"])
driver_interval_dict = {}
for i in accept_pick_intervaldict.keys():
if accept_pick_intervaldict[i][1] not in driver_interval_dict:
driver_interval_dict[accept_pick_intervaldict[i][1]] = [accept_pick_intervaldict[i][0]]
else:
driver_interval_dict[accept_pick_intervaldict[i][1]].append(accept_pick_intervaldict[i][0])
for i in driver_interval_dict:
total = 0
num = 0
for j in driver_interval_dict[i]:
total += j
num += 1
driver_interval_dict[i] = total / num
print(driver_interval_dict["039da9c077e17af98ca8530e4d7975f1"])
accept_pick_list = []
for i in Driver_ID_LIST:
if i in driver_interval_dict:
accept_pick_list.append(driver_interval_dict[i])
accept_pick_nparray = np.asarray(accept_pick_list).reshape(-1,1)
print(accept_pick_nparray.shape)
"""mix together"""
#featurelist = np.append(driver_id_nparray,driver_days_nparray,axis = 1)
featurelist = 0
featurelist = np.append(driver_days_nparray, Driver_primetime_nparrays,axis = 1)
featurelist = np.append(featurelist,total_dist_nparray ,axis = 1)
featurelist = np.append(featurelist, total_numrides_nparray,axis = 1)
featurelist = np.append(featurelist, total_time_nparray,axis = 1)
featurelist = np.append(featurelist,march_nparray ,axis = 1)
featurelist = np.append(featurelist, april_nparray,axis = 1)
featurelist = np.append(featurelist,may_nparray ,axis = 1)
featurelist = np.append(featurelist,accept_pick_nparray,axis = 1)
featurelist = np.array(featurelist)
#feature scaling
featurelist = StandardScaler().fit_transform(featurelist)
# Get first row of the featurelist
print(featurelist[0])
LFTlist = np.array(driver_LTV_nparrays).reshape(len(driver_LTV_nparrays))
# Check Shape
print(LFTlist.shape)
# training-testing split
train_features, test_features, train_labels, test_labels = train_test_split(featurelist, LFTlist, test_size = 0.25, random_state = 42)
# Check Shape
print('Training Features Shape: ', train_features.shape)
print('Training Labels Shape: ', train_labels.shape)
print('Testing Features Shape: ', test_features.shape)
print('Testing Labels Shape: ', test_labels.shape)
"""method1: random forest"""
rf = RandomForestRegressor(n_estimators = 1, random_state = 187)
rf.fit(train_features, train_labels)
#Make Predictions
#Use the forest's predict method on the test data
predictions = rf.predict(test_features)
#Calculate the absolute errors
errors = abs(predictions - test_labels)
#print out the mean absolute error
print('Mean Absolute Error:', round(np.mean(errors), 2), 'dollars.')
# Calculate mean absolute percentage error
mape = 100 * (errors / test_labels)
# Calculate and display accuracy
accuracy = 100 - np.mean(mape)
print('Accuracy:', round(accuracy, 2), '%.')
"""Linear Regression"""
clf = svm.SVR(kernel = "linear")
clf.fit(train_features,train_labels)
print(clf.score(train_features, train_labels))
errors = 0
pmax_pred = np.empty(test_labels.shape)
i = 0
for X,y in zip(test_features, test_labels):
print(f"Model: {clf.predict([X])[0]}, Actual: {y}")
errors += abs(clf.predict([X])[0] - y)
pmax_pred[i] = clf.predict([X])[0]
i += 1
errors = errors / test_features.shape[0]
print('Mean Absolute Error:', round(errors, 2), 'dollars.')
# Calculate mean absolute percentage error
mape = 100 * (errors / test_labels)
# Calculate and display accuracy
accuracy = 100 - np.mean(mape)
print('Accuracy:', round(accuracy, 2), '%.')
print(pmax_pred)
print(test_labels)
"""Generate graph for linear regression"""
from pandas import DataFrame
new = DataFrame(pmax_pred, test_labels)
new.to_csv("line_to_scatter_converter.csv")
mid =
|
pd.read_csv("line_to_scatter_converter.csv")
|
pandas.read_csv
|
from datetime import date, timedelta, datetime
import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta
from sqlalchemy import func, or_, and_, text, column
from app import db
from app.context import get_import_date, get_import_id
from app.models import OckovaciMisto, Okres, Kraj, OckovaciMistoMetriky, CrMetriky, OckovaniRegistrace, Populace, \
PrakticiKapacity, OckovaniRezervace, OckovaniLide, Vakcina, ZdravotnickeStredisko, OckovaciZarizeni
def unique_nrpzs_subquery():
"""Returns unique NRPZS within all centers."""
return db.session.query(OckovaciMisto.nrpzs_kod) \
.group_by(OckovaciMisto.nrpzs_kod) \
.having(func.count(OckovaciMisto.nrpzs_kod) == 1)
def unique_nrpzs_active_subquery():
"""Returns unique NRPZS within active centers."""
return db.session.query(OckovaciMisto.nrpzs_kod) \
.filter(OckovaciMisto.status == True) \
.group_by(OckovaciMisto.nrpzs_kod) \
.having(func.count(OckovaciMisto.nrpzs_kod) == 1)
def has_unique_nrpzs(center_id):
res = db.session.query(func.count(OckovaciMisto.id)) \
.filter(OckovaciMisto.id == center_id) \
.filter(or_(and_(OckovaciMisto.status == True, OckovaciMisto.nrpzs_kod.in_(unique_nrpzs_active_subquery())),
and_(OckovaciMisto.status == False, OckovaciMisto.nrpzs_kod.in_(unique_nrpzs_subquery())))) \
.one()
return res[0] == 1
def find_kraj_options():
return db.session.query(Kraj.nazev, Kraj.id).order_by(Kraj.nazev).all()
def find_centers(filter_column, filter_value):
centers = db.session.query(OckovaciMisto.id, OckovaciMisto.nazev, Okres.nazev.label("okres"),
Kraj.nazev_kratky.label("kraj"), Kraj.id.label("kraj_id"), OckovaciMisto.longitude,
OckovaciMisto.latitude, OckovaciMisto.adresa, OckovaciMisto.status,
OckovaciMisto.bezbarierovy_pristup, OckovaciMisto.vekove_skupiny, OckovaciMisto.typ,
OckovaciMisto.davky, OckovaciMisto.vakciny,
OckovaciMistoMetriky.registrace_fronta, OckovaciMistoMetriky.registrace_prumer_cekani,
OckovaciMistoMetriky.ockovani_odhad_cekani,
OckovaciMistoMetriky.registrace_fronta_prumer_cekani,
OckovaciMistoMetriky.registrace_pred_zavorou) \
.join(OckovaciMistoMetriky) \
.outerjoin(Okres, OckovaciMisto.okres_id == Okres.id) \
.outerjoin(Kraj, Okres.kraj_id == Kraj.id) \
.filter(filter_column == filter_value) \
.filter(OckovaciMistoMetriky.datum == get_import_date()) \
.filter(or_(OckovaciMisto.status == True, OckovaciMistoMetriky.registrace_fronta > 0,
OckovaciMistoMetriky.rezervace_cekajici > 0, OckovaciMisto.typ == 'WALKIN')) \
.filter(OckovaciMisto.typ != 'AČR') \
.group_by(OckovaciMisto.id, OckovaciMisto.nazev, Okres.id, Kraj.id, OckovaciMisto.longitude,
OckovaciMisto.latitude, OckovaciMisto.adresa, OckovaciMisto.status,
OckovaciMisto.bezbarierovy_pristup, OckovaciMisto.vekove_skupiny, OckovaciMisto.typ,
OckovaciMisto.davky, OckovaciMisto.vakciny, OckovaciMistoMetriky.registrace_fronta,
OckovaciMistoMetriky.registrace_prumer_cekani, OckovaciMistoMetriky.ockovani_odhad_cekani,
OckovaciMistoMetriky.registrace_fronta_prumer_cekani, OckovaciMistoMetriky.registrace_pred_zavorou) \
.order_by(Kraj.nazev_kratky, Okres.nazev, OckovaciMisto.nazev) \
.all()
return centers
def find_third_doses_centers():
center_ids = db.session.query(OckovaniRezervace.ockovaci_misto_id) \
.distinct() \
.filter(OckovaniRezervace.kalendar_ockovani == 'V3') \
.all()
return [center[0] for center in center_ids]
def find_centers_vaccine_options():
return db.session.query(func.unnest(OckovaciMisto.vakciny).label('vyrobce')).order_by('vyrobce').distinct().all()
def find_doctor_offices(nrpzs_kod):
df = pd.read_sql_query(
f"""
select coalesce(z.zarizeni_nazev, min(s.nazev_cely)) zarizeni_nazev, o.nazev okres, k.nazev kraj,
k.nazev_kratky kraj_kratky, s.druh_zarizeni, s.obec, s.psc, s.ulice, s.cislo_domu, s.telefon, s.email,
s.web, s.latitude, s.longitude
from ockovaci_zarizeni z
full join zdravotnicke_stredisko s on s.nrpzs_kod = z.id
left join okresy o on o.id = coalesce(z.okres_id, s.okres_kod)
join kraje k on k.id = o.kraj_id
where z.id = '{nrpzs_kod}' or s.nrpzs_kod = '{nrpzs_kod}'
group by z.zarizeni_nazev, o.nazev, k.nazev, k.nazev_kratky, s.druh_zarizeni, s.obec, s.psc, s.ulice,
s.cislo_domu, s.telefon, s.email, s.web, s.latitude, s.longitude
""",
db.engine)
return df
NRPZS_PEDIATRICIAN_CODE = 321
def find_doctors(okres_id=None, kraj_id=None):
okres_id_sql = 'null' if okres_id is None else f"'{okres_id}'"
kraj_id_sql = 'null' if kraj_id is None else f"'{kraj_id}'"
df = pd.read_sql_query(
f"""
select z.id, z.zarizeni_nazev, o.nazev okres, o.kraj_id, k.nazev kraj, k.nazev_kratky kraj_kratky,
z.provoz_ukoncen, m.ockovani_pocet_davek, m.ockovani_pocet_davek_zmena_tyden, m.ockovani_vakciny_7,
count(n.nrpzs_kod) nabidky,
case when s.druh_zarizeni_kod = {NRPZS_PEDIATRICIAN_CODE} then true else false end pediatr
from ockovaci_zarizeni z
left join zdravotnicke_stredisko s on s.nrpzs_kod = z.id
left join okresy o on o.id = z.okres_id
join kraje k on k.id = o.kraj_id
left join zarizeni_metriky m on m.zarizeni_id = z.id and m.datum = '{get_import_date()}'
left join (
select left(zdravotnicke_zarizeni_kod, 11) nrpzs_kod
from praktici_kapacity n
where n.pocet_davek > 0 and (n.expirace is null or n.expirace >= '{get_import_date()}')
) n on n.nrpzs_kod = z.id
where prakticky_lekar = True
and (z.okres_id = {okres_id_sql} or {okres_id_sql} is null)
and (o.kraj_id = {kraj_id_sql} or {kraj_id_sql} is null)
group by z.id, z.zarizeni_nazev, o.nazev, o.kraj_id, k.nazev, k.nazev_kratky, z.provoz_ukoncen,
m.ockovani_pocet_davek, m.ockovani_pocet_davek_zmena_tyden, m.ockovani_vakciny_7, pediatr
order by k.nazev_kratky, o.nazev, z.zarizeni_nazev
""",
db.engine
)
df['ockovani_vakciny_7'] = df['ockovani_vakciny_7'].replace({None: ''})
df['provoz_ukoncen'] = df['provoz_ukoncen'].astype('bool')
df['ockovani_pocet_davek'] = df['ockovani_pocet_davek'].replace({np.nan: 0})
df['ockovani_pocet_davek_zmena_tyden'] = df['ockovani_pocet_davek_zmena_tyden'].replace({np.nan: 0})
return df
def find_doctors_map():
df = pd.read_sql_query(
f"""
select z.id, z.zarizeni_nazev, z.provoz_ukoncen, s.latitude, s.longitude, m.ockovani_pocet_davek,
m.ockovani_pocet_davek_zmena_tyden, m.ockovani_vakciny_7, count(n.nrpzs_kod) nabidky,
case when s.druh_zarizeni_kod = {NRPZS_PEDIATRICIAN_CODE} then true else false end pediatr
from ockovaci_zarizeni z
left join zdravotnicke_stredisko s on s.nrpzs_kod = z.id
left join zarizeni_metriky m on m.zarizeni_id = z.id and m.datum = '{get_import_date()}'
left join (
select left(zdravotnicke_zarizeni_kod, 11) nrpzs_kod
from praktici_kapacity n
where n.pocet_davek > 0 and (n.expirace is null or n.expirace >= '{get_import_date()}')
) n on n.nrpzs_kod = z.id
where prakticky_lekar = True
group by z.id, z.zarizeni_nazev, z.provoz_ukoncen, s.latitude, s.longitude, m.ockovani_pocet_davek,
m.ockovani_pocet_davek_zmena_tyden, m.ockovani_vakciny_7, pediatr
""",
db.engine
)
df['ockovani_vakciny_7'] = df['ockovani_vakciny_7'].replace({None: ''})
df['provoz_ukoncen'] = df['provoz_ukoncen'].astype('bool')
df['latitude'] = df['latitude'].replace({np.nan: None})
df['longitude'] = df['longitude'].replace({np.nan: None})
df['ockovani_pocet_davek'] = df['ockovani_pocet_davek'].replace({np.nan: 0})
df['ockovani_pocet_davek_zmena_tyden'] = df['ockovani_pocet_davek_zmena_tyden'].replace({np.nan: 0})
return df
def find_doctors_vaccine_options():
return db.session.query(Vakcina.vyrobce) \
.join(OckovaniLide, Vakcina.vakcina == OckovaniLide.vakcina) \
.distinct(Vakcina.vyrobce) \
.order_by(Vakcina.vyrobce) \
.all()
def find_free_vaccines_available(nrpzs_kod=None, okres_id=None, kraj_id=None):
return db.session.query(PrakticiKapacity.datum_aktualizace, PrakticiKapacity.pocet_davek,
PrakticiKapacity.typ_vakciny, PrakticiKapacity.mesto, PrakticiKapacity.nazev_ordinace,
PrakticiKapacity.deti, PrakticiKapacity.dospeli, PrakticiKapacity.kontakt_tel,
PrakticiKapacity.kontakt_email, PrakticiKapacity.expirace, PrakticiKapacity.poznamka,
PrakticiKapacity.kraj, ZdravotnickeStredisko.nrpzs_kod, ZdravotnickeStredisko.latitude,
ZdravotnickeStredisko.longitude) \
.outerjoin(ZdravotnickeStredisko, ZdravotnickeStredisko.zdravotnicke_zarizeni_kod == PrakticiKapacity.zdravotnicke_zarizeni_kod) \
.filter(or_(func.left(PrakticiKapacity.zdravotnicke_zarizeni_kod, 11) == nrpzs_kod, nrpzs_kod is None)) \
.filter(or_(ZdravotnickeStredisko.okres_kod == okres_id, okres_id is None)) \
.filter(or_(ZdravotnickeStredisko.kraj_kod == kraj_id, kraj_id is None)) \
.filter(PrakticiKapacity.pocet_davek > 0) \
.filter(or_(PrakticiKapacity.expirace == None, PrakticiKapacity.expirace >= get_import_date())) \
.order_by(PrakticiKapacity.kraj, PrakticiKapacity.mesto, PrakticiKapacity.nazev_ordinace,
PrakticiKapacity.typ_vakciny) \
.all()
def find_free_vaccines_vaccine_options():
return db.session.query(PrakticiKapacity.typ_vakciny) \
.filter(PrakticiKapacity.pocet_davek > 0) \
.filter(or_(PrakticiKapacity.expirace == None, PrakticiKapacity.expirace >= get_import_date())) \
.distinct(PrakticiKapacity.typ_vakciny) \
.order_by(PrakticiKapacity.typ_vakciny) \
.all()
def count_vaccines_center(center_id):
mista = pd.read_sql_query(
"""
select ockovaci_mista.id ockovaci_misto_id, ockovaci_mista.nazev, okres_id, kraj_id
from ockovaci_mista join okresy on ockovaci_mista.okres_id=okresy.id
where ockovaci_mista.id='{}';
""".format(center_id),
db.engine
)
prijato = pd.read_sql_query(
"""
select ockovaci_misto_id, vyrobce, sum(pocet_davek) prijato
from ockovani_distribuce
where akce = 'Příjem' and ockovaci_misto_id = '{}' and datum < '{}'
group by (ockovaci_misto_id, vyrobce);
""".format(center_id, get_import_date()),
db.engine
)
prijato_odjinud = pd.read_sql_query(
"""
select cilove_ockovaci_misto_id ockovaci_misto_id, vyrobce, sum(pocet_davek) prijato_odjinud
from ockovani_distribuce
where akce = 'Výdej' and cilove_ockovaci_misto_id = '{}' and datum < '{}'
group by (cilove_ockovaci_misto_id, vyrobce);
""".format(center_id, get_import_date()),
db.engine
)
vydano = pd.read_sql_query(
"""
select ockovaci_misto_id, vyrobce, sum(pocet_davek) vydano
from ockovani_distribuce
where akce = 'Výdej' and ockovaci_misto_id = '{}' and datum < '{}'
group by (ockovaci_misto_id, vyrobce);
""".format(center_id, get_import_date()),
db.engine
)
spotreba = pd.read_sql_query(
"""
select ockovaci_misto_id, vyrobce, sum(pouzite_davky) pouzito, sum(znehodnocene_davky) znehodnoceno
from ockovani_spotreba
where ockovaci_misto_id = '{}' and datum < '{}'
group by (ockovaci_misto_id, vyrobce);
""".format(center_id, get_import_date()),
db.engine
)
vyrobci = pd.read_sql_query("select vyrobce from vakciny;", db.engine)
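    # Cross join every center with every manufacturer by merging on a dummy 'join'
    # column, then left-join the received/issued/used counts per manufacturer.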
mista_key = mista
mista_key['join'] = 0
vyrobci_key = vyrobci
vyrobci_key['join'] = 0
df = mista_key.merge(vyrobci_key).drop('join', axis=1)
df = pd.merge(df, prijato, how="left")
df = pd.merge(df, prijato_odjinud, how="left")
df = pd.merge(df, vydano, how="left")
df = pd.merge(df, spotreba, how="left")
df['prijato'] = df['prijato'].fillna(0).astype('int')
df['prijato_odjinud'] = df['prijato_odjinud'].fillna(0).astype('int')
df['vydano'] = df['vydano'].fillna(0).astype('int')
df['pouzito'] = df['pouzito'].fillna(0).astype('int')
df['znehodnoceno'] = df['znehodnoceno'].fillna(0).astype('int')
df['prijato_celkem'] = df['prijato'] + df['prijato_odjinud'] - df['vydano']
df['skladem'] = df['prijato_celkem'] - df['pouzito'] - df['znehodnoceno']
df = df.groupby(by=['vyrobce'], as_index=False).sum().sort_values(by=['vyrobce'])
df = df[(df['prijato_celkem'] > 0) | (df['pouzito'] > 0) | (df['znehodnoceno'] > 0)]
return df
def count_vaccines_kraj(kraj_id):
mista = pd.read_sql_query(
"""
select ockovaci_mista.id ockovaci_misto_id, ockovaci_mista.nazev, kraj_id
from ockovaci_mista join okresy on ockovaci_mista.okres_id=okresy.id
where kraj_id='{}';
""".format(kraj_id),
db.engine
)
mista_ids = ','.join("'" + misto + "'" for misto in mista['ockovaci_misto_id'].tolist())
prijato = pd.read_sql_query(
"""
select ockovaci_misto_id, vyrobce, sum(pocet_davek) prijato
from ockovani_distribuce
where akce = 'Příjem' and ockovaci_misto_id in ({}) and datum < '{}'
group by (ockovaci_misto_id, vyrobce);
""".format(mista_ids, get_import_date()),
db.engine
)
prijato_odjinud = pd.read_sql_query(
"""
select cilove_ockovaci_misto_id ockovaci_misto_id, vyrobce, sum(pocet_davek) prijato_odjinud
from ockovani_distribuce
where akce = 'Výdej' and cilove_ockovaci_misto_id in ({}) and ockovaci_misto_id not in({}) and datum < '{}'
group by (cilove_ockovaci_misto_id, vyrobce);
""".format(mista_ids, mista_ids, get_import_date()),
db.engine
)
vydano = pd.read_sql_query(
"""
select ockovaci_misto_id, vyrobce, sum(pocet_davek) vydano
from ockovani_distribuce
where akce = 'Výdej' and ockovaci_misto_id in ({}) and cilove_ockovaci_misto_id not in({}) and cilove_ockovaci_misto_id != '-' and datum < '{}'
group by (ockovaci_misto_id, vyrobce);
""".format(mista_ids, mista_ids, get_import_date()),
db.engine
)
spotreba = pd.read_sql_query(
"""
select ockovaci_misto_id, vyrobce, sum(znehodnocene_davky) znehodnoceno
from ockovani_spotreba
where ockovaci_misto_id in ({}) and datum < '{}'
group by (ockovaci_misto_id, vyrobce);
""".format(mista_ids, get_import_date()),
db.engine
)
ockovano = pd.read_sql_query(
"""
select kraj_nuts_kod kraj_id, sum(pocet) ockovano, vyrobce
from ockovani_lide
join vakciny on vakciny.vakcina = ockovani_lide.vakcina
where kraj_nuts_kod = '{}' and datum < '{}'
group by kraj_nuts_kod, vyrobce
""".format(kraj_id, get_import_date()),
db.engine
)
vyrobci = pd.read_sql_query("select vyrobce from vakciny;", db.engine)
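    # Same dummy-key cross join as in count_vaccines_center: make sure every
    # (center, manufacturer) pair exists before the left merges below.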
mista_key = mista
mista_key['join'] = 0
vyrobci_key = vyrobci
vyrobci_key['join'] = 0
df = mista_key.merge(vyrobci_key).drop('join', axis=1)
df = pd.merge(df, prijato, how="left")
df = pd.merge(df, prijato_odjinud, how="left")
df = pd.merge(df, vydano, how="left")
df = pd.merge(df, spotreba, how="left")
df['prijato'] = df['prijato'].fillna(0).astype('int')
df['prijato_odjinud'] = df['prijato_odjinud'].fillna(0).astype('int')
df['vydano'] = df['vydano'].fillna(0).astype('int')
df['znehodnoceno'] = df['znehodnoceno'].fillna(0).astype('int')
df = df.groupby(by=['kraj_id', 'vyrobce'], as_index=False).sum()
df = pd.merge(df, ockovano, how="left")
df['ockovano'] = df['ockovano'].fillna(0).astype('int')
df['prijato_celkem'] = df['prijato'] + df['prijato_odjinud'] - df['vydano']
df['skladem'] = df['prijato_celkem'] - df['ockovano'] - df['znehodnoceno']
df = df.groupby(by=['vyrobce'], as_index=False).sum().sort_values(by=['vyrobce'])
df = df[(df['prijato_celkem'] > 0) | (df['ockovano'] > 0) | (df['znehodnoceno'] > 0)]
return df
def count_vaccines_cr():
prijato = pd.read_sql_query(
"""
select vyrobce, sum(pocet_davek) prijato
from ockovani_distribuce
where akce = 'Příjem' and datum < '{}'
group by (vyrobce);
""".format(get_import_date()),
db.engine
)
spotreba = pd.read_sql_query(
"""
select vyrobce, sum(znehodnocene_davky) znehodnoceno
from ockovani_spotreba
where datum < '{}'
group by (vyrobce);
""".format(get_import_date()),
db.engine
)
ockovano = pd.read_sql_query(
"""
select sum(pocet) ockovano, vyrobce
from ockovani_lide
join vakciny on vakciny.vakcina = ockovani_lide.vakcina
where datum < '{}'
group by vyrobce
""".format(get_import_date()),
db.engine
)
vyrobci = pd.read_sql_query("select vyrobce from vakciny;", db.engine)
df = pd.merge(vyrobci, prijato, how="left")
df = pd.merge(df, spotreba, how="left")
df['prijato'] = df['prijato'].fillna(0).astype('int')
df['znehodnoceno'] = df['znehodnoceno'].fillna(0).astype('int')
df = df.groupby(by=['vyrobce'], as_index=False).sum()
df = pd.merge(df, ockovano, how="left")
df['ockovano'] = df['ockovano'].fillna(0).astype('int')
df['prijato_celkem'] = df['prijato']
df['skladem'] = df['prijato_celkem'] - df['ockovano'] - df['znehodnoceno']
df = df.groupby(by=['vyrobce'], as_index=False).sum().sort_values(by=['vyrobce'])
df = df[(df['prijato_celkem'] > 0) | (df['ockovano'] > 0) | (df['znehodnoceno'] > 0)]
return df
def count_registrations(filter_column, filter_value):
mista = pd.read_sql_query(
"""
select ockovaci_mista.id ockovaci_misto_id from ockovaci_mista join okresy on ockovaci_mista.okres_id=okresy.id
where {}='{}';
""".format(filter_column, filter_value),
db.engine
)
mista_ids = ','.join("'" + misto + "'" for misto in mista['ockovaci_misto_id'].tolist())
if len(mista_ids) == 0:
return pd.DataFrame()
df = pd.read_sql_query(
f"""
select *
from ockovani_registrace
where import_id = {get_import_id()} and ockovaci_misto_id in({mista_ids})
and datum + '90 days'::interval >= '{get_import_date()}'
""",
db.engine
)
if df.empty:
return df
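    # Build per-row helper columns (queue size, waiting time in days, 7/14-day
    # reservation windows), aggregate per age group and profession, and convert the
    # weighted average waiting times to weeks (division by 7).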
df['dnes'] = get_import_date()
df['datum_rezervace_fix'] = df['datum_rezervace'].where(df['datum_rezervace'] != date(1970, 1, 1))
df['fronta_pocet'] = df[['pocet']].where(df['rezervace'] == False).fillna(0).astype('int')
df['fronta_cekani'] = (df['dnes'] - df['datum']).astype('timedelta64[ns]').dt.days
df['fronta_pocet_x_cekani'] = df['fronta_pocet'] * df['fronta_cekani']
df['s_terminem_pocet'] = df[['pocet']].where((df['rezervace'] == True) & (df['ockovani'] < 1)).fillna(0).astype('int')
df['registrace_7'] = df[['pocet']].where(df['datum'] >= get_import_date() - timedelta(7))
df['registrace_7_rez'] = df[['pocet']].where((df['rezervace'] == True) & (df['datum'] >= get_import_date() - timedelta(7)))
# df['registrace_14'] = df[['pocet']].where(df['datum'] >= get_import_date() - timedelta(14))
# df['registrace_14_rez'] = df[['pocet']].where((df['rezervace'] == True) & (df['datum'] >= get_import_date() - timedelta(14)))
# df['registrace_30'] = df[['pocet']].where(df['datum'] >= get_import_date() - timedelta(30))
# df['registrace_30_rez'] = df[['pocet']].where((df['rezervace'] == True) & (df['datum'] >= get_import_date() - timedelta(30)))
df['rezervace_7_cekani'] = (df['datum_rezervace_fix'] - df['datum']).astype('timedelta64[ns]').dt.days
df['rezervace_7_pocet'] = df[['pocet']].where((df['rezervace'] == True) & (df['datum_rezervace_fix'] >= get_import_date() - timedelta(7)))
df['rezervace_7_pocet_x_cekani'] = df['rezervace_7_cekani'] * df['rezervace_7_pocet']
df['rezervace_14_pocet'] = df[['pocet']].where((df['rezervace'] == True) & (df['datum_rezervace_fix'] >= get_import_date() - timedelta(14)))
df = df.groupby(['vekova_skupina', 'povolani']).sum()
df['uspesnost_7'] = ((df['registrace_7_rez'] / df['registrace_7']) * 100).replace({np.nan: None})
# df['uspesnost_14'] = ((df['registrace_14_rez'] / df['registrace_14']) * 100).replace({np.nan: None})
# df['uspesnost_30'] = ((df['registrace_30_rez'] / df['registrace_30']) * 100).replace({np.nan: None})
df['registrace_7'] = df['registrace_7'].astype('int')
df['registrace_7_rez'] = df['registrace_7_rez'].astype('int')
# df['registrace_14'] = df['registrace_14'].astype('int')
# df['registrace_14_rez'] = df['registrace_14_rez'].astype('int')
# df['registrace_30'] = df['registrace_30'].astype('int')
# df['registrace_30_rez'] = df['registrace_30_rez'].astype('int')
df['fronta_prumer_cekani'] = ((df['fronta_pocet_x_cekani'] / df['fronta_pocet']) / 7).replace({np.nan: None})
df['rezervace_prumer_cekani'] = ((df['rezervace_7_pocet_x_cekani'] / df['rezervace_7_pocet']) / 7).replace({np.nan: None})
df = df[(df['fronta_pocet'] > 0) | df['fronta_prumer_cekani'].notnull() | df['rezervace_prumer_cekani'].notnull() | df['uspesnost_7'].notnull()]
return df.reset_index().sort_values(by=['vekova_skupina', 'povolani'])
def count_vaccinated(kraj_id=None):
ockovani = pd.read_sql_query(
"""
select vekova_skupina, coalesce(sum(pocet) filter(where poradi_davky = 1), 0) pocet_ockovani_castecne,
coalesce(sum(pocet) filter(where poradi_davky = davky), 0) pocet_ockovani_plne,
coalesce(sum(pocet) filter(where poradi_davky = 3), 0) pocet_ockovani_posilujici
from ockovani_lide o
join vakciny v on v.vakcina = o.vakcina
where datum < '{}' and (kraj_bydl_nuts = '{}' or {})
group by vekova_skupina
order by vekova_skupina
""".format(get_import_date(), kraj_id, kraj_id is None),
db.engine
)
if kraj_id is not None:
mista = pd.read_sql_query(
"""
select ockovaci_mista.id ockovaci_misto_id from ockovaci_mista join okresy on ockovaci_mista.okres_id=okresy.id
where kraj_id='{}';
""".format(kraj_id),
db.engine
)
mista_ids = ','.join("'" + misto + "'" for misto in mista['ockovaci_misto_id'].tolist())
else:
mista_ids = "''"
registrace = pd.read_sql_query(
"""
select vekova_skupina, sum(pocet) filter (where rezervace = false and ockovani < 1) pocet_fronta,
sum(pocet) filter (where rezervace = true and ockovani < 1) pocet_s_terminem
from ockovani_registrace
where import_id = {} and (ockovaci_misto_id in({}) or {})
group by vekova_skupina
""".format(get_import_id(), mista_ids, kraj_id is None),
db.engine
)
populace = pd.read_sql_query(
"""
select vekova_skupina, sum(pocet) pocet_vek, min_vek
from populace p
join populace_kategorie k on (k.min_vek <= vek and k.max_vek >= vek)
where orp_kod = '{}'
group by vekova_skupina
""".format('CZ0' if kraj_id is None else kraj_id),
db.engine
)
ockovani['vekova_skupina'] = ockovani['vekova_skupina'].replace(['nezařazeno'], 'neuvedeno')
merged = pd.merge(ockovani, registrace, how="left")
merged = pd.merge(merged, populace, how="left")
merged['pocet_fronta'] = merged['pocet_fronta'].fillna(0).astype('int')
merged['pocet_s_terminem'] = merged['pocet_s_terminem'].fillna(0).astype('int')
merged['podil_ockovani_castecne'] = (merged['pocet_ockovani_castecne'] / merged['pocet_vek']).fillna(0)
merged['podil_ockovani_plne'] = (merged['pocet_ockovani_plne'] / merged['pocet_vek']).fillna(0)
merged['podil_ockovani_posilujici'] = (merged['pocet_ockovani_posilujici'] / merged['pocet_vek']).fillna(0)
merged['zajem'] = ((merged['pocet_fronta'] + merged['pocet_s_terminem'] + merged['pocet_ockovani_castecne'])
/ merged['pocet_vek']).replace({np.nan: None})
return merged
def count_vaccinated_category():
df = pd.read_sql_query(
"""
select indikace_zdravotnik, indikace_socialni_sluzby, indikace_ostatni, indikace_pedagog,
indikace_skolstvi_ostatni, indikace_bezpecnostni_infrastruktura, indikace_chronicke_onemocneni,
coalesce(sum(pocet) filter(where poradi_davky = 1), 0) pocet_ockovani_castecne,
coalesce(sum(pocet) filter(where poradi_davky = davky), 0) pocet_ockovani_plne
from ockovani_lide o
join vakciny v on v.vakcina = o.vakcina
group by indikace_zdravotnik, indikace_socialni_sluzby, indikace_ostatni, indikace_pedagog,
indikace_skolstvi_ostatni, indikace_bezpecnostni_infrastruktura, indikace_chronicke_onemocneni
""",
db.engine
)
df['bez_indikace'] = ~(df['indikace_zdravotnik'] | df['indikace_socialni_sluzby'] | df['indikace_ostatni']
| df['indikace_pedagog'] | df['indikace_skolstvi_ostatni']
| df['indikace_bezpecnostni_infrastruktura'] | df['indikace_chronicke_onemocneni'])
df = df.melt(id_vars=['pocet_ockovani_castecne', 'pocet_ockovani_plne'],
value_vars=['bez_indikace', 'indikace_zdravotnik', 'indikace_socialni_sluzby', 'indikace_ostatni',
'indikace_pedagog', 'indikace_skolstvi_ostatni', 'indikace_bezpecnostni_infrastruktura',
'indikace_chronicke_onemocneni'],
var_name='indikace', value_name='aktivni')
df = df[df['aktivni'] == True].groupby(['indikace']).sum()
labels = {
'bez_indikace': ['bez indikace', ''],
'indikace_zdravotnik': [
'Zdravotník',
'''Zdravotničtí pracovníci (zejména nemocnice, ZZS, primární ambulantní péče, farmaceuti, laboratoře
vyšetřující COVID-19, zdravotníci v sociálních službách), oblast ochrany veřejného zdraví.'''
],
'indikace_socialni_sluzby': ['Sociální služby', 'Pracovníci nebo klienti v sociálních službách.'],
'indikace_ostatni': [
'Ostatní',
'''Pracovníci kritické infrastruktury, kteří zahrnují integrovaný záchranný systém, pracovníky energetiky,
vládu a krizové štáby (osoba není začleněna v indikačních skupinách zdravotník nebo sociální služby).'''
],
'indikace_pedagog': ['Pedagog', 'Pedagogičtí pracovníci.'],
'indikace_skolstvi_ostatni': ['Školství ostatní', 'Ostatní pracovníci ve školství.'],
'indikace_bezpecnostni_infrastruktura': [
'Bezpečnostní infrastruktura',
'Zaměstnanci Ministerstva obrany nebo bezpečnostní sbory.'
],
'indikace_chronicke_onemocneni': [
'Chronické onemocnění',
'''Chronicky nemocní (hematoonkologické onemocnění, onkologické onemocnění (solidní nádory), závažné akutní
nebo dlouhodobé onemocnění srdce, závažné dlouhodobé onemocnění plic, diabetes mellitus, obezita, závažné
dlouhodobé onemocnění ledvin, závažné dlouhodobé onemocnění jater, stav po transplantaci nebo na čekací
listině, hypertenze, závažné neurologické nebo neuromuskulární onemocnění, vrozený nebo získaný kognitivní
deficit, vzácné genetické onemocnění, závažné oslabení imunitního systému, jiné závažné onemocnění).'''],
}
labels_df = pd.DataFrame.from_dict(labels, orient='index', columns=['kategorie', 'popis'])
df = pd.merge(df, labels_df, how='outer', left_on='indikace', right_index=True)
return df.dropna().sort_values(by=['pocet_ockovani_plne'], ascending=False)
def count_reservations_category():
ockovani_skupiny = pd.read_sql_query(
"""
select povolani kategorie, sum(case when rezervace is false and ockovani < 1 then pocet else 0 end) cekajici,
sum(case when rezervace is true and ockovani < 1 then pocet else 0 end) s_terminem,
sum(case when ockovani = 1 then pocet else 0 end) ockovani, sum(pocet) celkem
from ockovani_registrace where import_id={} group by povolani order by sum(pocet) desc
""".format(get_import_id()),
db.engine
)
return ockovani_skupiny
def count_supplies():
df = pd.read_sql_query('select datum, vyrobce, pocet from dodavky_vakcin where pocet > 0', db.engine)
df = df.pivot_table(values='pocet', index='datum', columns='vyrobce', aggfunc='sum')
return df.fillna(0)
def count_end_date_category():
df = pd.read_sql_query(
"""
select vekova_skupina, min(datum) mesic from (
select min(vekova_skupina) vekova_skupina, sum(pocet) * 0.7 populace
from populace_kategorie
join populace on vek >= min_vek and orp_kod = 'CZ0'
where min_vek >= 18
group by vekova_skupina
) t1
join (
select datum, sum(pocet) over (order by datum rows between unbounded preceding and current row) celkem
from (
select datum, sum(pocet / davky) as pocet
from dodavky_vakcin d
join vakciny v on (d.vyrobce = v.vyrobce)
group by datum
) t3
) t2
on populace <= celkem
group by vekova_skupina
order by vekova_skupina
""",
db.engine
)
return df.set_index('mesic')
def count_end_date_vaccinated():
metrics = db.session.query(CrMetriky.ockovani_pocet_castecne_zmena_tyden, CrMetriky.ockovani_pocet_castecne,
CrMetriky.ockovani_pocet_plne_zmena_tyden, CrMetriky.ockovani_pocet_plne,
CrMetriky.pocet_obyvatel_celkem) \
.filter(CrMetriky.datum == get_import_date()) \
.one()
    if metrics is None or metrics.ockovani_pocet_castecne_zmena_tyden is None \
            or metrics.ockovani_pocet_plne_zmena_tyden is None:
return None
population = metrics.pocet_obyvatel_celkem
population_to_vaccinate = population * 0.7
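    # rough linear extrapolation: doses still needed to fully vaccinate 70 % of the population
    # (two doses per person, minus first and second doses already given), divided by the
    # current weekly dose rate and converted to days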
days = (7 * (2 * population_to_vaccinate - metrics.ockovani_pocet_castecne - metrics.ockovani_pocet_plne)) \
/ (metrics.ockovani_pocet_castecne_zmena_tyden + metrics.ockovani_pocet_plne_zmena_tyden)
return get_import_date() + timedelta(days=days)
def count_end_date_supplies():
metrics = db.session.query(CrMetriky.pocet_obyvatel_celkem) \
.filter(CrMetriky.datum == get_import_date()) \
.one()
if metrics is None:
return None
population_to_vaccinate = metrics.pocet_obyvatel_celkem * 0.7
end_date = db.session.query(column('datum')).from_statement(text(
f"""
select datum from (
select datum, sum(pocet) over (order by datum rows between unbounded preceding and current row) as celkem_lidi
from (
select datum, sum(pocet / davky) as pocet
from dodavky_vakcin d
join vakciny v on (d.vyrobce = v.vyrobce)
group by datum
) t1
) t2
where celkem_lidi > {population_to_vaccinate}
order by datum
limit 1
"""
)).one_or_none()
if end_date is None:
return None
months = ['ledna', 'února', 'března', 'dubna', 'května', 'června', 'července', 'srpna', 'září', 'října',
'listopadu', 'prosince']
return months[end_date[0].month - 1] + end_date[0].strftime(" %Y")
def count_end_date_interested():
metrics = db.session.query(CrMetriky.ockovani_pocet_castecne, CrMetriky.ockovani_pocet_plne,
CrMetriky.ockovani_pocet_davek_zmena_tyden) \
.filter(CrMetriky.datum == get_import_date()) \
.one()
if metrics is None or metrics.ockovani_pocet_davek_zmena_tyden is None:
return None
registrations_waiting = db.session.query(func.sum(OckovaniRegistrace.pocet)) \
.filter(OckovaniRegistrace.ockovani < 1) \
.filter(OckovaniRegistrace.import_id == get_import_id()) \
.one()
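    # doses still owed: two per waiting registration plus one per person who has only received
    # the first dose, extrapolated at the current weekly dose rate below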
waiting_count = registrations_waiting[0] * 2 + metrics.ockovani_pocet_castecne - metrics.ockovani_pocet_plne
days = (7 * waiting_count) / metrics.ockovani_pocet_davek_zmena_tyden
return get_import_date() + timedelta(days=days)
def count_eligible():
eligible_population = db.session.query(func.sum(Populace.pocet)) \
.filter(Populace.vek >= 12) \
.filter(Populace.orp_kod == "CZ0") \
.one()
return eligible_population[0]
def count_interest():
metrics = db.session.query(CrMetriky.ockovani_pocet_castecne, CrMetriky.pocet_obyvatel_celkem) \
.filter(CrMetriky.datum == get_import_date()) \
.one()
registrations_waiting = db.session.query(func.sum(OckovaniRegistrace.pocet)) \
.filter(OckovaniRegistrace.ockovani < 1) \
.filter(OckovaniRegistrace.import_id == get_import_id()) \
.one()
interest_eligible = (metrics.ockovani_pocet_castecne + registrations_waiting[0]) / count_eligible()
interest_all = (metrics.ockovani_pocet_castecne + registrations_waiting[0]) / metrics.pocet_obyvatel_celkem
return interest_all, interest_eligible
def count_free_slots(center_id=None):
rezervace_1 = pd.read_sql_query(
"""
select datum, volna_kapacita volna_kapacita_1, maximalni_kapacita maximalni_kapacita_1
from ockovani_rezervace
where ockovaci_misto_id = '{}' and datum >= '{}' and kalendar_ockovani = 'V1' and maximalni_kapacita != 0
order by datum
""".format(center_id, get_import_date()),
db.engine
)
rezervace_3 = pd.read_sql_query(
"""
select datum, volna_kapacita volna_kapacita_3, maximalni_kapacita maximalni_kapacita_3
from ockovani_rezervace
where ockovaci_misto_id = '{}' and datum >= '{}' and kalendar_ockovani = 'V3' and maximalni_kapacita != 0
order by datum
""".format(center_id, get_import_date()),
db.engine
)
if rezervace_1.empty and rezervace_3.empty:
return rezervace_1
elif rezervace_1.empty:
rezervace = rezervace_3
elif rezervace_3.empty:
rezervace = rezervace_1
else:
rezervace = pd.merge(rezervace_1, rezervace_3, how='outer', on='datum')
rezervace = rezervace.set_index('datum')
idx = pd.date_range(rezervace.index.min(), rezervace.index.max())
return rezervace.reindex(idx).fillna(0)
def count_vaccinated_week():
return db.session.query(column('datum'), column('pocet_1'), column('pocet_2'), column('pocet_3'), column('pocet_celkem')).from_statement(text(
f"""
select datum,
sum(case when poradi_davky = 1 then pocet else 0 end) pocet_1,
sum(case when poradi_davky = 2 then pocet else 0 end) pocet_2,
sum(case when poradi_davky = 3 then pocet else 0 end) pocet_3,
sum(pocet) pocet_celkem
from ockovani_lide
where datum >= '{get_import_date() - timedelta(10)}'
group by datum
order by datum
"""
)).all()
def count_top_centers():
return db.session.query(column('zarizeni_nazev'), column('pocet')).from_statement(text(
f"""
select zarizeni_nazev, sum(pocet) pocet
from ockovani_lide
where datum >= '{get_import_date() - timedelta(7)}'
group by zarizeni_nazev
order by pocet desc
limit 10;
"""
)).all()
def count_vaccinated_unvaccinated_comparison():
populace = pd.read_sql_query("select sum(pocet) populace from populace where orp_kod = 'CZ0'", db.engine)
ockovani = pd.read_sql_query(
f"""
select datum,
sum(case when poradi_davky = 1 then pocet else 0 end) populace_ockovani,
sum(case when poradi_davky = v.davky then pocet else 0 end) populace_plne,
sum(case when poradi_davky = 3 then pocet else 0 end) populace_posilujici
from ockovani_lide o
join vakciny v on (o.vakcina = v.vakcina)
where datum < '{get_import_date()}'
group by datum
order by datum
""",
db.engine
)
ockovani[['populace_ockovani', 'populace_plne', 'populace_posilujici']] = \
ockovani[['populace_ockovani', 'populace_plne', 'populace_posilujici']].transform(pd.Series.cumsum)
srovnani = pd.read_sql_query(
f"""
select n.datum,
n.celkem nakazeni_celkem,
n.bez_ockovani nakazeni_bez, n.bez_ockovani_vek_prumer nakazeni_bez_vek,
n.nedokoncene_ockovani nakazeni_castecne, n.nedokoncene_ockovani_vek_prumer nakazeni_castecne_vek,
n.dokoncene_ockovani nakazeni_plne, n.dokoncene_ockovani_vek_prumer nakazeni_plne_vek,
n.posilujici_davka nakazeni_posilujici, n.posilujici_davka_vek_prumer nakazeni_posilujici_vek,
h.celkem hospitalizace_celkem,
h.bez_ockovani hospitalizace_bez, h.bez_ockovani_vek_prumer hospitalizace_bez_vek,
h.nedokoncene_ockovani hospitalizace_castecne, h.nedokoncene_ockovani_vek_prumer hospitalizace_castecne_vek,
h.dokoncene_ockovani hospitalizace_plne, h.dokoncene_ockovani_vek_prumer hospitalizace_plne_vek,
h.posilujici_davka hospitalizace_posilujici, h.posilujici_davka_vek_prumer hospitalizace_posilujici_vek,
j.celkem hospitalizace_jip_celkem,
j.bez_ockovani hospitalizace_jip_bez, j.bez_ockovani_vek_prumer hospitalizace_jip_bez_vek,
j.nedokoncene_ockovani hospitalizace_jip_castecne, j.nedokoncene_ockovani_vek_prumer hospitalizace_jip_castecne_vek,
j.dokoncene_ockovani hospitalizace_jip_plne, j.dokoncene_ockovani_vek_prumer hospitalizace_jip_plne_vek,
j.posilujici_davka hospitalizace_jip_posilujici, j.posilujici_davka_vek_prumer hospitalizace_jip_posilujici_vek,
u.celkem umrti_celkem,
u.bez_ockovani umrti_bez, u.bez_ockovani_vek_prumer umrti_bez_vek,
u.nedokoncene_ockovani umrti_castecne, u.nedokoncene_ockovani_vek_prumer umrti_castecne_vek,
u.dokoncene_ockovani umrti_plne, u.dokoncene_ockovani_vek_prumer umrti_plne_vek,
u.posilujici_davka umrti_posilujici, u.posilujici_davka_vek_prumer umrti_posilujici_vek
from nakazeni_ockovani n
join hospitalizace_ockovani h on h.datum = n.datum
join hospitalizace_jip_ockovani j on j.datum = n.datum
join umrti_ockovani u on u.datum = n.datum
where n.datum < '{get_import_date()}'
""",
db.engine
)
ockovani['key'] = 0
populace['key'] = 0
df = pd.merge(ockovani, populace)
df = df.drop(columns=['key'])
df['populace_bez'] = df['populace'] - df['populace_ockovani']
df['populace_castecne'] = df['populace_ockovani'] - df['populace_plne']
df['populace_plne'] = df['populace_plne'] - df['populace_posilujici']
df = pd.merge(df, srovnani)
df_vek = df.copy()
datasets = ['nakazeni', 'hospitalizace', 'hospitalizace_jip', 'umrti']
groups = ['bez', 'castecne', 'plne', 'posilujici']
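    # 7-day weighted average age per outcome and vaccination group: multiply counts by the
    # reported average age, sum both over a rolling week, then divide the products back by the counts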
for d in datasets:
for g in groups:
df_vek[d + '_' + g + '_mult'] = df_vek[d + '_' + g] * df_vek[d + '_' + g + '_vek']
df_vek = df_vek.rolling(7, on='datum').sum()
for d in datasets:
for g in groups:
df_vek[d + '_' + g + '_vek'] = df_vek[d + '_' + g + '_mult'] / df_vek[d + '_' + g]
df_vek[d + '_' + g + '_vek'] = df_vek[d + '_' + g + '_vek'].replace({np.nan: None})
for d in datasets:
df_vek[d + '_celkem_vek'] = sum([df_vek[d + '_' + g + '_mult'] for g in groups]) / df_vek[d + '_celkem']
df_vek[d + '_celkem_vek'] = df_vek[d + '_celkem_vek'].replace({np.nan: None})
for g in groups:
df_vek['populace_' + g + '_zastoupeni'] = df_vek['populace_' + g] / df_vek['populace']
return df_vek
def count_vaccinated_unvaccinated_comparison_age():
populace = pd.read_sql_query(
"""
select vekova_skupina, sum(pocet) populace, min_vek
from populace p
join populace_kategorie k on (k.min_vek <= vek and k.max_vek >= vek)
where orp_kod = 'CZ0'
group by vekova_skupina
""",
db.engine)
srovnani = pd.read_sql_query(
f"""
select n.tyden, n.tyden_od, n.tyden_do, n.vekova_skupina,
nakazeni_celkem, nakazeni_bez, nakazeni_castecne, nakazeni_plne, nakazeni_posilujici,
hospitalizace_celkem, hospitalizace_bez, hospitalizace_castecne, hospitalizace_plne, hospitalizace_posilujici,
hospitalizace_jip_celkem, hospitalizace_jip_bez, hospitalizace_jip_castecne, hospitalizace_jip_plne, hospitalizace_jip_posilujici
from nakazeni_ockovani_vek n
left join hospitalizace_ockovani_vek h on (h.tyden = n.tyden and h.vekova_skupina = n.vekova_skupina)
left join hospitalizace_jip_ockovani_vek j on (j.tyden = n.tyden and j.vekova_skupina = n.vekova_skupina)
where n.tyden_do < '{get_import_date()}'
""",
db.engine
)
srovnani_max_datum = srovnani['tyden_od'].max()
srovnani = srovnani[srovnani['tyden_od'] == srovnani_max_datum]
srovnani['vekova_skupina'] = srovnani['vekova_skupina'].str.replace(' let', '') \
.replace({'80-84': '80+', '85-89': '80+', '90+': '80+'})
srovnani = srovnani.groupby(['tyden', 'tyden_od', 'tyden_do', 'vekova_skupina']).sum().reset_index()
ockovani = pd.read_sql_query(
f"""
select vekova_skupina,
sum(case when poradi_davky = 1 then pocet else 0 end) populace_ockovani,
sum(case when poradi_davky = v.davky then pocet else 0 end) populace_plne,
sum(case when poradi_davky = 3 then pocet else 0 end) populace_posilujici
from ockovani_lide o
join vakciny v on (o.vakcina = v.vakcina)
where datum < '{srovnani_max_datum}'
group by vekova_skupina
""",
db.engine
)
ockovani = ockovani.groupby(['vekova_skupina']).sum().reset_index()
df = pd.merge(ockovani, populace)
df = pd.merge(df, srovnani)
df['vekova_skupina'] = df['vekova_skupina'].replace(
{'12-15': '12-17', '16-17': '12-17', '18-24': '18-29', '25-29': '18-29', '30-34': '30-39', '35-39': '30-39',
'40-44': '40-49', '45-49': '40-49', '50-54': '50-59', '55-59': '50-59', '60-64': '60-69', '65-69': '60-69',
'70-74': '70-79', '75-79': '70-79'})
df = df.groupby(['tyden', 'tyden_od', 'tyden_do', 'vekova_skupina']).sum().reset_index()
df['populace_bez'] = df['populace'] - df['populace_ockovani']
df['populace_plne'] = df['populace_plne'] - df['populace_posilujici']
df_norm = df.copy()
datasets = ['nakazeni', 'hospitalizace', 'hospitalizace_jip']
groups = ['bez', 'plne', 'posilujici']
for g in groups:
df_norm['populace_' + g + '_zastoupeni'] = df_norm['populace_' + g] / df_norm['populace']
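    # rates per 100,000 people within each vaccination group; the *_ratio columns compare the
    # unvaccinated rate to the fully vaccinated / boosted rate and are suppressed (set to None)
    # for groups covering less than 5 % of the population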
for d in datasets:
for g in groups:
df_norm[d + '_' + g + '_norm'] = ((100000 * df_norm[d + '_' + g]) / df_norm['populace_' + g]) \
.replace({np.nan: 0})
for g in ['plne', 'posilujici']:
df_norm[d + '_' + g + '_ratio'] = (df_norm[d + '_bez_norm'] / df_norm[d + '_' + g + '_norm']).replace({np.inf: np.nan})
df_norm.loc[df_norm['populace_' + g + '_zastoupeni'] < 0.05, d + '_' + g + '_ratio'] = np.nan
df_norm[d + '_' + g + '_ratio'] = df_norm[d + '_' + g + '_ratio'].replace({np.nan: None})
return df_norm
def count_hospitalization_probabilities():
date_from = get_import_date() - relativedelta(months=4)
date_to = get_import_date() - relativedelta(months=1)
hosp = pd.read_sql_query(
f"""
select vek_kat, tezky_stav, jip, kyslik, upv, ecmo, umrti, dni_tezky_stav, dni_jip, dni_kyslik, dni_upv, dni_ecmo
from uzis_modely_05_hospitalizovani
where datum_positivity >= '{date_from}' and datum_positivity <= '{date_to}'
""",
db.engine)
hosp['vek_kat'] = hosp['vek_kat'].replace({'0-4': '0-9', '5-9': '0-9', '10-14': '10-19', '15-19': '10-19',
'20-24': '20-29', '25-29': '20-29', '30-34': '30-39', '35-39': '30-39',
'40-44': '40-49', '45-49': '40-49', '50-54': '50-59', '55-59': '50-59',
'60-64': '60-69', '65-69': '60-69', '70-74': '70-79', '75-79': '70-79',
'80-84': '80-89', '85-89': '80-89', '90-94': '90+', '95-99': '90+',
'100-104': '90+', '105-109': '90+'})
hosp['hospitalizace'] = True
hosp = hosp.groupby(['vek_kat']).sum().reset_index()
nakazeni = pd.read_sql_query(
f"select pocet, vek from nakazeni where datum >= '{date_from}' and datum <= '{date_to}'",
db.engine)
bins = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 150]
labels = ['0-9', '10-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-79', '80-89', '90+']
nakazeni['vekova_skupina'] = pd.cut(nakazeni['vek'], bins=bins, labels=labels, right=False)
nakazeni = nakazeni.drop(columns=['vek']).groupby(['vekova_skupina']).sum().reset_index()
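    # join hospitalisation outcomes with case counts per age group; the *_r columns below are
    # per-case probabilities and the *_d columns are average durations in days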
df = pd.merge(hosp, nakazeni, left_on=['vek_kat'], right_on=['vekova_skupina'])
df['hospitalizace_r'] = df['hospitalizace'] / df['pocet']
df['jip_r'] = df['jip'] / df['pocet']
df['kyslik_r'] = df['kyslik'] / df['pocet']
df['upv_r'] = df['upv'] / df['pocet']
df['ecmo_r'] = df['ecmo'] / df['pocet']
df['tezky_stav_r'] = df['tezky_stav'] / df['pocet']
df['umrti_r'] = df['umrti'] / df['pocet']
df['jip_d'] = (df['dni_jip'] / df['jip']).replace({np.nan: None})
df['kyslik_d'] = (df['dni_kyslik'] / df['kyslik']).replace({np.nan: None})
df['upv_d'] = (df['dni_upv'] / df['upv']).replace({np.nan: None})
df['ecmo_d'] = (df['dni_ecmo'] / df['ecmo']).replace({np.nan: None})
df['tezky_stav_d'] = (df['dni_tezky_stav'] / df['tezky_stav']).replace({np.nan: None})
return (df, date_from, date_to)
def get_registrations_graph_data(center_id=None):
registrace = pd.read_sql_query(
"""
select datum, sum(pocet) pocet_registrace
from ockovani_registrace
where (ockovaci_misto_id = '{}' or {}) and import_id = {} and datum < '{}'
group by datum
""".format(center_id, center_id is None, get_import_id(), get_import_date()),
db.engine
)
rezervace = pd.read_sql_query(
"""
select datum_rezervace datum, sum(pocet) pocet_rezervace
from ockovani_registrace
where (ockovaci_misto_id = '{}' or {}) and import_id = {} and rezervace = true and datum_rezervace < '{}'
group by datum_rezervace
""".format(center_id, center_id is None, get_import_id(), get_import_date()),
db.engine
)
merged = pd.merge(registrace, rezervace, how='outer')
if merged.empty:
return merged
merged = merged.set_index('datum')
idx = pd.date_range(merged.index.min(), merged.index.max())
return merged.reindex(idx).fillna(0)
def get_queue_graph_data(center_id=None, kraj_id=None):
if center_id and kraj_id:
return
elif center_id:
fronta = pd.read_sql_query(
f"""
select datum, registrace_fronta, rezervace_cekajici_1, rezervace_cekajici_2, rezervace_cekajici_3
from ockovaci_mista_metriky
where misto_id = '{center_id}'
""",
db.engine
)
elif kraj_id:
fronta = pd.read_sql_query(
f"""
select datum, registrace_fronta, rezervace_cekajici_1, rezervace_cekajici_2, rezervace_cekajici_3
from kraje_metriky
where kraj_id = '{kraj_id}'
""",
db.engine
)
else:
fronta = pd.read_sql_query(
"""
select datum, registrace_fronta, rezervace_cekajici_1, rezervace_cekajici_2, rezervace_cekajici_3
from cr_metriky
""",
db.engine
)
fronta = fronta.set_index('datum').fillna(0).sort_values('datum')
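    # drop leading days where all queues are empty so the chart starts at the first non-zero value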
for idx, row in fronta.iterrows():
if row['registrace_fronta'] == 0 \
and row['rezervace_cekajici_1'] == 0 \
and row['rezervace_cekajici_2'] == 0 \
and row['rezervace_cekajici_3'] == 0:
fronta.drop(idx, inplace=True)
else:
break
return fronta
def get_vaccination_graph_data(center_id):
rezervace = pd.read_sql_query(
"""
select datum, sum(maximalni_kapacita) kapacita, sum(maximalni_kapacita-volna_kapacita) rezervace,
sum(maximalni_kapacita) filter(where kalendar_ockovani = 'V1') kapacita_1,
sum(maximalni_kapacita-volna_kapacita) filter(where kalendar_ockovani = 'V1') rezervace_1,
sum(maximalni_kapacita) filter(where kalendar_ockovani = 'V2') kapacita_2,
sum(maximalni_kapacita-volna_kapacita) filter(where kalendar_ockovani = 'V2') rezervace_2,
sum(maximalni_kapacita) filter(where kalendar_ockovani = 'V3') kapacita_3,
sum(maximalni_kapacita-volna_kapacita) filter(where kalendar_ockovani = 'V3') rezervace_3
from ockovani_rezervace
where ockovaci_misto_id = '{}'
group by datum
""".format(center_id),
db.engine
)
spotreba = pd.read_sql_query(
"""
select datum, sum(pouzite_davky) pouzite
from ockovani_spotreba
where ockovaci_misto_id = '{}'
group by datum
""".format(center_id),
db.engine
)
merged = pd.merge(rezervace, spotreba, how='outer')
if has_unique_nrpzs(center_id):
ockovani = pd.read_sql_query(
"""
select datum, sum(pocet) ockovane, sum(pocet) filter(where poradi_davky = 1) ockovane_1,
sum(pocet) filter(where poradi_davky = 2) ockovane_2,
sum(pocet) filter(where poradi_davky = 3) ockovane_3
from ockovani_lide o
join ockovaci_mista m on m.nrpzs_kod = o.zarizeni_kod
where m.id = '{}'
group by datum
""".format(center_id),
db.engine
)
merged = pd.merge(merged, ockovani, how='outer')
if merged.empty:
return merged
merged = merged.set_index('datum')
idx = pd.date_range(merged.index.min(), merged.index.max())
merged = merged.reindex(idx).fillna(0)
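    # mask consumption and vaccination counts on or after the import date (set to None), since
    # the reservation calendar extends into days without usage data yet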
merged['pouzite'] = np.where(merged.index.date < get_import_date(), merged['pouzite'], None)
if 'ockovane' in merged:
merged['ockovane'] = np.where(merged.index.date < get_import_date(), merged['ockovane'], None)
merged['ockovane_1'] = np.where(merged.index.date < get_import_date(), merged['ockovane_1'], None)
merged['ockovane_2'] = np.where(merged.index.date < get_import_date(), merged['ockovane_2'], None)
merged['ockovane_3'] = np.where(merged.index.date < get_import_date(), merged['ockovane_3'], None)
return merged
def get_vaccination_total_graph_data():
ockovani = pd.read_sql_query(
"""
select datum, sum(pocet) filter(where poradi_davky = 1) ockovani_castecne,
sum(pocet) filter(where poradi_davky = v.davky) ockovani_plne,
sum(pocet) filter(where poradi_davky = 3) ockovani_3
from ockovani_lide o
join vakciny v on v.vakcina = o.vakcina
group by datum
""",
db.engine
)
return ockovani.set_index('datum').fillna(0).sort_values('datum').cumsum()
def get_received_vaccine_graph_data():
return db.session.query(column('vyrobce'), column('datum'), column('prijem')).from_statement(text(
"""
select
vyrobce,
array_agg(base.datum) as datum,
array_agg(base.prijem) as prijem
from (
select
vyrobce,
datum,
sum(pocet_davek) as prijem
from ockovani_distribuce
where akce='Příjem'
group by datum, vyrobce
order by vyrobce, datum
) base
group by vyrobce
"""
)).all()
def get_used_vaccine_graph_data():
return db.session.query(column('vyrobce'), column('datum'), column('ockovano')).from_statement(text(
"""
select vyrobce, array_agg(base.datum) as datum, array_agg(base.ockovano) as ockovano
from (
select vyrobce, datum, sum(pocet) as ockovano
from ockovani_lide
join vakciny on vakciny.vakcina = ockovani_lide.vakcina
group by vyrobce, datum
order by vyrobce, datum
) base
group by vyrobce
"""
)).all()
def get_infected_graph_data():
nakazeni = pd.read_sql_query(
"""
select datum, vekova_skupina, sum(pocet) pocet_nakazeni
from nakazeni n
join populace_kategorie k on (k.min_vek <= vek and k.max_vek >= vek)
group by datum, vekova_skupina
""",
db.engine
)
populace = pd.read_sql_query(
"""
select vekova_skupina, sum(pocet) pocet_vek
from populace p
join populace_kategorie k on (k.min_vek <= vek and k.max_vek >= vek)
where orp_kod = 'CZ0'
group by vekova_skupina
""",
db.engine
)
df = pd.date_range(nakazeni['datum'].min(), nakazeni['datum'].max(), name='datum').to_frame().reset_index(drop=True)
df['datum'] = df['datum'].dt.date
df['join'] = 0
populace['join'] = 0
df = pd.merge(df, populace, how='outer')
df = df.drop('join', axis=1)
df = pd.merge(df, nakazeni, how='left')
df = df.fillna(0)
df['vekova_skupina_grp'] = df['vekova_skupina'].replace(
{'0-11': '0-17', '12-15': '0-17', '16-17': '0-17', '18-24': '18-29', '25-29': '18-29', '30-34': '30-39',
'35-39': '30-39', '40-44': '40-49', '45-49': '40-49', '50-54': '50-59', '55-59': '50-59', '60-64': '60-69',
'65-69': '60-69', '70-74': '70-79', '75-79': '70-79'})
df = df.groupby(['vekova_skupina_grp', 'datum']).sum()
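    # 7-day rolling sums, then normalise to cases per 100,000 inhabitants in each age group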
df_sum = df.rolling(7).sum()
df_sum['pocet_nakazeni_norm'] = ((df_sum['pocet_nakazeni'] / df_sum['pocet_vek']) * 100000)
df = pd.merge(df_sum[['pocet_nakazeni_norm']], df[['pocet_nakazeni']], left_index=True, right_index=True)
df = df[df.index.get_level_values(1) >= df.index.get_level_values(1).min() + timedelta(7)]
return df
def get_deaths_graph_data():
umrti = pd.read_sql_query(
"""
select datum, vekova_skupina, sum(pocet) pocet_umrti
from umrti u
join populace_kategorie k on (k.min_vek <= vek and k.max_vek >= vek)
group by datum, vekova_skupina
""",
db.engine
)
populace = pd.read_sql_query(
"""
select vekova_skupina, sum(pocet) pocet_vek
from populace p
join populace_kategorie k on (k.min_vek <= vek and k.max_vek >= vek)
where orp_kod = 'CZ0'
group by vekova_skupina
""",
db.engine
)
df = pd.date_range(umrti['datum'].min(), umrti['datum'].max(), name='datum').to_frame().reset_index(drop=True)
df['datum'] = df['datum'].dt.date
df['join'] = 0
populace['join'] = 0
df = pd.merge(df, populace, how='outer')
df = df.drop('join', axis=1)
df = pd.merge(df, umrti, how='left')
df = df.fillna(0)
df['vekova_skupina_grp'] = df['vekova_skupina'].replace(
{'0-11': '0-17', '12-15': '0-17', '16-17': '0-17', '18-24': '18-29', '25-29': '18-29', '30-34': '30-39',
'35-39': '30-39', '40-44': '40-49', '45-49': '40-49', '50-54': '50-59', '55-59': '50-59', '60-64': '60-69',
'65-69': '60-69', '70-74': '70-79', '75-79': '70-79'})
df = df.groupby(['vekova_skupina_grp', 'datum']).sum()
df_sum = df.rolling(7).sum()
df_sum['pocet_umrti_norm'] = ((df_sum['pocet_umrti'] / df_sum['pocet_vek']) * 100000)
df = pd.merge(df_sum[['pocet_umrti_norm']], df[['pocet_umrti']], left_index=True, right_index=True)
df = df[df.index.get_level_values(1) >= df.index.get_level_values(1).min() + timedelta(7)]
return df
def get_hospitalized_graph_data():
hospitalizace = pd.read_sql_query("select * from hospitalizace", db.engine)
hospitalizace = hospitalizace.set_index('datum')
return hospitalizace
def get_tests_graph_data():
df = pd.read_sql_query("select * from testy", db.engine)
df = df.set_index('datum')
df_sum = df.rolling(7).sum()
df_sum['pozitivita_diagnosticka'] = (df_sum['pozit_typologie_test_indik_diagnosticka'] / df_sum['typologie_test_indik_diagnosticka']) * 100
df_sum['pozitivita_epidemiologicka'] = (df_sum['pozit_typologie_test_indik_epidemiologicka'] / df_sum['typologie_test_indik_epidemiologicka']) * 100
df_sum['pozitivita_preventivni'] = (df_sum['pozit_typologie_test_indik_preventivni'] / df_sum['typologie_test_indik_preventivni']) * 100
df = pd.merge(df_sum[['pozitivita_diagnosticka', 'pozitivita_epidemiologicka', 'pozitivita_preventivni']], df, left_index=True, right_index=True)
df = df[df.index >= df.index.min() + timedelta(7)]
return df
def get_infected_orp_graph_data():
df = pd.read_sql_query(
f"""
select ruian_kod, ((100000.0 * aktivni_pripady) / pocet) nakazeni, aktivni_pripady, nazev_obce
from charakteristika_obci n
join obce_orp o on o.uzis_orp = n.orp_kod
join populace_orp p on p.orp_kod = o.kod_obce_orp
where datum = '{get_import_date()}'
""",
db.engine
)
df['ruian_kod'] = df['ruian_kod'].round(0).astype('str')
return df
def get_vaccinated_orp_graph_data():
df = pd.read_sql_query(
f"""
select ruian_kod, (100.0 * sum(l.pocet)) / min(p.pocet) ockovani, nazev_obce
from ockovani_lide l
join obce_orp o on o.uzis_orp = l.orp_bydl_kod
join populace_orp p on p.orp_kod = o.kod_obce_orp
join vakciny v on v.vakcina = l.vakcina
where poradi_davky = davky
group by ruian_kod, nazev_obce
""",
db.engine
)
df['ruian_kod'] = df['ruian_kod'].round(0).astype('str')
return df
def get_hospitalized_orp_graph_data():
df = pd.read_sql_query(
f"""
select ruian_kod, ((100000.0 * pocet_hosp) / pocet) hospitalizovani, pocet_hosp, nazev_obce
from situace_orp s
join obce_orp o on o.uzis_orp = s.orp_kod
join populace_orp p on p.orp_kod = o.kod_obce_orp
where datum = '{get_import_date()}'
""",
db.engine
)
df['ruian_kod'] = df['ruian_kod'].round(0).astype('str')
return df
def get_tests_orp_graph_data():
df = pd.read_sql_query(
f"""
select ruian_kod, case when testy_7 > 0 then (100.0 * nove_pripady_7_dni) / testy_7 else 0 end pozitivita, testy_7, nazev_obce
from situace_orp s
join charakteristika_obci c on (c.datum = s.datum and c.orp_kod = s.orp_kod)
join obce_orp o on o.uzis_orp = s.orp_kod
where s.datum = '{get_import_date()}'
""",
db.engine
)
df['ruian_kod'] = df['ruian_kod'].round(0).astype('str')
return df
def get_hospital_capacities_graph_data():
capacities = pd.read_sql_query(f"select * from kapacity_nemocnic where datum < '{get_import_date()}'", db.engine).fillna(0)
capacities['luzka_standard_kyslik_kapacita_volna_covid_pozitivni'] += capacities['inf_luzka_kyslik_kapacita_volna_covid_pozitivni']
capacities['luzka_standard_kyslik_kapacita_volna_covid_negativni'] += capacities['inf_luzka_kyslik_kapacita_volna_covid_negativni']
capacities['luzka_standard_kyslik_kapacita_celkem'] += capacities['inf_luzka_kyslik_kapacita_celkem']
capacities['luzka_hfno_cpap_kapacita_volna_covid_pozitivni'] += capacities['inf_luzka_hfno_kapacita_volna_covid_pozitivni']
capacities['luzka_hfno_cpap_kapacita_volna_covid_negativni'] += capacities['inf_luzka_hfno_kapacita_volna_covid_negativni']
capacities['luzka_hfno_cpap_kapacita_celkem'] += capacities['inf_luzka_hfno_kapacita_celkem']
capacities['luzka_upv_niv_kapacita_volna_covid_pozitivni'] += capacities['inf_luzka_upv_kapacita_volna_covid_pozitivni']
capacities['luzka_upv_niv_kapacita_volna_covid_negativni'] += capacities['inf_luzka_upv_kapacita_volna_covid_negativni']
capacities['luzka_upv_niv_kapacita_celkem'] += capacities['inf_luzka_upv_kapacita_celkem']
capacities_21 = pd.read_sql_query("select * from kapacity_nemocnic_21 where datum >= '2021-03-30'", db.engine).fillna(0)
capacities_20 =
|
pd.read_sql_query("select * from kapacity_nemocnic_20", db.engine)
|
pandas.read_sql_query
|
import argparse
import glob
import itertools
import os
import random
import numpy as np
import pandas as pd
from scipy.stats import ttest_ind, kendalltau
def parse_argument() -> argparse.Namespace:
"""
Parse input arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_folder',
type=str,
default='./other_seeds',
help='Folder containing hypotheses files with different random seeds.',
)
args = parser.parse_args()
return args
def get_index(filename):
names = ['sub', 'pred', 'obj', 'label', 'probability']
df = pd.read_csv(filename, sep='\t', names=names)
df = df[df['probability'] > 0.2]
df.sort_values(by='probability', ascending=False, inplace=True, kind='mergesort')
return df.index.tolist()
def get_ranks_and_index(filename):
names = ['sub', 'pred', 'obj', 'label', 'probability']
df = pd.read_csv(filename, sep='\t', names=names)
df = df[df['probability'] > 0.20]
df.sort_values(by='probability', ascending=False, inplace=True, kind='mergesort')
df['rank'] = df['probability'].rank(method='dense', ascending=False)
return df.index.tolist(), df['rank'].tolist()
def get_ranks_using_index(filename, index):
names = ['sub', 'pred', 'obj', 'label', 'probability']
df = pd.read_csv(filename, sep='\t', names=names)
df.sort_values(by='probability', ascending=False, inplace=True, kind='mergesort')
df['rank'] = df['probability'].rank(method='dense', ascending=False)
df = df.loc[index, :]
df.sort_values(by='probability', ascending=False, inplace=True, kind='mergesort')
return df['rank'].tolist()
def get_triples(filename):
names = ['sub', 'pred', 'obj', 'label', 'probability']
df = pd.read_csv(filename, sep='\t', names=names)
df = df[df['probability'] > 0.2]
df['triple'] = df.apply(lambda row: ' '.join([row['sub'], row['pred'], row['obj']]), axis=1)
return df['triple'].tolist()
def rbo(l1, l2, p=0.98):
"""
https://github.com/ragrawal/measures/blob/master/measures/rankedlist/RBO.py
"""
"""
Calculates Ranked Biased Overlap (RBO) score.
l1 -- Ranked List 1
l2 -- Ranked List 2
"""
    if l1 is None: l1 = []
    if l2 is None: l2 = []
sl,ll = sorted([(len(l1), l1),(len(l2),l2)])
s, S = sl
l, L = ll
if s == 0: return 0
# Calculate the overlaps at ranks 1 through l
# (the longer of the two lists)
ss = set([]) # contains elements from the smaller list till depth i
ls = set([]) # contains elements from the longer list till depth i
x_d = {0: 0}
sum1 = 0.0
for i in range(l):
x = L[i]
y = S[i] if i < s else None
d = i + 1
        # if the two elements are the same, we don't need
        # to add to either of the sets
        if x == y:
            x_d[d] = x_d[d-1] + 1.0
        # otherwise, add the items to their respective sets
        # and update the overlap count
        else:
            ls.add(x)
            if y is not None: ss.add(y)
            x_d[d] = x_d[d-1] + (1.0 if x in ss else 0.0) + (1.0 if y in ls else 0.0)
        # accumulate the weighted average overlap at depth d
        sum1 += x_d[d]/d * pow(p, d)
sum2 = 0.0
for i in range(l-s):
d = s+i+1
sum2 += x_d[d]*(d-s)/(d*s)*pow(p,d)
sum3 = ((x_d[l]-x_d[s])/l+x_d[s]/s)*pow(p,l)
# Equation 32
rbo_ext = (1-p)/p*(sum1+sum2)+sum3
return rbo_ext
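# Illustrative check of the rbo() implementation above (not part of the original analysis):
# identical rankings score exactly 1.0 and fully disjoint rankings score 0.0, e.g.
#   rbo([1, 2, 3], [1, 2, 3], p=0.99)  # -> 1.0
#   rbo([1, 2, 3], [4, 5, 6], p=0.99)  # -> 0.0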
def main():
args = parse_argument()
files_without_original = glob.glob(os.path.join(args.input_folder, '*.txt'))
files_with_original = files_without_original + ['./hypotheses_confidence.txt']
print(f'Number of files without original found: {len(files_without_original)}')
print(f'Number of files with original found: {len(files_with_original)}')
# #######
# # rbo #
# #######
# # our hypotheses
# all_ranks = []
# for idx, f in enumerate(files_with_original):
# print(f'Processing {idx+1}/{len(files_with_original)}...')
# all_ranks.append(get_index(f))
# rbo_list = []
# for rp in list(itertools.combinations(all_ranks, 2)):
# rbo_list.append(rbo(rp[0], rp[1], p=0.99))
# print(f'RBO value: {np.mean(rbo_list)} +- {np.var(rbo_list)}')
# # random baseline
# all_ranks_baseline = []
# for r in all_ranks:
# all_ranks_baseline.append(random.sample(range(108078), len(r)))
# rbo_baseline_list = []
# for rp in list(itertools.combinations(all_ranks_baseline, 2)):
# rbo_baseline_list.append(rbo(rp[0], rp[1], p=0.99))
# print(f'Baseline RBO value: {np.mean(rbo_baseline_list)} +- {np.var(rbo_baseline_list)}')
# _, pval = ttest_ind(rbo_list, rbo_baseline_list)
# print(f'p-value: {pval}')
# ##############
# # kendalltau #
# ##############
# original_index, original_rank = get_ranks_and_index('./hypotheses_confidence.txt')
# other_ranks = [original_rank]
# for idx, f in enumerate(files_without_original):
# print(f'Processing {idx+1}/{len(files_without_original)}...')
# other_ranks.append(get_ranks_using_index(f, original_index))
# taus_list = []
# for rp in list(itertools.combinations(other_ranks, 2)):
# tau, pval = kendalltau(rp[0], rp[1])
# taus_list.append(tau)
# print(f'tau: {np.mean(taus_list)} +- {np.var(taus_list)}')
# # random baseline
# other_ranks_baseline = []
# for _ in other_ranks:
# other_ranks_baseline.append(random.sample(range(108078), len(original_rank)))
# taus_baseline_list = []
# for rp in list(itertools.combinations(other_ranks_baseline, 2)):
# tau, pval = kendalltau(rp[0], rp[1])
# taus_baseline_list.append(tau)
# print(f'Baseline tau: {np.mean(taus_baseline_list)} +- {np.var(taus_baseline_list)}')
# _, pval = ttest_ind(taus_list, taus_baseline_list)
# print(f'p-value: {pval}')
#####################
# common hypotheses #
#####################
# validated hypotheses
df_validated =
|
pd.read_csv('../figure5/all_validated_hypothesis.txt', sep='\t')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from collections import OrderedDict
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import lrange
from pandas import DataFrame, MultiIndex, Series, date_range, notna
import pandas.core.panel as panelm
from pandas.core.panel import Panel
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal,
makeCustomDataframe as mkdf, makeMixedDataFrame)
from pandas.tseries.offsets import MonthEnd
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class PanelTests(object):
panel = None
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
pytest.raises(TypeError, hash, c_empty)
pytest.raises(TypeError, hash, c)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class SafeForSparse(object):
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).iloc[0]
ops = ['add', 'sub', 'mul', 'truediv',
'floordiv', 'div', 'mod', 'pow']
for op in ops:
with pytest.raises(NotImplementedError):
getattr(p, op)(d, axis=0)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class CheckIndexing(object):
def test_delitem_and_pop(self):
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
tm.assert_frame_equal(panelc[0], panel[0])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# bad shape
p = Panel(np.random.randn(4, 3, 2))
msg = (r"shape of value must be \(3, 2\), "
r"shape of given object was \(4, 2\)")
with pytest.raises(ValueError, match=msg):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notna(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notna(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_getitem_fancy_slice(self):
pass
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.loc[:, 22, [111, 333]] = b
assert_frame_equal(a.loc[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort_values()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.loc[0, :, 0] = b
assert_series_equal(df.loc[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.loc[:, 0, 0] = b
assert_series_equal(df.loc[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.loc[0, 0, :] = b
assert_series_equal(df.loc[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
        # GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
|
tm.assert_frame_equal(panel.loc['a1'], df1)
|
pandas.util.testing.assert_frame_equal
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 12 14:45:51 2018
@author: slauniai
"""
import numpy as np
import os
# import pickle
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
#import timeit
from scipy import stats
from spafhy_point import SpaFHy_point, read_setup
from spafhy_parameters import parameters
# from canopygrid import CanopyGrid
# from bucketgrid import BucketGrid
# from iotools import read_FMI_weather, read_HydeDaily
eps = np.finfo(float).eps # machine epsilon
spathy_path = os.path.join('c:', r'c:\datat\spathydata')
ff = os.path.join(r'c:\repositories\spathy\ini', 'spafhy_point_default0.ini')
# dump into pickle
#ou = os.path.join(r'c:\ModelResults\Spathy', 'Site_results_newest.pkl')
#pickle.dump(res, open(ou, 'wb'))
def run_forest_sites():
# runs ECsite_eval for all sites and saves results into pickle
sites=['FIHy', 'FICage4', 'FICage12', 'FISod', 'FIKal', 'FILet', 'SEKno', 'SESky2', 'SENor','FISii', 'FISiiA']
sites = [sites[1]]
#print sites
#sites = ['FIHy']
res = dict.fromkeys(sites)
for s in sites:
out, dat, forc = ECsite_eval(s)
a = {}
a['Wliq_mod'] = np.ravel(out[0].bu.results['Wliq'])
a['Wliq_low'] = np.ravel(out[1].bu.results['Wliq'])
a['Wliq_high'] = np.ravel(out[2].bu.results['Wliq'])
a['ET_mod'] = np.ravel(out[0].cpy.results['ET'])
a['ET_low'] = np.ravel(out[1].cpy.results['ET'])
a['ET_high'] = np.ravel(out[2].cpy.results['ET'])
a['Tr_mod'] = np.ravel(out[0].cpy.results['Transpi'])
a['Tr_low'] = np.ravel(out[1].cpy.results['Transpi'])
a['Tr_high'] = np.ravel(out[2].cpy.results['Transpi'])
a['Ef_mod'] = np.ravel(out[0].cpy.results['Efloor'])
# a['Ef_low'] = np.ravel(out[1].cpy.results['Efloor'])
# a['Ef_high'] = np.ravel(out[2].cpy.results['Efloor'])
a['SWE_mod'] = np.ravel(out[0].cpy.results['SWE'])
a['SWE_low'] = np.ravel(out[1].cpy.results['SWE'])
a['SWE_high'] = np.ravel(out[2].cpy.results['SWE'])
a['data'] = dat
a['forc'] = forc
res[s] = a
del a, out, forc
return res
def ECsite_eval(site):
p = {'FIHy': {'LAIc': 3.5, 'LAId': 0.5, 'hc': 15.0, 'soil': [0.44, 0.33, 0.13, 2e-6], 'orgd': 0.05, 'fb': True},
'FICage4': {'LAIc': 0.6, 'LAId': 0.1, 'hc': 0.4, 'soil': [0.44, 0.33, 0.13, 2e-6], 'orgd': 0.05, 'fb': True},
'FICage12': {'LAIc': 1.4, 'LAId': 0.4, 'hc': 1.7, 'soil': [0.44, 0.30, 0.13, 2e-6], 'orgd': 0.05, 'fb': True},
'FISod': {'LAIc': 2.1, 'LAId': 0.1, 'hc': 15.0, 'soil': [0.41, 0.21, 0.05, 1e-4], 'orgd': 0.05, 'fb': True},
'FIKal': {'LAIc': 2.1, 'LAId': 0.1, 'hc': 15.0, 'soil': [0.9, 0.42, 0.11, 5e-10], 'orgd': 0.08, 'fb': True}, # peat
'FILet': {'LAIc': 4.3, 'LAId': 2.3, 'hc': 15.0, 'soil': [0.9, 0.42, 0.11, 5e-10], 'orgd': 0.08, 'fb': True}, # peat
'SEKno': {'LAIc': 3.6, 'LAId': 0.2, 'hc': 15.0, 'soil': [0.44, 0.33, 0.13, 2e-6], 'orgd': 0.05, 'fb': True}, # medium textured
'SESky2': {'LAIc': 5.3, 'LAId': 0.5, 'hc': 15.0, 'soil': [0.44, 0.33, 0.13, 1e-6], 'orgd': 0.05, 'fb': True},
'SENor': {'LAIc': 5.5, 'LAId': 1.3, 'hc': 15.0, 'soil': [0.43, 0.33, 0.02, 1e-6], 'orgd': 0.05, 'fb': True},
'FISii': {'LAIc': 0.01, 'LAId': 0.3, 'hc': 0.3, 'soil': [0.9, 0.42, 0.11, 5e-10], 'orgd': 0.20, 'fb': False},
'FISiiA': {'LAIc': 0.01, 'LAId': 0.3, 'hc': 0.3, 'soil': [0.9, 0.42, 0.11, 5e-10], 'orgd': 0.20, 'fb': True},
}
pgen, pcpy, pbu, _ = parameters() # default parameters
pgen['spatial_cpy'] = False
pgen['spatial_soil'] = False
pcpy['lai_conif']= p[site]['LAIc']
pcpy['lai_decid']= p[site]['LAId']
pcpy['hc']= p[site]['hc']
L = pcpy['lai_conif'] + pcpy['lai_decid']
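    # canopy closure fraction derived from total LAI (empirical relation), floored at 0.2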
pcpy['cf'] = np.maximum(0.2, 1.5 * L / (1.5 * L + 1.43) - 0.2)
pbu['poros'] = p[site]['soil'][0]
pbu['fc'] = p[site]['soil'][1]
pbu['wp'] = p[site]['soil'][2]
pbu['ksat'] = p[site]['soil'][3]
pbu['org_depth'] = p[site]['orgd']
fb = p[site]['fb']
""" read forcing data and evaluation data """
if site in ['FIHy', 'FICage4', 'FICage12']:
dat, FORC = read_hydedata(site)
elif site in ['FISii', 'FISiiA']:
dat, FORC = read_siikaneva_data()
else:
dat, FORC = read_daily_ECdata(site)
FORC['Prec'] = FORC['Prec'] / pgen['dt'] # mms-1
FORC['T'] = FORC['Ta'].copy()
cmask = np.ones(1)
# print(cmask)
# run model for different parameter combinations, save results into dataframe
# +/- 20%, 20%, 20%
p = [[10.0, 2.1, 1.3, 4.5],
[8.5, 1.7, 1.05, 3.15],
[11.5, 2.5, 2.6, 5.4]]
out = []
for k in range(3):
a = p[k]
pcpy['amax'] = a[0]
pcpy['g1_conif'] = a[1]
pcpy['g1_decid'] = 1.6 * a[1]
pcpy['wmax'] = a[2]
pcpy['wmaxsnow'] = a[3]
model = SpaFHy_point(pgen, pcpy, pbu, cmask, FORC, cpy_outputs=True, bu_outputs=True)
nsteps=len(FORC)
model._run(0, nsteps, soil_feedbacks=fb)
out.append(model)
del model
# best model
# Wliq_mod = np.ravel(out[0].bu.results['Wliq'])
# Wliq_low = np.ravel(out[1].bu.results['Wliq'])
# Wliq_high = np.ravel(out[2].bu.results['Wliq'])
ET_mod = np.ravel(out[0].cpy.results['ET'])
ET_low = np.ravel(out[1].cpy.results['ET'])
ET_high = np.ravel(out[2].cpy.results['ET'])
# E_mod = np.ravel(out[0].cpy.results['Evap'])
# E_low = np.ravel(out[1].cpy.results['Evap'])
# E_high = np.ravel(out[2].cpy.results['Evap'])
tvec = dat.index
et_dry = dat['ET'].copy()
et_dry[dat['Prec'] > 0.1] = np.NaN
sns.set_style('whitegrid')
with sns.color_palette('muted'):
plt.figure()
plt.subplot(2,3,(1,2))
plt.plot(tvec, et_dry, 'o', markersize=4, alpha=0.3, label='meas')
plt.fill_between(tvec, ET_low, ET_high, facecolor='grey', alpha=0.6, label='range')
plt.plot(tvec, ET_mod, 'k-', alpha=0.4, lw=0.5, label='mod')
#plt.xlim([pd.datetime(2003, 10, 1), pd.datetime(2011,1,1)])
plt.legend(loc=2, fontsize=8)
plt.setp(plt.gca().get_xticklabels(), fontsize=8)
plt.setp(plt.gca().get_yticklabels(), fontsize=8)
plt.ylabel('ET$_{dry}$ (mm d$^{-1}$)', fontsize=8)
plt.ylim([-0.05, 5.0])
#plt.xlim([pd.datetime(2002, 1, 1), pd.datetime(2011,1,1)])
plt.title(site)
# sns.despine()
# scatterplot
plt.subplot(2,3,3)
et_dry[dat['doy'] < 0] = np.NaN
et_dry[dat['doy'] > 366] = np.NaN
meas = np.array(et_dry.values.tolist())
ix = np.where(np.isfinite(meas))
meas = meas[ix].copy()
mod = ET_mod[ix].copy()
slope, intercept, r_value, p_value, std_err = stats.linregress(meas, mod)
print('slope', slope, 'interc', intercept)
# slope, intercept, _, _ = stats.theilslopes(meas, mod, 0.95)
meas = meas[:, np.newaxis]
slope, _, _, _ = np.linalg.lstsq(meas, mod)
intercept = 0.0
xx = np.array([min(meas), max(meas)])
plt.plot(meas, mod, 'o', markersize=4, alpha=0.3)
plt.plot(xx, slope*xx + intercept, 'k-')
plt.plot([0, 5], [0, 5], 'k--')
plt.text(0.3, 4.2, 'y = %.2f x + %.2f' %(slope, intercept), fontsize=8)
plt.xlim([-0.01, 5]); plt.ylim([-0.01, 5])
plt.setp(plt.gca().get_xticklabels(), fontsize=8)
ax = plt.gca()
ax.yaxis.set_label_position("right")
ax.yaxis.tick_right()
plt.ylabel('ET$_{dry}$ mod (mm d$^{-1}$)', fontsize=8)
plt.setp(plt.gca().get_yticklabels(), fontsize=8)
plt.xlabel('ET$_{dry}$ meas (mm d$^{-1}$)', fontsize=8)
os.chdir(os.path.join(r'c:\ModelResults\Spathy'))
tt = site + '_ET.png'
plt.savefig(tt)
# plt.subplot(2,3,(4,5))
# #plt.plot(tvec, SWCa, 'o', markersize=4, alpha=0.3,label='meas')
# plt.fill_between(tvec, Wliq_low, Wliq_high, facecolor='grey', alpha=0.6, label='range')
# plt.plot(tvec, Wliq_mod, 'k-',alpha=0.4, lw=0.5, label='mod')
# #plt.xlim([pd.datetime(2003, 10, 1), pd.datetime(2011,1,1)])
# plt.legend(loc=2, fontsize=8)
# plt.setp(plt.gca().get_xticklabels(), fontsize=8)
# plt.setp(plt.gca().get_yticklabels(), fontsize=8)
# plt.ylabel('$\\theta$ (m$^3$ m$^{-3}$)', fontsize=8)
# # plt.xlim([pd.datetime(2002, 1, 1), pd.datetime(2011,1,1)])
return out, dat, FORC
def Hyde_eval(pgen, pcpy, pbu):
    # this does some figures for Hyde
""" read forcing data and evaluation data """
fname = r'c:\datat\spathydata\HydeDaily2000-2010.txt'
dat, FORC = read_HydeDaily(fname)
FORC['Prec'] = FORC['Prec'] / pgen['dt'] # mms-1
FORC['T'] = FORC['Ta'].copy()
cmask = np.ones(1)
# run model for different parameter combinations, save results into dataframe
# +/- 15%, 15%, 20%, 30%
# amax, g1_conif, wmax, wmaxsnow
p = [[10.0, 2.1, 1.3, 4.5],
[8.5, 1.7, 1.05, 3.15],
[11.5, 2.5, 2.6, 5.4]]
out = []
for k in range(3):
a = p[k]
pcpy['amax'] = a[0]
pcpy['g1_conif'] = a[1]
pcpy['g1_decid'] = 1.6*a[1]
pcpy['wmax'] = a[2]
pcpy['wmaxsnow'] = a[3]
model = SpaFHy_point(pgen, pcpy, pbu, cmask, FORC, cpy_outputs=True, bu_outputs=True)
nsteps=len(FORC)
model._run(0, nsteps)
out.append(model)
del model
# best model
Wliq_mod = np.ravel(out[0].bu.results['Wliq'])
Wliq_low = np.ravel(out[1].bu.results['Wliq'])
Wliq_high = np.ravel(out[2].bu.results['Wliq'])
ET_mod = np.ravel(out[0].cpy.results['ET'])
ET_low = np.ravel(out[1].cpy.results['ET'])
ET_high = np.ravel(out[2].cpy.results['ET'])
E_mod = np.ravel(out[0].cpy.results['Evap'])
E_low = np.ravel(out[1].cpy.results['Evap'])
E_high = np.ravel(out[2].cpy.results['Evap'])
# SWC_mod = np.ravel(out[0].cpy.results['ET'])
# SWC_low = np.ravel(out[1].cpy.results['ET'])
# SWC_high = np.ravel(out[2].cpy.results['ET']))
SWCa = dat['SWCa']
SWCb = dat['SWCb']
SWCc = dat['SWCc']
tvec = dat.index
et_dry = dat['ET']
et_dry[dat['Prec']>0.1] = np.NaN
sns.set_style('whitegrid')
with sns.color_palette('muted'):
plt.figure()
plt.subplot(2,3,(1,2))
plt.plot(tvec, et_dry, 'o', markersize=4, alpha=0.3, label='meas')
plt.fill_between(tvec, ET_low, ET_high, facecolor='grey', alpha=0.6, label='range')
plt.plot(tvec, ET_mod, 'k-', alpha=0.4, lw=0.5, label='mod')
#plt.xlim([pd.datetime(2003, 10, 1), pd.datetime(2011,1,1)])
plt.legend(loc=2, fontsize=8)
plt.setp(plt.gca().get_xticklabels(), fontsize=8)
plt.setp(plt.gca().get_yticklabels(), fontsize=8)
plt.ylabel('ET$_{dry}$ (mm d$^{-1}$)', fontsize=8)
plt.ylim([-0.05, 5.0])
plt.xlim([pd.datetime(2002, 1, 1), pd.datetime(2011,1,1)])
# sns.despine()
plt.subplot(2,3,(4,5))
plt.plot(tvec, SWCa, 'o', markersize=4, alpha=0.3,label='meas')
plt.fill_between(tvec, Wliq_low, Wliq_high, facecolor='grey', alpha=0.6, label='range')
plt.plot(tvec, Wliq_mod, 'k-',alpha=0.4, lw=0.5, label='mod')
#plt.xlim([pd.datetime(2003, 10, 1), pd.datetime(2011,1,1)])
plt.legend(loc=2, fontsize=8)
plt.setp(plt.gca().get_xticklabels(), fontsize=8)
plt.setp(plt.gca().get_yticklabels(), fontsize=8)
plt.ylabel('$\\theta$ (m$^3$ m$^{-3}$)', fontsize=8)
plt.xlim([pd.datetime(2002, 1, 1), pd.datetime(2011,1,1)])
# scatterplot
plt.subplot(2,3,6)
meas = np.array(SWCa.values.tolist())
slope, intercept, r_value, p_value, std_err = stats.linregress(meas, Wliq_mod)
#print slope, intercept
xx = np.array([min(meas), max(meas)])
plt.plot(meas, Wliq_mod, 'o', markersize=5, alpha=0.3)
plt.plot(xx, slope*xx + intercept, 'k-')
plt.plot([0.05, 0.45], [0.05, 0.45], 'k--')
plt.text( 0.07, 0.42, 'y = %.2f x + %.2f' %(slope, intercept), fontsize=8)
plt.xlim([0.05, 0.45]); plt.ylim([0.05, 0.45])
ax = plt.gca()
ax.set_yticks([0.1, 0.2, 0.3, 0.4])
ax.set_xticks([0.1, 0.2, 0.3, 0.4])
ax.yaxis.set_label_position("right")
ax.yaxis.tick_right()
plt.ylabel('$\\theta$ mod (m$^3$ m$^{-3}$)', fontsize=8)
plt.setp(plt.gca().get_yticklabels(), fontsize=8)
plt.setp(plt.gca().get_xticklabels(), fontsize=8)
plt.xlabel('$\\theta$ meas (m$^3$ m$^{-3}$)', fontsize=8)
# scatterplot
plt.subplot(2,3,3)
meas = np.array(et_dry.values.tolist())
ix = np.where(np.isfinite(meas))
meas=meas[ix].copy()
mod = ET_mod[ix].copy()
slope, intercept, r_value, p_value, std_err = stats.linregress(meas, mod)
xx = np.array([min(meas), max(meas)])
plt.plot(meas, mod, 'o', markersize=4, alpha=0.3)
plt.plot(xx, slope*xx + intercept, 'k-')
plt.plot([0, 5], [0, 5], 'k--')
plt.text(0.3, 4.2, 'y = %.2f x + %.2f' %(slope, intercept), fontsize=8)
plt.xlim([-0.01, 5]); plt.ylim([-0.01, 5])
plt.setp(plt.gca().get_xticklabels(), fontsize=8)
ax = plt.gca()
ax.yaxis.set_label_position("right")
ax.yaxis.tick_right()
plt.ylabel('ET$_{dry}$ mod (mm d$^{-1}$)', fontsize=8)
plt.setp(plt.gca().get_yticklabels(), fontsize=8)
plt.xlabel('ET$_{dry}$ meas (mm d$^{-1}$)', fontsize=8)
#plt.savefig('Hyde_validate.pdf')
#plt.savefig('Hyde_validate.png')
# snowpack and throughfall
plt.figure()
SWE_mod = np.ravel(out[0].cpy.results['SWE'])
SWE_low = np.ravel(out[1].cpy.results['SWE'])
SWE_hi = np.ravel(out[2].cpy.results['SWE'])
swe_meas = dat['SWE']
plt.plot(tvec, swe_meas, 'o', markersize=10, alpha=0.3, label='meas')
plt.fill_between(tvec, SWE_low, SWE_hi, facecolor='grey', alpha=0.6, label='range')
plt.plot(tvec, SWE_mod, 'k-', alpha=0.4, lw=0.5, label='mod')
plt.title('SWE'); plt.ylabel('SWE mm')
return out, dat, FORC
def read_daily_ECdata(site):
#if site=='FICage4':
if site=='FISod':
fpath = os.path.join(spathy_path, 'DailyCEIP', 'FMI_Sodankyla')
yrs = np.arange(2001, 2010)
fnames = [ 'SodDaily_%4d.dat' %(k) for k in yrs]
cols=['doy','NEE','GPP','TER','ET','H','NEEflag','ETflag','Hflag','Par','Rnet','Ta','VPD','CO2','Prec','U','Pamb',
'SWC1','SWC2','Tsoil1', 'Tsoil2', 'Rnetflag', 'Snowdepth']
if site=='FIKal':
fpath = os.path.join(spathy_path, 'DailyCEIP', 'FMI_Kalevansuo')
yrs = [2005, 2006, 2007, 2008]
fnames = ['KalevansuoDaily_%4d.dat' %(k) for k in yrs]
cols=['doy','NEE','GPP','TER','ET','H','NEEflag','ETflag','Hflag','Par','Rnet','Ta','VPD','CO2','empty1','Prec','U','Pamb',
'WTD', 'Snowdepth', 'Rnetflag']
if site=='SEKno':
fpath = os.path.join(spathy_path, 'DailyCEIP', 'Knottasen')
yrs =[2007]
fnames = ['KnoDaily_%4d.dat' %(k) for k in yrs]
cols=['doy','NEE','GPP','TER','ET','H','NEEflag','ETflag','Hflag','Par','Rnet','Ta','VPD','CO2','Prec','U','Pamb',
'SWC1','SWC2','Tsoil1', 'Tsoil2', 'Rnetflag', 'Snowdepth']
if site=='SENor':
fpath = os.path.join(spathy_path, 'DailyCEIP', 'Norunda')
yrs = [1996, 1997, 1999, 2003]
fnames = ['NorDaily_%4d.dat' %(k) for k in yrs]
cols=['doy','NEE','GPP','TER','ET','H','NEEflag','ETflag','Hflag','Par','Rnet','Ta','VPD','CO2','Prec','U','Pamb',
'SWC1','SWC2','Tsoil1', 'Tsoil2', 'Rnetflag', 'Snowdepth']
if site=='SESky2':
fpath = os.path.join(spathy_path, 'DailyCEIP', 'Skyttorp2')
yrs = [2005]
fnames = ['Sky2Daily_2005.dat']
cols=['doy','NEE','GPP','TER','ET','H','NEEflag','ETflag','Hflag','Par','Rnet','Ta','VPD','CO2','Prec','U','Pamb',
'SWC1','SWC2','Tsoil1', 'Tsoil2', 'Rnetflag', 'Snowdepth']
if site=='FILet':
fpath = os.path.join(spathy_path, 'DailyCEIP', 'FMI_Lettosuo')
yrs = [2010, 2011, 2012]
fnames = ['FILet_Daily_%4d.dat' %(k) for k in yrs]
cols=['year', 'month', 'day', 'doy','NEE','GPP','TER','ET','H','Rnet','Rg', 'Par','Prec_site', 'Prec', 'Ta', 'RH',
'VPD','CO2','U','Pamb', 'WTD','WTDwest','WTDsouth', 'WTDeast', 'WTDnorth', 'SWC1', 'SWC2', 'empty', 'Ts1', 'Ts2',
'Ts3', 'Ts4', 'NEEflag', 'ETflag', 'Hflag','Rnetflag']
if site == 'FICage4':
cols = ['time','doy','NEE','GPP','TER','ET','H','NEEflag','ETflag','Hflag','Par','Rnet','Ta','VPD','CO2','SWCa','PrecSmear','Prec','U','Pamb']
dat = pd.DataFrame()
for k in range(len(yrs)):
fname = os.path.join(fpath, fnames[k])
tmp = pd.read_csv(fname,sep='\s+',header=None, names=cols)
tmp['year'] = yrs[k]
dat = pd.concat([dat, tmp])
dat['doy'] = dat['doy'].astype(int)
tvec = pd.to_datetime(dat['year'] * 1000 + dat['doy'], format='%Y%j')
#dat.drop('year')
dat.index = tvec
# forcing data
forc = dat[['doy', 'Ta', 'VPD', 'Prec', 'Par', 'U']].copy()
forc['Par'] = 1./4.6*forc['Par']
forc['Rg'] = 2.0*forc['Par']
forc.loc[forc['VPD'] <= 0, 'VPD'] = eps
forc = forc.interpolate() # fills missing values by linear interpolation
forc['CO2'] = 380.0
#relatively extractable water, from soil moisture
forc['Rew'] = 1.0
if site=='SEKno':
forc['Rg'] = 1.4*forc['Rg']
# fc=0.30
# wp=0.10
# Wliq=dat['SWCa']
# Rew=np.maximum( 0.0, np.minimum( (Wliq-wp)/(fc - wp + eps), 1.0) )
# forc['Rew']=Rew
return dat, forc
def read_hydedata(site):
fname = os.path.join(spathy_path, 'HydeDaily2000-2010.txt')
cols=['time','doy','NEE','GPP','TER','ET','H','NEEflag','ETflag','Hflag','Par','Rnet','Ta','VPD','CO2','PrecSmear','Prec','U','Pamb',
'SWE0','SWCh','SWCa','SWCb','SWCc', 'Tsh','Tsa','Tsb','Tsc','RnetFlag','Trfall','Snowdepth','Snowdepthstd','SWE','SWEstd','Roff1','Roff2']
dat=pd.read_csv(fname,sep='\s+',header=None, names=None, parse_dates=[[0,1,2]], keep_date_col=False)
dat.columns=cols
dat.index=dat['time']; dat=dat.drop(['time','SWE0'],axis=1)
forc = dat[['doy','Ta','VPD','Prec','Par','U']].copy()
forc['Par'] = 1/4.6*forc['Par']
forc['Rg'] = 2.0*forc['Par']
forc.loc[forc['VPD'] <= 0, 'VPD'] = eps
#relatively extractable water, Hyde A-horizon
#poros = 0.45
fc = 0.30
wp = 0.10
Wliq = dat['SWCa']
Rew = np.maximum( 0.0, np.minimum((Wliq-wp)/(fc - wp + eps), 1.0) )
forc['Rew'] = Rew
forc['CO2'] = 380.0
forc = forc.interpolate() # fills missing values by linear interpolation
if site == 'FICage4':
fname = os.path.join(spathy_path, 'HydeCage4yr-2000.txt')
cols = ['time','doy','NEE','GPP','TER','ET','H','NEEflag','ETflag','Hflag','Par','Rnet',
'Ta','VPD','CO2','SWCa','PrecSmear','Prec','U','Pamb']
dat = pd.read_csv(fname,sep='\s+',header=None, names=None, parse_dates=[[0,1,2]], keep_date_col=False)
dat.columns=cols
dat.index = dat['time']
dat = dat.drop('time',axis=1)
forc = forc.loc[dat.index]
if site == 'FICage12':
fname = os.path.join(spathy_path, 'HydeCage12yr-2002.txt')
cols = ['time','doy','NEE','GPP','TER','ET','H','NEEflag','ETflag','Hflag','Par','Rnet',
'Ta','VPD','CO2','SWCa','PrecSmear','Prec','U','Pamb']
dat = pd.read_csv(fname,sep='\s+',header=None, names=None, parse_dates=[[0,1,2]], keep_date_col=False)
dat.columns=cols
dat.index = dat['time']
dat = dat.drop('time',axis=1)
forc = forc.loc[dat.index]
return dat, forc
def read_siikaneva_data():
# data available from years 2011, 2013 (good), 2014
yrs = [2011, 2013, 2014]
fpath = os.path.join(spathy_path, 'DailyCEIP', 'Siikaneva1')
fname = os.path.join(fpath, 'Sii_%s_daily.dat' % (yrs[0]))
dat = pd.read_csv(fname, sep=';', header='infer')
start = pd.Timestamp(yrs[0], 1, 1)
end = pd.Timestamp(yrs[0], 12, 31)
tm = pd.date_range(start, end, freq='1d')
dat.index = tm
for k in yrs[1:]:
fname = os.path.join(fpath, 'Sii_%s_daily.dat' % (k))
tmp =
|
pd.read_csv(fname,sep=';',header='infer')
|
pandas.read_csv
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
df_2010_2011['prcab'] = df_2010_2011['prcab'].fillna(0)
df_2012_2013['prcab'] = df_2012_2013['prcab'].fillna(0)
df_2014_2015['prcab'] = df_2014_2015['prcab'].fillna(0)
df_2016_2017['prcab'] = df_2016_2017['prcab'].fillna(0)
df_2018_2019['prcab'] = df_2018_2019['prcab'].fillna(0)
print(df_2018_2019['prcab'])
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
avg_siteid = pd.DataFrame()
avg_surgid = pd.DataFrame()
# #tmpHilla=df_2018_2019.columns
# tmpHilla=pd.DataFrame(df_2018_2019.columns.values.tolist())
# tmpHilla.to_csv("/tmp/pycharm_project_355/columns.csv")
# my_list = df_2010_2011.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2012_2013.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2014_2015.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2016_2017.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2018_2019.columns.values.tolist()
# print (my_list)
# print()
#-------------------merge all csv--------------------------
# dfMerge1 = pd.merge(df_2010_2011, df_2012_2013, on='surgorder')
# dfMerge2 = pd.merge(dfMerge1, df_2014_2015, on='surgorder')
# dfMerge = pd.merge(dfMerge2, df_2016_2017, on='surgorder')
#dfMerge = pd.merge(df_2010_2011, df_2012_2013, on='SiteID')
#count distinct
#table.groupby('YEARMONTH').CLIENTCODE.nunique()
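# A hypothetical, self-contained sketch (not part of the original pipeline): pd.crosstab
# builds the same per-site, per-year counts that groupby_siteid() below assembles through
# ten merges; the demo frame and its values are made up, the column names are the script's.
_demo = pd.DataFrame({"siteid": [1, 1, 2, 2, 2],
                      "surgyear": [2010, 2011, 2010, 2010, 2012]})
_counts = pd.crosstab(_demo["siteid"], _demo["surgyear"])
_counts["Distinct_years"] = _counts.gt(0).sum(axis=1)
_counts["Year_avg"] = _counts.drop(columns="Distinct_years").sum(axis=1) / _counts["Distinct_years"]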
def groupby_siteid():
df_2010 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='siteid')
df2 =pd.merge(df1, df_2012, on='siteid')
df3 =pd.merge(df2, df_2013, on='siteid')
df4 =pd.merge(df3, df_2014, on='siteid')
df5 =pd.merge(df4, df_2015, on='siteid')
df6 =pd.merge(df5, df_2016, on='siteid')
df7 =pd.merge(df6, df_2017, on='siteid')
df8 =pd.merge(df7, df_2018, on='siteid')
df_sum_all_Years =pd.merge(df8, df_2019, on='siteid')
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("total op sum all years siteid.csv")
print("details on site id dist:")
print ("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("total op less 10 years siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_siteid['siteid'] = df_sum_all_Years['siteid']
avg_siteid['total_year_avg'] = df_sum_all_Years['Year_avg']
def groupby_surgid():
df_2010 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='surgid')
df2 =pd.merge(df1, df_2012, on='surgid')
df3 =pd.merge(df2, df_2013, on='surgid')
df4 =pd.merge(df3, df_2014, on='surgid')
df5 =pd.merge(df4, df_2015, on='surgid')
df6 =pd.merge(df5, df_2016, on='surgid')
df7 =pd.merge(df6, df_2017, on='surgid')
df8 =pd.merge(df7, df_2018, on='surgid')
df_sum_all_Years =pd.merge(df8, df_2019, on='surgid')
cols = df_sum_all_Years.columns.difference(['surgid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['surgid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("sum all years surgid.csv")
print()
print("details of surgid dist:")
print("num of all surgid: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("less 10 years surgid.csv")
print("num of doctors with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_surgid['surgid'] = df_sum_all_Years['surgid']
avg_surgid['total_year_avg'] = df_sum_all_Years['Year_avg']
def groupby_hospid():
df_2010 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='hospid')
df2 =pd.merge(df1, df_2012, on='hospid')
df3 =pd.merge(df2, df_2013, on='hospid')
df4 =pd.merge(df3, df_2014, on='hospid')
df5 =pd.merge(df4, df_2015, on='hospid')
df6 =pd.merge(df5, df_2016, on='hospid')
df7 =pd.merge(df6, df_2017, on='hospid')
df8 =pd.merge(df7, df_2018, on='hospid')
df_sum_all_Years =pd.merge(df8, df_2019, on='hospid')
cols = df_sum_all_Years.columns.difference(['hospid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['hospid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("sum all years hospid.csv")
print(df_sum_all_Years)
print ("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("less 10 years hospid.csv")
print("num of hospital with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
return df_sum_all_Years
def draw_hist(data,num_of_bins,title,x_title,y_title,color):
plt.hist(data, bins=num_of_bins, color=color,ec="black")
plt.title(title)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.show()
def group_by_count(group_by_value,name):
df_2010_2011_gb = df_2010_2011.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2012_2013_gb = df_2012_2013.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2014_2015_gb = df_2014_2015.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2016_2017_gb = df_2016_2017.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2018_2019_gb = df_2018_2019.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_merge_1=pd.merge(df_2010_2011_gb,df_2012_2013_gb, on=group_by_value)
df_merge_2=pd.merge(df_merge_1,df_2014_2015_gb, on=group_by_value)
df_merge_3=pd.merge(df_merge_2,df_2016_2017_gb, on=group_by_value)
df_merge_4=pd.merge(df_merge_3,df_2018_2019_gb, on=group_by_value)
cols = df_merge_4.columns.difference([group_by_value])
df_merge_4[name] = df_merge_4.loc[:,cols].sum(axis=1)
df_new=pd.DataFrame()
df_new[group_by_value] = df_merge_4[group_by_value]
df_new[name] = df_merge_4[name]
return df_new
def groupby_siteid_prcab():
df2010 = df_2010.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
df1 = pd.merge(df2010, df2011, on='siteid')
df2 = pd.merge(df1, df2012, on='siteid')
df3 = pd.merge(df2, df2013, on='siteid')
df4 = pd.merge(df3, df2014, on='siteid')
df5 = pd.merge(df4, df2015, on='siteid')
df6 = pd.merge(df5, df2016, on='siteid')
df7 = pd.merge(df6, df2017, on='siteid')
df8 = pd.merge(df7, df2018, on='siteid')
df_sum_all_Years =
|
pd.merge(df8, df2019, on='siteid')
|
pandas.merge
|
from sklearn.cluster import KMeans
def objGrafico():
import numpy as np
import json
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
url = "https://apto-api-rest-ifpe.herokuapp.com/api/desafio-tecnico/rankearCandidatosSimplificado"
#url = "https://run.mocky.io/v3/bd659a0b-5b5f-4989-b47d-657076841398"
#url = "https://run.mocky.io/v3/61703339-173a-4f8d-b235-edfe2405242e"
infoNotas = salvaDados()
a = np.array(kmeansLabel())
img_json = np.array(jsonImagem())
json_dump = json.dumps({'legenda': a, 'imagem': img_json, 'infoNotas': infoNotas, 'url': url}, cls=NumpyEncoder)
return json_dump
def salvaDados():
import numpy as np
import requests
import pandas as pd
import json
import re
r = requests.get("https://apto-api-rest-ifpe.herokuapp.com/api/desafio-tecnico/rankearCandidatosSimplificado").json()
#r = requests.get("https://run.mocky.io/v3/20963c21-73ca-406c-b12c-141c63aef532").json()
#r = requests.get("https://run.mocky.io/v3/61703339-173a-4f8d-b235-edfe2405242e").json()
notas = r['data']
#print(notas)
#print("notas")
data = []
#print(notas[0]['candidatoNotasDtoList'])
for idx, val in enumerate(notas):
#for count, nota in enumerate(val['candidatoNotasDtoList']):
if (val['idDesafioTecnico'] <= 1000):
#print(val['idDesafioTecnico'])
for id, nt in enumerate(val['candidatoNotasDtoList']):
if nt['pontuacao'] is None or nt['nota1'] is None or nt['nota2'] is None :
print("null")
else:
#print(nt)
data.append(nt)
print(data)
#print(data)
dataFrame = pd.DataFrame(data)
#print(dataFrame)
#print(data)
dataFrame = pd.DataFrame(data)
#print(dataFrame)
#print("chegou")
df =
|
pd.DataFrame(dataFrame)
|
pandas.DataFrame
|
"""Tradingview model"""
__docformat__ = "numpy"
import requests
from tradingview_ta import TA_Handler
import pandas as pd
from gamestonk_terminal import config_terminal as cfg
INTERVALS = {
"1m": "1 min",
"5m": "5 min",
"15m": "15 min",
"1h": "1 hour",
"4h": "4 hours",
"1d": "1 day",
"1W": "1 week",
"1M": "1 month",
}
SCREENERS = ["crypto", "forex", "cfd"]
def get_tradingview_recommendation(
ticker: str, screener: str, exchange: str, interval: str
) -> pd.DataFrame:
"""Get tradingview recommendation based on technical indicators
Parameters
----------
ticker : str
Ticker to get the recommendation from tradingview based on technical indicators
screener : str
Screener based on tradingview docs https://python-tradingview-ta.readthedocs.io/en/latest/usage.html
exchange: str
Exchange based on tradingview docs https://python-tradingview-ta.readthedocs.io/en/latest/usage.html
interval: str
Interval time to check technical indicators and corresponding recommendation
Returns
-------
df_recommendation: pd.DataFrame
Dataframe of tradingview recommendations based on technical indicators
"""
if not exchange:
s_req = f"https://www.alphavantage.co/query?function=OVERVIEW&symbol={ticker}&apikey={cfg.API_KEY_ALPHAVANTAGE}"
result = requests.get(s_req, stream=True)
exchange = result.json()["Exchange"]
if not interval:
df_recommendation = pd.DataFrame()
index_recommendation = []
for an_interval in ["1M", "1W", "1d", "4h", "1h", "15m", "5m", "1m"]:
# If the returned data was successful
if result.status_code == 200:
stock_recommendation = TA_Handler(
symbol=ticker,
screener=screener,
exchange=exchange,
interval=an_interval,
)
d_recommendation = stock_recommendation.get_analysis().summary
df_recommendation = pd.concat(
[df_recommendation, pd.DataFrame([d_recommendation])], ignore_index=True
)
index_recommendation.append(INTERVALS[an_interval])
df_recommendation.index = index_recommendation
df_recommendation[["BUY", "NEUTRAL", "SELL"]] = df_recommendation[
["BUY", "NEUTRAL", "SELL"]
].astype(int)
df_recommendation.index.name = "Interval"
else:
stock_recommendation = TA_Handler(
symbol=ticker, screener=screener, exchange=exchange, interval=interval
)
d_recommendation = stock_recommendation.get_analysis().summary
df_recommendation =
|
pd.DataFrame.from_dict(d_recommendation, orient="index")
|
pandas.DataFrame.from_dict
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 by University of Kassel, T<NAME>, RWTH Aachen University and Fraunhofer
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
# contributors (see AUTHORS file for details). All rights reserved.
import numpy as np
import pandas as pd
import datetime as dt
from packaging import version
from pandapower import compare_arrays
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
__author__ = 'smeinecke'
def ensure_iterability(var, len_=None):
""" This function ensures iterability of a variable (and optional length). """
if hasattr(var, "__iter__") and not isinstance(var, str):
if isinstance(len_, int) and len(var) != len_:
raise ValueError("Length of variable differs from %i." % len_)
else:
len_ = len_ or 1
var = [var]*len_
return var
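# Minimal usage sketch for ensure_iterability(); the literal values are illustrative only.
assert ensure_iterability(3, len_=4) == [3, 3, 3, 3]
assert ensure_iterability([1, 2], len_=2) == [1, 2]
assert ensure_iterability("abc") == ["abc"]  # strings are wrapped, not iterated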
def find_idx_by_name(df, column, name):
idx = df.index[df[column] == name]
if len(idx) == 0:
raise UserWarning("In column '%s', there is no element named %s" % (column, name))
if len(idx) > 1:
raise UserWarning("In column '%s', multiple elements are named %s" % (column, name))
return idx[0]
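# Minimal usage sketch for find_idx_by_name(), using a hypothetical bus table.
_buses = pd.DataFrame({"name": ["Bus 1", "Bus 2", "Bus 3"]})
assert find_idx_by_name(_buses, "name", "Bus 2") == 1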
def idx_in_2nd_array(arr1, arr2, match=True):
""" This function returns an array of indices of arr1 matching arr2.
arr1 may include duplicates. If an item of arr1 misses in arr2, 'match' decides whether
the idx of the nearest value is returned (False) or an error is raised (True).
"""
if match:
missings = list(set(arr1) - set(arr2))
if len(missings):
raise ValueError("These values misses in arr2: " + str(missings))
arr1_, uni_inverse = np.unique(arr1, return_inverse=True)
sort_lookup = np.argsort(arr2)
arr2_ = np.sort(arr2)
idx = np.searchsorted(arr2_, arr1_)
res = sort_lookup[idx][uni_inverse]
return res
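# Minimal usage sketch for idx_in_2nd_array(); the arrays are illustrative only.
_arr1 = np.array([4, 4, 2])
_arr2 = np.array([2, 3, 4])
assert list(idx_in_2nd_array(_arr1, _arr2)) == [2, 2, 0]  # positions of 4, 4, 2 in _arr2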
def column_indices(df, query_cols):
""" returns an numpy array with the indices of the columns requested by 'query_cols'.
Works propperly for string column names. """
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols, query_cols, sorter=sidx)]
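# Minimal usage sketch for column_indices(); the frame is illustrative only.
_cols_df = pd.DataFrame(columns=["a", "b", "c"])
assert list(column_indices(_cols_df, ["c", "a"])) == [2, 0]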
def merge_dataframes(dfs, keep="first", sort_index=True, sort_column=True, column_to_sort=None,
index_time_str=None, **kwargs):
"""
This is a wrapper function of pandas.concat(dfs, axis=0) to merge DataFrames.
INPUT:
**dfs** (DataFrames) - a sequence or mapping of DataFrames
OPTIONAL:
**keep** (str, "first") - Flag to decide which data are kept in case of duplicated
indices - first, last or all duplicated data.
**sort_index** (bool, True) - If True, the indices of the returning DataFrame will be
sorted. If False, the indices and columns will be in order of the original DataFrames.
**sort_column** (bool, True) - If True, the columns of the returning DataFrame will be
sorted. If False, the indices and columns will be in order of the original DataFrames.
**column_to_sort** (-, None) - If given, 'column_to_sort' must be a column name occurring in
both DataFrames. The returning DataFrame will be sorted by this column. The input
indices get lost.
**index_time_str** (str, None) - If given, the indices or the 'column_to_sort' if given will
be sorted in datetime order.
****kwargs** - Keyword arguments for pandas.concat() except axis, such as sort, join,
join_axes, ignore_index, keys. 'sort' can overwrite 'sort_index' and 'sort_column'.
"""
if "axis" in kwargs:
if kwargs["axis"] != 0:
logger.warning("'axis' is always assumed as zero.")
kwargs.pop("axis")
if "sort" in kwargs:
if not kwargs["sort"] == sort_index == sort_column:
sort_index = kwargs["sort"]
sort_column = kwargs["sort"]
if not sort_index or not sort_column:
logger.warning("'sort' overwrites 'sort_index' and 'sort_column'.")
kwargs.pop("sort")
# --- set index_column as index
if column_to_sort is not None:
if any([column_to_sort not in df.columns for df in dfs]):
raise KeyError("column_to_sort '%s' must be a column of " % column_to_sort +
"both dataframes, df1 and df2")
if not sort_index:
logger.warning("Since 'column_to_sort' is given, the returning DataFrame will be" +
"sorted by this column as well as the columns, although 'sort' " +
"was given as False.")
sort_index = True
dfs = [df.set_index(column_to_sort) for df in dfs]
# --- concat
df = pd.concat(dfs, axis=0, **kwargs)
# --- unsorted index and columns
output_index = df.index.drop_duplicates()
# --- drop rows with duplicated indices
if keep == "first":
df = df.groupby(df.index).first()
elif keep == "last":
df = df.groupby(df.index).last()
elif keep != "all":
raise ValueError("This value %s is unknown to 'keep'" % keep)
# --- sorted index and reindex columns
if sort_index:
if index_time_str:
dates = [dt.datetime.strptime(ts, index_time_str) for ts in df.index]
dates.sort()
output_index = [dt.datetime.strftime(ts, index_time_str) for ts in dates]
if keep == "all":
logger.warning("If 'index_time_str' is not None, keep cannot be 'all' but are " +
"assumed as 'first'.")
else:
output_index = sorted(df.index)
# --- reindex as required
if keep != "all":
if version.parse(pd.__version__) >= version.parse("0.21.0"):
df = df.reindex(output_index)
else:
df = df.reindex_axis(output_index)
if sort_column:
if version.parse(pd.__version__) >= version.parse("0.21.0"):
df = df.reindex(columns=sorted(df.columns))
else:
df = df.reindex_axis(sorted(df.columns), axis=1)
# --- get back column_to_sort as column from index
if column_to_sort is not None:
df.reset_index(inplace=True)
return df
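# Minimal usage sketch for merge_dataframes() with two small, hypothetical frames.
_df_a = pd.DataFrame({"p": [1, 2]}, index=[0, 1])
_df_b = pd.DataFrame({"q": [3, 4]}, index=[1, 2])
_merged = merge_dataframes([_df_a, _df_b], keep="first")
# index is sorted to [0, 1, 2]; for the duplicated index 1, first() keeps the first
# non-null value per column; columns are reindexed alphabetically to ['p', 'q']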
def get_unique_duplicated_dict(df, subset=None, only_dupl_entries=False):
""" Returns a dict which keys are the indices of unique row of the dataframe 'df'. The values
of the dict are the indices which are duplicated to each key index.
This is a wrapper function of _get_unique_duplicated_dict() to consider only_dupl_entries.
"""
is_dupl = df.duplicated(subset=subset, keep=False)
uniq_dupl_dict = _get_unique_duplicated_dict(df[is_dupl], subset)
if not only_dupl_entries:
others = df.index[~is_dupl]
uniq_empties = {o: [] for o in others}
# python 3.5+
# uniq_dupl_dict = {**uniq_dupl_dict, **uniq_empties}
# python 3.4
for k, v in uniq_empties.items():
uniq_dupl_dict[k] = v
return uniq_dupl_dict
def _get_unique_duplicated_dict(df, subset=None):
""" Returns a dict which keys are the indices of unique row of the dataframe 'df'. The values
of the dict are the indices which are duplicated to each key index. """
subset = subset or df.columns
dupl = df.index[df.duplicated(subset=subset)]
uniq = df.index[~df.duplicated(subset=subset)]
uniq_dupl_dict = {}
# nan_str is only needed because compare_arrays(), with old numpy versions tied to python 3.4,
# does not reliably detect nans as equal
nan_str = "nan"
while nan_str in df.values:
nan_str += "n"
for uni in uniq:
do_dupl_fit = compare_arrays(
np.repeat(df.loc[uni, subset].fillna(nan_str).values.reshape(1, -1), len(dupl), axis=0),
df.loc[dupl, subset].fillna(nan_str).values).all(axis=1)
uniq_dupl_dict[uni] = list(dupl[do_dupl_fit])
return uniq_dupl_dict
def reindex_dict_dataframes(dataframes_dict):
""" Set new continuous index starting at zero for every DataFrame in the dict. """
for key in dataframes_dict.keys():
if isinstance(dataframes_dict[key], pd.DataFrame) and key != "StudyCases":
dataframes_dict[key].index = list(range(dataframes_dict[key].shape[0]))
def ensure_full_column_data_existence(dict_, tablename, column):
"""
Ensures that the column of a dict's DataFrame is fully filled with information. If data are
missing, they are filled with the name tablename+index
"""
missing_data = dict_[tablename].index[dict_[tablename][column].isnull()]
# fill missing data by tablename+index, e.g. "Bus 2"
dict_[tablename][column].loc[missing_data] = [tablename + ' %s' % n for n in (
missing_data.values + 1)]
return dict_[tablename]
def avoid_duplicates_in_column(dict_, tablename, column):
""" Avoids duplicates in given column (as type string) of a dict's DataFrame """
query = dict_[tablename][column].duplicated(keep=False)
for double in dict_[tablename][column].loc[query].unique():
idx = dict_[tablename][column].index[dict_[tablename][column] == double]
dict_[tablename][column].loc[idx] = [double + " (%i)" % i for i in range(len(idx))]
if sum(dict_[tablename][column].duplicated()):
raise ValueError("The renaming by 'double + int' was not appropriate to remove all " +
"duplicates.")
def append_str_by_underline_count(str_series, append_only_duplicates=False, counting_start=1,
reserved_strings=None):
"""
Returns a Series of appended strings and a set of all strings which were appended or are set as
reserved by input.
INPUT:
**str_series** (Series with string values) - strings to be appended by "_" + a number
OPTIONAL:
**append_only_duplicates** (bool, False) - If True, all strings will be appended. If False,
only duplicated strings will be appended.
**counting_start** (int, 1) - Integer to start appending with
**reserved_strings** (iterable, None) - strings which are not allowed in str_series and must
be appended.
OUTPUT:
**appended_strings** (Series with string values) - appended strings
**reserved_strings** (set) - all reserved_strings from input and all strings which were
appended
"""
# --- initializations
# ensure only unique values in reserved_strings:
reserved_strings = pd.Series(sorted(set(reserved_strings))) if reserved_strings is not None \
else
|
pd.Series()
|
pandas.Series
|
###################################################################
# <NAME> - drigols #
# Last update: 27/12/2021 #
###################################################################
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash import BashOperator
from airflow.models import Variable
from airflow import DAG
from datetime import datetime,date, timedelta
from io import BytesIO
from minio import Minio
from sqlalchemy.engine import create_engine
import pandas as pd
import math
DEFAULT_ARGS = {
'owner': 'Airflow',
'depends_on_past': False,
'start_date': datetime(2021, 1, 13),
}
dag = DAG(
'etl_work_accident_att',
default_args = DEFAULT_ARGS,
schedule_interval = "@once"
)
data_lake_server = Variable.get("data_lake_server")
data_lake_login = Variable.get("data_lake_login")
data_lake_password = Variable.get("data_lake_password")
database_server = Variable.get("database_server")
database_login = Variable.get("database_login")
database_password = Variable.get("database_password")
database_name = Variable.get("database_name")
url_connection = "mysql+pymysql://{}:{}@{}/{}".format(
str(database_login),
str(database_password),
str(database_server),
str(database_name)
)
engine = create_engine(url_connection)
client = Minio(
data_lake_server,
access_key = data_lake_login,
secret_key = data_lake_password,
secure = False
)
def extract():
# Fetch the entire employees table and store it as a DataFrame.
df_employees = pd.read_sql_table("employees", engine)
# Fetch the entire accident table and store it as a DataFrame.
df_accident = pd.read_sql_table("accident", engine)
# Check which employees suffered a work accident.
work_accident = []
for emp in df_employees["emp_no"]:
if emp in df_accident["emp_no"].to_list():
work_accident.append(1)
else:
work_accident.append(0)
# Create the temporary DataFrame structure and assign the data.
df_ = pd.DataFrame(data=None, columns=["work_accident"])
df_["work_accident"] = work_accident
# Persist the files to the staging area.
df_.to_csv(
"/tmp/work_accident.csv",
index = False
)
def load():
# Load the data from the staging area.
df_ =
|
pd.read_csv("/tmp/work_accident.csv")
|
pandas.read_csv
|
from pandas import DataFrame
import pandas as pd
from sklearn.cross_decomposition import PLSRegression, PLSCanonical
def pls_wrapper(pls):
class PLSPandasMixin(pls):
def fit(self, x, y):
self.x = x
self.y = y
return super().fit(x, y)
def transform(self, x, y):
#assert all(x.index == self.x.index) and all(y.index == self.y.index)
T, U = super().transform(x, y)
return DataFrame(T, index=x.index), DataFrame(U, index=y.index)
@property
def W(self):
return DataFrame(self.x_weights_, index=self.x.columns)
@property
def C(self):
return DataFrame(self.y_weights_, index=self.y.columns)
@property
def P(self):
return DataFrame(self.x_loadings_, index=self.x.columns)
@property
def Q(self):
return
|
DataFrame(self.y_loadings_, index=self.y.columns)
|
pandas.DataFrame
|
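# Hypothetical usage of pls_wrapper() above, assuming the function ends by returning the
# PLSPandasMixin class it defines; the random demo data and component count are made up.
import numpy as np
_X = DataFrame(np.random.rand(20, 4), columns=list("abcd"))
_Y = DataFrame(np.random.rand(20, 2), columns=["y1", "y2"])
_pls = pls_wrapper(PLSRegression)(n_components=2)
_pls.fit(_X, _Y)
_T, _U = _pls.transform(_X, _Y)  # scores come back as DataFrames indexed like _X and _Y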
import pandas as pd
import matplotlib.pyplot as plt
def plot_candle_chart(df, period=100, short_sma=50, long_sma=200):
df_plot = df[-period:]
bullish_df = df_plot[df_plot['Close'] > df_plot['Open']]
bearish_df = df_plot[df_plot['Close'] < df_plot['Open']]
plt.style.use('fivethirtyeight')
plt.figure(figsize=(20, 15))
plt.vlines(x=df_plot.index, ymin=df_plot['Low'], ymax=df_plot['High'], color='black', linewidth=1.5)
plt.vlines(x=bullish_df.index, ymin=bullish_df['Open'], ymax=bullish_df['Close'], color='green', linewidth=4)
plt.vlines(x=bearish_df.index, ymin=bearish_df['Close'], ymax=bearish_df['Open'], color='red', linewidth=4)
sma =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""Quant, financial and econometrics helpers
- linear algebra, stationarity, robust covariances
- maturity, bootstrap, annuity, compounding, rate of discount and interest
- value at risk, duration, half-life
Author: <NAME>
License: MIT
"""
import numpy as np
import re
from pandas import DataFrame, Series
from statsmodels.tsa.stattools import adfuller
from pandas.api import types
try:
from settings import ECHO
except:
ECHO = False
def least_squares(data=None, y='y', x='x', add_constant=True, stdres=False):
"""Compute least squares fitted coefficients: helper for groupby apply
Parameters
----------
data : DataFrame, default is None
supplies dependent and regressor variables, useful for .groupby
y : str or DataFrame/Series, default is 'y'
column names of dependent variables, or DataFrame if data is None
x : str or DataFrame/Series, default is 'x'
column names of independent variables, or DataFrame if data is None
add_constant : bool, default is True
if True, then hstack 'Intercept' column of ones before x variables
stdres : bool, default is False
if True, then also return estimated residual std dev
Returns
-------
coef : Series or DataFrame
DataFrame (Series) of fitted coefs when y is multi- (one-) dimensional
Examples
--------
fm = data.groupby(by='year').apply(least_squares, y='y', x='x')
"""
if data is None:
X = x.to_numpy()
Y = DataFrame(y).to_numpy()
x = list(x.columns)
y = list(y.columns)
else:
x = [x] if isinstance(x, str) else list(x)
y = [y] if isinstance(y, str) else list(y)
X = data[x].to_numpy()
Y = data[y].to_numpy()
if add_constant:
X = np.hstack([np.ones((X.shape[0], 1)), X])
x = ['Intercept'] + x
b = np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, Y)).T
if stdres:
b = np.hstack([b, np.std(Y-(X @ b.T), axis=0).reshape(-1,1)])
x = x + ['stdres']
return (Series(b[0], index=x) if len(b)==1 else
|
DataFrame(b, columns=x, index=y)
|
pandas.DataFrame
|
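# Minimal usage sketch for least_squares(), mirroring the docstring's groupby example;
# the demo numbers are illustrative only and assume the helper above is in scope.
_demo = DataFrame({"year": [2020] * 4 + [2021] * 4,
                   "x": [1, 2, 3, 4, 1, 2, 3, 4],
                   "y": [2, 4, 6, 8, 1, 2, 3, 4]})
_fm = _demo.groupby(by="year").apply(least_squares, y="y", x="x")
# one row of [Intercept, x] coefficients per year: slope 2 for 2020, slope 1 for 2021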
import csv
import pandas as pd
import math
import multiprocessing
import os
import shutil
import time
import uuid
from definitions import ROOT_DIR
from multiprocessing import Pool, Manager, Process
from tqdm import tqdm
def write_to_csv(q, csv_file_name, headers, buffer_size=500):
# Create the output files
csv_file = open(f"tmp/{csv_file_name}/{uuid.uuid4()}.csv", "w")
csv_writer = csv.writer(csv_file)
# Write headers
csv_writer.writerow(headers)
output = []
while True:
message = q.get()
if message is None:
if len(output) > 0:
csv_writer.writerows(output)
break
output.append(message)
if len(output) >= buffer_size:
csv_writer.writerows(output)
output = []
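# Hypothetical in-process driver for write_to_csv(): a plain queue.Queue stands in for the
# Manager queue used by ReportRunner below, and the "tmp/demo" folder, headers and rows are
# illustrative only. The None sentinel flushes any buffered rows and stops the loop.
import queue
os.makedirs("tmp/demo", exist_ok=True)
_q = queue.Queue()
_q.put(["/page-a", 3])
_q.put(["/page-b", 5])
_q.put(None)
write_to_csv(_q, "demo", headers=["base_path", "word_count"], buffer_size=1)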
class ReportRunner:
def __init__(self, config):
self.turbo_mode = config.turbo_mode
self.preprocessed_content_store_path = config.preprocessed_content_store_path
self.html_content_dir_path = config.html_content_dir_path
self.content_item_batch_size = config.content_item_batch_size
self.csv_writer_buffer_size = config.csv_writer_buffer_size
self.total_content_items = config.total_content_items
self.manager = Manager()
def run(self, report_generators):
print(f"Reading {self.total_content_items} content items from the preprocessed content store...")
preprocessed_content_items = pd.read_csv(self.preprocessed_content_store_path, sep="\t", compression="gzip",
low_memory=False, chunksize=self.total_content_items
)
print("Finished reading from the preprocessed content store!")
preprocessed_content_items = next(preprocessed_content_items)
total_content_items = len(preprocessed_content_items)
print(f"Content item length: {total_content_items}")
num_work, chunksize = self.get_options_for_multiprocessing(total_content_items)
report_generators_with_queues = self.create_report_queues_by_generator(report_generators)
report_writer_processes = self.initialize_report_writers(report_generators_with_queues, num_work)
required_iterations = self.get_iterations_for_batch_size(total_content_items, self.content_item_batch_size)
content_items_iterator = preprocessed_content_items.iterrows()
for iteration in range(0, required_iterations):
print(f"Starting batch {iteration + 1}")
start_time = time.time()
content_item_tuples = self.create_batched_input_for_multiprocessing(content_items_iterator,
report_generators_with_queues,
total_content_items)
print(f"Created batch of {len(content_item_tuples)} tuples")
with Pool(num_work) as pool:
pool.starmap(self.multiprocess_content_items,
[content_item_tuple for content_item_tuple in tqdm(content_item_tuples)],
chunksize=chunksize)
pool.close()
pool.join()
elapsed_time_in_seconds = time.time() - start_time
print(f"Took {elapsed_time_in_seconds}s to process batch {iteration + 1}")
self.finalize_queues_for_report_writers(report_generators_with_queues.values(), num_work)
self.wait_for_report_writers_processes_to_terminate(report_writer_processes)
self.create_reports_from_temporary_files(report_generators)
def create_batched_input_for_multiprocessing(self, content_items_iterator, report_generators_with_queues,
total_content_items):
tuples = []
end_content_item_index = total_content_items - 1
for i in range(0, self.content_item_batch_size):
preprocessed_content_item_tuple = next(content_items_iterator)
tuples.append(
(preprocessed_content_item_tuple[1], self.html_content_dir_path, report_generators_with_queues))
if preprocessed_content_item_tuple[0] == end_content_item_index:
print(f"Reached end of the input file at index {end_content_item_index}")
break
return tuples
def create_report_queues_by_generator(self, report_generators):
queues_by_generator = {}
for generator in report_generators:
report_queue = self.manager.Queue()
queues_by_generator[generator] = report_queue
return queues_by_generator
def initialize_report_writers(self, report_queues_by_generator, number_of_workers_per_report):
report_writer_processes = []
# Create temporary dir for partial CSVs
os.mkdir(os.path.join(ROOT_DIR, 'tmp'))
for generator, queue in report_queues_by_generator.items():
os.mkdir(os.path.join(ROOT_DIR, f"tmp/{generator.filename}"))
# Create a csv writer process for each of the report workers we'll be using for this report
for i in range(number_of_workers_per_report):
report_writer_processes.append(self.initialize_writer_process(write_to_csv, queue, generator.filename,
generator.headers))
return report_writer_processes
def get_options_for_multiprocessing(self, total_content_items):
worker_multiplier = 8 if self.turbo_mode else 0.8
num_work = int(math.ceil(multiprocessing.cpu_count() * worker_multiplier)) # * 8
chunksize, remainder = divmod(total_content_items, num_work)
if remainder:
chunksize += 1
return num_work, chunksize
@staticmethod
def create_reports_from_temporary_files(report_generators):
for report_generator in report_generators:
temporary_dir = os.path.join(ROOT_DIR, f"tmp/{report_generator.filename}")
output_path = os.path.join(ROOT_DIR, f"data/{report_generator.filename}")
csv_dataframes = [pd.read_csv(os.path.join(temporary_dir, temporary_csv))
for temporary_csv in os.listdir(temporary_dir)]
|
pd.concat(csv_dataframes)
|
pandas.concat
|
""" @file ploy.py
"""
import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from mpl_toolkits import mplot3d
from matplotlib.collections import PatchCollection
import matplotlib.ticker as ticker
import scipy.stats as sts
from . import post_process
# # TeX fonts
# import matplotlib
# matplotlib.rcParams['mathtext.fontset'] = 'custom'
# matplotlib.rcParams['mathtext.rm'] = 'Bitstream Vera Sans'
# matplotlib.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
# matplotlib.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'
# # matplotlib.pyplot.title(r'ABC123 vs $\mathrm{ABC123}^{123}$')
# matplotlib.rcParams['mathtext.fontset'] = 'stix'
# matplotlib.rcParams['font.family'] = 'STIXGeneral'
# # matplotlib.pyplot.title(r'ABC123 vs $\mathrm{ABC123}^{123}$')
from matplotlib import rc
rc('text', usetex=True)
rc('font', family='serif')
# rc('font', size=14)
# rc('legend', fontsize=13)
# rc('text.latex', preamble=r'\usepackage{cmbright}')
def plot_costs(unpacked):
""" Plots costs
"""
linewidth = 4
labelsize = 40
fontsize = 40
fig, axs = plt.subplots(1,1)
axs.set_xlabel('Time (s)', fontsize=fontsize)
# axs.set_ylabel('Cost', fontsize=fontsize)
axs.set_ylabel('Normalized Cost', fontsize=fontsize)
# axs.set_title('Cost VS Time')
for sim_name, metrics in unpacked.items():
tout = metrics['tout']
yout = metrics['yout']
final_cost = metrics['final_cost']
cost_to_go = metrics['cost_to_go']
optimal_cost = metrics['optimal_cost']
summed_opt_cost = np.sum(optimal_cost[0, :])
label = sim_name.split('Assignment', 1)[1]
### cost plots
if sim_name == 'AssignmentCustom':
# axs.plot(tout, summed_opt_cost*np.ones((yout.shape[0])), '--k', label='Optimal cost with no switching')
# axs.plot(tout, np.sum(final_cost, axis=1), '--c', label='Cum. Stage Cost'+' '+sim_name)
# axs.plot(tout, np.sum(cost_to_go, axis=1), '--r', label='Cost-to-go'+' '+sim_name)
# normalized costs
axs.plot(tout, np.ones((yout.shape[0])), '--k', linewidth=linewidth, label='Optimal cost')
axs.plot(tout, np.sum(final_cost, axis=1)/summed_opt_cost, '--c', linewidth=linewidth, label='Cum. Stage Cost'+' '+label)
axs.plot(tout, np.sum(cost_to_go, axis=1)/summed_opt_cost, '--r', linewidth=linewidth, label='Cost-to-go'+' '+label)
else:
# axs.plot(tout, np.sum(final_cost, axis=1), '-c', label='Cum. Stage Cost'+' '+sim_name)
## axs.plot(tout, np.sum(cost_to_go, axis=1), '-r', label='Cost-to-go'+' '+sim_name)
# normalized costs
axs.plot(tout, np.sum(final_cost, axis=1)/summed_opt_cost, '-c', linewidth=linewidth, label='Cum. Stage Cost'+' '+label)
axs.xaxis.set_tick_params(labelsize=labelsize)
axs.yaxis.set_tick_params(labelsize=labelsize)
# reorder the legend terms
handles, labels = axs.get_legend_handles_labels()
#TODO hardcoded - fix
try:
labels = [labels[1], labels[0], labels[2], labels[3]]
handles = [handles[1], handles[0], handles[2], handles[3]]
except IndexError:
# # DYN
# labels = [labels[1], labels[0]]
# handles = [handles[1], handles[0]]
labels = [labels[1], labels[0]]
handles = [handles[1], handles[0]]
axs.legend(handles, labels, loc='center right', bbox_to_anchor=(1.0, 0.25), fontsize=fontsize)
# Agent-by-agent cost plots on 1 figure
# plt.figure()
# for sim_name, metrics in unpacked.items():
# nagents = metrics['nagents']
# tout = metrics['tout']
# final_cost = metrics['final_cost']
# cost_to_go = metrics['cost_to_go']
# for zz in range(nagents):
# plt.plot(tout, final_cost[:, zz], '-.c', label='Cum. Stage Cost ({0})'.format(zz))
# plt.plot(tout, cost_to_go[:, zz], '-.r', label='Cost-to-go (assuming no switch) ({0})'.format(zz))
# plt.legend()
def plot_cost_histogram(unpacked_ensemble_metric):
""" Plots histogram of costs
"""
fontsize = 32
labelsize = 32
labels = ['Dyn', 'EMD']
fig, axs = plt.subplots(1,1)
axs.set_xlabel('Control Expenditure Difference (EMD - Dyn)/Dyn', fontsize=fontsize)
axs.set_ylabel('Frequency', fontsize=fontsize)
axs.hist(unpacked_ensemble_metric, histtype='bar', stacked=True, bins=10, align='left', label=labels)
axs.xaxis.set_tick_params(labelsize=labelsize)
axs.yaxis.set_tick_params(labelsize=labelsize)
axs.xaxis.offsetText.set_fontsize(fontsize)
axs.legend(fontsize=fontsize)
# TODO move to a different file
def atoi(text):
return int(text) if text.isdigit() else text
# TODO move to a different file
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
return [ atoi(c) for c in re.split(r'(\d+)', text) ]
def plot_ensemble_cost_histogram(metrics_to_compare):
""" Plots histogram of agent swarm LQ costs for multiple ensembles
"""
fontsize = 40
labelsize = 40
fig, axs = plt.subplots(1,1)
axs.set_xlabel('Control Expenditure Difference (EMD - Dyn)/Dyn', fontsize=fontsize)
axs.set_ylabel('Frequency', fontsize=fontsize)
# Using DataFrames
labels = []
for ensemble_name in metrics_to_compare.keys():
labels.append(re.search(r'\d+v\d+', ensemble_name).group())
metrics_df = pd.DataFrame.from_dict(metrics_to_compare)
metrics_df.columns = labels
# order data by number of agents
labels.sort(key=natural_keys)
metrics_df = metrics_df[labels]
for i, (label, data) in enumerate(metrics_df.items()):
nbins = int(len(data)/4)
data.hist(ax=axs, bins=nbins, align='left', edgecolor='k', alpha=0.5, label=label)
# data.plot.kde(ax=axs)
axs.grid(False)
axs.xaxis.set_tick_params(labelsize=labelsize)
axs.yaxis.set_tick_params(labelsize=labelsize)
axs.xaxis.offsetText.set_fontsize(fontsize)
axs.legend(fontsize=fontsize)
def plot_assignments(unpacked):
""" Plots assignments
"""
for sim_name, metrics in unpacked.items():
dx = metrics['dx']
nagents = metrics['nagents']
ntargets = metrics['ntargets']
tout = metrics['tout']
yout = metrics['yout']
assignments = yout[:, nagents*2*dx:].astype(np.int32)
assignment_switches = post_process.find_switches(tout, yout, nagents, nagents, dx, dx)
# recreate assignments per switch
asst_switch_indices = set()
asst_switch_indices.add(0) # add the origin assignment
for ii in range(nagents):
switch_indices = assignment_switches[ii]
for ind in switch_indices:
asst_switch_indices.add(ind)
# order the switch time
asst_switch_indices = sorted(asst_switch_indices) # becomes ordered list
# get assignment switches in increasing time order
asst_to_plot = np.zeros((len(asst_switch_indices), nagents)) # (starting assignment + switches)
asst_to_plot[0, :] = assignments[0, :]
for tt, ind in enumerate(asst_switch_indices):
asst_to_plot[tt, :] = assignments[ind, :]
# PLOT TOO BUSY, deprecate
plt.figure()
# plt.title("Agent-Target Assignments")
plt.xlabel('time (s)')
plt.ylabel('Assigned-to Target')
for ii in range(nagents):
plt.plot(tout, assignments[:, ii], '-', label='A{0}'.format(ii))
plt.legend()
# TEST
fig = plt.figure()
ax = plt.axes(projection='3d')
# fig, ax = plt.subplots()
ax.set_title(sim_name)
asst_array = np.zeros((nagents, tout.shape[0], ntargets)) # want to show propogation of assignment over time in y-axis
# construct assignment array
for tt in range(tout.shape[0]):
time = tout[tt]
for ii in range(nagents): # iterate consecutively through agents
# ax.plot3D(agent_i, tout, target_j, '-r', label=agent_traj_label)
jj = assignments[tt, ii]
asst_array[ii, tt, jj] = 1
# change color and marker if there's a switch
# # stack plots on top of each other
# agents = np.arange(nagents)
# for asst_num, (switch_ind, assignment) in enumerate(zip(asst_switch_indices, asst_to_plot)):
# assigned_to_targets = assignment
# # ax.plot(agents, assigned_to_targets, marker='s', label='Assignment{0}'.format(asst_num))
# ax.plot(agents, assigned_to_targets, label='Assignment{0}'.format(asst_num))
# # if sim_name != 'AssignmentCustom':
# # ax.fill_between(agents, assigned_to_targets, asst_to_plot[1], color='blue')
# ax.set_xlabel('agents')
# ax.set_ylabel('targets')
# ax.legend()
# plot 2d assignment plots in 3d at correct time step
cumulative_asst_label = 'Cumulative Assignment Projection'
agents = np.arange(nagents)
for asst_num, (switch_ind, assignment) in enumerate(zip(asst_switch_indices, asst_to_plot)):
switch_time = tout[switch_ind]
assigned_to_targets = assignment
if asst_num >= 1:
cumulative_asst_label = '__nolabel__'
ax.plot(agents, assigned_to_targets, tout[-1], zdir='y', color='blue', label=cumulative_asst_label)
color = next(ax._get_lines.prop_cycler)['color']
ax.plot(agents, assigned_to_targets, switch_time, '-s', color=color, zdir='y', label='Assignment{0}'.format(asst_num))
ax.scatter(agents, assigned_to_targets, tout[-1], color=color, zdir='y')
ax.add_collection3d(plt.fill_between(agents, assigned_to_targets, asst_to_plot[0], color='blue'), zs=tout[-1], zdir='y')
ax.set_xlabel('agents')
ax.set_ylabel('time (s)')
ax.set_zlabel('targets')
ax.legend()
ax.set_ylim3d(0, tout[-1])
ax.xaxis.set_ticks(np.arange(nagents))
ax.zaxis.set_ticks(np.arange(ntargets))
def plot_ensemble_switch_histogram(metrics_to_compare):
""" Plots histogram of assignment switches for multiple ensembles
"""
fontsize = 40
labelsize = 40
fig, axs = plt.subplots(1,1)
axs.set_xlabel('Assignment Switches', fontsize=fontsize)
axs.set_ylabel('Frequency', fontsize=fontsize)
# Using DataFrames
labels = []
for ensemble_name in metrics_to_compare.keys():
labels.append(re.search(r'\d+v\d+', ensemble_name).group())
metrics_df = pd.DataFrame.from_dict(metrics_to_compare)
metrics_df.columns = labels
# order data by number of agents
labels.sort(key=natural_keys)
metrics_df = metrics_df[labels]
for i, (label, data) in enumerate(metrics_df.items()):
nbins = int(len(data)/4)
data.hist(ax=axs, bins=nbins, align='left', edgecolor='k', alpha=0.5, label=label)
# data.plot.kde(ax=axs)
axs.grid(False)
axs.xaxis.set_tick_params(labelsize=labelsize)
axs.yaxis.set_tick_params(labelsize=labelsize)
tick_spacing = 1
axs.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
axs.xaxis.offsetText.set_fontsize(fontsize)
axs.legend(fontsize=fontsize)
def plot_ensemble_avg_switch(metrics_to_compare):
""" Plots average number of assignment switches over time for multiple ensembles
"""
fontsize = 40
labelsize = 40
fig, axs = plt.subplots(1,1)
axs.set_xlabel('Agents', fontsize=fontsize)
axs.set_ylabel('Average \# Assign. Switches', fontsize=fontsize)
# Using DataFrames
labels = []
for ensemble_name in metrics_to_compare.keys():
labels.append(re.search(r'\d+v\d+', ensemble_name).group())
metrics_df = pd.DataFrame(metrics_to_compare, index=[0])
metrics_df.columns = labels
# order data by number of agents
labels.sort(key=natural_keys)
metrics_df = metrics_df[labels]
metrics = {'Ensemble': labels, 'Average Assignment Switches': metrics_df.values.tolist()[0]}
metrics_df =
|
pd.DataFrame(metrics)
|
pandas.DataFrame
|
# coding: utf-8
# # Windows 10 Coin
#
# train: (row: 1,347,190, columns: 1,085)
# test: (row: 374,136, columns: 1,084)
#
# y value: if HasClicked == True, approx. 1.8%
#
# How to run
# 1. Put the train and test files in ..\input
# 2. Put the script file in ..\script
# 3. In Jupyter Notebook, run all and get submission file in the same script folder
# In[1]:
# Timer and file info
import math
import time
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc # We're gonna be clearing memory a lot
import matplotlib.pyplot as plt
import seaborn as sns
import random
import lightgbm as lgb
import hashlib
#from ml_metrics import mapk
from datetime import datetime
import re
import csv
#import pickle
#from sklearn.tree import DecisionTreeClassifier
#from sklearn.ensemble import ExtraTreesClassifier
#from sklearn.ensemble import RandomForestClassifier
from sklearn import ensemble
from sklearn import model_selection
from sklearn.metrics import matthews_corrcoef, f1_score, classification_report, confusion_matrix, precision_score, recall_score
# Timer
class Timer:
def __init__(self, text=None):
self.text = text
def __enter__(self):
self.cpu = time.process_time()
self.time = time.time()
if self.text:
print("{}...".format(self.text))
print(datetime.now())
return self
def __exit__(self, *args):
self.cpu = time.process_time() - self.cpu
self.time = time.time() - self.time
if self.text:
print("%s: cpu %0.2f, time %0.2f\n" % (self.text, self.cpu, self.time))
# Split to train and holdout sets with counts
def sample_train_holdout(_df, sample_count, holdout_count):
random.seed(7)
sample_RowNumber = random.sample(list(_df['RowNumber']), (sample_count + holdout_count))
train_RowNumber = random.sample(sample_RowNumber, sample_count)
holdout_RowNumber = list(set(sample_RowNumber) - set(train_RowNumber))
holdout = _df[_df['RowNumber'].isin(holdout_RowNumber)].copy()
_df = _df[_df['RowNumber'].isin(train_RowNumber)]
return _df, holdout
# Sampling for train and holdout with imbalanced binary label
def trainHoldoutSampling(_df, _id, _label, _seed=7, t_tr=0.5, t_ho=0.5, f_tr=0.05, f_ho=0.5):
random.seed(_seed)
positive_id = list(_df[_df[_label]==True][_id].values)
negative_id = list(_df[_df[_label]==False][_id].values)
train_positive_id = random.sample(positive_id, int(len(positive_id) * t_tr))
holdout_positive_id = random.sample(list(set(positive_id)-set(train_positive_id)), int(len(positive_id) * t_ho))
train_negative_id = random.sample(negative_id, int(len(negative_id) * f_tr))
holdout_negative_id = random.sample(list(set(negative_id)-set(train_negative_id)), int(len(negative_id) * f_ho))
train_id = list(set(train_positive_id)|set(train_negative_id))
holdout_id = list(set(holdout_positive_id)|set(holdout_negative_id))
print('train count: {}, train positive count: {}'.format(len(train_id),len(train_positive_id)))
print('holdout count: {}, holdout positive count: {}'.format(len(holdout_id),len(holdout_positive_id)))
return _df[_df[_id].isin(train_id)], _df[_df[_id].isin(holdout_id)]
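# Minimal usage sketch for trainHoldoutSampling() on a hypothetical, imbalanced frame:
# the defaults keep 50%/50% of the positives and 5%/50% of the negatives.
_demo = pd.DataFrame({"RowNumber": range(1000),
                      "HasClicked": [True] * 20 + [False] * 980})
_train, _holdout = trainHoldoutSampling(_demo, "RowNumber", "HasClicked")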
def datetime_features2(_df, _col):
_format='%m/%d/%Y %I:%M:%S %p'
_df[_col] = _df[_col].apply(lambda x: datetime.strptime(x, _format))
colYear = _col+'Year'
colMonth = _col+'Month'
colDay = _col+'Day'
colHour = _col+'Hour'
#colYearMonthDay = _col+'YearMonthDay'
#colYearMonthDayHour = _col+'YearMonthDayHour'
_df[colYear] = _df[_col].apply(lambda x: x.year)
_df[colMonth] = _df[_col].apply(lambda x: x.month)
_df[colDay] = _df[_col].apply(lambda x: x.day)
_df[colHour] = _df[_col].apply(lambda x: x.hour)
#ymd = [colYear, colMonth, colDay]
#ymdh = [colYear, colMonth, colDay, colHour]
#_df[colYearMonthDay] = _df[ymd].apply(lambda x: '_'.join(str(x)), axis=1)
#_df[colYearMonthDayHour] = _df[ymdh].apply(lambda x: '_'.join(str(x)), axis=1)
return _df
# Change date column datetime type and add date time features
def datetime_features(_df, _col, isDelete = False):
# 1. For years greater than 2017, create year folder with regex and change year to 2017 in datetime column
# find and return 4 digit number (1st finding) in dataframe string columns
year_col = _col + 'Year'
_df[year_col] = _df[_col].apply(lambda x: int(re.findall(r"\D(\d{4})\D", " "+ str(x) +" ")[0]))
years = sorted(list(_df[year_col].unique()))
yearsGreaterThan2017 = sorted(i for i in years if i > 2017)
# Two ways for strange year data (1) change it to 2017 temporarily (2) remove from data; we will go with (1)
# because we cannot remove test rows anyway
if isDelete:
_df = _df[~_df[year_col].isin(yearsGreaterThan2017)]
else:
for i in yearsGreaterThan2017:
print("replace ", i, " to 2017 for conversion")
_df.loc[_df[year_col] == i, _col] = _df[_df[year_col] == i][_col].values[0].replace(str(i), "2017")
# How to remove strange year rows
# train = train[~train['year'].isin(yearsGreaterThan2017)]
# 2. Convert string to datetime
_df[_col] = pd.to_datetime(_df[_col])
print(_col, "column conversion to datetime type is done")
# 3. Add more date time features
month_col = _col + 'Month'
week_col = _col + 'Week'
weekday_col = _col + 'Weekday'
day_col = _col + 'Day'
hour_col = _col + 'Hour'
#year_month_day_col = _col + 'YearMonthDay'
#year_month_day_hour_col = _col + 'YearMonthDayHour'
_df[month_col] = pd.DatetimeIndex(_df[_col]).month
_df[week_col] = pd.DatetimeIndex(_df[_col]).week
_df[weekday_col] = pd.DatetimeIndex(_df[_col]).weekday
_df[day_col] = pd.DatetimeIndex(_df[_col]).day
_df[hour_col] = pd.DatetimeIndex(_df[_col]).hour
#_df[year_month_day_col] = _df[[year_col, month_col, day_col]].apply(lambda x: ''.join(str(x)), axis=1)
#_df[year_month_day_hour_col] = _df[[year_col, month_col, day_col, hour_col]].apply(lambda x: ''.join(str(x)), axis=1)
print("year, month, week, weekday, day, hour features are added")
return _df
# Delete rows with list condition for dataframe
def delRows(_df, _col, _list):
_df = _df[~_df[_col].isin(_list)]
return _df
import re
# Create new column using regex pattern for strings for dataframe
def addFeatureRegex(_df, _col, _newCol):
_df[_newCol] = _df[_col].apply(lambda x: int(re.findall(r"\D(\d{4})\D", " "+ str(x) +" ")[0]))
return _df
# Convert string to datetime type
def stringToDatetime(_df, _col):
_df[_col] = _df[_col].astype('datetime64[ns]')
return _df
# Add features from datetime
def addDatetimeFeatures(_df, _col):
_df[_col + 'Year'] = pd.DatetimeIndex(_df[_col]).year
_df[_col + 'Month'] = pd.DatetimeIndex(_df[_col]).month
_df[_col + 'Week'] = pd.DatetimeIndex(_df[_col]).week
_df[_col + 'Weekday'] = pd.DatetimeIndex(_df[_col]).weekday
_df[_col + 'Day'] = pd.DatetimeIndex(_df[_col]).day
_df[_col + 'Hour'] = pd.DatetimeIndex(_df[_col]).hour
return _df
# Get categorical column names
def categoricalColumns(_df):
cat_columns = _df.select_dtypes(['object']).columns
print("Categorical column count:", len(cat_columns))
print("First 5 values:", cat_columns[:5])
return cat_columns
# Get column names starting with
def columnsStartingWith(_df, _str):
sorted_list = sorted(i for i in list(_df) if i.startswith(_str))
print("Column count:", len(sorted_list))
print("First 5 values:", sorted_list[:5])
return sorted_list
# Get column names ending with
def columnsEndingWith(_df, _str):
sorted_list = sorted(i for i in list(_df) if i.endswith(_str))
print("Column count:", len(sorted_list))
print("First 5 values:", sorted_list[:5])
return sorted_list
# Get constant columns
def constantColumns(_df):
constant_list = []
cols = list(_df) # same as _df.columns.values
for col in cols:
if len(_df[col].unique()) == 1:
constant_list.append(col)
print("Constant column count:", len(constant_list))
print("First 5 values:", constant_list[:5])
return constant_list
# Add null columns
def makeNullColumns(_df, _cols):
null_df = _df[_cols].isnull()
null_df.columns = null_df.columns + 'Null'
_df = pd.concat([_df, null_df], axis=1)
return _df
# Union
def union(a, b):
return list(set(a)|set(b))
def unique(a):
return list(set(a))
# undersampling - e.g. _sample_rate 0.8 keeps roughly 80% of negatives, flagged via the isUnderSampled column
def underSampling(_df, _sample_rate):
    _df['isUnderSampled'] = 1
    _rand_num = int(round(1 / (1 - _sample_rate)))
underSample = np.random.randint(_rand_num, size=len(_df[_df['HasClicked'] == 0]))
_df.loc[_df['HasClicked'] == 0, 'isUnderSampled'] = underSample>0
return _df
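# Hedged sketch of underSampling on synthetic data: with _sample_rate=0.8 roughly 80% of the
# negative ('HasClicked' == 0) rows keep isUnderSampled truthy, while positives are always kept.
def _demo_underSampling():
    demo = pd.DataFrame({'HasClicked': [0] * 90 + [1] * 10})
    demo = underSampling(demo, 0.8)
    return demo.groupby('HasClicked')['isUnderSampled'].mean()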
# Add column with value count
def valueCountColumn(_df, _col):
    _dict = _df[_col].value_counts().to_dict()
    _df[_col+'ValueCount'] = _df[_col].map(_dict)
return _df
# Add column with bool values to check if keyword is contained or not
def containColumn(_df, _col, _str):
    _df[_col+'Contains'+_str] = _df[_col].str.contains(_str)
return _df
# Feature engineering
def feature_engineering(_df):
print("shape:", _df.shape)
print("Add datetime features...")
datetime_columns = ['BubbleShownTime', 'FirstUpdatedDate', 'OSOOBEDateTime']
for col in datetime_columns:
print(col)
if _df[col].isnull().sum() > 0:
_df[col] = _df[col].fillna('1/1/2017 11:11:11 AM')
_df = datetime_features2(_df, col)
print("shape:", _df.shape)
gc.collect()
# Null count
print("Missing value count...")
_df['CntNs'] = _df.isnull().sum(axis=1)
cols = ['AppCategoryNMinus1', 'AppCategoryNMinus2', 'AppCategoryNMinus3', 'AppCategoryNMinus4', 'AppCategoryNMinus5',
'AppCategoryNMinus6', 'AppCategoryNMinus7', 'AppCategoryNMinus8']
_df['AppCatCntNs'] = _df[cols].isnull().sum(axis=1)
#_df[cols] = _df[cols].fillna("NA")
#for col in cols:
# print(col)
# _df[col+'HighLevel'] = _df[col].apply(lambda x: str(x).split(':')[0])
# Game segment parse with '.'
# to-do: 2nd and 3rd parsed values to add as features later, some exception handling is needed
print("Gamer segment parsing...")
_df['GamerSegment1'] = _df['GamerSegment'].apply(lambda x: str(x).split('.')[0] if str(x).split('.') else 'Unknown')
# Check creativeName contains keyword or not
keywords = ['SL', 'TS', 'Week7', 'Meet', 'Skype', 'Battery', 'Switch', 'Performance', 'Security', 'Surge']
for keyword in keywords:
_df = containColumn(_df, 'creativeName', keyword)
#_df['week7'] = _df['Week7'].values + _df['Week 7'].values
#_df.drop(['Week7', 'Week 7'], axis = 1, inplace = True)
# Convert categorical columns to numeric
print("Convert categorical columns to numeric...")
cat_columns = _df.select_dtypes(['object']).columns
for cat_column in cat_columns:
print(cat_column)
if cat_column == 'creativeName':
_df['creativeNameTest'] = _df['creativeName'].values
#_df[cat_column] = _df[cat_column].apply(lambda x: abs(hash(x)) )
_df[cat_column]=_df[cat_column].apply(lambda x: int(hashlib.sha1(str(x).encode('utf-8')).hexdigest(), 16) % (10 ** 16))
gc.collect()
# Replace missing values with -1
print("Replace missing values with -1")
_df = _df.fillna(-1)
# Value count
print("Value count...")
cols = ['UniqueUserDeviceKey', 'CampaignId']
for col in cols:
print(col)
_df = valueCountColumn(_df, col)
return _df
# Get best threshold value for F1 score
def f1_best_threshold(_actual, _pred):
thresholds = np.linspace(0.01, 0.5, 1000)
fc = np.array([f1_score(_actual, _pred>thr) for thr in thresholds])
plt.plot(thresholds, fc)
best_threshold = thresholds[fc.argmax()]
print('f1 score:', fc.max())
print('best threshold:', best_threshold)
print('TF pred mean:', (_pred>best_threshold).mean())
return best_threshold
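# Minimal sketch of f1_best_threshold on synthetic scores (assumes f1_score and plt are already
# imported above, as the function itself requires). Positives score higher than negatives here,
# so the chosen threshold should fall between the two score clusters.
def _demo_f1_best_threshold():
    actual = np.array([0] * 90 + [1] * 10)
    pred = np.concatenate([np.random.uniform(0.0, 0.2, 90), np.random.uniform(0.3, 0.9, 10)])
    return f1_best_threshold(actual, pred)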
# In[7]:
# Read tsv file
test2 =
|
pd.read_csv('CoinMlCompetitionSoftlandingEvaluateNoLabel.tsv', sep='\t', header = None)
|
pandas.read_csv
|
# python-3
# Author name: <NAME> (<EMAIL>)
# Creation date: December 01, 2016
# This script contains method to generate all beauty related features from images.
# Very expensive when run serially.
from skimage import io, color, feature, transform
import numpy as np, pandas as pd
import time, json, argparse, math, os
from datetime import datetime
from sklearn.metrics.cluster import entropy
final_ftr_obj_global = {}
def calc_contrast(r, g, b):
# y is the luminance
y = 0.299 * r + 0.587 * g + 0.114 * b
return (np.max(y) - np.min(y))/np.mean(y)
def get_spat_arrng_ftrs(gray_img):
# resize img to 600 * 600
resized_img = transform.resize(gray_img, (600,600))
left = resized_img.transpose()[:300].transpose()
right = resized_img.transpose()[300:].transpose()
I_anti = np.identity(600)[::-1] # anti - diagonal identity matrix
inner = feature.hog(left) - feature.hog(I_anti.dot(right))
return dict(symmetry = np.linalg.norm(inner))
def calc_color_ftrs(h, s, v):
v_mean = np.mean(v)
s_mean = np.mean(s)
# emotional features
pleasure = 0.69 * v_mean + 0.22 * s_mean
arousal = -0.31 * v_mean + 0.60 * s_mean
dominance = 0.76 * v_mean + 0.32 * s_mean
# HSV-itten color histogram features
counts_hue, _ = np.histogram(h, bins = 12) # 12 bins of hue
counts_saturation, _ = np.histogram(s, bins = 5) # 5 bins of saturation
counts_brightness, _ = np.histogram(v, bins = 3) # 3 bins of brightness (v)
return dict(pleasure = pleasure, arousal = arousal, dominance = dominance,
hsv_itten_std_h = np.std(counts_hue), hsv_itten_std_s = np.std(counts_saturation), hsv_itten_std_v = np.std(counts_brightness))
def get_arr(imgObj):
first = np.array([pix[0] for row in imgObj for pix in row])
second = np.array([pix[1] for row in imgObj for pix in row])
third = np.array([pix[2] for row in imgObj for pix in row])
return (first, second, third)
# logic to resize the image without affecting the aspect ratio
def resize_img(imgObj, base_width=600):
    if len(imgObj[0]) > base_width:
newHeight = int(len(imgObj) * base_width / len(imgObj[0]))
return transform.resize(imgObj, (newHeight,base_width))
else:
return imgObj
def extr_beauty_ftrs(imgFlNm):
img = os.path.basename(imgFlNm)
print("Extracting beauty features for %s" %imgFlNm)
try:
rgbImg = resize_img(io.imread(imgFlNm))
except Exception as e:
print("Invalid image")
return e
if len(rgbImg.shape) != 3 or rgbImg.shape[2] !=3:
print("Invalid image.. Continuing..")
final_ftr_obj_global[img] = None
return None
hsvImg = color.rgb2hsv(rgbImg)
grayImg = color.rgb2gray(rgbImg)
red, green, blue = get_arr(rgbImg)
hue, saturation, value = get_arr(hsvImg)
contrast = calc_contrast(red, green, blue)
ftrs = calc_color_ftrs(hue, saturation, value)
ftrs['contrast'] = contrast
ftrs['entropy'] = entropy(grayImg) # added to include entropy of the given image: more details: http://stackoverflow.com/a/42059758/5759063
ftrs.update(get_spat_arrng_ftrs(grayImg))
final_ftr_obj_global[img] = ftrs
return final_ftr_obj_global
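# Hedged usage sketch: run the extractor on a single image path and read the result back from the
# global dict. 'sample.jpg' is a placeholder path, not a file shipped with this script.
def _demo_extr_beauty_ftrs(img_path='sample.jpg'):
    extr_beauty_ftrs(img_path)
    # final_ftr_obj_global maps the image file name to its colour/contrast/symmetry features,
    # or to None when the file could not be read as an RGB image.
    return final_ftr_obj_global.get(os.path.basename(img_path))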
def createFtrFile(result_file, exif_file, out_fl):
with open(exif_file,"r") as inpJsonFl:
exifJsonObj = json.load(inpJsonFl)
    resultsDf = pd.read_csv(result_file, index_col=0)  # closest modern equivalent of the removed DataFrame.from_csv
resultsDf = pd.DataFrame(resultsDf['Proportion'])
resultsDict = resultsDf.to_dict(orient="index")
expt2Results = {}
for gid in resultsDict:
expt2Results[str(gid)] = exifJsonObj[str(gid)]
expt2Results[str(gid)].update(resultsDict[gid])
expt2ResultsDf =
|
pd.DataFrame(expt2Results)
|
pandas.DataFrame
|
__author__ = '<NAME>, SRL'
from flask import Flask, send_file
import plotly
import plotly.graph_objects as go
import dash
import dash_table
from dash_table.Format import Format, Scheme
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import numpy as np
from dash.dependencies import Input, Output, State
import json
import requests
from urllib.parse import urlparse, parse_qs
import pandas as pd
from datetime import datetime
from io import BytesIO
import jwt
from typing import List, Tuple
class Crypt:
def __init__(self, secret: str):
self.secret = secret
def Encode(self, target: int, executions: List[int]) -> str:
"""'target' is the landing experiment execution, 'executions' is
the list of all executions belonging to the user"""
payload = {"t": target, "l": executions}
token = jwt.encode(payload, self.secret, algorithm="HS256")
if isinstance(token, bytes): # Older versions of jwt return bytes
token = token.decode(encoding="UTF-8")
return token
def Decode(self, token: str) -> Tuple[int, List[int]]:
"""Returns a tuple (<landing execution>, <list of executions>)"""
payload = jwt.decode(token, self.secret, algorithms=["HS256"])
return payload["t"], payload["l"]
server = Flask(__name__)
@server.route('/', methods=['GET'])
def index():
return {'about': "Visualization service for 5Genesis Analytics Component. Visit /help for more info and /dash to bring up the dashboard."}, 200
# Fetch the data source options
def fetch_datasource_options():
link = "http://data_handler:5000/get_datasources"
try:
data = requests.get(link).json()
return [{'label': item, 'value': item} for item in data['sources']]
except requests.HTTPError:
return [{'label': 'No datasource available', 'value': ''}]
datasource_options = fetch_datasource_options()
app = dash.Dash(
__name__,
server=server,
routes_pathname_prefix='/dash/',
external_stylesheets=[dbc.themes.BOOTSTRAP]
)
stat_indicators = ['Mean', 'Standard Deviation', 'Median', 'Min', 'Max',
'25% Percentile', '75% Percentile', '5% Percentile', '95% Percentile']
app.layout = dbc.Container([
dcc.Location(id='url', refresh=False),
dbc.Row([
dbc.Col([
html.Div([
html.Img(src=app.get_asset_url('5genesis_logo.png'), # from https://pbs.twimg.com/media/EWm7hjlX0AUl_AJ.png
style={'height': '12rem', 'width': '12rem', 'border-radius': '50%'}),
html.H2("Analytics", style={'margin-top': '2rem'})
], style={'display': 'block', 'text-align': 'center', 'padding-top': '2rem'}),
html.Br(),
html.Div([
html.Div('Database'),
dcc.Dropdown(
options=datasource_options,
value=datasource_options[0]['value'],
id='datasource',
searchable=False,
clearable=False
)
]),
html.Br(),
html.Div([
html.Div('Experiment ID'),
dcc.Dropdown(id='experiment')
]),
html.Br(),
html.Div([
html.Div('Measurement Table'),
dcc.Dropdown(
id='measurement',
multi=True)
]),
html.Br(),
html.Div([
html.Div('Available Features'),
dcc.Dropdown(id='kpi', multi=True)
]),
html.Br(),
html.Hr(),
html.Br(),
html.Div([
html.Div('Outlier Detection Algorithm'),
dcc.Dropdown(
options=[
{'label': 'None', 'value': 'None'},
{'label': 'Z-score', 'value': 'zscore'},
{'label': 'MAD', 'value': 'mad'}],
value='None',
id='outlier',
searchable=False,
clearable=False
)]),
html.Br(),
html.Div([
html.Div('Time resolution'),
dcc.Input(
id="time_resolution",
type='text',
placeholder="1s",
value='1s',
style={'width': '75px'}
)
]),
html.Br(),
html.Div(
html.A(
dbc.Button('Reset', id='purge_cache_button'),
href='/dash/'
), style={'textAlign': 'center'})
], width=2, style={'background-color': "#f8f9fa"}),
dbc.Col([
# Hidden divisions to store data that'll be used as input for different callbacks
html.Div(id='df', style={'display': 'none'}),
html.Div(id='df_no_outliers', style={'display': 'none'}),
html.Div(id='test_case_stat_df', style={'display': 'none'}),
html.Div(id='it_stat_df', style={'display': 'none'}),
# html.Div(id='corr_matrix_download_data', style={'display': 'none'}),
# html.Div(id='corr_table_download_data', style={'display': 'none'}),
html.Div(id='prediction_results_df', style={'display': 'none'}),
# html.Br(),
# Create tabs
dcc.Tabs(id='tabs', value='time-series-tab', children=[
# Time Series tab
dcc.Tab(label='Time Series Overview', value='time-series-tab', children=[
# Time series graph
dbc.Row(dbc.Col(dcc.Graph(id='graph'))),
# dcc.Graph(id='graph_no_outliers')
# # download link
# dbc.Row(dbc.Col(
# html.A(
# 'Download Raw Data',
# id='download-link',
# download="",
# href="",
# target="_blank"
# )
# ))
]),
# Statistical Analysis tab
dcc.Tab(label='Statistical Analysis', value='stat-analysis-tab', children=[
# graph
dbc.Row(dbc.Col(
dcc.Graph(id='box_plot')
)),
# table
dbc.Row(dbc.Col([
html.H4(children='Test Case Statistics'),
dash_table.DataTable(
id='table',
columns=[
{'name': 'Indicator', 'id': 'Indicator'},
{'name': 'Value', 'id': 'Value', 'type': 'numeric',
'format': Format(precision=2, scheme=Scheme.fixed)},
{'name': 'Confidence Interval', 'id': 'Confidence Interval', 'type': 'numeric',
'format': Format(precision=2, scheme=Scheme.fixed)}
]
),
# # download links
# html.Div(
# html.A(
# 'Download Per Iteration Statistics',
# id='iteration_download',
# download="",
# href="",
# target="_blank"
# ),
# ),
# html.Div(
# html.A(
# 'Download Test Case Statistics',
# id='test_case_download',
# download="",
# href="",
# target="_blank"
# )
# )
], width=6), justify='center')
]),
# Correlation tab
dcc.Tab(label='Correlation', value='correlation-tab', children=[
dcc.Tabs(id="corr-tabs", value="cross-correlation-tab", children=[
# Correlation Matrix
dcc.Tab(label='Cross-correlation of fields within the same experiment', value="cross-correlation-tab", children=[
dbc.Row(dbc.Col([
html.Div('Correlation method', style={'margin-top': '20px'}),
dcc.Dropdown(
options=[
{'value': 'pearson', 'label': 'Pearson correlation coefficient'},
{'value': 'kendall', 'label': 'Kendall Tau correlation coefficient'},
{'value': 'spearman', 'label': 'Spearman rank correlation'}
],
value='pearson',
id='correlation-method',
searchable=False,
clearable=False
)
], width=3)),
dbc.Row(dbc.Col(
dcc.Graph(id='correlation_graph')
)),
# dbc.Row(dbc.Col(
# # download link
# html.A(
# 'Download Correlation Matrix Data',
# id='corr_matrix_download',
# download="",
# href="",
# target="_blank"
# )
# ))
]),
# Correlation table
dcc.Tab(label='Correlation of fields between two different experiments', value='experiment-correlation-tab', children=[
dbc.Row(dbc.Col([
html.Div('Pick Second Experiment ID', style={'margin-top': '20px'}),
dcc.Dropdown(id='experiment2'),
html.Br()
], width=3), justify='center'),
dbc.Row(dbc.Col(
dash_table.DataTable(
id='correlation_table',
columns=[
{'name': 'Correlation Field', 'id': 'Correlation Field', 'type': 'text'},
{'name': 'Value', 'id': 'Value', 'type': 'numeric', 'format': Format(precision=2, scheme=Scheme.fixed)}
], style_data={'width': '250px'}
), width='auto'
), justify='center'),
# dbc.Row(dbc.Col(
# # download link
# html.A(
# 'Download Correlation Table Data',
# id='corr_table_download',
# download="",
# href="",
# target="_blank"
# )
# ))
])
])
]),
# Feature Selection tab
dcc.Tab(label='Feature Selection', value='feature-selection-tab', children=[
# hidden division to store data
html.Div(id='feature_score', style={'display': 'none'}),
dbc.Row([
dbc.Col([
# Options
html.Div('Select Algorithm', style={'margin-top': '20px'}),
dcc.Dropdown(
options=[
{'label': 'Backward Elimination', 'value': 'backward'},
{'label': 'RFE', 'value': 'rfe'},
{'label': 'Lasso', 'value': 'lasso'}
],
value='lasso',
id='method',
searchable=False,
clearable=False
)
], width=2),
dbc.Col([
html.Div('Drop Features', style={'margin-top': '20px'}),
dcc.Dropdown(
id='drop_features',
multi=True
)
], width=3),
dbc.Col([
html.Div('Normalize (for RFE)', style={'margin-top': '20px'}),
dcc.RadioItems(
options=[
{'label': 'Yes', 'value': 'true'},
{'label': 'No', 'value': 'false'},
],
value='true',
id='normalize',
labelStyle={'display': 'inline-block', 'margin-top': '5px'}
)
], width='auto'),
dbc.Col([
html.Div('Alpha (for Lasso)', style={'margin-top': '20px'}),
dcc.Input(
id='alpha',
type='number',
value=0.1,
min=0, max=10, step=0.1
)
], width='auto')
]),
dbc.Row(dbc.Col(dcc.Graph(id='feature_bar'))),
# dbc.Row(dbc.Col(
# # download link
# html.A(
# 'Download Feature Selection Scores',
# id='features_download',
# download="",
# href="",
# target="_blank"
# )
# ))
]),
# Prediction tab
dcc.Tab(label='Prediction', value='prediction-tab', children=[
dbc.Row([
# Options
dbc.Col([
html.Div('Select Algorithm', style={'margin-top': '20px'}),
dcc.Dropdown(
options=[
{'label': 'Linear Regression',
'value': 'linreg'},
{'label': 'Random Forest',
'value': 'rf'},
{'label': 'SVR', 'value': 'svr'}
],
value='linreg',
id='algorithm',
searchable=False,
clearable=False
)
], width=2),
dbc.Col([
html.Div('Drop Features', style={'margin-top': '20px'}),
dcc.Dropdown(
id='drop_features_pred',
multi=True
)
], width=3),
dbc.Col(
dbc.Button('Automatic feature selection', id='drop_features_button', color='light', style={'margin-top': '43px'}),
width="auto"
),
dbc.Col(
dbc.Button('Train model', id='train_button', style={'margin-top': '43px'}),
width="auto"
)
]),
dbc.Row(
# Prediction values graph
dbc.Col(dbc.Col(dcc.Graph(id='predicted_values_graph')))
),
dbc.Row([
# Prediction results
dbc.Col(
html.Div([
html.H4('Training results'),
dash_table.DataTable(
id='prediction_result_table',
columns=[
{
'name': 'Metric',
'id': 'Metric',
'type': 'text'
}, {
'name': 'Value',
'id': 'Value',
'type': 'numeric',
'format': Format(precision=2, scheme=Scheme.fixed)
}
]
)
], style={'text-align': 'center'}), width=4
),
# Coefficient table
dbc.Col(
html.Div([
html.H4('Model coefficients'),
dash_table.DataTable(
id='prediction_coefficient_table',
columns=[
{
'name': 'Feature',
'id': 'Feature',
'type': 'text'
}, {
'name': 'Value',
'id': 'Value',
'type': 'numeric',
'format': Format(precision=4, scheme=Scheme.fixed)
}
]
)
], style={'text-align': 'center'}), width=4
)
], justify="around"),
dbc.Row(
dbc.Col(
html.A(
dbc.Button('Download model', id='download_button', style={'margin-bottom': '50px'}),
id='model_download_link',
href=None
), width="auto"
), justify="center"
)
])
])
])
])
], fluid=True)
def empty_figure(title='No data'):
return {
'data': [{'x': 0, 'y': 0}],
'layout': {'title': title}
}
empty_fig = empty_figure()
kpi_filter_list = ['Available RAM', 'PacketsReceived', 'Total RAM', 'Used CPU Per Cent', 'Used RAM', 'Used RAM Per Cent', # malaga old names
'host', 'Cell ID', 'Cell',
'facility', 'facility_x', 'facility_y',
'Success', 'Success_x', 'Success_y',
'hostname', 'hostname_x', 'hostname_y',
'appname', 'appname_x', 'appname_y',
'series', 'series_x', 'series_y',
'_iteration_', '_iteration__x', '_iteration__y',
'ExecutionId', 'ExecutionId_x', 'ExecutionId_y', 'Timestamp_x', 'Timestamp_y',
'Operator', 'DateTime', 'Network', 'LAC', 'PSC',
'AWGN State', 'Verdict']
meas_filter_list = ['execution_metadata', 'syslog']
# callback to return experiment ID options
@app.callback(
[Output('experiment', 'options'),
Output('experiment', 'value')],
[Input('url', 'search'),
Input('datasource', 'value')])
def experimentID_list(search, datasource):
if not search or not datasource:
return [], None
start = datetime.now()
params = parse_qs(urlparse(search).query)
token = params['token'][0]
if token == secret:
link = f'http://data_handler:5000/get_all_experimentIds/{datasource}'
r = requests.get(link)
experiment_list = list(r.json().values())[0]
experiment_target = None
else:
experiment_target, experiment_list = decoder.Decode(token)
if experiment_target and experiment_target not in experiment_list:
experiment_list += [experiment_target]
print(f"-- experimentID_list: {datetime.now()-start}", flush=True)
return [{'label': item, 'value': item} for item in sorted(experiment_list)], experiment_target
# callback to return measurement options
@app.callback(
[Output('measurement', 'options'),
Output('measurement', 'value')],
[Input('experiment', 'value')],
[State('datasource', 'value')])
def find_measurement(experiment, datasource):
if not experiment or not datasource:
return [], None
start = datetime.now()
link = f'http://data_handler:5000/get_measurements_for_experimentId/{datasource}/{experiment}'
r = requests.get(link)
meas_list = list(r.json().values())[0]
temp = []
for i in meas_list:
if i not in meas_filter_list: # to avoid having measurement tables which raise errors
temp.append({'label': i, 'value': i})
print(f"-- find_measurement: {datetime.now()-start}", flush=True)
return temp, None
# callback used to store the df in a hidden division
@app.callback(
Output('df', 'children'),
[Input('measurement', 'value'),
Input('outlier', 'value'),
Input('datasource', 'value'),
Input('experiment', 'value'),
Input('time_resolution', 'value'),
Input('purge_cache_button', 'n_clicks')])
def retrieve_df(measurement, outlier, datasource, experiment, time_resolution, purge_cache):
    # input check - this order is required (at first the value is None; once filled it becomes a list)
if not measurement or not experiment or not time_resolution:
# empty_df = pd.DataFrame(data={})
return None
context = dash.callback_context
if context and context.triggered[0]['prop_id'].split('.')[0] == 'purge_cache_button':
requests.get('http://data_handler:5000/purge_cache')
return None
start = datetime.now()
link = f'http://data_handler:5000/get_data/{datasource}/{experiment}'
param_dict = {
'match_series': False,
'measurement': measurement,
'max_lag': time_resolution,
'remove_outliers': outlier
}
r = requests.get(link, params=param_dict)
print(f"-- retrieve_df: {datetime.now()-start}", flush=True)
# return df.to_json()
return r.text
@app.callback(
[Output('kpi', 'options'),
Output('kpi', 'value')],
[Input("df", "children")])
def update_dropdown(df):
if not df:
return [], None
start = datetime.now()
temp = []
df = pd.read_json(df)
for i in df.columns:
if not len(df[i].dropna()) == 0 and i not in kpi_filter_list:
temp.append({'label': i, 'value': i})
print(f"-- update_dropdown: {datetime.now()-start}", flush=True)
return temp, None
###
# Time Series Overview tab
###
# Time series graph
@app.callback(
Output('graph', 'figure'),
[Input('kpi', 'value'),
Input("outlier", 'value'),
Input('tabs', 'value')],
[State("df", "children")])
def update_graph(kpi, outlier, tab, df):
# input check
if not kpi or not df or not outlier or tab != "time-series-tab":
return empty_fig
start = datetime.now()
df = pd.read_json(df)
traces = []
for i in range(len(kpi)):
feature = kpi[i]
series = df[feature]
series.reset_index(drop=True, inplace=True)
traces.append(go.Scatter(
x=df.index,
y=series,
mode='lines',
name=feature,
yaxis=f"y{i+1}" if i > 0 else 'y'
))
figure = {
'data': traces,
'layout': {
'title': 'Time Series',
'xaxis': {
'title': 'Samples',
'domain': [0, 1 - (len(kpi) - 1) * 0.06],
'titlefont': {
'family': 'Helvetica, monospace',
'size': 20,
'color': '#7f7f7f'
}
},
'yaxis': {
'title': kpi[0],
'titlefont': {
'family': 'Helvetica, monospace',
'size': 20,
'color': plotly.colors.DEFAULT_PLOTLY_COLORS[0]
},
'tickfont': {
'color': plotly.colors.DEFAULT_PLOTLY_COLORS[0]
}
},
"showlegend": False
}
}
for i in range(1, len(kpi)):
figure['layout'][f'yaxis{i+1}'] = {
'title': kpi[i],
'titlefont': {
'family': 'Helvetica, monospace',
'size': 20,
'color': plotly.colors.DEFAULT_PLOTLY_COLORS[i]
},
'tickfont': {
'color': plotly.colors.DEFAULT_PLOTLY_COLORS[i]
},
'overlaying': 'y',
'side': 'right',
'position': 1 - i * 0.06
}
print(f"-- update_graph: {datetime.now()-start}", flush=True)
return figure
###
# Statistical Analysis tab
###
# callback used to store the statistical analysis dataframes
@app.callback(
[Output("it_stat_df", "children"),
Output("test_case_stat_df", "children")],
[Input('kpi', 'value'),
Input('datasource', 'value'),
Input('tabs', 'value')],
[State('measurement', 'value'),
State('experiment', 'value')])
def retrieve_stats(kpi, datasource, tab, measurement, experiment):
if not kpi or not experiment or tab != 'stat-analysis-tab':
empty_df = pd.DataFrame(data={})
return empty_df.to_json(), empty_df.to_json()
else:
link = f'http://statistical_analysis:5003/statistical_analysis/{datasource}'
param_dict = {
'experimentid': experiment,
'kpi': kpi[0], # .replace(" ","%20")
'measurement': measurement
}
r = requests.get(link, params=param_dict)
data = r.json()
if not data['experimentid'][experiment]:
return pd.DataFrame().to_json(), pd.DataFrame().to_json()
temp = data['experimentid'][experiment][kpi[0]]
df1 = pd.DataFrame.from_dict(temp['Iteration Statistics'], orient='index').reset_index()
test_case_stat_df = pd.DataFrame.from_dict(temp['Test Case Statistics'], orient='index').reset_index()
df1.rename(columns={'index': 'Iteration'}, inplace=True)
test_case_stat_df.rename(columns={'index': 'Indicator'}, inplace=True)
return df1.to_json(), test_case_stat_df.to_json()
# return box plot
@app.callback(
Output('box_plot', 'figure'),
[Input('kpi', 'value'),
Input("tabs", "value")],
[State("df", "children")])
def update_box_plot_graph(kpi, tab, df):
if not kpi or not df or tab != 'stat-analysis-tab':
return empty_fig
else:
kpi = kpi[0]
df =
|
pd.read_json(df)
|
pandas.read_json
|
import os
import sys
import time
from pathlib import Path
from typing import List
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import thoipapy.utils
from thoipapy import utils
from thoipapy.validation.auc import calc_PRAUC_ROCAUC_using_10F_validation
from thoipapy.ML_model.train_model import return_classifier_with_loaded_ensemble_parameters
from thoipapy.validation.bocurve import calc_best_overlap_from_selected_column_in_df, calc_best_overlap, parse_BO_data_csv_to_excel
from thoipapy.validation.leave_one_out import get_clusters_putative_homologues_in_protein_set
def calc_feat_import_from_mean_decrease_accuracy(s, logging):
"""Calculate feature importances using mean decrease in accuracy.
This method differs from calc_feat_import_from_mean_decrease_impurity.
It's much slower, and involves the use of 10-fold cross-validation for each variable separately.
- a feature (or group of features) is selected for randomisation
- in theory, randomising important features will cause a drop in prediction accuracy
- The feature (or group of features) is shuffled
    - precision-recall AUC and ROC-AUC are measured
- the difference between the original AUC and the AUC with shuffled variable is measured
- higher values suggest more important features
Parameters
----------
s : dict
Settings dictionary
logging : logging.Logger
Python object with settings for logging to console and file.
Saved Files
-----------
feat_imp_MDA_xlsx : xlsx
        Excel file showing the decrease in AUC for each feature or group of features.
"""
logging.info('------------ starting calc_feat_import_from_mean_decrease_accuracy ------------')
# input
train_data_csv = Path(s["data_dir"]) / f"results/{s['setname']}/train_data/03_train_data_after_first_feature_seln.csv"
tuned_ensemble_parameters_csv = Path(s["data_dir"]) / f"results/{s['setname']}/train_data/04_tuned_ensemble_parameters.csv"
# output
feat_imp_MDA_xlsx = os.path.join(s["data_dir"], "results", s["setname"], "feat_imp", "feat_imp_mean_decrease_accuracy.xlsx")
feat_imp_temp_THOIPA_BO_curve_data_csv = Path(s["data_dir"]) / f"results/{s['setname']}/feat_imp/feat_imp_temp_THOIPA.best_overlap_data.csv"
feat_imp_temp_bocurve_data_xlsx = Path(s["data_dir"]) / f"results/{s['setname']}/feat_imp/feat_imp_temp_bocurve_data.xlsx"
thoipapy.utils.make_sure_path_exists(feat_imp_MDA_xlsx, isfile=True)
df_data = pd.read_csv(train_data_csv, index_col=0)
if df_data.isnull().values.any():
for col in df_data.columns:
if df_data[col].isnull().values.any():
logging.warning(f"{col} contains nan values")
raise Exception("df_data contains nan values. Please check names of features_to_be_retained_during_selection in settings file.")
# drop training data (full protein) that don't have enough homologues
if s["min_n_homol_training"] != 0:
df_data = df_data.loc[df_data.n_homologues >= s["min_n_homol_training"]]
cols_excluding_y = [c for c in df_data.columns if c != s['bind_column']]
X = df_data[cols_excluding_y]
y = df_data["interface"]
settings_path = s["settings_path"]
df_feat = pd.read_excel(settings_path, sheet_name="features")
df_feat = df_feat.loc[df_feat.include == 1]
feature_types: list = list(df_feat.feature_type.unique())
## DEPRECATED HARD-CODED LIST: use feature_type in settings file, instead
# polarity_features = ["test_dropping_of_features_not_included", "polarity", "relative_polarity", "polarity4mean", "polarity3Nmean", "polarity3Cmean", "polarity1mean"]
# pssm_features = ["A", "C", "D", "E", "F", "G", "H", "I", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "V", "W", "Y", "CS", "DE", "KR", "QN", "LIV"]
# coev_features = ["DImax", "MImax", "DItop4mean", "MItop4mean", "DItop8mean", "MItop8mean", "DI4max", "MI4max", "DI1mean", "MI1mean", "DI3mean", "MI3mean", "DI4mean", "MI4mean", "DI4cum"]
# DI_features = ["DImax", "DItop4mean", "DItop8mean", "DI4max", "DI1mean", "DI3mean", "DI4mean", "DI4cum"]
# MI_features = ["MImax", "MItop4mean", "MItop8mean", "MI4max", "MI1mean", "MI3mean", "MI4mean"]
# cons_features = ["entropy", "cons4mean", "conservation"]
# motif_features = ["GxxxG", "SmxxxSm"]
# physical_features = ["branched", "mass"]
# TMD_features = ["residue_depth", "n_TMDs", "n_homologues"]
# polarity_and_pssm_features = polarity_features + pssm_features
# features_nested_list = [polarity_and_pssm_features, coev_features, DI_features, MI_features, cons_features, motif_features, physical_features, TMD_features]
# features_nested_namelist = ["polarity_and_pssm_features", "coev_features", "DI_features", "MI_features", "cons_features", "motif_features", "physical_features", "TMD_features"]
# for i in range(len(features_nested_list)):
# sys.stdout.write("\n{} : {}".format(features_nested_namelist[i], features_nested_list[i]))
forest = return_classifier_with_loaded_ensemble_parameters(s, tuned_ensemble_parameters_csv)
pr_auc_orig, roc_auc_orig = calc_PRAUC_ROCAUC_using_10F_validation(X, y, forest)
auboc_orig = calc_AUBOC_for_feat_imp(y, X, forest, feat_imp_temp_THOIPA_BO_curve_data_csv, feat_imp_temp_bocurve_data_xlsx, s, logging)
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
sys.stdout.write("\nmean : {:.03f}\n".format(pr_auc_orig)), sys.stdout.flush()
################### grouped features ###################
grouped_feat_decrease_PR_AUC_dict = {}
grouped_feat_decrease_ROC_AUC_dict = {}
grouped_feat_decrease_AUBOC_dict = {}
for feature_type in feature_types:
df_feat_selected = df_feat.loc[df_feat.feature_type == feature_type]
feature_list = df_feat_selected.feature.to_list()
feature_list = list(set(feature_list).intersection(set(X.columns.tolist())))
X_t = X.copy()
for feature in feature_list:
# shuffle the data for that feature
row_to_shuffle = X_t[feature].to_numpy()
np.random.shuffle(row_to_shuffle)
X_t[feature] = row_to_shuffle
# calculate prediction performance after shuffling
PR_AUC, ROC_AUC = calc_PRAUC_ROCAUC_using_10F_validation(X_t, y, forest)
decrease_PR_AUC = pr_auc_orig - PR_AUC
grouped_feat_decrease_PR_AUC_dict[feature_type] = decrease_PR_AUC
decrease_ROC_AUC = roc_auc_orig - ROC_AUC
grouped_feat_decrease_ROC_AUC_dict[feature_type] = decrease_ROC_AUC
auboc = calc_AUBOC_for_feat_imp(y, X_t, forest, feat_imp_temp_THOIPA_BO_curve_data_csv, feat_imp_temp_bocurve_data_xlsx, s, logging)
decrease_auboc = auboc_orig - auboc
grouped_feat_decrease_AUBOC_dict[feature_type] = decrease_auboc
logging.info(f"{feature_type} : decrease AUBOC ({decrease_auboc:.03f}), decrease PR-AUC ({decrease_PR_AUC:.03f}), "
f"decrease ROC-AUC ({decrease_ROC_AUC:.03f}), included features ({feature_list})")
# remove temp bocurve output files
feat_imp_temp_THOIPA_BO_curve_data_csv.unlink()
feat_imp_temp_bocurve_data_xlsx.unlink()
################### single features ###################
single_feat_decrease_PR_AUC_dict = {}
single_feat_decrease_ROC_AUC_dict = {}
single_feat_decrease_AUBOC_dict = {}
for feature in X.columns:
X_t = X.copy()
# shuffle the data for that feature
row_to_shuffle = X_t[feature].to_numpy()
np.random.shuffle(row_to_shuffle)
X_t[feature] = row_to_shuffle
# calculate prediction performance after shuffling
PR_AUC, ROC_AUC = calc_PRAUC_ROCAUC_using_10F_validation(X_t, y, forest)
decrease_PR_AUC = pr_auc_orig - PR_AUC
single_feat_decrease_PR_AUC_dict[feature] = decrease_PR_AUC
decrease_ROC_AUC = roc_auc_orig - ROC_AUC
single_feat_decrease_ROC_AUC_dict[feature] = decrease_ROC_AUC
auboc = calc_AUBOC_for_feat_imp(y, X_t, forest, feat_imp_temp_THOIPA_BO_curve_data_csv, feat_imp_temp_bocurve_data_xlsx, s, logging)
decrease_auboc = auboc_orig - auboc
single_feat_decrease_AUBOC_dict[feature] = decrease_auboc
logging.info(f" {feature} {decrease_auboc:.03f} | {decrease_PR_AUC:.03f} | {decrease_ROC_AUC:.03f}")
df_grouped_feat = pd.DataFrame()
df_grouped_feat["PR_AUC"] =
|
pd.Series(grouped_feat_decrease_PR_AUC_dict)
|
pandas.Series
|
import pandas as pd
from functools import lru_cache
from .stats_types import SpecialStats, DerivedStats, ChampionBanStats
from ..exceptions import MissingRequiredStats
class StatsManager: # pragma: no cover
"""Abstract class defining the basis of Stats Managers
Parameters
----------
stats : list of Stats
List of all instantiated Stats to be computed
rank_manager : RankManager
Manager for players rank
"""
def __init__(self, stats, rank_manager):
pass
def push_game(self, match_data):
"""Process the match_data according to the needs of the Stats
Parameters
----------
match_data : dict
Raw data from Riot API match-v4 endpoint
"""
pass
def get_stats(self):
"""Return the computed stats
Returns
-------
stats : Pandas DataFrame
Value of the computed stats grouped by the key
"""
pass
def merge(self, sm2, redundant_games=[]):
"""Merge the data from the given StatsManager instance
The current instance becomes the merge results
Parameters
----------
sm2 : StatsManager
Another StatsManager instance to merge with
redundant_games: list of gameId
The list of redundant games between the two instances that should be omitted
"""
pass
def _same_configuration(self, sm2):
"""Compare to another StatsManager instance to return if they have the same configuration
Parameters
----------
sm2 : StatsManager
Another StatsManager instance to compare with
Returns
-------
same_config : bool
Equivalence of the configuration
"""
return \
set([type(s) for s in self._stats]) == set([type(s) for s in sm2._stats]) and \
set([type(s) for s in self._derived_stats]) == set([type(s) for s in sm2._derived_stats]) and \
set([type(s) for s in self._special_stats]) == set([type(s) for s in sm2._special_stats])
class ChampionStatsManager(StatsManager):
"""Manager for Stats at Champion level
    This manager is intended for Stats that require one row per participant
Parameters
----------
stats : list of Stats
List of all instantiated Stats to be computed
rank_manager : RankManager
Manager for players rank
Raises
------
MissingRequiredStats
        If a stat required by the Derived Stats is missing
"""
def __init__(self, stats, rank_manager):
self._stats_participants = []
self._champion_bans = []
self._rank_manager = rank_manager
game_fields = []
participant_fields = []
stats_fields = []
id_fields = []
for s in stats:
game_fields += s.get_game_fields_required()
participant_fields += s.get_participant_fields_required()
stats_fields += s.get_stats_fields_required()
id_fields += s.get_id_fields_required()
self._game_fields = list(set(game_fields))
self._participant_fields = list(set(participant_fields))
self._stats_fields = list(set(stats_fields))
self._id_fields = list(set(id_fields))
self._stats = [s for s in stats if not issubclass(s.__class__, SpecialStats) and not issubclass(s.__class__, DerivedStats)]
self._derived_stats = sorted([s for s in stats if not issubclass(s.__class__, SpecialStats) and issubclass(s.__class__, DerivedStats)], key=lambda s: s.priority)
self._special_stats = [s for s in stats if issubclass(s.__class__, SpecialStats)]
self._ban_stats = any([issubclass(s.__class__, ChampionBanStats) for s in stats])
for s in self._special_stats:
s.set_rank_manager(self._rank_manager)
# Listing all required stats for derived stats
derived_required = list(set([i for j in self._derived_stats for i in j.get_stats_required()]))
if not all([any([isinstance(s, d) for s in stats]) for d in derived_required]):
raise MissingRequiredStats
def push_game(self, match_data):
for s in self._special_stats:
s.push_game(match_data)
if len(self._id_fields) > 0:
ids = {}
for p in match_data["participantIdentities"]:
ids[p["participantId"]] = {}
for i in self._id_fields:
ids[p["participantId"]][i] = p["player"][i]
for p in match_data["participants"]:
row = {f:p[f] for f in self._participant_fields}
row.update({f:p["stats"][f] for f in self._stats_fields})
row.update({f:match_data[f] for f in self._game_fields})
if len(self._id_fields) > 0:
row.update({f:ids[p["participantId"]][f] for f in self._id_fields})
self._stats_participants.append(row)
if self._ban_stats:
for t in match_data["teams"]:
for b in t["bans"]:
self._champion_bans.append({"gameId":match_data["gameId"], "championId":b["championId"]})
def get_stats(self):
df = pd.DataFrame(self._stats_participants)
if "summonerId" in df.columns.values:
df["league"] = df["summonerId"].map(lambda x: self._rank_manager.get_rank(x))
if self._ban_stats:
df_bans = pd.DataFrame(self._champion_bans)
if "summonerId" in df_bans.columns.values:
df_bans["league"] = df_bans["summonerId"].map(lambda x: self._rank_manager.get_rank(x))
stats = {s.name:s.get_stats(df if not issubclass(s.__class__, ChampionBanStats) else (df,df_bans)) for s in self._stats}
stats.update({s.name:s.get_stats() for s in self._special_stats})
for s in self._derived_stats:
stats.update({s.name:s.get_stats(df if not issubclass(s.__class__, ChampionBanStats) else (df,df_bans), stats)})
return pd.DataFrame(stats).fillna(0, downcast="infer")
def merge(self, sm2, redundant_games=[]):
if redundant_games == []:
self._stats_participants += sm2._stats_participants
self._champion_bans += sm2._champion_bans
else:
df = pd.DataFrame(sm2._stats_participants)
self._stats_participants += df[~df["gameId"].isin(redundant_games)].to_dict("records")
if len(sm2._champion_bans) > 0:
df = pd.DataFrame(sm2._champion_bans)
self._champion_bans += df[~df["gameId"].isin(redundant_games)].to_dict("records")
class ChampionDuplicateStatsManager(StatsManager):
"""Manager for Stats at Champion level, duplicated by league
    This manager is intended for Stats that require one row per participant and per league, e.g. champion rate stats per league
Parameters
----------
stats : list of Stats
List of all instantiated Stats to be computed
rank_manager : RankManager
Manager for players rank
Raises
------
MissingRequiredStats
        If a stat required by the Derived Stats is missing
"""
def __init__(self, stats, rank_manager):
self._stats_participants = []
self._champion_bans = []
self._rank_manager = rank_manager
game_fields = []
participant_fields = []
stats_fields = []
id_fields = []
for s in stats:
game_fields += s.get_game_fields_required()
participant_fields += s.get_participant_fields_required()
stats_fields += s.get_stats_fields_required()
id_fields += s.get_id_fields_required()
self._game_fields = list(set(game_fields))
self._participant_fields = list(set(participant_fields))
self._stats_fields = list(set(stats_fields))
self._id_fields = list(set(id_fields))
self._stats = [s for s in stats if not issubclass(s.__class__, SpecialStats) and not issubclass(s.__class__, DerivedStats)]
self._derived_stats = sorted([s for s in stats if not issubclass(s.__class__, SpecialStats) and issubclass(s.__class__, DerivedStats)], key=lambda s: s.priority)
self._special_stats = [s for s in stats if issubclass(s.__class__, SpecialStats)]
for s in self._special_stats:
s.set_rank_manager(self._rank_manager)
self._ban_stats = any([issubclass(s.__class__, ChampionBanStats) for s in stats])
# Listing all required stats for derived stats
derived_required = list(set([i for j in self._derived_stats for i in j.get_stats_required()]))
if not all([any([isinstance(s, d) for s in stats]) for d in derived_required]):
raise MissingRequiredStats
def push_game(self, match_data):
for s in self._special_stats:
s.push_game(match_data)
if len(self._id_fields) > 0:
ids = {}
for p in match_data["participantIdentities"]:
ids[p["participantId"]] = {}
for i in self._id_fields:
ids[p["participantId"]][i] = p["player"][i]
for p in match_data["participants"]:
row = {f:p[f] for f in self._participant_fields}
row.update({f:p["stats"][f] for f in self._stats_fields})
row.update({f:match_data[f] for f in self._game_fields})
if len(self._id_fields) > 0:
row.update({f:ids[p["participantId"]][f] for f in self._id_fields})
self._stats_participants.append(row)
if self._ban_stats:
for t in match_data["teams"]:
for b in t["bans"]:
self._champion_bans.append({"gameId":match_data["gameId"], "championId":b["championId"], "summonerId": ids[b["pickTurn"]]["summonerId"]})
def get_stats(self):
df = pd.DataFrame(self._stats_participants)
df["league"] = df["summonerId"].map(lambda x: self._rank_manager.get_rank(x))
# Creating the list of different leagues present in each game
league_per_gameId = df.groupby(["gameId"])["league"].unique()
@lru_cache(maxsize=1)
def get_league_per_gameId(gameId):
return league_per_gameId.loc[gameId]
# One entry for each different league in the game
entries = []
for i, row in df.iterrows():
for lbg in get_league_per_gameId(row["gameId"]):
row["league"] = lbg
entries.append(row.to_dict())
# Dataframe with the duplicates
df_entries = pd.DataFrame(entries)
if self._ban_stats:
df_bans = pd.DataFrame(self._champion_bans)
df_bans["league"] = df_bans["summonerId"].map(lambda x: self._rank_manager.get_rank(x))
entries = []
for i, row in df_bans.iterrows():
for lbg in get_league_per_gameId(row["gameId"]):
row["league"] = lbg
entries.append(row.to_dict())
df_bans_entries = pd.DataFrame(entries)
stats = {s.name:s.get_stats(df_entries if not issubclass(s.__class__, ChampionBanStats) else (df_entries,df_bans_entries)) for s in self._stats}
stats.update({s.name:s.get_stats() for s in self._special_stats})
for s in self._derived_stats:
stats.update({s.name:s.get_stats(df_entries if not issubclass(s.__class__, ChampionBanStats) else (df_entries,df_bans_entries), stats)})
return pd.DataFrame(stats).fillna(0, downcast="infer")
def merge(self, sm2, redundant_games=[]):
if redundant_games == []:
self._stats_participants += sm2._stats_participants
self._champion_bans += sm2._champion_bans
else:
df = pd.DataFrame(sm2._stats_participants)
self._stats_participants += df[~df["gameId"].isin(redundant_games)].to_dict("records")
if len(sm2._champion_bans) > 0:
df =
|
pd.DataFrame(sm2._champion_bans)
|
pandas.DataFrame
|
"""
Description:
-----------
This script contains methods to do the feature engineering. In other words, the functions in here help us
process the >10,000 storms in a timely manner.
Author:
-----------
<NAME>
"""
import numpy as np
import h5py
import pandas as pd
import warnings
#suppress warnings because i want to see my progress bar on one line
warnings.filterwarnings('ignore')
# Enter path to the SEVIR data location
DATA_PATH = '/path/to/sevir/data'
CATALOG_PATH = '/path/to/sevir/CATALOG.csv'
def read_data(sample_event, img_type, data_path=DATA_PATH,fillna=True):
"""
This function was written by the creators of the SEVIR data.
Reads single SEVIR event for a given image type.
Parameters
----------
sample_event pd.DataFrame
SEVIR catalog rows matching a single ID
img_type str
SEVIR image type
data_path str
Location of SEVIR data
Returns
-------
np.array
LxLx49 tensor containing event data
"""
fn = sample_event[sample_event.img_type==img_type].squeeze().file_name
fi = sample_event[sample_event.img_type==img_type].squeeze().file_index
with h5py.File(data_path + '/' + fn,'r') as hf:
data=hf[img_type][fi]
if fillna:
if (img_type =='vil') or (img_type =='vis'):
data = np.asarray(data,dtype=np.float32)
data[data < 0] = np.nan
else:
data = np.asarray(data,dtype=np.float32)
data[data < -30000] = np.nan
return data
def lght_to_grid(data,fillna=True):
"""
This function was written by the creators of the SEVIR data.
Converts SEVIR lightning data stored in Nx5 matrix to an LxLx49 tensor representing
flash counts per pixel per frame
Parameters
----------
data np.array
SEVIR lightning event (Nx5 matrix)
Returns
-------
np.array
LxLx49 tensor containing pixel counts
"""
FRAME_TIMES = np.arange(-120.0,125.0,5) * 60 # in seconds
out_size = (48,48,len(FRAME_TIMES))
if data.shape[0]==0:
return np.zeros(out_size,dtype=np.float32)
# filter out points outside the grid
x,y=data[:,3],data[:,4]
m=np.logical_and.reduce( [x>=0,x<out_size[0],y>=0,y<out_size[1]] )
data=data[m,:]
if data.shape[0]==0:
return np.zeros(out_size,dtype=np.float32)
# Filter/separate times
    # compute z coordinate based on bin location times
t=data[:,0]
z=np.digitize(t,FRAME_TIMES)-1
z[z==-1]=0 # special case: frame 0 uses lght from frame 1
x=data[:,3].astype(np.int64)
y=data[:,4].astype(np.int64)
k=np.ravel_multi_index(np.array([y,x,z]),out_size)
n = np.bincount(k,minlength=np.prod(out_size))
d = np.reshape(n,out_size).astype(np.float32)
if fillna:
data = np.asarray(data,dtype=np.float32)
d[d<0] = np.nan
return d
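# Synthetic sanity check for lght_to_grid: three fake flashes inside the 48x48 grid should give a
# 48x48x49 tensor whose total count equals the number of flashes. The rows follow the Nx5 layout
# used above, where only columns 0 (time in seconds), 3 (x pixel) and 4 (y pixel) are consumed.
def _demo_lght_to_grid():
    flashes = np.array([[0.0, 0.0, 0.0, 10, 12],
                        [300.0, 0.0, 0.0, 10, 12],
                        [-600.0, 0.0, 0.0, 47, 47]])
    grid = lght_to_grid(flashes)
    return grid.shape, np.nansum(grid)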
def read_lght_data( sample_event, data_path=DATA_PATH):
"""
This function was written by the creators of the SEVIR data.
Reads lght data from SEVIR and maps flash counts onto a grid
Parameters
----------
sample_event pd.DataFrame
SEVIR catalog rows matching a single ID
data_path str
Location of SEVIR data
Returns
-------
np.array
LxLx49 tensor containing pixel counts for selected event
"""
fn = sample_event[sample_event.img_type=='lght'].squeeze().file_name
id = sample_event[sample_event.img_type=='lght'].squeeze().id
with h5py.File(data_path + '/' + fn,'r') as hf:
data = hf[id][:]
return lght_to_grid(data)
def retrieve_stats_par(event):
"""
    Takes a single event, opens all data for that event, drops 0s/NaNs, calculates percentiles, and counts lightning.
Designed to be modular for parallel use (i.e., multiprocessing)
Parameters
----------
event string
SEVIR catalog group, from events.groups.keys()
Returns
-------
list
list of dataframes, each entry is for a variable
"""
# Read catalog
catalog = pd.read_csv(CATALOG_PATH,parse_dates=['time_utc'],low_memory=False)
# Desired image types
img_types = set(['vis','ir069','ir107','vil'])
# Group by event id, and filter to only events that have all desired img_types
events = catalog.groupby('id').filter(lambda x: img_types.issubset(set(x['img_type']))).groupby('id')
#this prevents certain files from breaking it
try:
#get event details
sample_event = events.get_group(event)
#get meta to pass through
meta = grab_meta(sample_event)
#load data
ir609 = read_data(sample_event, 'ir069')
ir107 = read_data(sample_event, 'ir107')
vil = read_data(sample_event, 'vil')
vil = get_right_units_vil(vil)
vis = read_data(sample_event, 'vis')
lght = read_lght_data(sample_event)
#get traditional ML params (features; X)
q = [0,1,10,25,50,75,90,99,100]
ir = make_stats_df(ir107,q,meta)
wv = make_stats_df(ir609,q,meta)
vis = make_stats_df(vis,q,meta)
vil = make_stats_df(vil,q,meta)
#get labels (y)
li = get_counts_df(lght,meta)
#return list of dataframes to concat
return [ir,wv,vis,vil,li]
    except Exception:
#return nan if broken
return [np.nan]
def make_stats_df(data,q,meta):
""" Abstract percentile function """
mat = np.nanpercentile(data,q,axis=(0,1)).T
df = pd.DataFrame(mat)
df = df.set_index(meta['time'])
    header = np.asarray(q, dtype=str)  # builtin str: the np.str alias has been removed from recent numpy releases
header = np.char.rjust(header, 3,'0')
header = np.char.rjust(header, 4,'q')
df.columns = header
df['event'] = meta['event_t']
return df
def get_counts_df(data,meta):
mat = np.nansum(data,axis=(0,1))[:,np.newaxis]
df = pd.DataFrame(mat)
df = df.set_index(meta['time'])
df.columns = ['c']
df['event'] = meta['event_t']
return df
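# Tiny synthetic check for make_stats_df / get_counts_df. The meta dict below is a stand-in for
# grab_meta's output: a 49-step time index plus one event label per frame (both are assumptions).
def _demo_stats_frames():
    rng = np.random.default_rng(0)
    data = rng.random((48, 48, 49))
    meta = {'time': pd.date_range('2019-06-01', periods=49, freq='5min'),
            'event_t': ['Hail'] * 49}
    q = [0, 25, 50, 75, 100]
    return make_stats_df(data, q, meta), get_counts_df(data, meta)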
def grab_meta(sample_event):
event_type = np.tile(np.asarray([sample_event.event_type.iloc[0],]),(49,1))
time = np.tile(np.asarray(np.datetime64(sample_event.time_utc.iloc[0])),(49,))
time_off = sample_event.minute_offsets.iloc[0]
time_off = np.asarray(time_off.split(':'),dtype=np.int64)
timedelta =
|
pd.Timedelta(minutes=1)
|
pandas.Timedelta
|
"""The plain video image provider provides image from any video supported by openCV with manual synchronization."""
from typing import List, Generator, Tuple
import cv2
import pandas as pd
from sevivi.log import logger
from .video_provider import VideoImageProvider
from ..dimensions import Dimensions
logger = logger.getChild("plain_video_image_provider")
class PlainVideoImageProvider(VideoImageProvider):
"""The plain video image provider provides image from any video supported by openCV with manual synchronization."""
def __init__(self, video_path: str):
self.__video = cv2.VideoCapture(video_path)
def get_sync_dataframe(self, column_names: List[str]) -> None:
"""
Plain video doesn't have any data to synchronize against, so the sync dataframe is None.
"""
return None
def images(self) -> Generator[Tuple[pd.Timestamp, bytes], None, None]:
"""Generate the images to be shown together with their timestamps"""
while self.__video.isOpened():
frame_exists, frame = self.__video.read()
if frame_exists:
ts = self.__video.get(cv2.CAP_PROP_POS_MSEC)
yield
|
pd.to_datetime(ts, unit="ms")
|
pandas.to_datetime
|